org.apache.zookeeper.KeeperException.NoNodeException Scala Examples

The following examples show how to use org.apache.zookeeper.KeeperException.NoNodeException in Scala, drawn from several open-source projects; the source file and originating project are noted above each example.
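Across these examples the same idiom recurs: NoNodeException is treated as an expected "znode absent" signal and converted into an empty value (None, an empty tuple member, or a warning) rather than propagated as an error. Below is a minimal, self-contained sketch of that idiom using a Curator client; the connection string, retry policy, and path are illustrative placeholders, not taken from any example that follows.

import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException.NoNodeException

object NoNodeIdiom {
  def main(args: Array[String]): Unit = {
    // Illustrative connection settings.
    val client = CuratorFrameworkFactory.newClient(
      "localhost:2181", new ExponentialBackoffRetry(1000, 3))
    client.start()

    // Map "node does not exist" to None instead of letting the exception escape.
    def readMaybe(path: String): Option[Array[Byte]] =
      try Some(client.getData.forPath(path))
      catch { case _: NoNodeException => None }

    println(readMaybe("/probably/missing")) // None when the znode is absent
    client.close()
  }
}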
Example 1
Source File: MesosClusterPersistenceEngine.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.scheduler.cluster.mesos

import scala.collection.JavaConverters._

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode
import org.apache.zookeeper.KeeperException.NoNodeException

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils


private[spark] class ZookeeperMesosClusterPersistenceEngine(
    baseDir: String,
    zk: CuratorFramework,
    conf: SparkConf)
  extends MesosClusterPersistenceEngine with Logging {
  private val WORKING_DIR =
    conf.get("spark.deploy.zookeeper.dir", "/spark_mesos_dispatcher") + "/" + baseDir

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)

  def path(name: String): String = {
    WORKING_DIR + "/" + name
  }

  override def expunge(name: String): Unit = {
    zk.delete().forPath(path(name))
  }

  override def persist(name: String, obj: Object): Unit = {
    val serialized = Utils.serialize(obj)
    val zkPath = path(name)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(zkPath, serialized)
  }

  override def fetch[T](name: String): Option[T] = {
    val zkPath = path(name)

    try {
      val fileData = zk.getData().forPath(zkPath)
      Some(Utils.deserialize[T](fileData))
    } catch {
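      // A missing znode just means nothing was persisted under this name;
      // any other failure is treated as unreadable data and handled below.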
      case e: NoNodeException => None
      case e: Exception =>
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(zkPath)
        None
    }
  }

  override def fetchAll[T](): Iterable[T] = {
    zk.getChildren.forPath(WORKING_DIR).asScala.flatMap(fetch[T])
  }
} 
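A hedged sketch of how this engine might be exercised. SparkCuratorUtil.newClient and the engine itself are private[spark], so this would have to compile under the org.apache.spark package hierarchy; MyState, the base directory name, and the keys are illustrative placeholders.

import org.apache.curator.framework.CuratorFramework
import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil

object PersistenceEngineSketch {
  case class MyState(id: String)

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().set("spark.deploy.zookeeper.url", "localhost:2181")
    val zk: CuratorFramework = SparkCuratorUtil.newClient(conf)
    val engine = new ZookeeperMesosClusterPersistenceEngine("drivers", zk, conf)

    engine.persist("driver-0", MyState("driver-0"))   // serialize and create the znode
    val restored: Option[MyState] = engine.fetch[MyState]("driver-0")
    engine.expunge("driver-0")                        // delete the znode
  }
}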
Example 2
Source File: ZkUtils.scala    From CMAK   with Apache License 2.0
package kafka.manager.utils

import java.nio.charset.StandardCharsets

import org.apache.curator.framework.CuratorFramework
import org.apache.kafka.common.TopicPartition
import org.apache.zookeeper.CreateMode
import org.apache.zookeeper.KeeperException.{NoNodeException, NodeExistsException}
import org.apache.zookeeper.data.Stat


object ZkUtils {

  // toJson is the JSON serialization helper defined elsewhere in the original source file.
  def replicaAssignmentZkData(map: Map[String, Seq[Int]]): String = {
    toJson(Map("version" -> 1, "partitions" -> map))
  }

  def readData(curator: CuratorFramework, path: String): (String, Stat) = {
    val stat: Stat = new Stat()
    val bytes = curator.getData.storingStatIn(stat).forPath(path)
    val dataStr: String = new String(bytes, StandardCharsets.UTF_8)
    (dataStr, stat)
  }
  
  def readDataMaybeNull(curator: CuratorFramework, path: String): (Option[String], Stat) = {
    val stat: Stat = new Stat()
    try {
      val bytes = curator.getData.storingStatIn(stat).forPath(path)
      val dataStr: String = new String(bytes, StandardCharsets.UTF_8)
      (Option(dataStr), stat)
    } catch {
      case _: NoNodeException => (None, stat)
    }
  }


  def getPartitionReassignmentZkData(partitionsToBeReassigned: Map[TopicPartition, Seq[Int]]): String = {
    toJson(Map("version" -> 1, "partitions" -> partitionsToBeReassigned.map(e => Map("topic" -> e._1.topic, "partition" -> e._1.partition,
      "replicas" -> e._2))))
  }
} 
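A small usage sketch contrasting the two readers, assuming a started CuratorFramework; the wiring and the path are illustrative placeholders.

import org.apache.curator.framework.CuratorFramework

object ReadDataSketch {
  def main(args: Array[String]): Unit = {
    val curator: CuratorFramework = ??? // a started client; wiring omitted here

    // readData throws NoNodeException when the path is absent;
    // readDataMaybeNull converts that case into (None, stat).
    val (maybeData, stat) = ZkUtils.readDataMaybeNull(curator, "/kafka-manager/configs/my-cluster")
    maybeData match {
      case Some(json) => println(s"payload at version ${stat.getVersion}: $json")
      case None       => println("znode does not exist yet")
    }
  }
}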
Example 3
Source File: ZooKeeperManager.scala    From incubator-livy   with Apache License 2.0
package org.apache.livy.server.recovery

import scala.collection.JavaConverters._
import scala.reflect.ClassTag

import org.apache.curator.framework.api.UnhandledErrorListener
import org.apache.curator.framework.CuratorFramework
import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.retry.RetryNTimes
import org.apache.zookeeper.KeeperException.NoNodeException

import org.apache.livy.LivyConf
import org.apache.livy.Logging
import org.apache.livy.utils.LivyUncaughtException

class ZooKeeperManager(
    livyConf: LivyConf,
    mockCuratorClient: Option[CuratorFramework] = None)
  extends JsonMapper with Logging {

  def this(livyConf: LivyConf) {
    this(livyConf, None)
  }

  private val zkAddress = Option(livyConf.get(LivyConf.ZOOKEEPER_URL)).
    orElse(Option(livyConf.get(LivyConf.RECOVERY_STATE_STORE_URL))).
    map(_.trim).orNull

  require(zkAddress != null && !zkAddress.isEmpty,
    s"Please configure ${LivyConf.ZOOKEEPER_URL.key}.")

  private val retryValue = Option(livyConf.get(LivyConf.ZK_RETRY_POLICY)).
    orElse(Option(livyConf.get(LivyConf.RECOVERY_ZK_STATE_STORE_RETRY_POLICY))).
    map(_.trim).orNull

  require(retryValue != null && !retryValue.isEmpty,
    s"Please configure ${LivyConf.ZK_RETRY_POLICY.key}.")

  // a regex to match patterns like "m, n" where m and n both are integer values
  private val retryPattern = """\s*(\d+)\s*,\s*(\d+)\s*""".r
  private[recovery] val retryPolicy = retryValue match {
    case retryPattern(n, sleepMs) => new RetryNTimes(n.toInt, sleepMs.toInt)
    case _ => throw new IllegalArgumentException(
      s"${LivyConf.ZK_RETRY_POLICY.key} contains a bad value: $retryValue. " +
        "The correct format is <max retry count>,<sleep ms between retries>, e.g. 5,100")
  }

  private val curatorClient = mockCuratorClient.getOrElse {
    CuratorFrameworkFactory.newClient(zkAddress, retryPolicy)
  }

  curatorClient.getUnhandledErrorListenable().addListener(new UnhandledErrorListener {
    def unhandledError(message: String, e: Throwable): Unit = {
      error(s"Fatal Zookeeper error: ${message}.", e)
      throw new LivyUncaughtException(e.getMessage)
    }
  })

  def start(): Unit = {
    curatorClient.start()
  }

  def stop(): Unit = {
    curatorClient.close()
  }

  // TODO Make sure ZK path has proper secure permissions so that other users cannot read its
  // contents.
  def set(key: String, value: Object): Unit = {
    val data = serializeToBytes(value)
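    // Note: checkExists-then-create is not atomic, so a concurrent writer could
    // still trigger a NodeExistsException between the check and the create.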
    if (curatorClient.checkExists().forPath(key) == null) {
      curatorClient.create().creatingParentsIfNeeded().forPath(key, data)
    } else {
      curatorClient.setData().forPath(key, data)
    }
  }

  def get[T: ClassTag](key: String): Option[T] = {
    if (curatorClient.checkExists().forPath(key) == null) {
      None
    } else {
      Option(deserialize[T](curatorClient.getData().forPath(key)))
    }
  }

  def getChildren(key: String): Seq[String] = {
    if (curatorClient.checkExists().forPath(key) == null) {
      Seq.empty[String]
    } else {
      curatorClient.getChildren.forPath(key).asScala
    }
  }

  def remove(key: String): Unit = {
    try {
      curatorClient.delete().guaranteed().forPath(key)
    } catch {
      case _: NoNodeException => warn(s"Failed to remove nonexistent ZooKeeper node: ${key}")
    }
  }
} 
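A hedged sketch of driving this manager; the configuration values and paths are illustrative, and SessionState stands in for whatever JSON-serializable state Livy actually stores.

import org.apache.livy.LivyConf

object ZooKeeperManagerSketch {
  case class SessionState(id: Int, owner: String)

  def main(args: Array[String]): Unit = {
    val livyConf = new LivyConf()
    livyConf.set(LivyConf.ZOOKEEPER_URL, "localhost:2181")
    livyConf.set(LivyConf.ZK_RETRY_POLICY, "5,100")

    val zkManager = new ZooKeeperManager(livyConf)
    zkManager.start()

    zkManager.set("/livy/sessions/0", SessionState(0, "alice"))  // create-or-update
    val state = zkManager.get[SessionState]("/livy/sessions/0")  // Some(...) or None
    zkManager.remove("/livy/sessions/0")                         // warns if already gone
    zkManager.stop()
  }
}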
Example 4
Source File: MesosClusterPersistenceEngine.scala    From iolap   with Apache License 2.0
package org.apache.spark.scheduler.cluster.mesos

import scala.collection.JavaConversions._

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode
import org.apache.zookeeper.KeeperException.NoNodeException

import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.util.Utils


private[spark] class ZookeeperMesosClusterPersistenceEngine(
    baseDir: String,
    zk: CuratorFramework,
    conf: SparkConf)
  extends MesosClusterPersistenceEngine with Logging {
  private val WORKING_DIR =
    conf.get("spark.deploy.zookeeper.dir", "/spark_mesos_dispatcher") + "/" + baseDir

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)

  def path(name: String): String = {
    WORKING_DIR + "/" + name
  }

  override def expunge(name: String): Unit = {
    zk.delete().forPath(path(name))
  }

  override def persist(name: String, obj: Object): Unit = {
    val serialized = Utils.serialize(obj)
    val zkPath = path(name)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(zkPath, serialized)
  }

  override def fetch[T](name: String): Option[T] = {
    val zkPath = path(name)

    try {
      val fileData = zk.getData().forPath(zkPath)
      Some(Utils.deserialize[T](fileData))
    } catch {
      case e: NoNodeException => None
      case e: Exception => {
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(zkPath)
        None
      }
    }
  }

  override def fetchAll[T](): Iterable[T] = {
    zk.getChildren.forPath(WORKING_DIR).map(fetch[T]).flatten
  }
} 
Example 5
Source File: MesosClusterPersistenceEngine.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.scheduler.cluster.mesos

import scala.collection.JavaConverters._

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode
import org.apache.zookeeper.KeeperException.NoNodeException

import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.util.Utils


private[spark] class ZookeeperMesosClusterPersistenceEngine(
    baseDir: String,
    zk: CuratorFramework,
    conf: SparkConf)
  extends MesosClusterPersistenceEngine with Logging {
  private val WORKING_DIR =
    conf.get("spark.deploy.zookeeper.dir", "/spark_mesos_dispatcher") + "/" + baseDir

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)

  def path(name: String): String = {
    WORKING_DIR + "/" + name
  }

  override def expunge(name: String): Unit = {
    zk.delete().forPath(path(name))
  }

  override def persist(name: String, obj: Object): Unit = {
    val serialized = Utils.serialize(obj)
    val zkPath = path(name)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(zkPath, serialized)
  }

  override def fetch[T](name: String): Option[T] = {
    val zkPath = path(name)

    try {
      val fileData = zk.getData().forPath(zkPath)
      Some(Utils.deserialize[T](fileData))
    } catch {
      case e: NoNodeException => None
      case e: Exception => {
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(zkPath)
        None
      }
    }
  }

  override def fetchAll[T](): Iterable[T] = {
    zk.getChildren.forPath(WORKING_DIR).asScala.flatMap(fetch[T])
  }
} 
Example 6
Source File: LoadContentOnStartup.scala    From metronome   with Apache License 2.0
package dcos.metronome
package repository

import akka.actor.{Actor, ActorLogging, Stash}
import mesosphere.marathon.StoreCommandFailedException
import org.apache.zookeeper.KeeperException.NoNodeException

import scala.concurrent.Future
import scala.util.control.NonFatal
import scala.util.{Failure, Success}

trait LoadContentOnStartup[Id, Model] extends Actor with Stash with ActorLogging {
  import LoadContentOnStartup._

  //TODO: change me to zk ec
  import context.dispatcher

  override def preStart(): Unit = {
    super.preStart()
    context.become(waitForInit)
    loadAll()
  }

  def repo: Repository[Id, Model]
  def initialize(specs: List[Model]): Unit

  def waitForInit: Receive = {
    case init: Init[Model] =>
      initialize(init.result)
      context.become(receive)
      unstashAll()
    case _ => stash()
  }

  def loadAll(): Unit = {
    val loadAllFuture = repo.ids().flatMap { ids =>
      Future.sequence(ids.map(id => getModel(id))).map(_.flatten.toList)
    }
    val me = self
    loadAllFuture.onComplete {
      case Success(result) => me ! Init(result)
      case Failure(ex) =>
        log.error(ex, "Cannot load initial data. Giving up.")
        System.exit(-1)
    }
  }

  private def getModel(id: Id): Future[Option[Model]] = {
    repo.get(id).recoverWith {
      case ex: StoreCommandFailedException =>
        ex.getCause match {
          case cause: NoNodeException =>
            log.error(
              s"ID $id or job-specs znode missing. ZooKeeper will need to be manually repaired. Exception message: ${cause.getMessage}"
            )
            Future.successful(None)
          case NonFatal(cause) =>
            log.error(s"Unexpected exception occurred while reading zk at startup. Exception message: ${cause.getMessage}")
            // We need a crash strategy similar to Marathon's; for now we cannot continue after such a zk failure.
            System.exit(-1)
            Future.failed(cause)
        }
    }
  }
}

object LoadContentOnStartup {
  case class Init[T](result: List[T])
}
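A minimal sketch of an actor mixing in this trait; JobSpec and the repository wiring are illustrative stand-ins for metronome's real types, and Repository is the repository abstraction the trait itself references.

import akka.actor.Props

object JobSpecLoaderSketch {
  case class JobSpec(id: String)

  class JobSpecLoader(val repo: Repository[String, JobSpec])
      extends LoadContentOnStartup[String, JobSpec] {

    // Invoked once all specs have been fetched (the Init message).
    override def initialize(specs: List[JobSpec]): Unit =
      log.info(s"Loaded ${specs.size} job specs")

    // Normal behavior; messages arriving before Init are stashed by the trait.
    override def receive: Receive = {
      case spec: JobSpec => log.info(s"Updated job spec ${spec.id}")
    }
  }

  def props(repo: Repository[String, JobSpec]): Props = Props(new JobSpecLoader(repo))
}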