org.apache.curator.framework.CuratorFramework Scala Examples

The following examples show how to use org.apache.curator.framework.CuratorFramework in Scala. Each example is drawn from the open-source project named in its header.
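Before the project examples, here is a minimal, self-contained sketch of creating and using a client. The connection string, retry settings, and znode path are illustrative assumptions, not taken from any project below:

import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.retry.ExponentialBackoffRetry

object CuratorQuickStart extends App {
  // Retry with exponential backoff: 1 s base sleep, at most 3 retries.
  val client = CuratorFrameworkFactory.newClient(
    "localhost:2181", new ExponentialBackoffRetry(1000, 3))
  client.start() // must be called before any ZooKeeper operation

  try {
    // Create a persistent znode if it does not already exist.
    if (client.checkExists().forPath("/example") == null) {
      client.create().creatingParentsIfNeeded().forPath("/example")
    }
  } finally {
    client.close()
  }
}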
Example 1
Source File: ZooKeeperLeaderElectionAgent.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.deploy.master

import org.apache.spark.{ Logging, SparkConf }
import org.apache.curator.framework.CuratorFramework
import org.apache.curator.framework.recipes.leader.{ LeaderLatchListener, LeaderLatch }
import org.apache.spark.deploy.SparkCuratorUtil

private[master] class ZooKeeperLeaderElectionAgent(val masterInstance: LeaderElectable,
    conf: SparkConf) extends LeaderLatchListener with LeaderElectionAgent with Logging {
  // Directory where ZooKeeper stores recovery state; defaults to /spark
  val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/leader_election"

  private var zk: CuratorFramework = _
  private var leaderLatch: LeaderLatch = _
  private var status = LeadershipStatus.NOT_LEADER

  start()

  private def start() {
    logInfo("Starting ZooKeeper LeaderElection agent")
    zk = SparkCuratorUtil.newClient(conf)
    leaderLatch = new LeaderLatch(zk, WORKING_DIR)
    leaderLatch.addListener(this) // this agent implements LeaderLatchListener
    leaderLatch.start() // start contending in the leader election
  }

  override def stop() {
    leaderLatch.close()
    zk.close()
  }

  override def isLeader() {
    synchronized {
      // could have lost leadership by now.
      // the leadership may have changed again, so re-confirm before acting
      if (!leaderLatch.hasLeadership) {
        return
      }

      logInfo("We have gained leadership")
      // we have been elected leader
      updateLeadershipStatus(true)
    }
  }

  override def notLeader() {
    synchronized {
      // could have gained leadership by now.
      // the leadership may have changed again, so re-confirm before acting
      if (leaderLatch.hasLeadership) {
        return
      }
      // leadership has been revoked
      logInfo("We have lost leadership")
      updateLeadershipStatus(false)
    }
  }

  private def updateLeadershipStatus(isLeader: Boolean) {
    if (isLeader && status == LeadershipStatus.NOT_LEADER) {
      status = LeadershipStatus.LEADER
      masterInstance.electedLeader() // the Master has been elected leader
    } else if (!isLeader && status == LeadershipStatus.LEADER) {
      status = LeadershipStatus.NOT_LEADER
      masterInstance.revokedLeadership() // the Master's leadership has been revoked
    }
  }

  private object LeadershipStatus extends Enumeration {
    type LeadershipStatus = Value
    val LEADER, NOT_LEADER = Value
  }
} 
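The agent above wires the latch callbacks into Spark's Master. For context, a hedged sketch of driving Curator's LeaderLatch recipe directly, outside Spark (connection string and latch path are illustrative):

import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.framework.recipes.leader.LeaderLatch
import org.apache.curator.retry.ExponentialBackoffRetry

object LeaderLatchDemo extends App {
  val client = CuratorFrameworkFactory.newClient(
    "localhost:2181", new ExponentialBackoffRetry(1000, 3))
  client.start()

  val latch = new LeaderLatch(client, "/demo/leader_election")
  latch.start() // join the election
  latch.await() // block until this participant is elected leader
  try {
    println(s"leader? ${latch.hasLeadership}")
  } finally {
    latch.close() // relinquish leadership
    client.close()
  }
}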
Example 2
Source File: ZooKeeperPersistenceEngine.scala    From SparkCore   with Apache License 2.0
package org.apache.spark.deploy.master

import akka.serialization.Serialization

import scala.collection.JavaConversions._
import scala.reflect.ClassTag

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode

import org.apache.spark.{Logging, SparkConf}


private[spark] class ZooKeeperPersistenceEngine(conf: SparkConf, val serialization: Serialization)
  extends PersistenceEngine
  with Logging
{
  val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/master_status"
  val zk: CuratorFramework = SparkCuratorUtil.newClient(conf)

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)


  override def persist(name: String, obj: Object): Unit = {
    serializeIntoFile(WORKING_DIR + "/" + name, obj)
  }

  override def unpersist(name: String): Unit = {
    zk.delete().forPath(WORKING_DIR + "/" + name)
  }

  override def read[T: ClassTag](prefix: String) = {
    val file = zk.getChildren.forPath(WORKING_DIR).filter(_.startsWith(prefix))
    file.map(deserializeFromFile[T]).flatten
  }

  override def close() {
    zk.close()
  }

  private def serializeIntoFile(path: String, value: AnyRef) {
    val serializer = serialization.findSerializerFor(value)
    val serialized = serializer.toBinary(value)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(path, serialized)
  }

  def deserializeFromFile[T](filename: String)(implicit m: ClassTag[T]): Option[T] = {
    val fileData = zk.getData().forPath(WORKING_DIR + "/" + filename)
    val clazz = m.runtimeClass.asInstanceOf[Class[T]]
    val serializer = serialization.serializerFor(clazz)
    try {
      Some(serializer.fromBinary(fileData).asInstanceOf[T])
    } catch {
      case e: Exception => {
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(WORKING_DIR + "/" + filename)
        None
      }
    }
  }
} 
Example 3
Source File: Conf.scala    From Adenium   with Apache License 2.0
package com.adenium.app.config

import com.adenium.externals.kafka.KfConsumer
import com.adenium.externals.zookeeper.ZkClient
import com.adenium.utils.May.{maybeWarn, maybe}
import org.apache.curator.framework.CuratorFramework


object Conf { // enclosing object restored so the excerpt compiles; the rest of its body is omitted

  def ctrlWatcher(conf: Conf): Option[(CuratorFramework, String)] =
    ZkClient(conf.zkstr).curator.map(_ -> conf.ctrl_zknode)

  /// don't change
  ////////////////////////////////////////////////////////////////////
  val zk_nodebyte = 524255    // just under 512 KiB (512 * 1024 = 524288)
  val zk_sepLine = "\n"
  val zk_sepItem = "\t"
  val zk_sepLiveDetectItem = "\t:\t"
//  val zk_sepLiveDetectItem = ","
} 
Example 4
Source File: ZkWatcher.scala    From Adenium   with Apache License 2.0
package com.adenium.externals.zookeeper

import java.nio.charset.StandardCharsets

import com.adenium.externals.zookeeper.ZkUtil.setPersistent
import com.adenium.utils.Logger
import com.adenium.utils.May._
import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.Watcher.Event.EventType
import org.apache.zookeeper.data.Stat
import org.apache.zookeeper.{KeeperException, WatchedEvent, Watcher}

import scala.language.reflectiveCalls



object ZkWatcher {

  def onZkChange(cur: CuratorFramework, path: String)(handler: (String, Stat) => Unit) {

    Logger.logInfo("[ watchNodeOrChildrenChange ] == zknode : " + path)

    def watcher = new Watcher {
      def process(event: WatchedEvent) {
        Logger.logDebug("[ watchNodeOrChildrenChange ] == callback invoked " + path + "\ttype: " + event.getType)
        event.getType match {
          case EventType.NodeDataChanged | EventType.NodeChildrenChanged => updated()
          case _ => reset()
        }
      }
    }

    def updated() {
      try {
        val stat = new Stat()
        val msg = cur.getData.storingStatIn(stat).forPath(path)

        setPersistent(cur, path, "")

        val str = new String(msg, StandardCharsets.UTF_8)

        if (str.nonEmpty) {
          state("[ Watching ] == arrived msg: " + str)
          handler(str, stat)
        }

        if (str.startsWith("stop zkctrl")) {
          Logger.logWarning("[ Watching ] == stopped by 'stop zkctrl' message : path =" + path)
        } else {
          /// create and attach next msg watcher
          cur.checkExists.usingWatcher(watcher).forPath(path)
        }

      } catch {
        case e: KeeperException =>
          Logger.logWarning("[ watchNodeOrChildrenChange ] == read node: " + path + "\te: " + e)
          reset()
      }
    }

    def reset() {
      setPersistent(cur, path, "")
      updated()
    }

    reset()
  }

} 
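A brief usage sketch for the watcher above (the control path is illustrative; cur must be an already-started CuratorFramework):

// React to every message written to the control node; the handler receives
// the node's content and its Stat metadata.
ZkWatcher.onZkChange(cur, "/adenium/ctrl") { (msg, stat) =>
  println(s"control message: $msg (znode version ${stat.getVersion})")
}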
Example 5
Source File: DLock.scala    From etl-light   with MIT License
package yamrcraft.etlite.utils

import java.util.concurrent.TimeUnit

import org.apache.curator.framework.recipes.locks.InterProcessSemaphoreMutex
import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.slf4j.LoggerFactory


class DLock(zkConnect: String, lockFile: String, waitForLockSeconds: Int) {

  val logger = LoggerFactory.getLogger(this.getClass)

  private var zkClient: Option[CuratorFramework] = None
  private var lock: Option[InterProcessSemaphoreMutex] = None

  def tryLock(): Boolean = {
    require(lock.isEmpty, "lock can't be reused")
    logger.info("acquiring lock...")
    zkClient = Some(CuratorFrameworkFactory.newClient(zkConnect, new ExponentialBackoffRetry(1000, 3)))
    zkClient.get.start()
    lock = Some(new InterProcessSemaphoreMutex(zkClient.get, lockFile))
    lock.get.acquire(waitForLockSeconds, TimeUnit.SECONDS)
  }

  def release() = {
    require(lock.nonEmpty, "lock wasn't acquired")
    logger.info("releasing lock")
    lock.foreach(_.release())
    zkClient.foreach(_.close())
  }

}

class FakeLock extends DLock("", "", 0) {
  override def tryLock() = true

  override def release() = {}
} 
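A possible way to use the DLock class defined above (the ZooKeeper address and lock path are illustrative):

val lock = new DLock("localhost:2181", "/locks/etl-job", waitForLockSeconds = 30)
if (lock.tryLock()) {
  try {
    // critical section: only one process holds /locks/etl-job at a time
  } finally {
    lock.release()
  }
} else {
  // lock not acquired within 30 seconds; skip or retry
}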
Example 6
Source File: MesosClusterPersistenceEngine.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.scheduler.cluster.mesos

import scala.collection.JavaConverters._

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode
import org.apache.zookeeper.KeeperException.NoNodeException

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils


private[spark] class ZookeeperMesosClusterPersistenceEngine(
    baseDir: String,
    zk: CuratorFramework,
    conf: SparkConf)
  extends MesosClusterPersistenceEngine with Logging {
  private val WORKING_DIR =
    conf.get("spark.deploy.zookeeper.dir", "/spark_mesos_dispatcher") + "/" + baseDir

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)

  def path(name: String): String = {
    WORKING_DIR + "/" + name
  }

  override def expunge(name: String): Unit = {
    zk.delete().forPath(path(name))
  }

  override def persist(name: String, obj: Object): Unit = {
    val serialized = Utils.serialize(obj)
    val zkPath = path(name)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(zkPath, serialized)
  }

  override def fetch[T](name: String): Option[T] = {
    val zkPath = path(name)

    try {
      val fileData = zk.getData().forPath(zkPath)
      Some(Utils.deserialize[T](fileData))
    } catch {
      case e: NoNodeException => None
      case e: Exception =>
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(zkPath)
        None
    }
  }

  override def fetchAll[T](): Iterable[T] = {
    zk.getChildren.forPath(WORKING_DIR).asScala.flatMap(fetch[T])
  }
} 
Example 7
Source File: SparkCuratorUtil.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.deploy

import scala.collection.JavaConverters._

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging

private[spark] object SparkCuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: SparkConf,
      zkUrlConf: String = "spark.deploy.zookeeper.url"): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path).asScala) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
} 
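A short sketch of driving the utility above (the ZooKeeper URL and paths are illustrative; conf must carry spark.deploy.zookeeper.url):

val conf = new SparkConf().set("spark.deploy.zookeeper.url", "localhost:2181")
val zk = SparkCuratorUtil.newClient(conf)
try {
  SparkCuratorUtil.mkdir(zk, "/spark/demo")           // no-op if the path exists
  SparkCuratorUtil.deleteRecursive(zk, "/spark/demo") // removes children, then the node
} finally {
  zk.close()
}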
Example 8
Source File: ZooKeeperLeaderElectionAgent.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.deploy.master

import org.apache.curator.framework.CuratorFramework
import org.apache.curator.framework.recipes.leader.{LeaderLatch, LeaderLatchListener}

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.internal.Logging

private[master] class ZooKeeperLeaderElectionAgent(val masterInstance: LeaderElectable,
    conf: SparkConf) extends LeaderLatchListener with LeaderElectionAgent with Logging  {

  val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/leader_election"

  private var zk: CuratorFramework = _
  private var leaderLatch: LeaderLatch = _
  private var status = LeadershipStatus.NOT_LEADER

  start()

  private def start() {
    logInfo("Starting ZooKeeper LeaderElection agent")
    zk = SparkCuratorUtil.newClient(conf)
    leaderLatch = new LeaderLatch(zk, WORKING_DIR)
    leaderLatch.addListener(this)
    leaderLatch.start()
  }

  override def stop() {
    leaderLatch.close()
    zk.close()
  }

  override def isLeader() {
    synchronized {
      // could have lost leadership by now.
      if (!leaderLatch.hasLeadership) {
        return
      }

      logInfo("We have gained leadership")
      updateLeadershipStatus(true)
    }
  }

  override def notLeader() {
    synchronized {
      // could have gained leadership by now.
      if (leaderLatch.hasLeadership) {
        return
      }

      logInfo("We have lost leadership")
      updateLeadershipStatus(false)
    }
  }

  private def updateLeadershipStatus(isLeader: Boolean) {
    if (isLeader && status == LeadershipStatus.NOT_LEADER) {
      status = LeadershipStatus.LEADER
      masterInstance.electedLeader()
    } else if (!isLeader && status == LeadershipStatus.LEADER) {
      status = LeadershipStatus.NOT_LEADER
      masterInstance.revokedLeadership()
    }
  }

  private object LeadershipStatus extends Enumeration {
    type LeadershipStatus = Value
    val LEADER, NOT_LEADER = Value
  }
} 
Example 9
Source File: ZooKeeperPersistenceEngine.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.deploy.master

import java.nio.ByteBuffer

import scala.collection.JavaConverters._
import scala.reflect.ClassTag

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.internal.Logging
import org.apache.spark.serializer.Serializer


private[master] class ZooKeeperPersistenceEngine(conf: SparkConf, val serializer: Serializer)
  extends PersistenceEngine
  with Logging {

  private val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/master_status"
  private val zk: CuratorFramework = SparkCuratorUtil.newClient(conf)

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)


  override def persist(name: String, obj: Object): Unit = {
    serializeIntoFile(WORKING_DIR + "/" + name, obj)
  }

  override def unpersist(name: String): Unit = {
    zk.delete().forPath(WORKING_DIR + "/" + name)
  }

  override def read[T: ClassTag](prefix: String): Seq[T] = {
    zk.getChildren.forPath(WORKING_DIR).asScala
      .filter(_.startsWith(prefix)).flatMap(deserializeFromFile[T])
  }

  override def close() {
    zk.close()
  }

  private def serializeIntoFile(path: String, value: AnyRef) {
    val serialized = serializer.newInstance().serialize(value)
    val bytes = new Array[Byte](serialized.remaining())
    serialized.get(bytes)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(path, bytes)
  }

  private def deserializeFromFile[T](filename: String)(implicit m: ClassTag[T]): Option[T] = {
    val fileData = zk.getData().forPath(WORKING_DIR + "/" + filename)
    try {
      Some(serializer.newInstance().deserialize[T](ByteBuffer.wrap(fileData)))
    } catch {
      case e: Exception =>
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(WORKING_DIR + "/" + filename)
        None
    }
  }
} 
Example 10
Source File: SparkCuratorUtil.scala    From iolap   with Apache License 2.0
package org.apache.spark.deploy

import scala.collection.JavaConversions._

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

import org.apache.spark.{Logging, SparkConf}

private[spark] object SparkCuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: SparkConf,
      zkUrlConf: String = "spark.deploy.zookeeper.url"): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path)) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
} 
Example 11
Source File: ZooKeeperLeaderElectionAgent.scala    From iolap   with Apache License 2.0
package org.apache.spark.deploy.master

import akka.actor.ActorRef

import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.deploy.master.MasterMessages._
import org.apache.curator.framework.CuratorFramework
import org.apache.curator.framework.recipes.leader.{LeaderLatchListener, LeaderLatch}
import org.apache.spark.deploy.SparkCuratorUtil

private[master] class ZooKeeperLeaderElectionAgent(val masterActor: LeaderElectable,
    conf: SparkConf) extends LeaderLatchListener with LeaderElectionAgent with Logging  {

  val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/leader_election"

  private var zk: CuratorFramework = _
  private var leaderLatch: LeaderLatch = _
  private var status = LeadershipStatus.NOT_LEADER

  start()

  private def start() {
    logInfo("Starting ZooKeeper LeaderElection agent")
    zk = SparkCuratorUtil.newClient(conf)
    leaderLatch = new LeaderLatch(zk, WORKING_DIR)
    leaderLatch.addListener(this)
    leaderLatch.start()
  }

  override def stop() {
    leaderLatch.close()
    zk.close()
  }

  override def isLeader() {
    synchronized {
      // could have lost leadership by now.
      if (!leaderLatch.hasLeadership) {
        return
      }

      logInfo("We have gained leadership")
      updateLeadershipStatus(true)
    }
  }

  override def notLeader() {
    synchronized {
      // could have gained leadership by now.
      if (leaderLatch.hasLeadership) {
        return
      }

      logInfo("We have lost leadership")
      updateLeadershipStatus(false)
    }
  }

  private def updateLeadershipStatus(isLeader: Boolean) {
    if (isLeader && status == LeadershipStatus.NOT_LEADER) {
      status = LeadershipStatus.LEADER
      masterActor.electedLeader()
    } else if (!isLeader && status == LeadershipStatus.LEADER) {
      status = LeadershipStatus.NOT_LEADER
      masterActor.revokedLeadership()
    }
  }

  private object LeadershipStatus extends Enumeration {
    type LeadershipStatus = Value
    val LEADER, NOT_LEADER = Value
  }
} 
Example 12
Source File: ZooKeeperPersistenceEngine.scala    From iolap   with Apache License 2.0
package org.apache.spark.deploy.master

import akka.serialization.Serialization

import scala.collection.JavaConversions._
import scala.reflect.ClassTag

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode

import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.deploy.SparkCuratorUtil


private[master] class ZooKeeperPersistenceEngine(conf: SparkConf, val serialization: Serialization)
  extends PersistenceEngine
  with Logging {

  private val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/master_status"
  private val zk: CuratorFramework = SparkCuratorUtil.newClient(conf)

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)


  override def persist(name: String, obj: Object): Unit = {
    serializeIntoFile(WORKING_DIR + "/" + name, obj)
  }

  override def unpersist(name: String): Unit = {
    zk.delete().forPath(WORKING_DIR + "/" + name)
  }

  override def read[T: ClassTag](prefix: String): Seq[T] = {
    val file = zk.getChildren.forPath(WORKING_DIR).filter(_.startsWith(prefix))
    file.map(deserializeFromFile[T]).flatten
  }

  override def close() {
    zk.close()
  }

  private def serializeIntoFile(path: String, value: AnyRef) {
    val serializer = serialization.findSerializerFor(value)
    val serialized = serializer.toBinary(value)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(path, serialized)
  }

  private def deserializeFromFile[T](filename: String)(implicit m: ClassTag[T]): Option[T] = {
    val fileData = zk.getData().forPath(WORKING_DIR + "/" + filename)
    val clazz = m.runtimeClass.asInstanceOf[Class[T]]
    val serializer = serialization.serializerFor(clazz)
    try {
      Some(serializer.fromBinary(fileData).asInstanceOf[T])
    } catch {
      case e: Exception => {
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(WORKING_DIR + "/" + filename)
        None
      }
    }
  }
} 
Example 13
Source File: MesosClusterPersistenceEngine.scala    From iolap   with Apache License 2.0
package org.apache.spark.scheduler.cluster.mesos

import scala.collection.JavaConversions._

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode
import org.apache.zookeeper.KeeperException.NoNodeException

import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.util.Utils


private[spark] class ZookeeperMesosClusterPersistenceEngine(
    baseDir: String,
    zk: CuratorFramework,
    conf: SparkConf)
  extends MesosClusterPersistenceEngine with Logging {
  private val WORKING_DIR =
    conf.get("spark.deploy.zookeeper.dir", "/spark_mesos_dispatcher") + "/" + baseDir

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)

  def path(name: String): String = {
    WORKING_DIR + "/" + name
  }

  override def expunge(name: String): Unit = {
    zk.delete().forPath(path(name))
  }

  override def persist(name: String, obj: Object): Unit = {
    val serialized = Utils.serialize(obj)
    val zkPath = path(name)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(zkPath, serialized)
  }

  override def fetch[T](name: String): Option[T] = {
    val zkPath = path(name)

    try {
      val fileData = zk.getData().forPath(zkPath)
      Some(Utils.deserialize[T](fileData))
    } catch {
      case e: NoNodeException => None
      case e: Exception => {
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(zkPath)
        None
      }
    }
  }

  override def fetchAll[T](): Iterable[T] = {
    zk.getChildren.forPath(WORKING_DIR).map(fetch[T]).flatten
  }
} 
Example 14
Source File: ZookeeperFunSuite.scala    From kyuubi   with Apache License 2.0
package yaooqinn.kyuubi.ha

import com.google.common.io.Files
import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.curator.test.TestingServer
import org.apache.spark.{KyuubiConf, KyuubiSparkUtil, SparkConf, SparkFunSuite}
import org.apache.spark.KyuubiConf._

trait ZookeeperFunSuite extends SparkFunSuite {

  var zkServer: TestingServer = _
  var connectString: String = _
  val conf = new SparkConf(loadDefaults = true)
  KyuubiSparkUtil.setupCommonConfig(conf)
  conf.set(KyuubiConf.FRONTEND_BIND_PORT.key, "0")

  var zooKeeperClient: CuratorFramework = _

  override def beforeAll(): Unit = {
    zkServer = new TestingServer(2181, Files.createTempDir(), true)
    connectString = zkServer.getConnectString
    conf.set(HA_ZOOKEEPER_QUORUM.key, connectString)
    conf.set(HA_ZOOKEEPER_CONNECTION_BASESLEEPTIME.key, "100ms")
    conf.set(HA_ZOOKEEPER_SESSION_TIMEOUT.key, "15s")
    conf.set(HA_ZOOKEEPER_CONNECTION_MAX_RETRIES.key, "0")
    zooKeeperClient = CuratorFrameworkFactory.builder().connectString(connectString)
        .retryPolicy(new ExponentialBackoffRetry(1000, 3))
        .build()
    zooKeeperClient.start()
    super.beforeAll()
  }

  override def afterAll(): Unit = {
    Option(zooKeeperClient).foreach(_.close())
    Option(zkServer).foreach(_.stop())
    System.clearProperty(HA_ZOOKEEPER_QUORUM.key)
    System.clearProperty(HA_ENABLED.key)
    super.afterAll()
  }
} 
Example 15
Source File: SparkCuratorUtil.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.deploy

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.spark.{Logging, SparkConf}
import org.apache.zookeeper.KeeperException

import scala.collection.JavaConversions._

private[spark] object SparkCuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: SparkConf,
      // configuration key holding the ZooKeeper cluster URL
      zkUrlConf: String = "spark.deploy.zookeeper.url"): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
        // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  
  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path)) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
} 
Example 16
Source File: ZooKeeperLeaderElectionAgent.scala    From SparkCore   with Apache License 2.0
package org.apache.spark.deploy.master

import akka.actor.ActorRef

import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.deploy.master.MasterMessages._
import org.apache.curator.framework.CuratorFramework
import org.apache.curator.framework.recipes.leader.{LeaderLatchListener, LeaderLatch}

private[spark] class ZooKeeperLeaderElectionAgent(val masterActor: LeaderElectable,
    conf: SparkConf) extends LeaderLatchListener with LeaderElectionAgent with Logging  {

  val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/leader_election"

  private var zk: CuratorFramework = _
  private var leaderLatch: LeaderLatch = _
  private var status = LeadershipStatus.NOT_LEADER

  start()

  def start() {
    logInfo("Starting ZooKeeper LeaderElection agent")
    zk = SparkCuratorUtil.newClient(conf)
    leaderLatch = new LeaderLatch(zk, WORKING_DIR)
    leaderLatch.addListener(this)
    leaderLatch.start()
  }

  override def stop() {
    leaderLatch.close()
    zk.close()
  }

  override def isLeader() {
    synchronized {
      // could have lost leadership by now.
      if (!leaderLatch.hasLeadership) {
        return
      }

      logInfo("We have gained leadership")
      updateLeadershipStatus(true)
    }
  }

  override def notLeader() {
    synchronized {
      // could have gained leadership by now.
      if (leaderLatch.hasLeadership) {
        return
      }

      logInfo("We have lost leadership")
      updateLeadershipStatus(false)
    }
  }

  def updateLeadershipStatus(isLeader: Boolean) {
    if (isLeader && status == LeadershipStatus.NOT_LEADER) {
      status = LeadershipStatus.LEADER
      masterActor.electedLeader()
    } else if (!isLeader && status == LeadershipStatus.LEADER) {
      status = LeadershipStatus.NOT_LEADER
      masterActor.revokedLeadership()
    }
  }

  private object LeadershipStatus extends Enumeration {
    type LeadershipStatus = Value
    val LEADER, NOT_LEADER = Value
  }
} 
Example 17
Source File: ZooKeeperPersistenceEngine.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.deploy.master

import java.nio.ByteBuffer

import scala.collection.JavaConversions._
import scala.reflect.ClassTag

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode

import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.serializer.Serializer


private[master] class ZooKeeperPersistenceEngine(conf: SparkConf, val serializer: Serializer)
  extends PersistenceEngine
  with Logging {
  // Directory where ZooKeeper stores recovery state; defaults to /spark
  private val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/master_status"
  private val zk: CuratorFramework = SparkCuratorUtil.newClient(conf)

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)


  override def persist(name: String, obj: Object): Unit = {
    serializeIntoFile(WORKING_DIR + "/" + name, obj)
  }

  override def unpersist(name: String): Unit = {
    zk.delete().forPath(WORKING_DIR + "/" + name)
  }

  override def read[T: ClassTag](prefix: String): Seq[T] = {
    val file = zk.getChildren.forPath(WORKING_DIR).filter(_.startsWith(prefix))
    file.map(deserializeFromFile[T]).flatten
  }

  override def close() {
    zk.close()
  }

  private def serializeIntoFile(path: String, value: AnyRef) {
    val serialized = serializer.newInstance().serialize(value)
    val bytes = new Array[Byte](serialized.remaining())
    serialized.get(bytes)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(path, bytes)
  }

  private def deserializeFromFile[T](filename: String)(implicit m: ClassTag[T]): Option[T] = {
    val fileData = zk.getData().forPath(WORKING_DIR + "/" + filename)
    try {
      Some(serializer.newInstance().deserialize[T](ByteBuffer.wrap(fileData)))
    } catch {
      case e: Exception => {
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(WORKING_DIR + "/" + filename)
        None
      }
    }
  }
} 
Example 18
Source File: MesosClusterPersistenceEngine.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.scheduler.cluster.mesos

import scala.collection.JavaConverters._

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode
import org.apache.zookeeper.KeeperException.NoNodeException

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils


private[spark] class ZookeeperMesosClusterPersistenceEngine(
    baseDir: String,
    zk: CuratorFramework,
    conf: SparkConf)
  extends MesosClusterPersistenceEngine with Logging {
  private val WORKING_DIR =
    conf.get("spark.deploy.zookeeper.dir", "/spark_mesos_dispatcher") + "/" + baseDir

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)

  def path(name: String): String = {
    WORKING_DIR + "/" + name
  }

  override def expunge(name: String): Unit = {
    zk.delete().forPath(path(name))
  }

  override def persist(name: String, obj: Object): Unit = {
    val serialized = Utils.serialize(obj)
    val zkPath = path(name)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(zkPath, serialized)
  }

  override def fetch[T](name: String): Option[T] = {
    val zkPath = path(name)

    try {
      val fileData = zk.getData().forPath(zkPath)
      Some(Utils.deserialize[T](fileData))
    } catch {
      case e: NoNodeException => None
      case e: Exception =>
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(zkPath)
        None
    }
  }

  override def fetchAll[T](): Iterable[T] = {
    zk.getChildren.forPath(WORKING_DIR).asScala.flatMap(fetch[T])
  }
} 
Example 19
Source File: SparkCuratorUtil.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.deploy

import scala.collection.JavaConverters._

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging

private[spark] object SparkCuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: SparkConf,
      zkUrlConf: String = "spark.deploy.zookeeper.url"): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path).asScala) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
} 
Example 20
Source File: ZooKeeperLeaderElectionAgent.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.deploy.master

import org.apache.curator.framework.CuratorFramework
import org.apache.curator.framework.recipes.leader.{LeaderLatch, LeaderLatchListener}

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.internal.Logging

private[master] class ZooKeeperLeaderElectionAgent(val masterInstance: LeaderElectable,
    conf: SparkConf) extends LeaderLatchListener with LeaderElectionAgent with Logging  {

  val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/leader_election"

  private var zk: CuratorFramework = _
  private var leaderLatch: LeaderLatch = _
  private var status = LeadershipStatus.NOT_LEADER

  start()

  private def start() {
    logInfo("Starting ZooKeeper LeaderElection agent")
    zk = SparkCuratorUtil.newClient(conf)
    leaderLatch = new LeaderLatch(zk, WORKING_DIR)
    leaderLatch.addListener(this)
    leaderLatch.start()
  }

  override def stop() {
    leaderLatch.close()
    zk.close()
  }

  override def isLeader() {
    synchronized {
      // could have lost leadership by now.
      if (!leaderLatch.hasLeadership) {
        return
      }

      logInfo("We have gained leadership")
      updateLeadershipStatus(true)
    }
  }

  override def notLeader() {
    synchronized {
      // could have gained leadership by now.
      if (leaderLatch.hasLeadership) {
        return
      }

      logInfo("We have lost leadership")
      updateLeadershipStatus(false)
    }
  }

  private def updateLeadershipStatus(isLeader: Boolean) {
    if (isLeader && status == LeadershipStatus.NOT_LEADER) {
      status = LeadershipStatus.LEADER
      masterInstance.electedLeader()
    } else if (!isLeader && status == LeadershipStatus.LEADER) {
      status = LeadershipStatus.NOT_LEADER
      masterInstance.revokedLeadership()
    }
  }

  private object LeadershipStatus extends Enumeration {
    type LeadershipStatus = Value
    val LEADER, NOT_LEADER = Value
  }
} 
Example 21
Source File: ZooKeeperPersistenceEngine.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.deploy.master

import java.nio.ByteBuffer

import scala.collection.JavaConverters._
import scala.reflect.ClassTag

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.internal.Logging
import org.apache.spark.serializer.Serializer


private[master] class ZooKeeperPersistenceEngine(conf: SparkConf, val serializer: Serializer)
  extends PersistenceEngine
  with Logging {

  private val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/master_status"
  private val zk: CuratorFramework = SparkCuratorUtil.newClient(conf)

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)


  override def persist(name: String, obj: Object): Unit = {
    serializeIntoFile(WORKING_DIR + "/" + name, obj)
  }

  override def unpersist(name: String): Unit = {
    zk.delete().forPath(WORKING_DIR + "/" + name)
  }

  override def read[T: ClassTag](prefix: String): Seq[T] = {
    zk.getChildren.forPath(WORKING_DIR).asScala
      .filter(_.startsWith(prefix)).flatMap(deserializeFromFile[T])
  }

  override def close() {
    zk.close()
  }

  private def serializeIntoFile(path: String, value: AnyRef) {
    val serialized = serializer.newInstance().serialize(value)
    val bytes = new Array[Byte](serialized.remaining())
    serialized.get(bytes)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(path, bytes)
  }

  private def deserializeFromFile[T](filename: String)(implicit m: ClassTag[T]): Option[T] = {
    val fileData = zk.getData().forPath(WORKING_DIR + "/" + filename)
    try {
      Some(serializer.newInstance().deserialize[T](ByteBuffer.wrap(fileData)))
    } catch {
      case e: Exception =>
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(WORKING_DIR + "/" + filename)
        None
    }
  }
} 
Example 22
Source File: SparkCuratorUtil.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.deploy

import scala.collection.JavaConverters._

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

import org.apache.spark.{Logging, SparkConf}

private[spark] object SparkCuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: SparkConf,
      zkUrlConf: String = "spark.deploy.zookeeper.url"): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path).asScala) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
} 
Example 23
Source File: ZooKeeperLeaderElectionAgent.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.deploy.master

import org.apache.spark.{Logging, SparkConf}
import org.apache.curator.framework.CuratorFramework
import org.apache.curator.framework.recipes.leader.{LeaderLatchListener, LeaderLatch}
import org.apache.spark.deploy.SparkCuratorUtil

private[master] class ZooKeeperLeaderElectionAgent(val masterInstance: LeaderElectable,
    conf: SparkConf) extends LeaderLatchListener with LeaderElectionAgent with Logging  {

  val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/leader_election"

  private var zk: CuratorFramework = _
  private var leaderLatch: LeaderLatch = _
  private var status = LeadershipStatus.NOT_LEADER

  start()

  private def start() {
    logInfo("Starting ZooKeeper LeaderElection agent")
    zk = SparkCuratorUtil.newClient(conf)
    leaderLatch = new LeaderLatch(zk, WORKING_DIR)
    leaderLatch.addListener(this)
    leaderLatch.start()
  }

  override def stop() {
    leaderLatch.close()
    zk.close()
  }

  override def isLeader() {
    synchronized {
      // could have lost leadership by now.
      if (!leaderLatch.hasLeadership) {
        return
      }

      logInfo("We have gained leadership")
      updateLeadershipStatus(true)
    }
  }

  override def notLeader() {
    synchronized {
      // could have gained leadership by now.
      if (leaderLatch.hasLeadership) {
        return
      }

      logInfo("We have lost leadership")
      updateLeadershipStatus(false)
    }
  }

  private def updateLeadershipStatus(isLeader: Boolean) {
    if (isLeader && status == LeadershipStatus.NOT_LEADER) {
      status = LeadershipStatus.LEADER
      masterInstance.electedLeader()
    } else if (!isLeader && status == LeadershipStatus.LEADER) {
      status = LeadershipStatus.NOT_LEADER
      masterInstance.revokedLeadership()
    }
  }

  private object LeadershipStatus extends Enumeration {
    type LeadershipStatus = Value
    val LEADER, NOT_LEADER = Value
  }
} 
Example 24
Source File: ZooKeeperPersistenceEngine.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.deploy.master

import java.nio.ByteBuffer

import scala.collection.JavaConverters._
import scala.reflect.ClassTag

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode

import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.serializer.Serializer


private[master] class ZooKeeperPersistenceEngine(conf: SparkConf, val serializer: Serializer)
  extends PersistenceEngine
  with Logging {

  private val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/master_status"
  private val zk: CuratorFramework = SparkCuratorUtil.newClient(conf)

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)


  override def persist(name: String, obj: Object): Unit = {
    serializeIntoFile(WORKING_DIR + "/" + name, obj)
  }

  override def unpersist(name: String): Unit = {
    zk.delete().forPath(WORKING_DIR + "/" + name)
  }

  override def read[T: ClassTag](prefix: String): Seq[T] = {
    zk.getChildren.forPath(WORKING_DIR).asScala
      .filter(_.startsWith(prefix)).map(deserializeFromFile[T]).flatten
  }

  override def close() {
    zk.close()
  }

  private def serializeIntoFile(path: String, value: AnyRef) {
    val serialized = serializer.newInstance().serialize(value)
    val bytes = new Array[Byte](serialized.remaining())
    serialized.get(bytes)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(path, bytes)
  }

  private def deserializeFromFile[T](filename: String)(implicit m: ClassTag[T]): Option[T] = {
    val fileData = zk.getData().forPath(WORKING_DIR + "/" + filename)
    try {
      Some(serializer.newInstance().deserialize[T](ByteBuffer.wrap(fileData)))
    } catch {
      case e: Exception => {
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(WORKING_DIR + "/" + filename)
        None
      }
    }
  }
} 
Example 25
Source File: MesosClusterPersistenceEngine.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.scheduler.cluster.mesos

import scala.collection.JavaConverters._

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode
import org.apache.zookeeper.KeeperException.NoNodeException

import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.util.Utils


private[spark] class ZookeeperMesosClusterPersistenceEngine(
    baseDir: String,
    zk: CuratorFramework,
    conf: SparkConf)
  extends MesosClusterPersistenceEngine with Logging {
  private val WORKING_DIR =
    conf.get("spark.deploy.zookeeper.dir", "/spark_mesos_dispatcher") + "/" + baseDir

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)

  def path(name: String): String = {
    WORKING_DIR + "/" + name
  }

  override def expunge(name: String): Unit = {
    zk.delete().forPath(path(name))
  }

  override def persist(name: String, obj: Object): Unit = {
    val serialized = Utils.serialize(obj)
    val zkPath = path(name)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(zkPath, serialized)
  }

  override def fetch[T](name: String): Option[T] = {
    val zkPath = path(name)

    try {
      val fileData = zk.getData().forPath(zkPath)
      Some(Utils.deserialize[T](fileData))
    } catch {
      case e: NoNodeException => None
      case e: Exception => {
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(zkPath)
        None
      }
    }
  }

  override def fetchAll[T](): Iterable[T] = {
    zk.getChildren.forPath(WORKING_DIR).asScala.flatMap(fetch[T])
  }
} 
Example 26
Source File: package.scala    From squbs   with Apache License 2.0
package org.squbs

import java.net.{URLDecoder, URLEncoder}
import java.nio.ByteBuffer
import java.nio.charset.Charset

import akka.actor.{Address, AddressFromURIString}
import akka.util.ByteString
import com.typesafe.scalalogging.Logger
import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode
import org.apache.zookeeper.KeeperException.NodeExistsException

import scala.language.implicitConversions
import scala.util.Try
import scala.util.control.NonFatal
import scala.collection.JavaConverters._

package object cluster {

  trait SegmentationLogic {
    val segmentsSize:Int
    def segmentation(partitionKey:ByteString): String = s"segment-${Math.abs(partitionKey.hashCode()) % segmentsSize}"
    def partitionZkPath(partitionKey:ByteString): String = s"/segments/${segmentation(partitionKey)}/${keyToPath(partitionKey)}"
    def sizeOfParZkPath(partitionKey:ByteString): String = s"${partitionZkPath(partitionKey)}/$$size"
    def servantsOfParZkPath(partitionKey:ByteString): String = s"${partitionZkPath(partitionKey)}/servants"
  }

  case class DefaultSegmentationLogic(segmentsSize:Int) extends SegmentationLogic

  def guarantee(path: String, data: Option[Array[Byte]], mode: CreateMode = CreateMode.EPHEMERAL)
               (implicit zkClient: CuratorFramework, logger: Logger): String = {
    try {
      data match {
        case None => zkClient.create.withMode(mode).forPath(path)
        case Some(bytes) => zkClient.create.withMode(mode).forPath(path, bytes)
      }
    } catch {
      case e: NodeExistsException =>
        if (data.nonEmpty && data.get.length > 0) {
          zkClient.setData().forPath(path, data.get)
        }
        path
      case NonFatal(e) =>
        logger.info(s"leader znode creation failed due to $e")
        path
    }
  }

  def safelyDiscard(path: String, recursive: Boolean = true)(implicit zkClient: CuratorFramework): String = Try {
    if (recursive) zkClient.getChildren.forPath(path).asScala.foreach(child => safelyDiscard(s"$path/$child", recursive))
    zkClient.delete.forPath(path)
    path
  } getOrElse path

  def keyToPath(name:String):String = URLEncoder.encode(name, "utf-8")

  def pathToKey(name:String):String = URLDecoder.decode(name, "utf-8")

  private[cluster] val BYTES_OF_INT = Integer.SIZE / java.lang.Byte.SIZE

  implicit def intToBytes(integer:Int):Array[Byte] = {
    val buf = ByteBuffer.allocate(BYTES_OF_INT)
    buf.putInt(integer)
    buf.rewind
    buf.array()
  }

  val UTF_8 = Charset.forName("utf-8")

  implicit class ByteConversions(val bytes: Array[Byte]) extends AnyVal {

    def toAddress: Option[Address] =
      Option(bytes) flatMap (b => if (b.length <= 0) None else Some(AddressFromURIString(new String(b, UTF_8))))

    def toInt: Int = ByteBuffer.wrap(bytes).getInt

    def toUtf8: String = new String(bytes, UTF_8)

    def toByteString: ByteString = ByteString(bytes)

    def toAddressSet: Set[Address] = Try {
      new String(bytes, UTF_8).split("[,]").map(seg => AddressFromURIString(seg.trim)).toSet
    } getOrElse Set.empty
  }


  implicit def byteStringToUtf8(bs:ByteString):String = new String(bs.toArray, UTF_8)

  implicit def addressToBytes(address:Address):Array[Byte] = {
    address.toString.getBytes(UTF_8)
  }

  implicit def addressSetToBytes(members: Set[Address]): Array[Byte] = {
    members.mkString(",").getBytes(UTF_8)
  }
} 
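A hedged usage sketch for guarantee and safelyDiscard above (the connection string and paths are illustrative; the implicit values satisfy the methods' signatures):

import com.typesafe.scalalogging.Logger
import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.squbs.cluster._

implicit val zkClient: CuratorFramework = CuratorFrameworkFactory.newClient(
  "localhost:2181", new ExponentialBackoffRetry(1000, 3))
implicit val logger: Logger = Logger("cluster-demo")
zkClient.start()

// Create (or update) an ephemeral znode, then remove it and any children.
guarantee("/cluster/demo", Some("hello".getBytes(UTF_8)))
safelyDiscard("/cluster/demo")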
Example 27
Source File: Clients.scala    From cosmos   with Apache License 2.0
package com.mesosphere.cosmos.zookeeper

import com.mesosphere.cosmos.model.ZooKeeperUri
import java.nio.charset.StandardCharsets
import org.apache.curator.framework.CuratorFramework
import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.framework.api.ACLProvider
import org.apache.curator.retry.ExponentialBackoffRetry
import org.slf4j.Logger
import scala.annotation.tailrec
import scala.collection.JavaConverters._

object Clients {
  val logger: Logger = org.slf4j.LoggerFactory.getLogger(getClass)

  val retries = 3
  val baseSleepTimeMs = 1000

  def createAndInitialize(zkUri: ZooKeeperUri): CuratorFramework = {
    createAndInitialize(
      zkUri = zkUri,
      zkCredentials = sys.env.get("ZOOKEEPER_USER").zip(sys.env.get("ZOOKEEPER_SECRET")).headOption
    )
  }

  def createAndInitialize(
    zkUri: ZooKeeperUri,
    zkCredentials: Option[(String, String)]
  ): CuratorFramework = {
    val zkClientBuilder = CuratorFrameworkFactory
      .builder()
      .connectString(zkUri.connectString)
      .retryPolicy(new ExponentialBackoffRetry(baseSleepTimeMs, retries))

    val authInfo = zkCredentials.map {
      case (user, secret) =>
        (
          s"$user:$secret".getBytes(StandardCharsets.UTF_8),
          CosmosAclProvider(user, secret)
        )
    }

    authInfo.foreach {
      case (authBytes, aclProvider) =>
        logger.info("Enabling authorization and ACL provider for ZooKeeper client")
        zkClientBuilder
          .authorization("digest", authBytes)
          .aclProvider(aclProvider)
    }

    val zkClient = zkClientBuilder.build

    // Start the client
    zkClient.start()

    authInfo.foreach {
      case (_, aclProvider) =>
        updateAcls(zkUri, zkClient, aclProvider)
    }

    zkClient
  }

  private[this] def updateAcls(
    zkUri: ZooKeeperUri,
    zkClient: CuratorFramework,
    aclProvider: ACLProvider
  ): Unit = {
    updateAcls(
      zkClient,
      aclProvider,
      zkClient.getChildren.forPath(zkUri.path).asScala.toList.map(zkUri.path + "/" + _)
    )
  }

  @tailrec
  private[this] def updateAcls(
    zkClient: CuratorFramework,
    aclProvider: ACLProvider,
    paths: List[String]
  ): Unit = {
    paths match {
      case path :: restOfPaths =>
        logger.info("Updating ZNode ACL during initialization: {}", path)
        zkClient
          .setACL()
          .withACL(aclProvider.getAclForPath(path))
          .forPath(path)

        val childrenPaths = zkClient.getChildren.forPath(
          path
        ).asScala.toList.map { child =>
          path + "/" + child
        }

        updateAcls(zkClient, aclProvider, childrenPaths ++ restOfPaths)
      case Nil =>
        // No paths left. Nothing to do.
    }
  }
} 
Example 28
Source File: CuratorUtil.scala    From aloha   with Apache License 2.0
package me.jrwang.aloha.scheduler.master.zookeeper

import scala.collection.JavaConverters._

import me.jrwang.aloha.common.{AlohaConf, Logging}
import me.jrwang.aloha.scheduler._
import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

object CuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: AlohaConf,
      zkUrlConf: String = ZOOKEEPER_URL.key): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
        // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path).asScala) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
} 
Example 29
Source File: ZooKeeperLeaderElectionAgent.scala    From aloha   with Apache License 2.0
package me.jrwang.aloha.scheduler.master.zookeeper

import me.jrwang.aloha.common.{AlohaConf, Logging}
import me.jrwang.aloha.scheduler._
import me.jrwang.aloha.scheduler.master.{LeaderElectable, LeaderElectionAgent}
import org.apache.curator.framework.CuratorFramework
import org.apache.curator.framework.recipes.leader.{LeaderLatch, LeaderLatchListener}


private[master] class ZooKeeperLeaderElectionAgent(
    val masterInstance: LeaderElectable,
    conf: AlohaConf) extends LeaderLatchListener with LeaderElectionAgent with Logging  {

  private val workingDir = conf.get(ZOOKEEPER_DIRECTORY).getOrElse("/aloha") + "/leader_election"

  private var zk: CuratorFramework = _
  private var leaderLatch: LeaderLatch = _
  private var status = LeadershipStatus.NOT_LEADER

  start()

  private def start() {
    logInfo("Starting ZooKeeper LeaderElection agent")
    zk = CuratorUtil.newClient(conf)
    leaderLatch = new LeaderLatch(zk, workingDir)
    leaderLatch.addListener(this)
    leaderLatch.start()
  }

  override def stop() {
    leaderLatch.close()
    zk.close()
  }

  override def isLeader() {
    synchronized {
      // could have lost leadership by now.
      if (!leaderLatch.hasLeadership) {
        return
      }

      logInfo("We have gained leadership")
      updateLeadershipStatus(true)
    }
  }

  override def notLeader() {
    synchronized {
      // could have gained leadership by now.
      if (leaderLatch.hasLeadership) {
        return
      }

      logInfo("We have lost leadership")
      updateLeadershipStatus(false)
    }
  }

  private def updateLeadershipStatus(isLeader: Boolean) {
    if (isLeader && status == LeadershipStatus.NOT_LEADER) {
      status = LeadershipStatus.LEADER
      masterInstance.electedLeader()
    } else if (!isLeader && status == LeadershipStatus.LEADER) {
      status = LeadershipStatus.NOT_LEADER
      masterInstance.revokedLeadership()
    }
  }

  private object LeadershipStatus extends Enumeration {
    type LeadershipStatus = Value
    val LEADER, NOT_LEADER = Value
  }
} 
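The agent above is a thin wrapper over Curator's LeaderLatch recipe. A standalone sketch (illustrative, not from aloha) of the same latch-plus-listener pattern; the connection string and latch path are assumptions.

import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.framework.recipes.leader.{LeaderLatch, LeaderLatchListener}
import org.apache.curator.retry.ExponentialBackoffRetry

object LeaderLatchDemo extends App {
  val zk = CuratorFrameworkFactory.newClient(
    "localhost:2181", new ExponentialBackoffRetry(1000, 3))
  zk.start()

  val latch = new LeaderLatch(zk, "/demo/leader_election")
  latch.addListener(new LeaderLatchListener {
    // As in the agent above, callbacks re-check hasLeadership:
    // leadership may have changed again before the callback runs.
    override def isLeader(): Unit =
      if (latch.hasLeadership) println("We have gained leadership")
    override def notLeader(): Unit =
      if (!latch.hasLeadership) println("We have lost leadership")
  })
  latch.start()

  Thread.sleep(5000) // participate in the election for a while
  latch.close()
  zk.close()
}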
Example 30
Source File: ZooKeeperPersistenceEngine.scala    From aloha   with Apache License 2.0 5 votes vote down vote up
package me.jrwang.aloha.scheduler.master.zookeeper

import java.nio.ByteBuffer

import scala.collection.JavaConverters._
import scala.reflect.ClassTag

import me.jrwang.aloha.common.{AlohaConf, Logging}
import me.jrwang.aloha.scheduler._
import me.jrwang.aloha.scheduler.master.PersistenceEngine
import me.jrwang.aloha.rpc.serializer.Serializer
import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode

private[master] class ZooKeeperPersistenceEngine(
    conf: AlohaConf, val serializer: Serializer
  ) extends PersistenceEngine with Logging {

  private val workingDir = conf.get(ZOOKEEPER_DIRECTORY).getOrElse("/aloha") + "/master_status"
  private val zk: CuratorFramework = CuratorUtil.newClient(conf)

  CuratorUtil.mkdir(zk, workingDir)

  override def persist(name: String, obj: Object): Unit = {
    serializeIntoFile(workingDir + "/" + name, obj)
  }

  override def unpersist(name: String): Unit = {
    zk.delete().forPath(workingDir + "/" + name)
  }

  override def read[T: ClassTag](prefix: String): Seq[T] = {
    zk.getChildren.forPath(workingDir).asScala
      .filter(_.startsWith(prefix)).flatMap(deserializeFromFile[T](_))
  }

  override def close() {
    zk.close()
  }

  private def serializeIntoFile(path: String, value: AnyRef) {
    val serialized = serializer.newInstance().serialize(value)
    val bytes = new Array[Byte](serialized.remaining())
    serialized.get(bytes)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(path, bytes)
  }

  private def deserializeFromFile[T](filename: String)(implicit m: ClassTag[T]): Option[T] = {
    val fileData = zk.getData.forPath(workingDir + "/" + filename)
    try {
      Some(serializer.newInstance().deserialize[T](ByteBuffer.wrap(fileData)))
    } catch {
      case e: Exception =>
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(workingDir + "/" + filename)
        None
    }
  }
} 
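serializeIntoFile and deserializeFromFile round-trip a value through a ByteBuffer and a znode. A minimal sketch of the buffer-draining idiom used above (not from aloha; it substitutes a UTF-8 string for the pluggable Serializer):

import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets

object ByteBufferCopyDemo extends App {
  // Stand-in for serializer.newInstance().serialize(value), which yields a ByteBuffer.
  val serialized: ByteBuffer = ByteBuffer.wrap("payload".getBytes(StandardCharsets.UTF_8))

  // Drain the buffer into the byte array that forPath(path, bytes) stores in the znode.
  val bytes = new Array[Byte](serialized.remaining())
  serialized.get(bytes)

  // Reading back: deserializeFromFile wraps the raw bytes again with ByteBuffer.wrap.
  println(new String(ByteBuffer.wrap(bytes).array(), StandardCharsets.UTF_8))
}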
Example 31
Source File: StreamingContextService.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.driver.service

import java.io.File

import com.stratio.sparta.driver.SpartaWorkflow
import com.stratio.sparta.driver.factory.SparkContextFactory._
import com.stratio.sparta.driver.utils.LocalListenerUtils
import com.stratio.sparta.sdk.pipeline.output.Output
import com.stratio.sparta.serving.core.constants.AppConstant._
import com.stratio.sparta.serving.core.helpers.PolicyHelper
import com.stratio.sparta.serving.core.models.policy.PolicyModel
import com.stratio.sparta.serving.core.utils.{CheckpointUtils, SchedulerUtils}
import com.typesafe.config.Config
import org.apache.curator.framework.CuratorFramework
import org.apache.spark.streaming.StreamingContext

import scala.util.Try

case class StreamingContextService(curatorFramework: CuratorFramework, generalConfig: Option[Config] = None)
  extends SchedulerUtils with CheckpointUtils with LocalListenerUtils {

  def localStreamingContext(policy: PolicyModel, files: Seq[File]): (SpartaWorkflow, StreamingContext) = {
    killLocalContextListener(policy, policy.name)

    if (autoDeleteCheckpointPath(policy)) deleteCheckpointPath(policy)

    createLocalCheckpointPath(policy)

    val outputsSparkConfig =
      PolicyHelper.getSparkConfigs(policy.outputs, Output.SparkConfigurationMethod, Output.ClassSuffix)
    val policySparkConfig = PolicyHelper.getSparkConfigFromPolicy(policy)
    val propsConfig = Try(PolicyHelper.getSparkConfFromProps(generalConfig.get.getConfig(ConfigLocal)))
      .getOrElse(Map.empty[String, String])

    sparkStandAloneContextInstance(propsConfig ++ policySparkConfig ++ outputsSparkConfig, files)

    val spartaWorkflow = SpartaWorkflow(policy, curatorFramework)
    val ssc = spartaWorkflow.streamingStages()

    setSparkContext(ssc.sparkContext)
    setSparkStreamingContext(ssc)
    setInitialSentences(policy.initSqlSentences.map(modelSentence => modelSentence.sentence))

    (spartaWorkflow, ssc)
  }

  def clusterStreamingContext(policy: PolicyModel, files: Seq[String]): (SpartaWorkflow, StreamingContext) = {
    if (autoDeleteCheckpointPath(policy)) deleteCheckpointPath(policy)
    val spartaWorkflow = SpartaWorkflow(policy, curatorFramework)

    val ssc = StreamingContext.getOrCreate(checkpointPath(policy), () => {
      log.info(s"Nothing in checkpoint path: ${checkpointPath(policy)}")
      val outputsSparkConfig =
        PolicyHelper.getSparkConfigs(policy.outputs, Output.SparkConfigurationMethod, Output.ClassSuffix)
      sparkClusterContextInstance(outputsSparkConfig, files)
      spartaWorkflow.streamingStages()
    })

    setSparkContext(ssc.sparkContext)
    setSparkStreamingContext(ssc)
    setInitialSentences(policy.initSqlSentences.map(modelSentence => modelSentence.sentence))

    (spartaWorkflow, ssc)
  }
} 
Example 32
Source File: SparkCuratorUtil.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.deploy

import scala.collection.JavaConverters._

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging

private[spark] object SparkCuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: SparkConf,
      zkUrlConf: String = "spark.deploy.zookeeper.url"): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path).asScala) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
} 
Example 33
Source File: ZooKeeperLeaderElectionAgent.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.deploy.master

import org.apache.curator.framework.CuratorFramework
import org.apache.curator.framework.recipes.leader.{LeaderLatch, LeaderLatchListener}

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.internal.Logging

private[master] class ZooKeeperLeaderElectionAgent(val masterInstance: LeaderElectable,
    conf: SparkConf) extends LeaderLatchListener with LeaderElectionAgent with Logging  {

  val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/leader_election"

  private var zk: CuratorFramework = _
  private var leaderLatch: LeaderLatch = _
  private var status = LeadershipStatus.NOT_LEADER

  start()

  private def start() {
    logInfo("Starting ZooKeeper LeaderElection agent")
    zk = SparkCuratorUtil.newClient(conf)
    leaderLatch = new LeaderLatch(zk, WORKING_DIR)
    leaderLatch.addListener(this)
    leaderLatch.start()
  }

  override def stop() {
    leaderLatch.close()
    zk.close()
  }

  override def isLeader() {
    synchronized {
      // could have lost leadership by now.
      if (!leaderLatch.hasLeadership) {
        return
      }

      logInfo("We have gained leadership")
      updateLeadershipStatus(true)
    }
  }

  override def notLeader() {
    synchronized {
      // could have gained leadership by now.
      if (leaderLatch.hasLeadership) {
        return
      }

      logInfo("We have lost leadership")
      updateLeadershipStatus(false)
    }
  }

  private def updateLeadershipStatus(isLeader: Boolean) {
    if (isLeader && status == LeadershipStatus.NOT_LEADER) {
      status = LeadershipStatus.LEADER
      masterInstance.electedLeader()
    } else if (!isLeader && status == LeadershipStatus.LEADER) {
      status = LeadershipStatus.NOT_LEADER
      masterInstance.revokedLeadership()
    }
  }

  private object LeadershipStatus extends Enumeration {
    type LeadershipStatus = Value
    val LEADER, NOT_LEADER = Value
  }
} 
Example 34
Source File: ZooKeeperPersistenceEngine.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.deploy.master

import java.nio.ByteBuffer

import scala.collection.JavaConverters._
import scala.reflect.ClassTag

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.internal.Logging
import org.apache.spark.serializer.Serializer


private[master] class ZooKeeperPersistenceEngine(conf: SparkConf, val serializer: Serializer)
  extends PersistenceEngine
  with Logging {

  private val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/master_status"
  private val zk: CuratorFramework = SparkCuratorUtil.newClient(conf)

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)


  override def persist(name: String, obj: Object): Unit = {
    serializeIntoFile(WORKING_DIR + "/" + name, obj)
  }

  override def unpersist(name: String): Unit = {
    zk.delete().forPath(WORKING_DIR + "/" + name)
  }

  override def read[T: ClassTag](prefix: String): Seq[T] = {
    zk.getChildren.forPath(WORKING_DIR).asScala
      .filter(_.startsWith(prefix)).flatMap(deserializeFromFile[T])
  }

  override def close() {
    zk.close()
  }

  private def serializeIntoFile(path: String, value: AnyRef) {
    val serialized = serializer.newInstance().serialize(value)
    val bytes = new Array[Byte](serialized.remaining())
    serialized.get(bytes)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(path, bytes)
  }

  private def deserializeFromFile[T](filename: String)(implicit m: ClassTag[T]): Option[T] = {
    val fileData = zk.getData().forPath(WORKING_DIR + "/" + filename)
    try {
      Some(serializer.newInstance().deserialize[T](ByteBuffer.wrap(fileData)))
    } catch {
      case e: Exception =>
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(WORKING_DIR + "/" + filename)
        None
    }
  }
} 
Example 35
Source File: CuratorAwareActor.scala    From CMAK   with Apache License 2.0 5 votes vote down vote up
package kafka.manager.base

import akka.actor.ActorLogging
import kafka.manager.model.CuratorConfig
import org.apache.curator.RetrySleeper
import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.BoundedExponentialBackoffRetry

import scala.util.Try

class LoggingRetryPolicy(curatorConfig: CuratorConfig, actorLogging: ActorLogging
                        ) extends BoundedExponentialBackoffRetry(curatorConfig.baseSleepTimeMs
  , curatorConfig.maxSleepTimeMs, curatorConfig.zkMaxRetry) {
  override def allowRetry(retryCount: Int, elapsedTimeMs: Long, sleeper: RetrySleeper): Boolean = {
    actorLogging.log.info(s"retryCount=$retryCount maxRetries=${curatorConfig.zkMaxRetry} zkConnect=${curatorConfig.zkConnect}")
    super.allowRetry(retryCount, elapsedTimeMs, sleeper)
  }
}

trait CuratorAwareActor extends BaseActor {
  
  protected def curatorConfig: CuratorConfig

  protected[this] val curator : CuratorFramework = getCurator(curatorConfig)
  log.info("Starting curator...")
  curator.start()

  protected def getCurator(config: CuratorConfig) : CuratorFramework = {
    val curator: CuratorFramework = CuratorFrameworkFactory.newClient(
      config.zkConnect,
      new LoggingRetryPolicy(config, this))
    curator
  }

  @scala.throws[Exception](classOf[Exception])
  override def postStop(): Unit = {
    log.info("Shutting down curator...")
    Try(curator.close())
    super.postStop()
  }
}

trait BaseZkPath {
  this : CuratorAwareActor =>

  protected def baseZkPath : String

  protected def zkPath(path: String): String = {
    require(path.nonEmpty, "path must be nonempty")
    "%s/%s" format(baseZkPath, path)
  }

  protected def zkPathFrom(parent: String,child: String): String = {
    require(parent.nonEmpty, "parent path must be nonempty")
    require(child.nonEmpty, "child path must be nonempty")
    "%s/%s" format(parent, child)
  }
} 
Example 36
Source File: LogkafkaAdminUtils.scala    From CMAK   with Apache License 2.0 5 votes vote down vote up
package kafka.manager.utils

import java.util.Properties

import grizzled.slf4j.Logging
import kafka.manager.model.{Kafka_0_8_2_0, KafkaVersion, ActorModel}
import org.apache.curator.framework.CuratorFramework

import scala.collection.mutable
import scala.util.Random

class LogkafkaAdminUtils(version: KafkaVersion) extends Logging {

  val rand = new Random

  def isDeleteSupported : Boolean = {
    version match {
      case Kafka_0_8_2_0 => true
      case _ => false
    }
  }

  def deleteLogkafka(curator: CuratorFramework, 
                   logkafka_id: String, 
                   log_path: String, 
                   logkafkaConfigOption: Option[ActorModel.LogkafkaConfig]): Unit = {
    logkafkaConfigOption.map { lcg =>
      lcg.config.map { c => 
        val configMap = kafka.manager.utils.Logkafka.parseJsonStr(logkafka_id, c)
        if (!configMap.isEmpty || !(configMap - log_path).isEmpty) {
          writeLogkafkaConfig(curator, logkafka_id, configMap - log_path, -1)
        }
      } getOrElse { LogkafkaErrors.LogkafkaIdNotExists(logkafka_id) }
    } getOrElse { LogkafkaErrors.LogkafkaIdNotExists(logkafka_id) }
  }

  def createLogkafka(curator: CuratorFramework,
                  logkafka_id: String,
                  log_path: String,
                  config: Properties = new Properties,
                  logkafkaConfigOption: Option[ActorModel.LogkafkaConfig] = None
                  ): Unit = {
    createOrUpdateLogkafkaConfigPathInZK(curator, logkafka_id, log_path, config, logkafkaConfigOption)
  }
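
  // Note: LogkafkaCommandActor (Example 39) also calls changeLogkafkaConfig,
  // which this excerpt omits. A plausible reconstruction follows (an assumption,
  // not the verbatim CMAK source): update reuses the same write path with
  // update = true.
  def changeLogkafkaConfig(curator: CuratorFramework,
                           logkafka_id: String,
                           log_path: String,
                           config: Properties = new Properties,
                           logkafkaConfigOption: Option[ActorModel.LogkafkaConfig] = None,
                           checkConfig: Boolean = true): Unit = {
    createOrUpdateLogkafkaConfigPathInZK(curator, logkafka_id, log_path, config,
      logkafkaConfigOption, update = true, checkConfig = checkConfig)
  }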

  def createOrUpdateLogkafkaConfigPathInZK(curator: CuratorFramework,
                                           logkafka_id: String,
                                           log_path: String,
                                           config: Properties = new Properties,
                                           logkafkaConfigOption: Option[ActorModel.LogkafkaConfig],
                                           update: Boolean = false,
                                           readVersion: Int = -1,
                                           checkConfig: Boolean = true 
                                           ) {
    // validate arguments
    Logkafka.validateLogkafkaId(logkafka_id)
    Logkafka.validatePath(log_path)

    if (checkConfig) {
      LogkafkaNewConfigs.validate(version, config)
    }

    val configMap: mutable.Map[String, String] = {
      import scala.collection.JavaConverters._
      config.asScala
    }
    val newConfigMap = Map(log_path -> Map(configMap.toSeq:_*))

    val logkafkaConfigMap = logkafkaConfigOption.map { lcg =>
      lcg.config.map { c =>
        kafka.manager.utils.Logkafka.parseJsonStr(logkafka_id, c)
      } getOrElse { Map.empty }
    } getOrElse { Map.empty }

    if(!update ) {
      // write out the config on create, not update, if there is any
      writeLogkafkaConfig(curator, logkafka_id, logkafkaConfigMap ++ newConfigMap, readVersion)
    } else {
      val merged = logkafkaConfigMap.toSeq ++ newConfigMap.toSeq
      val grouped = merged.groupBy(_._1)
      val cleaned = grouped.mapValues(_.map(_._2).fold(Map.empty)(_ ++ _))
      writeLogkafkaConfig(curator, logkafka_id, cleaned, readVersion)
    }
  }

  
  private def writeLogkafkaConfig(curator: CuratorFramework, logkafka_id: String, configMap: Map[String, Map[String, String]], readVersion: Int = -1) {
    ZkUtils.updatePersistentPath(curator, LogkafkaZkUtils.getLogkafkaConfigPath(logkafka_id), toJson(configMap), readVersion)
  }
} 
Example 37
Source File: ZkUtils.scala    From CMAK   with Apache License 2.0 5 votes vote down vote up
package kafka.manager.utils

import java.nio.charset.StandardCharsets

import org.apache.curator.framework.CuratorFramework
import org.apache.kafka.common.TopicPartition
import org.apache.zookeeper.CreateMode
import org.apache.zookeeper.KeeperException.{NoNodeException, NodeExistsException}
import org.apache.zookeeper.data.Stat


object ZkUtils {

  def replicaAssignmentZkData(map: Map[String, Seq[Int]]): String = {
    toJson(Map("version" -> 1, "partitions" -> map))
  }

  def readData(curator: CuratorFramework, path: String): (String, Stat) = {
    val stat: Stat = new Stat()
    val dataStr: String = curator.getData.storingStatIn(stat).forPath(path)
    (dataStr, stat)
  }
  
  def readDataMaybeNull(curator: CuratorFramework, path: String): (Option[String], Stat) = {
    val stat: Stat = new Stat()
    try {
      val dataStr: String = curator.getData.storingStatIn(stat).forPath(path)
      (Option(dataStr), stat)
    } catch {
      case e: NoNodeException => {
        (None, stat)
      }
      case e2: Throwable => throw e2
    }
  }
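
  // This excerpt omits several ZkUtils members referenced by the other examples
  // (PreferredReplicaLeaderElectionPath in Example 38, createPersistentPath and
  // updatePersistentPath in Examples 36/38). Plausible reconstructions follow;
  // they are assumptions, not the verbatim CMAK source.
  val AdminPath = "/admin"
  val PreferredReplicaLeaderElectionPath = s"$AdminPath/preferred_replica_election"

  def createPersistentPath(curator: CuratorFramework, path: String, data: String = ""): Unit = {
    curator.create().creatingParentsIfNeeded().forPath(path, data.getBytes(StandardCharsets.UTF_8))
  }

  def updatePersistentPath(curator: CuratorFramework, path: String, data: String, version: Int = -1): Unit = {
    try {
      curator.setData().withVersion(version).forPath(path, data.getBytes(StandardCharsets.UTF_8))
    } catch {
      case _: NoNodeException =>
        createPersistentPath(curator, path, data)
    }
  }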


  def getPartitionReassignmentZkData(partitionsToBeReassigned: Map[TopicPartition, Seq[Int]]): String = {
    toJson(Map("version" -> 1, "partitions" -> partitionsToBeReassigned.map(e => Map("topic" -> e._1.topic, "partition" -> e._1.partition,
      "replicas" -> e._2))))
  }
} 
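A small standalone helper (illustrative, not from CMAK) showing the same tolerant read with Stat metadata. Note that Curator's forPath returns raw bytes; CMAK converts them to String via an implicit in its package object, which the sketch below does explicitly.

import java.nio.charset.StandardCharsets

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.KeeperException.NoNodeException
import org.apache.zookeeper.data.Stat

object ZkReadDemo {
  // Returns the node's data (None if the node is absent) plus its Stat,
  // whose version field drives optimistic concurrency on writes.
  def readMaybe(zk: CuratorFramework, path: String): (Option[String], Stat) = {
    val stat = new Stat()
    try {
      val bytes = zk.getData.storingStatIn(stat).forPath(path)
      (Option(bytes).map(new String(_, StandardCharsets.UTF_8)), stat)
    } catch {
      case _: NoNodeException => (None, stat)
    }
  }
}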
Example 38
Source File: PreferredReplicaLeaderElectionCommand.scala    From CMAK   with Apache License 2.0 5 votes vote down vote up
package kafka.manager.utils.zero81

import grizzled.slf4j.Logging
import kafka.manager.utils._
import org.apache.curator.framework.CuratorFramework
import org.apache.kafka.common.TopicPartition
import org.apache.zookeeper.KeeperException.NodeExistsException
import org.json4s.JsonAST._


object PreferredReplicaLeaderElectionCommand extends Logging {

  def parsePreferredReplicaElectionData(jsonString: String): Set[TopicPartition] = {
    parseJson(jsonString).findField(_._1 == "partitions") match {
      case Some((_, arr)) =>
        val result: List[TopicPartition] = for {
          JArray(elements) <- arr
          JObject(children) <- elements
          JField("topic", JString(t)) <- children
          JField("partition", JInt(p)) <- children
        } yield new TopicPartition(t, p.toInt)
        checkCondition(result.nonEmpty, PreferredLeaderElectionErrors.ElectionSetEmptyOnRead(jsonString))
        result.toSet
      case None =>
        throwError(PreferredLeaderElectionErrors.ElectionSetEmptyOnRead(jsonString))
    }
  }


  def writePreferredReplicaElectionData(curator: CuratorFramework,
                                        partitionsUndergoingPreferredReplicaElection: Set[TopicPartition]) {
    checkCondition(partitionsUndergoingPreferredReplicaElection.nonEmpty,PreferredLeaderElectionErrors.ElectionSetEmptyOnWrite)
    val zkPath = ZkUtils.PreferredReplicaLeaderElectionPath
    val partitionsList : Set[Map[String,Any]] =
      partitionsUndergoingPreferredReplicaElection.map(e => Map[String,Any]("topic" -> e.topic, "partition" -> e.partition))
    val jsonData = toJson(Map("version" -> 1, "partitions" -> partitionsList))
    try {
      ZkUtils.createPersistentPath(curator, zkPath, jsonData)
      logger.info("Created preferred replica election path with %s".format(jsonData))
    } catch {
      case nee: NodeExistsException =>
        val partitionsUndergoingPreferredReplicaElection =
          PreferredReplicaLeaderElectionCommand.parsePreferredReplicaElectionData(ZkUtils.readData(curator, zkPath)._1)
        throwError(PreferredLeaderElectionErrors.ElectionAlreadyInProgress(partitionsUndergoingPreferredReplicaElection))
      case e2: Throwable =>
        throwError(PreferredLeaderElectionErrors.UnhandledException)
    }
  }
}

object PreferredLeaderElectionErrors {
  class ElectionSetEmptyOnWrite private[PreferredLeaderElectionErrors] extends UtilError("Preferred replica election data is empty")
  class ElectionSetEmptyOnRead private[PreferredLeaderElectionErrors] (json: String) extends UtilError(s"Preferred replica election data is empty on read : $json")
  class ElectionAlreadyInProgress private[PreferredLeaderElectionErrors] (partitionsUndergoingPreferredReplicaElection: Set[TopicPartition]) extends UtilError(
    "Preferred replica leader election currently in progress for " +
    "%s. Aborting operation".format(partitionsUndergoingPreferredReplicaElection))
  class UnhandledException private[PreferredLeaderElectionErrors] extends UtilError("Unhandled exception")

  def ElectionSetEmptyOnRead(json: String) = new ElectionSetEmptyOnRead(json)
  val ElectionSetEmptyOnWrite = new ElectionSetEmptyOnWrite
  def ElectionAlreadyInProgress(set: Set[TopicPartition]) = new ElectionAlreadyInProgress(set)
  val UnhandledException = new UnhandledException
} 
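For context, a self-contained sketch (not from CMAK) of the znode payload shape this command reads and writes, using the same json4s extraction pattern as parsePreferredReplicaElectionData; the topic names are assumptions.

import org.apache.kafka.common.TopicPartition
import org.json4s._
import org.json4s.jackson.JsonMethods.parse

object ElectionDataDemo extends App {
  // The payload written by writePreferredReplicaElectionData above.
  val json = """{"version":1,"partitions":[{"topic":"t1","partition":0},{"topic":"t1","partition":1}]}"""

  // Same for-comprehension extraction as parsePreferredReplicaElectionData.
  val partitions = for {
    JArray(elements) <- parse(json) \ "partitions"
    JObject(children) <- elements
    JField("topic", JString(t)) <- children
    JField("partition", JInt(p)) <- children
  } yield new TopicPartition(t, p.toInt)

  println(partitions.toSet)
}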
Example 39
Source File: LogkafkaCommandActor.scala    From CMAK   with Apache License 2.0 5 votes vote down vote up
package kafka.manager.logkafka

import kafka.manager.model.{ClusterContext, ActorModel}
import ActorModel._
import kafka.manager.base.{BaseCommandActor, LongRunningPoolActor, LongRunningPoolConfig}
import kafka.manager.utils.LogkafkaAdminUtils
import org.apache.curator.framework.CuratorFramework

import scala.concurrent.Future
import scala.util.Try



case class LogkafkaCommandActorConfig(curator: CuratorFramework, 
                                   longRunningPoolConfig: LongRunningPoolConfig,
                                   askTimeoutMillis: Long = 400, 
                                   clusterContext: ClusterContext)
class LogkafkaCommandActor(logkafkaCommandActorConfig: LogkafkaCommandActorConfig) extends BaseCommandActor with LongRunningPoolActor {

  //private[this] val askTimeout: Timeout = logkafkaCommandActorConfig.askTimeoutMillis.milliseconds

  private[this] val logkafkaAdminUtils = new LogkafkaAdminUtils(logkafkaCommandActorConfig.clusterContext.config.version)

  @scala.throws[Exception](classOf[Exception])
  override def preStart() = {
    log.info("Started actor %s".format(self.path))
  }

  @scala.throws[Exception](classOf[Exception])
  override def preRestart(reason: Throwable, message: Option[Any]) {
    log.error(reason, "Restarting due to [{}] when processing [{}]",
      reason.getMessage, message.getOrElse(""))
    super.preRestart(reason, message)
  }

  @scala.throws[Exception](classOf[Exception])
  override def postStop(): Unit = {
    super.postStop()
  }

  override protected def longRunningPoolConfig: LongRunningPoolConfig = logkafkaCommandActorConfig.longRunningPoolConfig

  override protected def longRunningQueueFull(): Unit = {
    sender ! LKCCommandResult(Try(throw new UnsupportedOperationException("Long running executor blocking queue is full!")))
  }

  override def processActorResponse(response: ActorResponse): Unit = {
    response match {
      case any: Any => log.warning("lkca : processActorResponse : Received unknown message: {}", any)
    }
  }

  override def processCommandRequest(request: CommandRequest): Unit = {
    implicit val ec = longRunningExecutionContext
    request match {
      case LKCDeleteLogkafka(logkafka_id, log_path, logkafkaConfig) =>
        longRunning {
          Future {
            LKCCommandResult(Try {
              logkafkaAdminUtils.deleteLogkafka(logkafkaCommandActorConfig.curator, logkafka_id, log_path, logkafkaConfig)
            })
          }
        }
      case LKCCreateLogkafka(logkafka_id, log_path, config, logkafkaConfig) =>
        longRunning {
          Future {
            LKCCommandResult(Try {
              logkafkaAdminUtils.createLogkafka(logkafkaCommandActorConfig.curator, logkafka_id, log_path, config, logkafkaConfig)
            })
          }
        }
      case LKCUpdateLogkafkaConfig(logkafka_id, log_path, config, logkafkaConfig, checkConfig) =>
        longRunning {
          Future {
            LKCCommandResult(Try {
              logkafkaAdminUtils.changeLogkafkaConfig(logkafkaCommandActorConfig.curator, logkafka_id, log_path, config, logkafkaConfig, checkConfig)
            })
          }
        }
      case any: Any => log.warning("lkca : processCommandRequest : Received unknown message: {}", any)
    }
  }
} 
Example 40
Source File: KafkaServerInTest.scala    From CMAK   with Apache License 2.0 5 votes vote down vote up
package kafka.manager.utils

import kafka.manager.model.CuratorConfig
import org.apache.curator.framework.{CuratorFrameworkFactory, CuratorFramework}
import org.apache.curator.retry.BoundedExponentialBackoffRetry
import org.scalatest.{BeforeAndAfterAll, FunSuite}


trait KafkaServerInTest extends FunSuite with BeforeAndAfterAll {
  val kafkaServerZkPath : String

  lazy val sharedCurator: CuratorFramework = {
    val config = CuratorConfig(kafkaServerZkPath)
    val curator: CuratorFramework = CuratorFrameworkFactory.newClient(
      config.zkConnect,
      new BoundedExponentialBackoffRetry(config.baseSleepTimeMs, config.maxSleepTimeMs, config.zkMaxRetry))
    curator
  }

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    sharedCurator.start()
  }

  override protected def afterAll(): Unit = {
    sharedCurator.close()
    super.afterAll()
  }
} 
Example 41
Source File: CuratorAwareTest.scala    From CMAK   with Apache License 2.0 5 votes vote down vote up
package kafka.manager.utils

import org.apache.curator.framework.{CuratorFrameworkFactory, CuratorFramework}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.curator.test.TestingServer
import org.scalatest.{BeforeAndAfterAll, FunSuite}

import scala.reflect.ClassTag


trait CuratorAwareTest extends FunSuite with BeforeAndAfterAll with ZookeeperServerAwareTest {

  private[this] var curator: Option[CuratorFramework] = None

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    val retryPolicy = new ExponentialBackoffRetry(1000, 3)
    val curatorFramework = CuratorFrameworkFactory.newClient(testServer.getConnectString, retryPolicy)
    curatorFramework.start
    curator = Some(curatorFramework)
  }

  override protected def afterAll(): Unit = {
    curator.foreach(_.close())
    super.afterAll()
  }

  protected def withCurator(fn: CuratorFramework => Unit): Unit = {
    curator.foreach(fn)
  }

  protected def produceWithCurator[T](fn: CuratorFramework => T) : T = {
    require(curator.isDefined,"Cannot produce with no curator defined!")
    fn(curator.get)
  }

  protected def checkError[T](fn: => Any)(implicit tag: ClassTag[T]): Unit = {
    try {
      fn
      throw new RuntimeException(s"expected ${tag.runtimeClass} , but no exceptions were thrown!")
    } catch {
      case UtilException(caught) =>
        if(!tag.runtimeClass.isAssignableFrom(caught.getClass)) {
          throw new RuntimeException(s"expected ${tag.runtimeClass} , found ${caught.getClass}, value=$caught")
        }
      case throwable: Throwable =>
        throw new RuntimeException(s"expected ${tag.runtimeClass} , found ${throwable.getClass}", throwable)
    }
  }

} 
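The trait above relies on Curator's TestingServer for an in-process ZooKeeper. A self-contained sketch (illustrative, outside any test framework) of the same setup:

import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.curator.test.TestingServer

object TestingServerDemo extends App {
  val server = new TestingServer() // embedded ZooKeeper on a random free port
  val curator = CuratorFrameworkFactory.newClient(
    server.getConnectString, new ExponentialBackoffRetry(1000, 3))
  curator.start()
  try {
    curator.create().creatingParentsIfNeeded().forPath("/demo/node")
    println(curator.getChildren.forPath("/demo")) // prints [node]
  } finally {
    curator.close()
    server.close()
  }
}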
Example 42
Source File: KafkaTestBroker.scala    From CMAK   with Apache License 2.0 5 votes vote down vote up
package kafka.test

import java.io.File
import java.util.Properties

import com.google.common.io.Files
import kafka.server.{KafkaConfig, KafkaServerStartable}
import org.apache.curator.framework.CuratorFramework
import org.apache.curator.test.InstanceSpec

import scala.util.Try


class KafkaTestBroker(zookeeper: CuratorFramework, zookeeperConnectionString: String) {
  val AdminPath = "/admin"
  val BrokersPath = "/brokers"
  val ClusterPath = "/cluster"
  val ConfigPath = "/config"
  val ControllerPath = "/controller"
  val ControllerEpochPath = "/controller_epoch"
  val IsrChangeNotificationPath = "/isr_change_notification"
  val LogDirEventNotificationPath = "/log_dir_event_notification"
  val KafkaAclPath = "/kafka-acl"
  val KafkaAclChangesPath = "/kafka-acl-changes"

  val ConsumersPath = "/consumers"
  val ClusterIdPath = s"$ClusterPath/id"
  val BrokerIdsPath = s"$BrokersPath/ids"
  val BrokerTopicsPath = s"$BrokersPath/topics"
  val ReassignPartitionsPath = s"$AdminPath/reassign_partitions"
  val DeleteTopicsPath = s"$AdminPath/delete_topics"
  val PreferredReplicaLeaderElectionPath = s"$AdminPath/preferred_replica_election"
  val BrokerSequenceIdPath = s"$BrokersPath/seqid"
  val ConfigChangesPath = s"$ConfigPath/changes"
  val ConfigUsersPath = s"$ConfigPath/users"
  val ConfigBrokersPath = s"$ConfigPath/brokers"
  val ProducerIdBlockPath = "/latest_producer_id_block"

  private[this] val port: Int = InstanceSpec.getRandomPort
  private[this] val config: KafkaConfig = buildKafkaConfig(zookeeperConnectionString)
  private[this] val kafkaServerStartable: KafkaServerStartable = new KafkaServerStartable(config)
  kafkaServerStartable.startup()

  //wait until broker shows up in zookeeper
  var count = 0
  while(count < 10 && zookeeper.checkExists().forPath(BrokerIdsPath + "/0") == null) {
    count += 1
    println("Waiting for broker ...")
    println(Option(zookeeper.getData.forPath(BrokerIdsPath + "/0")).map(kafka.manager.asString))
    Thread.sleep(1000)
  }

  private def buildKafkaConfig(zookeeperConnectionString: String): KafkaConfig = {
    val p: Properties = new Properties
    p.setProperty("zookeeper.connect", zookeeperConnectionString)
    p.setProperty("broker.id", "0")
    p.setProperty("port", "" + port)
    p.setProperty("log.dirs", getLogDir)
    p.setProperty("log.retention.hours", "1")
    p.setProperty("offsets.topic.replication.factor", "1")
    p.setProperty("delete.topic.enable", "true")
    new KafkaConfig(p)
  }

  private def getLogDir: String = {
    val logDir: File = Files.createTempDir
    logDir.deleteOnExit()
    logDir.getAbsolutePath
  }

  def getBrokerConnectionString: String = s"localhost:$port"

  def getPort: Int = port

  def shutdown() {
    Try(kafkaServerStartable.shutdown())
  }
} 
Example 43
Source File: RequestUtils.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.core.utils

import akka.event.slf4j.SLF4JLogging
import com.stratio.sparta.serving.core.constants.AppConstant
import com.stratio.sparta.serving.core.curator.CuratorFactoryHolder
import com.stratio.sparta.serving.core.exception.ServingCoreException
import com.stratio.sparta.serving.core.models.submit.SubmitRequest
import com.stratio.sparta.serving.core.models.{ErrorModel, SpartaSerializer}
import org.apache.curator.framework.CuratorFramework
import org.json4s.jackson.Serialization._

import scala.collection.JavaConversions
import scala.util.Try

trait RequestUtils extends SpartaSerializer with SLF4JLogging {

  val curatorFramework: CuratorFramework

  def createRequest(request: SubmitRequest): Try[SubmitRequest] = {
    val requestPath = s"${AppConstant.ExecutionsPath}/${request.id}"
    if (CuratorFactoryHolder.existsPath(requestPath)) {
      updateRequest(request)
    } else {
      Try {
        log.info(s"Creating execution with id ${request.id}")
        curatorFramework.create.creatingParentsIfNeeded.forPath(requestPath, write(request).getBytes)
        request
      }
    }
  }

  def updateRequest(request: SubmitRequest): Try[SubmitRequest] = {
    Try {
      val requestPath = s"${AppConstant.ExecutionsPath}/${request.id}"
      if (CuratorFactoryHolder.existsPath(requestPath)) {
        curatorFramework.setData().forPath(requestPath, write(request).getBytes)
        request
      } else createRequest(request).getOrElse(throw new ServingCoreException(
        ErrorModel.toString(new ErrorModel(ErrorModel.CodeNotExistsPolicyWithId,
          s"Is not possible to create execution with id ${request.id}."))))
    }
  }

  def findAllRequests(): Try[Seq[SubmitRequest]] =
    Try {
      val requestPath = s"${AppConstant.ExecutionsPath}"
      if (CuratorFactoryHolder.existsPath(requestPath)) {
        val children = curatorFramework.getChildren.forPath(requestPath)
        val policiesRequest = JavaConversions.asScalaBuffer(children).toList.map(element =>
          read[SubmitRequest](new String(curatorFramework.getData.forPath(s"${AppConstant.ExecutionsPath}/$element")))
        )
        policiesRequest
      } else Seq.empty[SubmitRequest]
    }

  def findRequestById(id: String): Try[SubmitRequest] =
    Try {
      val requestPath = s"${AppConstant.ExecutionsPath}/$id"
      if (CuratorFactoryHolder.existsPath(requestPath))
        read[SubmitRequest](new String(curatorFramework.getData.forPath(requestPath)))
      else throw new ServingCoreException(
        ErrorModel.toString(new ErrorModel(ErrorModel.CodeNotExistsPolicyWithId, s"No execution context with id $id")))
    }

  def deleteAllRequests(): Try[_] =
    Try {
      val requestPath = s"${AppConstant.ExecutionsPath}"
      if (CuratorFactoryHolder.existsPath(requestPath)) {
        val children = curatorFramework.getChildren.forPath(requestPath)
        val policiesRequest = JavaConversions.asScalaBuffer(children).toList.map(element =>
          read[SubmitRequest](new String(curatorFramework.getData.forPath(s"${AppConstant.ExecutionsPath}/$element")))
        )

        policiesRequest.foreach(request => deleteRequest(request.id))
      }
    }

  def deleteRequest(id: String): Try[_] =
    Try {
      val requestPath = s"${AppConstant.ExecutionsPath}/$id"
      if (CuratorFactoryHolder.existsPath(requestPath)) {
        log.info(s"Deleting execution with id $id")
        curatorFramework.delete().forPath(requestPath)
      } else throw new ServingCoreException(ErrorModel.toString(
        new ErrorModel(ErrorModel.CodeNotExistsPolicyWithId, s"No execution with id $id")))
    }
} 
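createRequest and updateRequest together implement an upsert on a znode. A standalone sketch of that check-then-write pattern (names and paths are illustrative, not from sparta):

import org.apache.curator.framework.CuratorFramework

object ZkUpsertDemo {
  // Create the node (with parents) when absent, otherwise overwrite its data.
  def upsert(zk: CuratorFramework, path: String, data: Array[Byte]): Unit = {
    if (zk.checkExists().forPath(path) == null) {
      zk.create().creatingParentsIfNeeded().forPath(path, data)
    } else {
      zk.setData().forPath(path, data)
    }
  }
}

Note that the check-then-write is not atomic: a concurrent creator can still win the race, so defensive code often also catches KeeperException.NodeExistsException on the create branch.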
Example 44
Source File: ClusterCheckerService.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.core.services

import akka.actor.{ActorContext, ActorRef}
import com.stratio.sparta.serving.core.models.enumerators.PolicyStatusEnum._
import com.stratio.sparta.serving.core.models.policy.{PolicyModel, PolicyStatusModel}
import com.stratio.sparta.serving.core.utils.PolicyStatusUtils
import org.apache.curator.framework.CuratorFramework

import scala.util.{Failure, Success}

class ClusterCheckerService(val curatorFramework: CuratorFramework) extends PolicyStatusUtils {

  def checkPolicyStatus(policy: PolicyModel, launcherActor: ActorRef, akkaContext: ActorContext): Unit = {
    findStatusById(policy.id.get) match {
      case Success(policyStatus) =>
        if (policyStatus.status == Launched || policyStatus.status == Starting || policyStatus.status == Uploaded ||
          policyStatus.status == Stopping || policyStatus.status == NotStarted) {
          val information = s"The checker detects that the policy not start/stop correctly"
          log.error(information)
          updateStatus(PolicyStatusModel(id = policy.id.get, status = Failed, statusInfo = Some(information)))
          akkaContext.stop(launcherActor)
        } else {
          val information = s"The checker detects that the policy start/stop correctly"
          log.info(information)
          updateStatus(PolicyStatusModel(id = policy.id.get, status = NotDefined, statusInfo = Some(information)))
        }
      case Failure(exception) =>
        log.error(s"Error when extract policy status in scheduler task.", exception)
    }
  }
} 
Example 45
Source File: SpartaWorkflow.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.driver

import com.stratio.sparta.driver.factory.SparkContextFactory._
import com.stratio.sparta.driver.schema.SchemaHelper
import com.stratio.sparta.driver.stage._
import com.stratio.sparta.sdk.pipeline.input.Input
import com.stratio.sparta.sdk.utils.AggregationTime
import com.stratio.sparta.serving.core.helpers.PolicyHelper
import com.stratio.sparta.serving.core.models.policy._
import com.stratio.sparta.serving.core.utils.CheckpointUtils
import org.apache.curator.framework.CuratorFramework
import org.apache.spark.streaming.{Duration, StreamingContext}

class SpartaWorkflow(val policy: PolicyModel, val curatorFramework: CuratorFramework) extends CheckpointUtils
  with InputStage with OutputStage with ParserStage with CubeStage with RawDataStage with TriggerStage
  with ZooKeeperError {

  clearError()

  private val ReflectionUtils = PolicyHelper.ReflectionUtils
  private val outputs = outputStage(ReflectionUtils)
  private var input: Option[Input] = None

  def setup(): Unit = {
    input.foreach(input => input.setUp())
    outputs.foreach(output => output.setUp())
  }

  def cleanUp(): Unit = {
    input.foreach(input => input.cleanUp())
    outputs.foreach(output => output.cleanUp())
  }

  def streamingStages(): StreamingContext = {
    clearError()

    val checkpointPolicyPath = checkpointPath(policy)
    val window = AggregationTime.parseValueToMilliSeconds(policy.sparkStreamingWindow)
    val ssc = sparkStreamingInstance(Duration(window), checkpointPolicyPath, policy.remember)
    if(input.isEmpty)
      input = Option(createInput(ssc.get, ReflectionUtils))
    val inputDStream = inputStreamStage(ssc.get, input.get)

    saveRawData(policy.rawData, inputDStream, outputs)

    policy.transformations.foreach { transformationsModel =>
      val parserSchemas = SchemaHelper.getSchemasFromTransformations(
        transformationsModel.transformationsPipe, Input.InitSchema)
      val (parsers, writerOptions) = parserStage(ReflectionUtils, parserSchemas)
      val parsedData = ParserStage.applyParsers(
        inputDStream, parsers, parserSchemas.values.last, outputs, writerOptions)

      triggersStreamStage(parserSchemas.values.last, parsedData, outputs, window)
      cubesStreamStage(ReflectionUtils, parserSchemas.values.last, parsedData, outputs)
    }

    ssc.get
  }
}

object SpartaWorkflow {

  def apply(policy: PolicyModel, curatorFramework: CuratorFramework): SpartaWorkflow =
    new SpartaWorkflow(policy, curatorFramework)
} 
Example 46
Source File: MesosClusterPersistenceEngine.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.scheduler.cluster.mesos

import scala.collection.JavaConverters._

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode
import org.apache.zookeeper.KeeperException.NoNodeException

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils


private[spark] class ZookeeperMesosClusterPersistenceEngine(
    baseDir: String,
    zk: CuratorFramework,
    conf: SparkConf)
  extends MesosClusterPersistenceEngine with Logging {
  private val WORKING_DIR =
    conf.get("spark.deploy.zookeeper.dir", "/spark_mesos_dispatcher") + "/" + baseDir

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)

  def path(name: String): String = {
    WORKING_DIR + "/" + name
  }

  override def expunge(name: String): Unit = {
    zk.delete().forPath(path(name))
  }

  override def persist(name: String, obj: Object): Unit = {
    val serialized = Utils.serialize(obj)
    val zkPath = path(name)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(zkPath, serialized)
  }

  override def fetch[T](name: String): Option[T] = {
    val zkPath = path(name)

    try {
      val fileData = zk.getData().forPath(zkPath)
      Some(Utils.deserialize[T](fileData))
    } catch {
      case e: NoNodeException => None
      case e: Exception =>
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(zkPath)
        None
    }
  }

  override def fetchAll[T](): Iterable[T] = {
    zk.getChildren.forPath(WORKING_DIR).asScala.flatMap(fetch[T])
  }
} 
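This engine delegates serialization to Spark's Utils.serialize/Utils.deserialize helpers. A stand-in sketch of what those helpers do (an assumption based on plain java.io object serialization, which Spark's helpers wrap):

import java.io._

object JavaSerDemo {
  // Stand-in for org.apache.spark.util.Utils.serialize / deserialize:
  // plain Java serialization to and from a byte array.
  def serialize(o: AnyRef): Array[Byte] = {
    val bos = new ByteArrayOutputStream()
    val oos = new ObjectOutputStream(bos)
    try oos.writeObject(o) finally oos.close()
    bos.toByteArray
  }

  def deserialize[T](bytes: Array[Byte]): T = {
    val ois = new ObjectInputStream(new ByteArrayInputStream(bytes))
    try ois.readObject().asInstanceOf[T] finally ois.close()
  }
}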
Example 47
Source File: BaseStage.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.driver.stage

import akka.event.slf4j.SLF4JLogging
import com.stratio.sparta.driver.utils.StageUtils
import com.stratio.sparta.serving.core.models.enumerators.PolicyStatusEnum.NotDefined
import com.stratio.sparta.serving.core.models.policy.{PhaseEnum, PolicyErrorModel, PolicyModel, PolicyStatusModel}
import com.stratio.sparta.serving.core.utils.PolicyStatusUtils
import org.apache.curator.framework.CuratorFramework

import scala.util.{Failure, Success, Try}


trait ErrorPersistor {
  def persistError(error: PolicyErrorModel): Unit
}

trait ZooKeeperError extends ErrorPersistor with PolicyStatusUtils {

  val curatorFramework: CuratorFramework

  def policy: PolicyModel

  def persistError(error: PolicyErrorModel): Unit =
    updateStatus(PolicyStatusModel(policy.id.get, NotDefined, None, None, lastError = Some(error)))

  def clearError(): Unit =
    clearLastError(policy.id.get)
}

trait LogError extends ErrorPersistor with SLF4JLogging {
  def persistError(error: PolicyErrorModel): Unit = log.error(s"This error was not saved to ZK : $error")
}

trait BaseStage extends SLF4JLogging with StageUtils {
  this: ErrorPersistor =>
  def policy: PolicyModel

  def generalTransformation[T](code: PhaseEnum.Value, okMessage: String, errorMessage: String)
                              (f: => T): T = {
    Try(f) match {
      case Success(result) =>
        log.info(okMessage)
        result
      case Failure(ex) => throw logAndCreateEx(code, ex, policy, errorMessage)
    }
  }

  def logAndCreateEx(code: PhaseEnum.Value,
                     ex: Throwable,
                     policy: PolicyModel,
                     message: String
                    ): IllegalArgumentException = {
    val originalMsg = ex.getCause match {
      case _: ClassNotFoundException => "The component couldn't be found in classpath. Please check the type."
      case exception: Throwable => exception.toString
      case _ => ex.toString
    }
    val policyError = PolicyErrorModel(message, code, originalMsg)
    log.error("An error was detected : {}", policyError)
    Try {
      persistError(policyError)
    } recover {
      case e => log.error(s"Error while persisting error: $policyError", e)
    }
    new IllegalArgumentException(message, ex)
  }

} 
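generalTransformation wraps each stage in a Try so a failure is logged and rethrown as a typed exception. A minimal standalone sketch of that shape (illustrative, not from sparta):

import scala.util.{Failure, Success, Try}

object StageWrapDemo extends App {
  // Same shape as generalTransformation: run a block, log on success,
  // wrap any failure in a typed exception for the caller.
  def stage[T](okMessage: String)(f: => T): T = Try(f) match {
    case Success(result) =>
      println(okMessage)
      result
    case Failure(ex) =>
      throw new IllegalArgumentException(s"stage failed: ${ex.getMessage}", ex)
  }

  val n = stage("parsed ok")("42".toInt)
  println(n)
}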
Example 48
Source File: LauncherActor.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.api.actor

import akka.actor.SupervisorStrategy.Escalate
import akka.actor._
import com.stratio.sparta.driver.service.StreamingContextService
import com.stratio.sparta.serving.api.utils.LauncherActorUtils
import com.stratio.sparta.serving.core.actor.LauncherActor.Launch
import com.stratio.sparta.serving.core.exception.ServingCoreException
import com.stratio.sparta.serving.core.models.policy.PolicyModel
import com.stratio.sparta.serving.core.utils.PolicyUtils
import org.apache.curator.framework.CuratorFramework

import scala.util.Try

class LauncherActor(val streamingContextService: StreamingContextService, val curatorFramework: CuratorFramework)
  extends Actor with LauncherActorUtils with PolicyUtils {

  override val supervisorStrategy: OneForOneStrategy =
    OneForOneStrategy() {
      case _: ServingCoreException => Escalate
      case t =>
        super.supervisorStrategy.decider.applyOrElse(t, (_: Any) => Escalate)
    }

  override def receive: Receive = {
    case Launch(policy) => sender ! create(policy)
    case _ => log.info("Unrecognized message in Launcher Actor")
  }

  def create(policy: PolicyModel): Try[PolicyModel] =
    Try {
      if (policy.id.isEmpty) createPolicy(policy)
      launch(policy, context)
    }
} 
Example 49
Source File: MarathonLauncherActor.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.api.actor

import akka.actor.{Actor, Cancellable, PoisonPill}
import com.stratio.sparta.serving.core.marathon.MarathonService
import com.stratio.sparta.serving.core.actor.LauncherActor.Start
import com.stratio.sparta.serving.core.actor.StatusActor.ResponseStatus
import com.stratio.sparta.serving.core.config.SpartaConfig
import com.stratio.sparta.serving.core.constants.AppConstant._
import com.stratio.sparta.serving.core.models.enumerators.PolicyStatusEnum._
import com.stratio.sparta.serving.core.models.policy.{PhaseEnum, PolicyErrorModel, PolicyModel, PolicyStatusModel}
import com.stratio.sparta.serving.core.models.submit.SubmitRequest
import com.stratio.sparta.serving.core.services.ClusterCheckerService
import com.stratio.sparta.serving.core.utils._
import org.apache.curator.framework.CuratorFramework

import scala.util.{Failure, Success, Try}

class MarathonLauncherActor(val curatorFramework: CuratorFramework) extends Actor
  with LauncherUtils with SchedulerUtils with SparkSubmitUtils with ClusterListenerUtils with ArgumentsUtils
  with PolicyStatusUtils with RequestUtils {

  private val clusterCheckerService = new ClusterCheckerService(curatorFramework)
  private val checkersPolicyStatus = scala.collection.mutable.ArrayBuffer.empty[Cancellable]

  override def receive: PartialFunction[Any, Unit] = {
    case Start(policy: PolicyModel) => initializeSubmitRequest(policy)
    case ResponseStatus(status) => loggingResponsePolicyStatus(status)
    case _ => log.info("Unrecognized message in Marathon Launcher Actor")
  }

  override def postStop(): Unit = checkersPolicyStatus.foreach(_.cancel())

  def initializeSubmitRequest(policy: PolicyModel): Unit = {
    Try {
      log.info(s"Initializing options for submit Marathon application associated to policy: ${policy.name}")
      val zookeeperConfig = getZookeeperConfig
      val clusterConfig = SpartaConfig.getClusterConfig(Option(ConfigMesos)).get
      val master = clusterConfig.getString(Master).trim
      val driverFile = extractMarathonDriverSubmit(policy, DetailConfig, SpartaConfig.getHdfsConfig)
      val pluginsFiles = pluginsJars(policy)
      val driverArguments =
        extractDriverArguments(policy, driverFile, clusterConfig, zookeeperConfig, ConfigMesos, pluginsFiles)
      val (sparkSubmitArguments, sparkConfigurations) =
        extractSubmitArgumentsAndSparkConf(policy, clusterConfig, pluginsFiles)
      val submitRequest = SubmitRequest(policy.id.get, SpartaDriverClass, driverFile, master, sparkSubmitArguments,
        sparkConfigurations, driverArguments, ConfigMesos, killUrl(clusterConfig))
      val detailExecMode = getDetailExecutionMode(policy, clusterConfig)

      createRequest(submitRequest).getOrElse(throw new Exception("Impossible to create submit request in persistence"))

      (new MarathonService(context, curatorFramework, policy, submitRequest), detailExecMode)
    } match {
      case Failure(exception) =>
        val information = s"Error when initializing Sparta Marathon App options"
        log.error(information, exception)
        updateStatus(PolicyStatusModel(id = policy.id.get, status = Failed, statusInfo = Option(information),
          lastError = Option(PolicyErrorModel(information, PhaseEnum.Execution, exception.toString))
        ))
        self ! PoisonPill
      case Success((marathonApp, detailExecMode)) =>
        val information = "Sparta Marathon App configurations initialized correctly"
        log.info(information)
        updateStatus(PolicyStatusModel(id = policy.id.get, status = NotStarted,
          statusInfo = Option(information), lastExecutionMode = Option(detailExecMode)))
        marathonApp.launch(detailExecMode)
        addMarathonContextListener(policy.id.get, policy.name, context, Option(self))
        checkersPolicyStatus += scheduleOneTask(AwaitPolicyChangeStatus, DefaultAwaitPolicyChangeStatus)(
          clusterCheckerService.checkPolicyStatus(policy, self, context))
    }
  }
} 
Example 50
Source File: LocalLauncherActor.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.api.actor

import akka.actor.{Actor, PoisonPill}
import com.stratio.sparta.driver.factory.SparkContextFactory
import com.stratio.sparta.driver.service.StreamingContextService
import com.stratio.sparta.serving.core.actor.LauncherActor.Start
import com.stratio.sparta.serving.core.actor.StatusActor.ResponseStatus
import com.stratio.sparta.serving.core.constants.AppConstant
import com.stratio.sparta.serving.core.helpers.{JarsHelper, PolicyHelper, ResourceManagerLinkHelper}
import com.stratio.sparta.serving.core.models.enumerators.PolicyStatusEnum
import com.stratio.sparta.serving.core.models.policy.{PhaseEnum, PolicyErrorModel, PolicyModel, PolicyStatusModel}
import com.stratio.sparta.serving.core.utils.{LauncherUtils, PolicyConfigUtils, PolicyStatusUtils}
import org.apache.curator.framework.CuratorFramework
import org.apache.spark.streaming.StreamingContext

import scala.util.{Failure, Success, Try}

class LocalLauncherActor(streamingContextService: StreamingContextService, val curatorFramework: CuratorFramework)
  extends Actor with PolicyConfigUtils with LauncherUtils with PolicyStatusUtils {

  override def receive: PartialFunction[Any, Unit] = {
    case Start(policy: PolicyModel) => doInitSpartaContext(policy)
    case ResponseStatus(status) => loggingResponsePolicyStatus(status)
    case _ => log.info("Unrecognized message in Local Launcher Actor")
  }

  private def doInitSpartaContext(policy: PolicyModel): Unit = {
    val jars = PolicyHelper.jarsFromPolicy(policy)

    jars.foreach(file => JarsHelper.addToClasspath(file))
    Try {
      val startingInfo = s"Starting Sparta local job for policy"
      log.info(startingInfo)
      updateStatus(PolicyStatusModel(
        id = policy.id.get,
        status = PolicyStatusEnum.NotStarted,
        statusInfo = Some(startingInfo),
        lastExecutionMode = Option(AppConstant.LocalValue)
      ))
      val (spartaWorkflow, ssc) = streamingContextService.localStreamingContext(policy, jars)
      spartaWorkflow.setup()
      ssc.start()
      val startedInformation = s"The Sparta local job was started correctly"
      log.info(startedInformation)
      updateStatus(PolicyStatusModel(
        id = policy.id.get,
        status = PolicyStatusEnum.Started,
        statusInfo = Some(startedInformation),
        resourceManagerUrl = ResourceManagerLinkHelper.getLink(executionMode(policy), policy.monitoringLink)
      ))
      ssc.awaitTermination()
      spartaWorkflow.cleanUp()
    } match {
      case Success(_) =>
        val information = s"Stopped correctly Sparta local job"
        log.info(information)
        updateStatus(PolicyStatusModel(
          id = policy.id.get, status = PolicyStatusEnum.Stopped, statusInfo = Some(information)))
        self ! PoisonPill
      case Failure(exception) =>
        val information = s"Error initiating Sparta local job"
        log.error(information, exception)
        updateStatus(PolicyStatusModel(
          id = policy.id.get,
          status = PolicyStatusEnum.Failed,
          statusInfo = Option(information),
          lastError = Option(PolicyErrorModel(information, PhaseEnum.Execution, exception.toString))
        ))
        SparkContextFactory.destroySparkContext()
        self ! PoisonPill
    }
  }
} 
Example 51
Source File: AppStatusHttpService.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.api.service.http

import com.stratio.sparta.serving.api.constants.HttpConstant
import com.stratio.sparta.serving.core.exception.ServingCoreException
import com.stratio.sparta.serving.core.models.ErrorModel
import com.stratio.sparta.serving.core.models.dto.LoggedUser
import com.wordnik.swagger.annotations._
import org.apache.curator.framework.CuratorFramework
import spray.routing._

@Api(value = HttpConstant.AppStatus, description = "Operations about Sparta status.")
trait AppStatusHttpService extends BaseHttpService {

  override def routes(user: Option[LoggedUser] = None): Route = checkStatus

  val curatorInstance : CuratorFramework

  @ApiOperation(value = "Check Sparta status depends to Zookeeper connexion",
    notes = "Returns Sparta status",
    httpMethod = "GET",
    response = classOf[String],
    responseContainer = "List")
  @ApiResponses(
    Array(new ApiResponse(code = HttpConstant.NotFound,
      message = HttpConstant.NotFoundMessage)))
  def checkStatus: Route = {
    path(HttpConstant.AppStatus) {
      get {
        complete {
          if (!curatorInstance.getZookeeperClient.getZooKeeper.getState.isConnected)
            throw new ServingCoreException(ErrorModel.toString(
              new ErrorModel(ErrorModel.CodeUnknown, s"Zk isn't connected at" +
                s" ${curatorInstance.getZookeeperClient.getCurrentConnectionString}.")
            ))
          else "OK"
        }
      }
    }
  }
} 
Example 52
Source File: ControllerActorTest.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.api.actor

import akka.actor.{ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit}
import com.stratio.sparta.driver.service.StreamingContextService
import com.stratio.sparta.serving.core.actor.{RequestActor, FragmentActor, StatusActor}
import com.stratio.sparta.serving.core.config.SpartaConfig
import com.stratio.sparta.serving.core.constants.AkkaConstant
import org.apache.curator.framework.CuratorFramework
import org.junit.runner.RunWith
import org.scalamock.scalatest.MockFactory
import org.scalatest._
import org.scalatest.junit.JUnitRunner

@RunWith(classOf[JUnitRunner])
class ControllerActorTest(_system: ActorSystem) extends TestKit(_system)
  with ImplicitSender
  with WordSpecLike
  with Matchers
  with BeforeAndAfterAll
  with MockFactory {

  SpartaConfig.initMainConfig()
  SpartaConfig.initApiConfig()

  val curatorFramework = mock[CuratorFramework]
  val statusActor = _system.actorOf(Props(new StatusActor(curatorFramework)))
  val executionActor = _system.actorOf(Props(new RequestActor(curatorFramework)))
  val streamingContextService = new StreamingContextService(curatorFramework)
  val fragmentActor = _system.actorOf(Props(new FragmentActor(curatorFramework)))
  val policyActor = _system.actorOf(Props(new PolicyActor(curatorFramework, statusActor)))
  val sparkStreamingContextActor = _system.actorOf(
    Props(new LauncherActor(streamingContextService, curatorFramework)))
  val pluginActor = _system.actorOf(Props(new PluginActor()))
  val configActor = _system.actorOf(Props(new ConfigActor()))

  def this() =
    this(ActorSystem("ControllerActorSpec", SpartaConfig.daemonicAkkaConfig))

  implicit val actors = Map(
    AkkaConstant.StatusActorName -> statusActor,
    AkkaConstant.FragmentActorName -> fragmentActor,
    AkkaConstant.PolicyActorName -> policyActor,
    AkkaConstant.LauncherActorName -> sparkStreamingContextActor,
    AkkaConstant.PluginActorName -> pluginActor,
    AkkaConstant.ExecutionActorName -> executionActor,
    AkkaConstant.ConfigActorName -> configActor
  )

  override def afterAll {
    TestKit.shutdownActorSystem(system)
  }

  "ControllerActor" should {
    "set up the controller actor that contains all sparta's routes without any error" in {
      _system.actorOf(Props(new ControllerActor(actors, curatorFramework)))
    }
  }
} 
Example 53
Source File: AppStatusHttpServiceTest.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.api.service.http

import akka.actor.ActorRef
import com.stratio.sparta.serving.api.constants.HttpConstant
import org.apache.curator.framework.CuratorFramework
import org.junit.runner.RunWith
import org.scalamock.scalatest.MockFactory
import org.scalatest.WordSpec
import org.scalatest.junit.JUnitRunner
import spray.http.StatusCodes

@RunWith(classOf[JUnitRunner])
class AppStatusHttpServiceTest extends WordSpec
                              with AppStatusHttpService
                              with HttpServiceBaseTest
                              with MockFactory {

  override implicit val actors: Map[String, ActorRef] = Map()
  override val supervisor: ActorRef = testProbe.ref
  override val curatorInstance = mock[CuratorFramework]

  "AppStatusHttpService" should {
    "check the status of the server" in {
      Get(s"/${HttpConstant.AppStatus}") ~> routes() ~> check {
        status should be (StatusCodes.InternalServerError)
      }
    }
  }
} 
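Against a real client rather than a mock, a health check of this shape can ask Curator whether the underlying ZooKeeper connection is alive. A minimal sketch — the AppStatusCheck helper is hypothetical, while getZookeeperClient.isConnected and getCurrentConnectionString are standard Curator APIs:

import org.apache.curator.framework.CuratorFramework

object AppStatusCheck {
  def status(curator: CuratorFramework): String =
    if (curator.getZookeeperClient.isConnected) "OK"
    else s"Lost connection to ZooKeeper at ${curator.getZookeeperClient.getCurrentConnectionString}."
}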
Example 54
Source File: ZooKeeperManager.scala    From incubator-livy   with Apache License 2.0
package org.apache.livy.server.recovery

import scala.collection.JavaConverters._
import scala.reflect.ClassTag

import org.apache.curator.framework.api.UnhandledErrorListener
import org.apache.curator.framework.CuratorFramework
import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.retry.RetryNTimes
import org.apache.zookeeper.KeeperException.NoNodeException

import org.apache.livy.LivyConf
import org.apache.livy.Logging
import org.apache.livy.utils.LivyUncaughtException

class ZooKeeperManager(
    livyConf: LivyConf,
    mockCuratorClient: Option[CuratorFramework] = None)
  extends JsonMapper with Logging {

  def this(livyConf: LivyConf) {
    this(livyConf, None)
  }

  private val zkAddress = Option(livyConf.get(LivyConf.ZOOKEEPER_URL)).
    orElse(Option(livyConf.get(LivyConf.RECOVERY_STATE_STORE_URL))).
    map(_.trim).orNull

  require(zkAddress != null && !zkAddress.isEmpty,
    s"Please configure ${LivyConf.ZOOKEEPER_URL.key}.")

  private val retryValue = Option(livyConf.get(LivyConf.ZK_RETRY_POLICY)).
    orElse(Option(livyConf.get(LivyConf.RECOVERY_ZK_STATE_STORE_RETRY_POLICY))).
    map(_.trim).orNull

  require(retryValue != null && !retryValue.isEmpty,
    s"Please configure ${LivyConf.ZK_RETRY_POLICY.key}.")

  // a regex to match patterns like "m, n" where m and n both are integer values
  private val retryPattern = """\s*(\d+)\s*,\s*(\d+)\s*""".r
  private[recovery] val retryPolicy = retryValue match {
    case retryPattern(n, sleepMs) => new RetryNTimes(n.toInt, sleepMs.toInt)
    case _ => throw new IllegalArgumentException(
      s"${LivyConf.ZK_RETRY_POLICY.key} contains bad value: $retryValue. " +
        "Correct format is <max retry count>,<sleep ms between retry>, e.g. 5,100")
  }

  private val curatorClient = mockCuratorClient.getOrElse {
    CuratorFrameworkFactory.newClient(zkAddress, retryPolicy)
  }

  curatorClient.getUnhandledErrorListenable().addListener(new UnhandledErrorListener {
    def unhandledError(message: String, e: Throwable): Unit = {
      error(s"Fatal Zookeeper error: ${message}.", e)
      throw new LivyUncaughtException(e.getMessage)
    }
  })

  def start(): Unit = {
    curatorClient.start()
  }

  def stop(): Unit = {
    curatorClient.close()
  }

  // TODO Make sure ZK path has proper secure permissions so that other users cannot read its
  // contents.
  def set(key: String, value: Object): Unit = {
    val data = serializeToBytes(value)
    if (curatorClient.checkExists().forPath(key) == null) {
      curatorClient.create().creatingParentsIfNeeded().forPath(key, data)
    } else {
      curatorClient.setData().forPath(key, data)
    }
  }

  def get[T: ClassTag](key: String): Option[T] = {
    if (curatorClient.checkExists().forPath(key) == null) {
      None
    } else {
      Option(deserialize[T](curatorClient.getData().forPath(key)))
    }
  }

  def getChildren(key: String): Seq[String] = {
    if (curatorClient.checkExists().forPath(key) == null) {
      Seq.empty[String]
    } else {
      curatorClient.getChildren.forPath(key).asScala
    }
  }

  def remove(key: String): Unit = {
    try {
      curatorClient.delete().guaranteed().forPath(key)
    } catch {
      case _: NoNodeException => warn(s"Failed to remove non-existent ZooKeeper node: ${key}")
    }
  }
} 
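The set method above is a create-or-update (upsert) against ZooKeeper. The same logic with a bare Curator client, as a standalone sketch — it assumes a ZooKeeper server at localhost:2181, and the /livy-demo/key path is made up:

import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.retry.RetryNTimes

object ZkUpsertDemo extends App {
  // RetryNTimes(5, 100) mirrors the "5,100" retry format parsed above.
  val client = CuratorFrameworkFactory.newClient("localhost:2181", new RetryNTimes(5, 100))
  client.start()

  val key  = "/livy-demo/key"        // hypothetical path
  val data = "value".getBytes("UTF-8")

  // Same create-or-update logic as ZooKeeperManager.set:
  if (client.checkExists().forPath(key) == null) {
    client.create().creatingParentsIfNeeded().forPath(key, data)
  } else {
    client.setData().forPath(key, data)
  }

  println(new String(client.getData.forPath(key), "UTF-8"))
  client.close()
}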
Example 55
Source File: MesosClusterPersistenceEngine.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark.scheduler.cluster.mesos

import scala.collection.JavaConverters._

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode
import org.apache.zookeeper.KeeperException.NoNodeException

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils


private[spark] class ZookeeperMesosClusterPersistenceEngine(
    baseDir: String,
    zk: CuratorFramework,
    conf: SparkConf)
  extends MesosClusterPersistenceEngine with Logging {
  private val WORKING_DIR =
    conf.get("spark.deploy.zookeeper.dir", "/spark_mesos_dispatcher") + "/" + baseDir

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)

  def path(name: String): String = {
    WORKING_DIR + "/" + name
  }

  override def expunge(name: String): Unit = {
    zk.delete().forPath(path(name))
  }

  override def persist(name: String, obj: Object): Unit = {
    val serialized = Utils.serialize(obj)
    val zkPath = path(name)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(zkPath, serialized)
  }

  override def fetch[T](name: String): Option[T] = {
    val zkPath = path(name)

    try {
      val fileData = zk.getData().forPath(zkPath)
      Some(Utils.deserialize[T](fileData))
    } catch {
      case _: NoNodeException => None
      case e: Exception =>
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(zkPath)
        None
    }
  }

  override def fetchAll[T](): Iterable[T] = {
    zk.getChildren.forPath(WORKING_DIR).asScala.flatMap(fetch[T])
  }
} 
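persist and fetch above delegate serialization to Spark's private Utils helpers; the round trip itself is plain Java serialization written to a ZooKeeper node. A self-contained sketch using java.io serialization directly (assumes ZooKeeper at localhost:2181; the demo path is made up):

import java.io._

import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.CreateMode

object MesosStateDemo extends App {
  def serialize(o: AnyRef): Array[Byte] = {
    val bos = new ByteArrayOutputStream()
    val out = new ObjectOutputStream(bos)
    out.writeObject(o)
    out.close()
    bos.toByteArray
  }

  def deserialize[T](bytes: Array[Byte]): T =
    new ObjectInputStream(new ByteArrayInputStream(bytes)).readObject().asInstanceOf[T]

  val zk = CuratorFrameworkFactory.newClient("localhost:2181",
    new ExponentialBackoffRetry(1000, 3))
  zk.start()

  val path = "/spark_mesos_dispatcher/demo/driver-1"  // hypothetical path
  // The engine calls mkdir separately; creatingParentsIfNeeded folds that in.
  zk.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT)
    .forPath(path, serialize("driver state"))
  println(deserialize[String](zk.getData.forPath(path)))
  zk.close()
}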
Example 56
Source File: SparkCuratorUtil.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark.deploy

import scala.collection.JavaConverters._

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging

private[spark] object SparkCuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: SparkConf,
      zkUrlConf: String = "spark.deploy.zookeeper.url"): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path).asScala) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
} 
Example 57
Source File: ZooKeeperLeaderElectionAgent.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark.deploy.master

import org.apache.curator.framework.CuratorFramework
import org.apache.curator.framework.recipes.leader.{LeaderLatch, LeaderLatchListener}

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.internal.Logging

private[master] class ZooKeeperLeaderElectionAgent(val masterInstance: LeaderElectable,
    conf: SparkConf) extends LeaderLatchListener with LeaderElectionAgent with Logging  {

  val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/leader_election"

  private var zk: CuratorFramework = _
  private var leaderLatch: LeaderLatch = _
  private var status = LeadershipStatus.NOT_LEADER

  start()

  private def start() {
    logInfo("Starting ZooKeeper LeaderElection agent")
    zk = SparkCuratorUtil.newClient(conf)
    leaderLatch = new LeaderLatch(zk, WORKING_DIR)
    leaderLatch.addListener(this)
    leaderLatch.start()
  }

  override def stop() {
    leaderLatch.close()
    zk.close()
  }

  override def isLeader() {
    synchronized {
      // could have lost leadership by now.
      if (!leaderLatch.hasLeadership) {
        return
      }

      logInfo("We have gained leadership")
      updateLeadershipStatus(true)
    }
  }

  override def notLeader() {
    synchronized {
      // could have gained leadership by now.
      if (leaderLatch.hasLeadership) {
        return
      }

      logInfo("We have lost leadership")
      updateLeadershipStatus(false)
    }
  }

  private def updateLeadershipStatus(isLeader: Boolean) {
    if (isLeader && status == LeadershipStatus.NOT_LEADER) {
      status = LeadershipStatus.LEADER
      masterInstance.electedLeader()
    } else if (!isLeader && status == LeadershipStatus.LEADER) {
      status = LeadershipStatus.NOT_LEADER
      masterInstance.revokedLeadership()
    }
  }

  private object LeadershipStatus extends Enumeration {
    type LeadershipStatus = Value
    val LEADER, NOT_LEADER = Value
  }
} 
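Stripped of Spark's Logging and LeaderElectable plumbing, the agent above is a thin wrapper over Curator's LeaderLatch recipe. A standalone sketch of the same lifecycle, assuming ZooKeeper at localhost:2181:

import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.framework.recipes.leader.{LeaderLatch, LeaderLatchListener}
import org.apache.curator.retry.ExponentialBackoffRetry

object LeaderLatchDemo extends App {
  val zk = CuratorFrameworkFactory.newClient("localhost:2181",
    new ExponentialBackoffRetry(1000, 3))
  zk.start()

  val latch = new LeaderLatch(zk, "/spark/leader_election")
  latch.addListener(new LeaderLatchListener {
    // Both callbacks can race with further leadership changes, which is why
    // the agent above re-checks hasLeadership before acting on them.
    override def isLeader(): Unit = println("gained leadership")
    override def notLeader(): Unit = println("lost leadership")
  })
  latch.start()

  Thread.sleep(5000)  // hold (or wait for) leadership briefly, then clean up
  latch.close()
  zk.close()
}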
Example 58
Source File: ZooKeeperPersistenceEngine.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark.deploy.master

import java.nio.ByteBuffer

import scala.collection.JavaConverters._
import scala.reflect.ClassTag

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.internal.Logging
import org.apache.spark.serializer.Serializer


private[master] class ZooKeeperPersistenceEngine(conf: SparkConf, val serializer: Serializer)
  extends PersistenceEngine
  with Logging {

  private val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/master_status"
  private val zk: CuratorFramework = SparkCuratorUtil.newClient(conf)

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)


  override def persist(name: String, obj: Object): Unit = {
    serializeIntoFile(WORKING_DIR + "/" + name, obj)
  }

  override def unpersist(name: String): Unit = {
    zk.delete().forPath(WORKING_DIR + "/" + name)
  }

  override def read[T: ClassTag](prefix: String): Seq[T] = {
    zk.getChildren.forPath(WORKING_DIR).asScala
      .filter(_.startsWith(prefix)).flatMap(deserializeFromFile[T])
  }

  override def close() {
    zk.close()
  }

  private def serializeIntoFile(path: String, value: AnyRef) {
    val serialized = serializer.newInstance().serialize(value)
    val bytes = new Array[Byte](serialized.remaining())
    serialized.get(bytes)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(path, bytes)
  }

  private def deserializeFromFile[T](filename: String)(implicit m: ClassTag[T]): Option[T] = {
    val fileData = zk.getData().forPath(WORKING_DIR + "/" + filename)
    try {
      Some(serializer.newInstance().deserialize[T](ByteBuffer.wrap(fileData)))
    } catch {
      case e: Exception =>
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(WORKING_DIR + "/" + filename)
        None
    }
  }
} 
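serializeIntoFile has to convert the ByteBuffer returned by Spark's serializer into the Array[Byte] that Curator's forPath expects. A sketch of that conversion in isolation, using the public JavaSerializer (the string payload is arbitrary):

import java.nio.ByteBuffer

import org.apache.spark.SparkConf
import org.apache.spark.serializer.JavaSerializer

object ByteBufferRoundTrip extends App {
  val ser = new JavaSerializer(new SparkConf()).newInstance()

  val buf: ByteBuffer = ser.serialize("app state")  // serializer hands back a ByteBuffer
  val bytes = new Array[Byte](buf.remaining())      // ...but forPath wants Array[Byte]
  buf.get(bytes)

  val restored = ser.deserialize[String](ByteBuffer.wrap(bytes))
  assert(restored == "app state")
}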
Example 59
Source File: SparkCuratorUtil.scala    From SparkCore   with Apache License 2.0
package org.apache.spark.deploy.master

import scala.collection.JavaConversions._

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

import org.apache.spark.{Logging, SparkConf}

object SparkCuratorUtil extends Logging {

  val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  val ZK_SESSION_TIMEOUT_MILLIS = 60000
  val RETRY_WAIT_MILLIS = 5000
  val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(conf: SparkConf): CuratorFramework = {
    val ZK_URL = conf.get("spark.deploy.zookeeper.url")
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path)) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
}
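Unlike the private[spark] variant in Example 56, this SparkCuratorUtil is public, so it can be driven directly. A usage sketch, assuming a ZooKeeper ensemble at localhost:2181 and a made-up /spark/demo path:

import org.apache.spark.SparkConf
import org.apache.spark.deploy.master.SparkCuratorUtil

object CuratorUtilDemo extends App {
  val conf = new SparkConf().set("spark.deploy.zookeeper.url", "localhost:2181")
  val zk = SparkCuratorUtil.newClient(conf)    // starts the client before returning
  SparkCuratorUtil.mkdir(zk, "/spark/demo/a")  // no-op if the path already exists
  SparkCuratorUtil.deleteRecursive(zk, "/spark/demo")
  zk.close()
}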