scala.collection.mutable.ListBuffer Scala Examples

The following examples show how to use scala.collection.mutable.ListBuffer. Each example is an excerpt from an open-source project; the source file, project name, and license are noted above the code.
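
As a quick orientation before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing the typical ListBuffer workflow: append elements, then convert the buffer to an immutable List.

import scala.collection.mutable.ListBuffer

object ListBufferSketch {
  def main(args: Array[String]): Unit = {
    val buffer = new ListBuffer[Int]()
    buffer += 1                           // append a single element
    buffer ++= Seq(2, 3, 4)               // append all elements of another collection
    buffer.prepend(0)                     // insert an element at the front
    buffer -= 3                           // remove the first occurrence of 3
    val result: List[Int] = buffer.toList // snapshot as an immutable List
    println(result)                       // prints List(0, 1, 2, 4)
  }
}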
Example 1
Source File: KNN.scala    From spark3D   with Apache License 2.0
package com.astrolabsoftware.spark3d.spatialOperator

import com.astrolabsoftware.spark3d.geometryObjects.Point3D
import com.astrolabsoftware.spark3d.geometryObjects.Shape3D.Shape3D
import com.astrolabsoftware.spark3d.utils.GeometryObjectComparator
import com.astrolabsoftware.spark3d.utils.Utils.takeOrdered
import com.astrolabsoftware.spark3d.spatialPartitioning._

import org.apache.spark.rdd.RDD

import scala.collection.mutable.{HashSet, ListBuffer}
import scala.collection.JavaConverters._
import scala.reflect.ClassTag
import scala.util.control.Breaks._

object KNN {

  
  def KNNStandard[T <: Shape3D: ClassTag](
      rdd: RDD[T], queryObject: T,
      k: Int, unique: Boolean = false): List[T] = {
    val knn = takeOrdered[T](rdd, k, queryObject, unique)(
      new GeometryObjectComparator[T](queryObject.center)
    )
    knn.toList
  }
} 
Example 2
Source File: UniqueList.scala    From flamy   with Apache License 2.0
package com.flaminem.flamy.utils.collection.immutable

import scala.collection.generic.{CanBuildFrom, SeqForwarder}
import scala.collection.mutable.{Builder, ListBuffer}
import scala.collection.{SeqLike, mutable}


class UniqueList[T] private (
  list: List[T],
  set: Set[T]
) extends Seq[T] with SeqForwarder[T] with SeqLike[T, UniqueList[T]] {

  override def newBuilder: Builder[T, UniqueList[T]] = UniqueList.canBuildFrom()

  override def underlying: Seq[T] = list

  override def toString(): String = s"UniqueList(${list.mkString(", ")})"
}

object UniqueList {

  def newBuilder[T](): Builder[T, UniqueList[T]] = {
    new Builder[T, UniqueList[T]]() {
      val listBuffer = new ListBuffer[T]()
      val setBuffer = new mutable.HashSet[T]()

      override def +=(elem: T): this.type = {
        if(setBuffer.contains(elem)){
          throw new UnicityViolationException(elem)
        }
        else {
          listBuffer += elem
          setBuffer += elem
        }
        this
      }

      override def clear(): Unit = {
        listBuffer.clear()
        setBuffer.clear()
      }

      override def result(): UniqueList[T] = {
        new UniqueList(listBuffer.toList, setBuffer.toSet)
      }
    }
  }

  implicit def canBuildFrom[In, Out]: CanBuildFrom[UniqueList[In], Out, UniqueList[Out]] = {
    new CanBuildFrom[UniqueList[In], Out, UniqueList[Out]]() {
      override def apply(from: UniqueList[In]): Builder[Out, UniqueList[Out]] = newBuilder()
      override def apply(): Builder[Out, UniqueList[Out]] = newBuilder()
    }
  }

  def apply[T](elems: T*): UniqueList[T] = {
    (newBuilder() ++= elems).result()
  }

  def apply[T](elems: Iterable[T]): UniqueList[T] = {
    (newBuilder() ++= elems).result()
  }

} 
Example 3
Source File: NamedCollection.scala    From flamy   with Apache License 2.0
package com.flaminem.flamy.utils.collection.mutable

import com.flaminem.flamy.utils.Named

import scala.collection.generic.CanBuildFrom
import scala.collection.mutable.{Builder, ListBuffer}
import scala.collection.{TraversableLike, mutable}
import scala.language.implicitConversions


class NamedCollection[V <: Named] extends IndexedCollection[String, V] with TraversableLike[V, NamedCollection[V]] {
  override def newBuilder: mutable.Builder[V, NamedCollection[V]] = {
    new ListBuffer[V] mapResult{x => new NamedCollection(x)}
  }

  def this(namedItems: Traversable[V]) = {
    this()
    this ++= namedItems
  }

  def getIndexOf(value: V): String = value.getName.toLowerCase

}

object NamedCollection {

  def newBuilder[V <: Named](): Builder[V, NamedCollection[V]] = {
    new Builder[V, NamedCollection[V]]() {
      val buffer = new NamedCollection[V]()

      override def +=(elem: V): this.type = {
        buffer += elem
        this
      }

      override def clear(): Unit = {
        buffer.clear()
      }

      override def result(): NamedCollection[V] = {
        buffer
      }
    }
  }

  implicit def canBuildFrom[In <: Named, Out <: Named]: CanBuildFrom[NamedCollection[In], Out, NamedCollection[Out]] = {
    new CanBuildFrom[NamedCollection[In], Out, NamedCollection[Out]]() {
      override def apply(from: NamedCollection[In]): Builder[Out, NamedCollection[Out]] = newBuilder()
      override def apply(): Builder[Out, NamedCollection[Out]] = newBuilder()
    }
  }

} 
Example 4
Source File: TraversableLikeExtension.scala    From flamy   with Apache License 2.0
package com.flaminem.flamy.utils.collection

import scala.collection.TraversableLike
import scala.collection.generic.CanBuildFrom
import scala.collection.mutable.ListBuffer
import scala.language.higherKinds
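// NOTE: this excerpt is truncated; the enclosing declaration (an extension wrapping a collection value `t` of type T[A]) is not shown.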


  def splitBy[B](fun: A => B)(implicit bf: CanBuildFrom[T[A], A, T[A]]): Iterable[(B, T[A])] = {
    if(t.isEmpty){
      Nil
    }
    else {
      val buffer = new ListBuffer[(B, T[A])]()
      var groupBuilder = bf(t)
      var prevB: Option[B] = None
      t.foreach{
        a =>
          val b = Some(fun(a))
          if(prevB != b){
            if(prevB.isDefined){
              buffer += prevB.get -> groupBuilder.result()
            }
            prevB = b
            groupBuilder = bf(t)
          }
          groupBuilder += a
      }
      buffer += prevB.get -> groupBuilder.result()
      buffer.result()
    }
  }

} 
Example 5
Source File: Partition.scala    From flamy   with Apache License 2.0
package com.flaminem.flamy.model.partitions

import com.flaminem.flamy.model.PartitionColumn

import scala.collection.mutable.ListBuffer
import scala.collection.{SeqLike, mutable}


class Partition(override val columns: Seq[PartitionColumn])
extends TPartition
with SeqLike[PartitionColumn, Partition]
{

  override def newBuilderImpl: mutable.Builder[PartitionColumn, Partition] = {
    new ListBuffer[PartitionColumn] mapResult (x => new Partition(x))
  }

  override def newBuilder: mutable.Builder[PartitionColumn, Partition] = {
    newBuilderImpl
  }

  def this(partitionName: String) {
    this(
      partitionName.split("/").map {
        s =>
          val a = s.split("=")
          new PartitionColumn(a(0), Option(a(1)))
      }
    )
  }

} 
Example 6
Source File: ItemFiles.scala    From flamy   with Apache License 2.0
package com.flaminem.flamy.model.files

import com.flaminem.flamy.model.names.ItemName
import com.flaminem.flamy.utils.logging.Logging

import scala.collection.generic.Growable
import scala.collection.mutable.ListBuffer
import scala.collection.{TraversableLike, mutable}
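// NOTE: this excerpt is truncated; the enclosing ItemFiles class declaration (including the multimap `mm` used below) is not shown.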


  def filterFileTypes(predicate: (ItemName, FileType)=>Boolean): ItemFiles[T] = {
    this.filter{
      case file => predicate(file.getItemName, file.fileType)
    }
  }

  override def toString: String = mm.values.toString

  override def +=(file: T): ItemFiles.this.type = {
    mm.addBinding(file.fileType, file)
    this
  }

  override def clear(): Unit = {
    mm.clear()
  }
} 
Example 7
Source File: TableDependencyCollection.scala    From flamy   with Apache License 2.0
package com.flaminem.flamy.parsing.model

import com.flaminem.flamy.model.names.TableName
import com.flaminem.flamy.utils.collection.mutable.IndexedCollection

import scala.collection.generic.CanBuildFrom
import scala.collection.mutable.ListBuffer
import scala.collection.{TraversableLike, mutable}
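// NOTE: this excerpt is truncated; the enclosing TableDependencyCollection class declaration is not shown.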


  def copy(): TableDependencyCollection = {
    val res = new TableDependencyCollection
    res ++= this
    res
  }

  def getTables: Iterable[TableDependency] = {
    super.getAllValues
  }

  override def getIndexOf(value: TableDependency): TableName = {
    value.fullName
  }

}

object TableDependencyCollection {

  implicit val canBuildFrom = new CanBuildFrom[TableDependencyCollection, TableDependency, TableDependencyCollection] {
    override def apply(from: TableDependencyCollection): mutable.Builder[TableDependency, TableDependencyCollection] = {
      apply()
    }
    override def apply(): mutable.Builder[TableDependency, TableDependencyCollection] = {
      new ListBuffer[TableDependency] mapResult{x => new TableDependencyCollection(x)}
    }
  }

  implicit class TableDependencyCollectionConvertible(s: Seq[TableDependency]) {
    def toTableDependencyCollection: TableDependencyCollection = {
      new TableDependencyCollection(s)
    }
  }

} 
Example 8
Source File: crypto.scala    From SpinalCrypto   with MIT License
package spinal

import scala.collection.mutable.ListBuffer
import spinal.lib.tools._
import spinal.core._


package object crypto{
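  // NOTE: this excerpt is truncated; the enclosing method (which parses the polynomial string `polyStr` into a PolynomialGF2), along with polynomialStrDecoder and the PolynomialGF2 class, is not shown.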

  
    def polynomialNumberDecoder(radix: Int, p: String): List[Int] = {

      assert(List(2,16).contains(radix), "The following radix for polynomial is forbidden")

      // remove all _
      var strPoly = p.replace("_", "").toLowerCase

      // convert hexadecimal str into binary string
      var bitCount = -1
      if(radix == 16){
        val split = strPoly.split('\'')
        bitCount  = split(0).toInt
        strPoly   = split(1).substring(1)
        strPoly   = BigIntToListBoolean(BigInt(strPoly, 16), bitCount bits).map(b => if(b) "1" else "0").reverse.mkString("")
      }else{
        strPoly = strPoly.substring(1)
      }

      // Convert the binary string into list of coefficient
      val listBuffer = new ListBuffer[Int]()
      for((b,i) <- strPoly.reverse.zipWithIndex){
        if(b == '1') listBuffer.append(i)
      }

      // append for hexadecimal polynomial the higher coefficient
      if(bitCount != -1){
        listBuffer.append(bitCount)
      }

      return listBuffer.toList
    }

    // remove all spaces
    val poly = polyStr.replace(" ", "")

    // detect the format of the string
    val rhex = """[0-9]+\'x[0-9a-fA-F_]+""".r
    val rbin = """b[0-1_]+""".r
    val rstr = """[0-9x\^\+]+""".r

    val polynomial = poly match{
      case rhex() => polynomialNumberDecoder(16, poly)
      case rbin() => polynomialNumberDecoder(2, poly)
      case rstr() => polynomialStrDecoder(poly)
      case _      => throw new Exception("Polynomial format issue")
    }

    return new PolynomialGF2(polynomial.sortWith(_ > _))
  }


} 
Example 9
Source File: Utils.scala    From SpinalCrypto   with MIT License
package spinal.crypto

import scala.collection.mutable.ListBuffer



object CastByteArray {
  def apply(input: Array[Byte], castSize: Int): Array[Byte] = {
    if (input.length == castSize) {
      input
    } else if (input.length > castSize) {
      input.takeRight(castSize)
    } else {
      Array.fill[Byte](castSize - input.length)(0x00) ++ input
    }
  }
} 
Example 10
Source File: ActionData.scala    From amaterasu   with Apache License 2.0
package org.apache.amaterasu.common.dataobjects

import org.apache.amaterasu.enums.ActionStatus.ActionStatus

import scala.collection.mutable.ListBuffer

case class ActionData(var status: ActionStatus,
                      name: String,
                      src: String,
                      groupId: String,
                      typeId: String,
                      id: String,
                      exports: Map[String, String],
                      nextActionIds: ListBuffer[String]) {
  var errorActionId: String = _
} 
Example 11
Source File: SparkTestsSuite.scala    From amaterasu   with Apache License 2.0
package org.apache.amaterasu.spark

import java.io.{ByteArrayOutputStream, File}

import io.shinto.amaterasu.spark.PySparkRunnerTests
import org.apache.amaterasu.RunnersTests.RunnersLoadingTests
import org.apache.amaterasu.common.dataobjects.ExecData
import org.apache.amaterasu.common.execution.dependencies._
import org.apache.amaterasu.common.runtime.Environment
import org.apache.amaterasu.utilities.TestNotifier
import org.apache.amaterasu.executor.mesos.executors.ProvidersFactory
import org.apache.spark.repl.amaterasu.runners.spark.SparkScalaRunner
import org.apache.spark.sql.SparkSession
import org.scalatest._

import scala.collection.mutable.ListBuffer


class SparkTestsSuite extends Suites(
  new PySparkRunnerTests(),
  new RunnersLoadingTests()) with BeforeAndAfterAll {

  var env: Environment = _
  var factory: ProvidersFactory = _
  var spark: SparkSession = _

  override def beforeAll(): Unit = {

    env = Environment()
    env.workingDir = "file:///tmp/"
    env.master = "local[*]"

    // I can't apologise enough for this
    val resources = new File(getClass.getResource("/spark_intp.py").getPath).getParent

    val conf = Map[String, Any](
      "spark.cassandra.connection.host" -> "127.0.0.1",
      "sourceTable" -> "documents",
      "spark.local.ip" -> "127.0.0.1"
    )
    env.master = "local[1]"
    if (env.configuration != null) env.configuration ++ "pysparkPath" -> "/usr/bin/python" else env.configuration = Map(
      "pysparkPath" -> "/usr/bin/python",
      "cwd" -> resources
    )
    val excEnv = Map[String, Any](
      "PYTHONPATH" -> resources
    )
    env.configuration ++ "spark_exec_env" -> excEnv
    factory = ProvidersFactory(
      ExecData(
        env,
        Dependencies(ListBuffer.empty[Repo], List.empty[Artifact]),
        PythonDependencies(List.empty[PythonPackage]),
        Map("spark" -> Map.empty[String, Any], "spark_exec_env" -> Map("PYTHONPATH" -> resources))
      ),
      "test",
      new ByteArrayOutputStream(),
      new TestNotifier(),
      "test"
    )
    spark = factory.getRunner("spark", "scala").get.asInstanceOf[SparkScalaRunner].spark

    this.nestedSuites.filter(s => s.isInstanceOf[RunnersLoadingTests]).foreach(s => s.asInstanceOf[RunnersLoadingTests].factory = factory)
    this.nestedSuites.filter(s => s.isInstanceOf[PySparkRunnerTests]).foreach(s => s.asInstanceOf[PySparkRunnerTests].factory = factory)


    super.beforeAll()
  }

  override def afterAll(): Unit = {
    spark.stop()

    super.afterAll()
  }

} 
Example 12
Source File: MyDatasetFunc.scala    From Apache-Spark-2x-Machine-Learning-Cookbook   with MIT License
package spark.ml.cookbook.chapter3

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{Dataset, SparkSession}
//import spark.ml.cookbook.chapter3.{Car, MyDatasetData}

//import scala.collection.mutable
import scala.collection.mutable.ListBuffer

object MyDatasetFunc {

  def main(args: Array[String]): Unit = {

    Logger.getLogger("org").setLevel(Level.ERROR)
    Logger.getLogger("akka").setLevel(Level.ERROR)

    // setup SparkSession to use for interactions with Spark
    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("mydatasetfunc")
      .config("spark.sql.warehouse.dir", ".")
      .getOrCreate()

    import spark.implicits._

    val cars = spark.createDataset(MyDatasetData.carData)
    cars.show(false)

    val modelData = cars.groupByKey(_.make).mapGroups({
      case (make, car) => {
        val carModel = new ListBuffer[String]()
        car.map(_.model).foreach({
            c =>  carModel += c
        })
        (make, carModel)
      }
    })

    modelData.show(false)

    spark.stop()
  }
} 
Example 13
Source File: ShellCompleter.scala    From flamy   with Apache License 2.0
package com.flaminem.flamy.exec.shell

import java.util

import com.flaminem.flamy.conf.FlamyContext
import com.flaminem.flamy.utils.CliUtils
import com.flaminem.flamy.utils.logging.Logging
import jline.console.completer.{CandidateListCompletionHandler, Completer}

import scala.collection.JavaConversions._
import scala.collection.TraversableLike
import scala.collection.mutable.{Builder, ListBuffer}
import scala.util.control.NonFatal

object ShellCompleter {
  // NOTE: the body of this companion object (including the Candidates case class used in complete below) is truncated in this excerpt.
}

class ShellCompleter(rootContext: FlamyContext) extends Completer with Logging {

  import ShellCompleter._

  val handler: CandidateListCompletionHandler = new CandidateListCompletionHandler()

  override def complete(buffer: String, cursor: Int, candidates: util.List[CharSequence]): Int = {
    handler.setPrintSpaceAfterFullCompletion(true)
    val args: Seq[String] = CliUtils.split(buffer.substring(0, cursor))
    val lastWord: String = args.lastOption.getOrElse("")
    try {
      val truncatedArgs: Seq[String] = args.dropRight(1)
      val cliArgs = CliArgs(truncatedArgs, lastWord)
      val Candidates(strings, shift) = new OptionCompleter(handler, rootContext).complete(cliArgs)
      candidates.addAll(strings.sorted)
      cursor - cliArgs.lastWord.length + shift
    }
    catch {
      case NonFatal(e) =>
        logger.debug("Exception caught during auto-completion", e)
        cursor - lastWord.length
    }
  }

} 
Example 14
Source File: OctreePartitioning.scala    From spark3D   with Apache License 2.0
package com.astrolabsoftware.spark3d.spatialPartitioning

import com.astrolabsoftware.spark3d.geometryObjects.BoxEnvelope
import com.astrolabsoftware.spark3d.geometryObjects.Shape3D.Shape3D

import scala.collection.mutable.ListBuffer

class OctreePartitioning (private val octree: Octree)
  extends Serializable {

  
  def getGrids(): List[BoxEnvelope] = {
    octree.getLeafNodes.toList
  }
}

object OctreePartitioning {

  def apply(data: List[BoxEnvelope], tree: Octree): OctreePartitioning = {
    for (element <- data) {
      tree.insertElement(element)
    }
    tree.assignPartitionIDs
    new OctreePartitioning(tree)
  }
} 
Example 15
Source File: OctreeTest.scala    From spark3D   with Apache License 2.0
package com.astrolabsoftware.spark3d.spatialPartitioning

import com.astrolabsoftware.spark3d.geometryObjects._

import org.scalatest.{BeforeAndAfterAll, FunSuite}
import scala.collection.mutable.ListBuffer

class OctreeTest extends FunSuite with BeforeAndAfterAll {

  var tree_space: BoxEnvelope = _
  var valid_tree: Octree = _

  override protected def beforeAll(): Unit = {
    super.beforeAll()

    tree_space = BoxEnvelope.apply(0.0, 4.0, 0.0, 4.0, 0.0, 4.0)
    valid_tree = new Octree(new BoxEnvelope(tree_space), 0, null,2)
  }

  def containsElement(list: ListBuffer[BoxEnvelope], obj: BoxEnvelope): Boolean = {
    val objStr = obj.toString

    for (element <- list) {
      if (element.toString == objStr) {
        return true
      }
    }
    false
  }

  test ("Can you insert the elements into a Octree and verify its correctness?") {
    val element1 = BoxEnvelope.apply(0.0, 0.9, 0.0, 0.9, 0.0, 0.9)
    val element2 = BoxEnvelope.apply(1.0, 2.9, 1.0, 2.9, 1.0, 2.9)
    valid_tree.insertElement(element1)
    valid_tree.insertElement(element2)

    val spr = new ShellEnvelope(1.0, 1.0, 1.0, false, 0.9)
    var result = valid_tree.getElements(spr)
    assert(valid_tree.isLeaf)
    assert(result.size == 2)
    assert(containsElement(result, element1))
    assert(containsElement(result, element2))
    var leafNodes = valid_tree.getLeafNodes
    assert(leafNodes.size == 1)
    assert(containsElement(leafNodes, tree_space))

    val element3 = BoxEnvelope.apply(1.0, 1.9, 1.0, 1.9, 1.0, 1.9)
    valid_tree.insertElement(element3)
    result = valid_tree.getElements(BoxEnvelope.apply(3.0, 3.9, 3.0, 3.9, 3.0, 3.9))
    assert(result.size == 1)
    assert(containsElement(result, element2))
    result = valid_tree.getElements(element1)
    assert(result.size == 3)
    assert(containsElement(result, element1))
    assert(containsElement(result, element2))
    assert(containsElement(result, element3))

    val element4 = BoxEnvelope.apply(0.0, 0.9, 1.0, 1.9, 0.0, 0.9)
    valid_tree.insertElement(element4)

    result = valid_tree.getElements(element1)
    assert(!valid_tree.isLeaf)
    assert(result.size == 2)
    assert(containsElement(result, element1))
    assert(containsElement(result, element2))
    leafNodes = valid_tree.getLeafNodes
    assert(leafNodes.size == 15)

    valid_tree.assignPartitionIDs
    leafNodes = valid_tree.getLeafNodes
    for (node <- leafNodes) {
      assert(node.indexID <= 14 && node.indexID >= 0)
    }
  }

  test ("Can you force grow the octree to given level?") {
    valid_tree = new Octree(new BoxEnvelope(tree_space), 0, null, 2)
    valid_tree.insertElement(new ShellEnvelope(1.0, 1.0, 1.0, false, 1.0))
    valid_tree.forceGrowTree(2)
    assert(valid_tree.getLeafNodes.size == 64)
  }
} 
Example 16
Source File: OctreePartitionerTest.scala    From spark3D   with Apache License 2.0
package com.astrolabsoftware.spark3d.spatialPartitioning

import org.scalatest.{BeforeAndAfterAll, FunSuite}
import com.astrolabsoftware.spark3d.geometryObjects._

import scala.collection.mutable.ListBuffer

class OctreePartitionerTest extends FunSuite with BeforeAndAfterAll {

    test ("Can you correctly place a Point3D inside the Octree space?") {

    var valid_tree = new Octree(BoxEnvelope.apply(0.0, 4.0, 0.0, 4.0, 0.0, 4.0), 0, null, 2)
    val element1 = BoxEnvelope.apply(0.0, 0.9, 0.0, 0.9, 0.0, 0.9)
    val element2 = BoxEnvelope.apply(1.0, 3.0, 1.0, 3.0, 1.0, 3.0)
    val element3 = BoxEnvelope.apply(1.0, 1.9, 1.0, 1.9, 1.0, 1.9)
    val element4 = BoxEnvelope.apply(0.0, 0.9, 1.0, 1.9, 0.0, 0.9)
    val data = new ListBuffer[BoxEnvelope]
    data += element1
    data += element2
    data += element3
    data += element4

    val partitioning = OctreePartitioning(data.toList, valid_tree)
    val partitioner = new OctreePartitioner(partitioning.getPartitionTree, partitioning.getGrids)
    assert(partitioner.numPartitions == 15)
    var spr = new ShellEnvelope(0.5, 0.5, 0.5, false, 0.2)
    var result = partitioner.placeObject(spr)
    assert(result.next._1 == 13)

    // case when object belongs to all partitions
    spr = new ShellEnvelope(2, 2, 2, false, 1.1)
    result = partitioner.placeObject(spr)
    var resultCount = 0
    while (result.hasNext) {
      resultCount += 1
      result.next
    }
    assert(resultCount == 15)
  }
} 
Example 17
Source File: ZeroExperienceCollector.scala    From ScalphaGoZero   with Apache License 2.0
package org.deeplearning4j.scalphagozero.experience

import org.nd4j.linalg.api.ndarray.INDArray
import scala.collection.mutable.ListBuffer


class ZeroExperienceCollector extends ExperienceCollector {

  val states: ListBuffer[INDArray] = ListBuffer.empty
  val visitCounts: ListBuffer[INDArray] = ListBuffer.empty
  val rewards: ListBuffer[INDArray] = ListBuffer.empty

  private val currentEpisodeStates: ListBuffer[INDArray] = ListBuffer.empty
  private val currentEpisodeVisitCounts: ListBuffer[INDArray] = ListBuffer.empty

  def clearAllBuffers(): Unit = {
    states.clear()
    visitCounts.clear()
    rewards.clear()
    clearEpisodeBuffers()
  }

  private def clearEpisodeBuffers(): Unit = {
    currentEpisodeStates.clear()
    currentEpisodeVisitCounts.clear()
  }

  override def beginEpisode(): Unit = clearEpisodeBuffers()

  override def recordDecision(state: INDArray, visitCounts: INDArray): Unit = {
    currentEpisodeStates += state
    currentEpisodeVisitCounts += visitCounts
    ()
  }

  override def completeEpisode(reward: INDArray): Unit = {
    states ++= currentEpisodeStates
    visitCounts ++= currentEpisodeVisitCounts
    for (_ <- 1 to currentEpisodeStates.size)
      rewards += reward

    clearEpisodeBuffers()
  }

} 
Example 18
Source File: ElementsFlattenTraversalLogic.scala    From phobos   with Apache License 2.0
package ru.tinkoff.phobos.raw

import ru.tinkoff.phobos.ast.XmlLeaf
import ru.tinkoff.phobos.raw.ElementsFlattenTraversalLogic.Accumulator
import ru.tinkoff.phobos.traverse.DecodingTraversalLogic
import scala.collection.mutable.ListBuffer

class ElementsFlattenTraversalLogic private () extends DecodingTraversalLogic[Accumulator, ElementsFlatten] {
  override def newAcc(): Accumulator = Accumulator()

  override def onFinish(acc: Accumulator): ElementsFlatten =
    ElementsFlatten(acc.elements.toList: _*)

  override def combine(acc: Accumulator, field: String, intermediateResult: ElementsFlatten): Accumulator = {
    acc.elements ++= intermediateResult.elems
    acc
  }

  override def onText(acc: Accumulator, elemName: String, text: XmlLeaf): Accumulator = {
    acc.elements += (elemName -> text)
    acc
  }
}

object ElementsFlattenTraversalLogic {
  case class Accumulator(elements: ListBuffer[(String, XmlLeaf)] = ListBuffer.empty)

  val instance = new ElementsFlattenTraversalLogic()
} 
Example 19
Source File: RddToDataFrame.scala    From spark-sframe   with BSD 2-Clause "Simplified" License
package org.apache.spark.turi

import org.graphlab.create.GraphLabUtil
import org.apache.spark.sql.{SQLContext, Row, DataFrame}
import org.apache.spark.rdd.RDD
import scala.collection.JavaConversions._
import org.apache.spark.sql.types._
import scala.collection.mutable.ListBuffer
import scala.collection.mutable.ArrayBuffer
import scala.collection.immutable.Map
import java.util.HashMap
import java.util.ArrayList
import java.util.{Date,GregorianCalendar}
import java.sql.Date

object EvaluateRDD {
  
  def inferSchema(obj: Any): DataType = {
    if(obj.isInstanceOf[Int]) { 
      IntegerType
    } else if(obj.isInstanceOf[String]) { 
      StringType
    } else if(obj.isInstanceOf[Double]) { 
      DoubleType
    } else if(obj.isInstanceOf[Long]) { 
      LongType
    } else if(obj.isInstanceOf[Float]) { 
      FloatType
    } else if(obj.isInstanceOf[Map[_,_]]) {
      MapType(inferSchema(obj.asInstanceOf[Map[_,_]].head._1),inferSchema(obj.asInstanceOf[Map[_,_]].head._2))
    } else if(obj.isInstanceOf[java.util.HashMap[_,_]]) {
      MapType(inferSchema(obj.asInstanceOf[java.util.HashMap[_,_]].head._1),inferSchema(obj.asInstanceOf[java.util.HashMap[_,_]].head._2))
    } else if(obj.isInstanceOf[Array[_]]) {
      ArrayType(inferSchema(obj.asInstanceOf[Array[_]](0)))
    } else if(obj.isInstanceOf[java.util.ArrayList[_]]) {
      ArrayType(inferSchema(obj.asInstanceOf[java.util.ArrayList[_]](0)))
    } else if(obj.isInstanceOf[java.util.GregorianCalendar]) {
      TimestampType
    } else if(obj.isInstanceOf[java.util.Date] || obj.isInstanceOf[java.sql.Date]) {
      DateType
    } else { 
      StringType
    }
  }

  def toScala(obj: Any): Any = {
    if (obj.isInstanceOf[java.util.HashMap[_,_]]) {
      val jmap = obj.asInstanceOf[java.util.HashMap[_,_]]
      jmap.map { case (k,v) => toScala(k) -> toScala(v) }.toMap
    }
    else if(obj.isInstanceOf[java.util.ArrayList[_]]) {
      val buf = ArrayBuffer[Any]()
      val jArray = obj.asInstanceOf[java.util.ArrayList[_]]
      for(item <- jArray) {
        buf += toScala(item)
      }
      buf.toArray
    } else if(obj.isInstanceOf[java.util.GregorianCalendar]) {
      new java.sql.Timestamp(obj.asInstanceOf[java.util.GregorianCalendar].getTime().getTime())
    } else {
      obj
    }
  }
  def toSparkDataFrame(sqlContext: SQLContext, rdd: RDD[java.util.HashMap[String,_]]): DataFrame = { 
    val scalaRDD = rdd.map(l => toScala(l))
    val rowRDD = scalaRDD.map(l => Row.fromSeq(l.asInstanceOf[Map[_,_]].values.toList))
    
    var sample_data: java.util.HashMap[String,_] = rdd.take(1)(0)
    
    var schema_list: ListBuffer[StructField] = new ListBuffer[StructField]()
    for ((name,v) <- sample_data) { 
      schema_list.append(StructField(name,inferSchema(v)))
    }
    sqlContext.createDataFrame(rowRDD,StructType(schema_list))
  }
} 
Example 20
Source File: DataCodec.scala    From Scala-High-Performance-Programming   with MIT License
package highperfscala.orderbook.util

import java.io._

import highperfscala.orderbook.Commands.Command
import org.slf4s.Logging

import scala.collection.mutable.ListBuffer


object DataCodec extends Logging {

  def write(cs: List[Command], output: File): Unit = {
    val oos = new ObjectOutputStream(new FileOutputStream(output))
    cs.foreach(oos.writeObject)
    oos.close()
  }

  def read(input: File): List[Command] = {
    val fis = new FileInputStream(input)
    val ois = new ObjectInputStream(fis)
    val commandBuilder = ListBuffer[Command]()
    while(fis.available() != 0) {
      commandBuilder.append(ois.readObject().asInstanceOf[Command])
    }
    ois.close()
    fis.close()

    commandBuilder.result()
  }

} 
Example 21
Source File: DataCodec.scala    From Scala-High-Performance-Programming   with MIT License
package highperfscala.dataanalysis.util

import java.io._

import highperfscala.dataanalysis.Execution
import org.slf4s.Logging

import scala.collection.mutable.ListBuffer

object DataCodec extends Logging {

  def write(cs: List[Execution], output: File): Unit = {
    val oos = new ObjectOutputStream(new FileOutputStream(output))
    cs.foreach(oos.writeObject)
    oos.close()
  }

  def read(input: File): List[Execution] = {
    val fis = new FileInputStream(input)

    val ois = new ObjectInputStream(fis)
    val commandBuilder = ListBuffer[Execution]()
    while(fis.available() != 0) {
      commandBuilder.append(ois.readObject().asInstanceOf[Execution])
    }
    ois.close()
    fis.close()

    commandBuilder.result()
  }

  def read(input: InputStream): List[Execution] = {
    val ois = new ObjectInputStream(input)
    val commandBuilder = ListBuffer[Execution]()
    while(input.available() != 0) {
      commandBuilder.append(ois.readObject().asInstanceOf[Execution])
    }
    ois.close()

    commandBuilder.result()
  }

} 
Example 22
Source File: DataCodec.scala    From Scala-High-Performance-Programming   with MIT License
package highperfscala
package benchmarks
package util

import java.io._

import orderbook.Commands.Command
import org.slf4s.Logging

import scala.collection.mutable.ListBuffer


object DataCodec extends Logging {

  def write(cs: List[Command], output: File): Unit = {
    val oos = new ObjectOutputStream(new FileOutputStream(output))
    cs.foreach(oos.writeObject)
    oos.close()
  }

  def read(input: File): List[Command] = {
    val fis = new FileInputStream(input)
    val ois = new ObjectInputStream(fis)
    val commandBuilder = ListBuffer[Command]()
    while(fis.available() != 0) {
      commandBuilder.append(ois.readObject().asInstanceOf[Command])
    }
    ois.close()
    fis.close()

    commandBuilder.result()
  }

} 
Example 23
Source File: Cluster.scala    From exhibitor-mesos-framework   with Apache License 2.0
package ly.stealth.mesos.exhibitor

import play.api.libs.functional.syntax._
import play.api.libs.json._

import scala.collection.mutable.ListBuffer

case class Cluster(exhibitorServers: List[ExhibitorServer] = Nil) {
  private val storage = Cluster.newStorage(Config.storage)
  private[exhibitor] var frameworkId: Option[String] = None

  private[exhibitor] val servers = new ListBuffer[ExhibitorServer]
  //add anything that was passed to constructor
  exhibitorServers.foreach(servers += _)

  def getServer(id: String): Option[ExhibitorServer] = servers.find(_.id == id)

  def addServer(server: ExhibitorServer): Boolean = {
    servers.find(_.id == server.id) match {
      case Some(_) => false
      case None =>
        servers += server
        true
    }
  }

  def expandIds(expr: String): List[String] = {
    if (expr == null || expr == "") throw new IllegalArgumentException("ID expression cannot be null or empty")
    else {
      expr.split(",").flatMap { part =>
        if (part == "*") return servers.map(_.id).toList
        else Util.Range(part).values.map(_.toString)
      }.distinct.sorted.toList
    }
  }

  def save() = storage.save(this)(Cluster.writer)

  def load() {
    storage.load(Cluster.reader).foreach { cluster =>
      this.frameworkId = cluster.frameworkId
      //TODO load servers too
    }
  }

  override def toString: String = servers.toString()
}

object Cluster {
  private def newStorage(storage: String): Storage[Cluster] = {
    storage.split(":", 2) match {
      case Array("file", fileName) => FileStorage(fileName)
      case _ => throw new IllegalArgumentException(s"Unsupported storage: $storage")
    }
  }

  implicit val writer = new Writes[Cluster] {
    override def writes(o: Cluster): JsValue = Json.obj("frameworkid" -> o.frameworkId, "cluster" -> o.servers.toList)
  }

  implicit val reader = ((__ \ 'frameworkid).readNullable[String] and
    (__ \ 'cluster).read[List[ExhibitorServer]])((frameworkId, servers) => {
    val cluster = Cluster(servers)
    cluster.frameworkId = frameworkId
    cluster
  })
} 
Example 24
Source File: CorpusReader.scala    From ai.vitk.ner   with GNU General Public License v3.0
package ai.vitk.ner

import java.io.{File, InputStream}

import org.slf4j.LoggerFactory

import scala.collection.mutable.ListBuffer
import scala.io.Source

object CorpusReader {
  val logger = LoggerFactory.getLogger(CorpusReader.getClass)

  
  def readVLSPTest1(resourcePath: String): List[Sentence] = {
    // read lines of the file and remove lines which contains "<s>"
    val stream = getClass.getResourceAsStream(resourcePath)
    val lines = Source.fromInputStream(stream).getLines().toList.filter {
      line => line.trim != "<s>"
    }
    val sentences = new ListBuffer[Sentence]()
    var tokens = new ListBuffer[Token]()
    for (i <- (0 until lines.length)) {
      val line = lines(i).trim
      if (line == "</s>") {
        if (!tokens.isEmpty) sentences.append(Sentence(tokens))
        tokens = new ListBuffer[Token]()
      } else {
        val parts = line.split("\\s+")
        if (parts.length < 3) 
          logger.error("Invalid line = " + line) 
        else 
          tokens.append(Token(parts(0), Map(Label.PartOfSpeech -> parts(1), Label.Chunk -> parts(2))))
      }
    }
    logger.info(resourcePath + ", number of sentences = " + sentences.length)
    sentences.toList
  }
  
  def readVLSPTest2(dir: String): List[Sentence] = {
    def getListOfFiles: List[File] = {
      val d = new File(dir)
      if (d.exists && d.isDirectory) {
        d.listFiles.filter(_.isFile).toList
      } else {
        List[File]()
      }
    }
    val files = getListOfFiles
    logger.info("Number of test files = " + files.length)
    files.flatMap {
      file  => {
        val x = file.getAbsolutePath
        val resourcePath = x.substring(x.indexOf("/ner"))
        readVLSPTest1(resourcePath)
      } 
    }
  }
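
  // NOTE: readCoNLL, used in main below, is not shown in this excerpt.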
  
  
  def main(args: Array[String]): Unit = {
    val path = "/ner/vi/train.txt"
    val sentences = readCoNLL(path)
    logger.info("Number of sentences = " + sentences.length)
    sentences.take(10).foreach(s => logger.info(s.toString))
    sentences.takeRight(10).foreach(s => logger.info(s.toString))
  }
} 
Example 25
Source File: CancelUtils.scala    From cats-effect   with Apache License 2.0
package cats.effect
package internals

import scala.collection.mutable.ListBuffer
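
// NOTE: this excerpt is truncated; the enclosing CancelUtils object declaration is not shown.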


  def cancelAll(cancelables: CancelToken[IO]*): CancelToken[IO] =
    if (cancelables.isEmpty) {
      IO.unit
    } else {
      IO.suspend {
        cancelAll(cancelables.iterator)
      }
    }

  def cancelAll(cursor: Iterator[CancelToken[IO]]): CancelToken[IO] =
    if (cursor.isEmpty) {
      IO.unit
    } else {
      IO.suspend {
        val frame = new CancelAllFrame(cursor)
        frame.loop()
      }
    }

  // Optimization for `cancelAll`
  final private class CancelAllFrame(cursor: Iterator[CancelToken[IO]]) extends IOFrame[Unit, IO[Unit]] {
    private[this] val errors = ListBuffer.empty[Throwable]

    def loop(): CancelToken[IO] =
      if (cursor.hasNext) {
        cursor.next().flatMap(this)
      } else {
        errors.toList match {
          case Nil =>
            IO.unit
          case first :: rest =>
            // Logging the errors somewhere, because exceptions
            // should never be silent
            rest.foreach(Logger.reportFailure)
            IO.raiseError(first)
        }
      }

    def apply(a: Unit): IO[Unit] =
      loop()

    def recover(e: Throwable): IO[Unit] = {
      errors += e
      loop()
    }
  }
} 
Example 26
Source File: EigenValue.scala    From Clustering4Ever   with Apache License 2.0
package org.clustering4ever.scala.clustering.tensor
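
// NOTE: this excerpt is truncated; the breeze.linalg imports and the enclosing EigenValue class declaration
// (whose fit method contains the code below; see the companion object at the end) are not shown.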

    val n2 = data.head.cols
    val timeColumn = DenseMatrix.zeros[Double](m,n2)  
    val timeRow = DenseMatrix.zeros[Double](m,n1)

    @annotation.tailrec
    def matriceColumnSet(t:mutable.ArrayBuffer[DenseMatrix[Double]], m:DenseMatrix[Double], c:DenseMatrix[Double], i: Int, j: Int , k: Int): DenseMatrix[Double] = {
      if (j < t.head.cols && k < t.length) {
        m(k, j) = t(k)(i, j)
        matriceColumnSet(t, m, c, i, j, k + 1)
      }
      else if (k == t.length && j < t.head.cols) {
        matriceColumnSet(t, m, c, i, j + 1 , 0)
      }
      else if (i < t.head.rows - 1) {
        c += cov(m)
        matriceColumnSet(t, m, c, i + 1, 0, 0)
      }
      else {
        c += cov(m)
      }
    }

    @annotation.tailrec
    def matriceRowSet(t: mutable.ArrayBuffer[DenseMatrix[Double]], m: DenseMatrix[Double], c: DenseMatrix[Double], i: Int, j: Int , k: Int): DenseMatrix[Double] = {
      if (i < t.head.rows && k < t.length) {
        m(k, i) = t(k)(i, j)
        matriceRowSet(t, m, c, i, j, k + 1)
      }
      else if (k == t.length && i < t.head.rows) {
        matriceRowSet(t, m, c, i + 1, j , 0)
      }
      else if (j < t.head.cols - 1){
        c += cov(m)
        matriceRowSet(t, m, c, 0, j + 1, 0)
      }
      else {
        c += cov(m)
      }
    }

    val columnMatrix = matriceColumnSet(data, timeColumn, DenseMatrix.zeros[Double](n2,n2), 0, 0, 0 )
    val svd.SVD(u1,eigValue,eigVector) = svd(columnMatrix)
    val columnEigvalue = eigValue.toArray   

    val rowMatrix = matriceRowSet(data, timeRow, DenseMatrix.zeros[Double](n1,n1), 0, 0, 0 )
    val svd.SVD(u2,eigValue2,eigVector2) = svd(rowMatrix)
    val rowEigvalue = eigValue2.toArray

    Array(rowEigvalue.take(5), columnEigvalue.take(5))
  
  }
}  
 
object EigenValue extends Serializable {
  
  def train(k: Int, data: mutable.ArrayBuffer[DenseMatrix[Double]]) = (new EigenValue(k)).fit(data)

} 
Example 27
Source File: KernelOutputStream.scala    From incubator-toree   with Apache License 2.0
package org.apache.toree.kernel.protocol.v5.stream

import java.io.OutputStream
import java.nio.charset.Charset

import org.apache.toree.kernel.protocol.v5.content.StreamContent
import org.apache.toree.kernel.protocol.v5.{SystemActorType, MessageType, KMBuilder}
import org.apache.toree.kernel.protocol.v5.kernel.ActorLoader
import org.apache.toree.utils.{LogLike, ScheduledTaskManager}
import scala.collection.mutable.ListBuffer
import KernelOutputStream._

object KernelOutputStream {
  val DefaultStreamType = "stdout"
  val DefaultSendEmptyOutput = false
}
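
// NOTE: this excerpt is truncated; the KernelOutputStream class declaration and most of its body
// (including internalBytes and enableAutoFlush used below) are not shown.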


  override def write(b: Int): Unit = internalBytes.synchronized {
    // Begin periodic flushing if this is a new set of bytes
    enableAutoFlush()

    internalBytes += b.toByte
  }
} 
Example 28
Source File: KernelInputStream.scala    From incubator-toree   with Apache License 2.0
package org.apache.toree.kernel.protocol.v5.stream

import java.io.InputStream
import java.nio.charset.Charset

import akka.pattern.ask
import org.apache.toree.kernel.protocol.v5.content.InputRequest
import org.apache.toree.kernel.protocol.v5.kernel.ActorLoader
import org.apache.toree.kernel.protocol.v5.kernel.Utilities.timeout
import org.apache.toree.kernel.protocol.v5.{KMBuilder, MessageType}

import scala.collection.mutable.ListBuffer
import scala.concurrent.{Await, Future}

import KernelInputStream._

object KernelInputStream {
  val DefaultPrompt = ""
  val DefaultPassword = false
}
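
// NOTE: this excerpt is truncated; the KernelInputStream class declaration and most of its body
// (including internalBytes, kmBuilder, actorLoader, prompt and password used below) are not shown.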


  override def read(): Int = {
    if (!this.hasByte) this.requestBytes()

    this.nextByte()
  }

  private def hasByte: Boolean = internalBytes.nonEmpty

  private def nextByte(): Int = {
    val byte = internalBytes.head

    internalBytes = internalBytes.tail

    byte
  }

  private def requestBytes(): Unit = {
    val inputRequest = InputRequest(prompt, password)
    // NOTE: Assuming already provided parent header and correct ids
    val kernelMessage = kmBuilder
      .withHeader(MessageType.Outgoing.InputRequest)
      .withContentString(inputRequest)
      .build

    // NOTE: The same handler is being used in both request and reply
    val responseFuture: Future[String] =
      (actorLoader.load(MessageType.Incoming.InputReply) ? kernelMessage)
      .mapTo[String]

    // Block until we get a response
    import scala.concurrent.duration._
    internalBytes ++=
      Await.result(responseFuture, Duration.Inf).getBytes(EncodingType)
  }
} 
Example 29
Source File: RedisPubSubTest.scala    From stream-reactor   with Apache License 2.0
package com.datamountaineer.streamreactor.connect.redis.sink.writer

import com.datamountaineer.streamreactor.connect.redis.sink.config.{RedisConfig, RedisConfigConstants, RedisConnectionInfo, RedisSinkSettings}
import org.apache.kafka.connect.data.{Schema, SchemaBuilder, Struct}
import org.apache.kafka.connect.sink.SinkRecord
import org.mockito.MockitoSugar
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import redis.clients.jedis.{Jedis, JedisPubSub}
import redis.embedded.RedisServer

import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer

class RedisPubSubTest extends AnyWordSpec with Matchers with BeforeAndAfterAll with MockitoSugar {

  val redisServer = new RedisServer(6379)

  override def beforeAll() = redisServer.start()

  override def afterAll() = redisServer.stop()

  "Redis PUBSUB writer" should {

    "write Kafka records to a Redis PubSub" in {

      val TOPIC = "cpuTopic"
      val KCQL = s"SELECT * from $TOPIC STOREAS PubSub (channel=type)"
      println("Testing KCQL : " + KCQL)
      val props = Map(
        RedisConfigConstants.REDIS_HOST->"localhost",
        RedisConfigConstants.REDIS_PORT->"6379",
        RedisConfigConstants.KCQL_CONFIG->KCQL
      ).asJava

      val config = RedisConfig(props)
      val connectionInfo = new RedisConnectionInfo("localhost", 6379, None)
      val settings = RedisSinkSettings(config)
      val writer = new RedisPubSub(settings)
      writer.createClient(settings)

      val schema = SchemaBuilder.struct().name("com.example.Cpu")
        .field("type", Schema.STRING_SCHEMA)
        .field("temperature", Schema.FLOAT64_SCHEMA)
        .field("voltage", Schema.FLOAT64_SCHEMA)
        .field("ts", Schema.INT64_SCHEMA).build()

      val struct1 = new Struct(schema).put("type", "Xeon").put("temperature", 60.4).put("voltage", 90.1).put("ts", 1482180657010L)
      val struct2 = new Struct(schema).put("type", "i7").put("temperature", 62.1).put("voltage", 103.3).put("ts", 1482180657020L)
      val struct3 = new Struct(schema).put("type", "i7-i").put("temperature", 64.5).put("voltage", 101.1).put("ts", 1482180657030L)

      val sinkRecord1 = new SinkRecord(TOPIC, 0, null, null, schema, struct1, 1)
      val sinkRecord2 = new SinkRecord(TOPIC, 0, null, null, schema, struct2, 2)
      val sinkRecord3 = new SinkRecord(TOPIC, 0, null, null, schema, struct3, 3)

      val jedis = new Jedis(connectionInfo.host, connectionInfo.port)
      // Clean up in-memory jedis
      jedis.flushAll()

      val messagesMap = collection.mutable.Map[String, ListBuffer[String]]()

      val t = new Thread {
        private val pubsub = new JedisPubSub {
          override def onMessage(channel: String, message: String): Unit = {
            messagesMap.get(channel) match {
              case Some(msgs) => messagesMap.put(channel, msgs += message)
              case None => messagesMap.put(channel, ListBuffer(message))
            }
          }
        }

        override def run(): Unit = {
          jedis.subscribe(pubsub, "Xeon", "i7", "i7-i")
        }

        override def interrupt(): Unit = {
          pubsub.punsubscribe("*")
          super.interrupt()
        }
      }
      t.start()
      t.join(5000)
      if (t.isAlive) t.interrupt()

      writer.write(Seq(sinkRecord1))
      writer.write(Seq(sinkRecord2, sinkRecord3))

      messagesMap.size shouldBe 3

      messagesMap("Xeon").head shouldBe """{"type":"Xeon","temperature":60.4,"voltage":90.1,"ts":1482180657010}"""
      messagesMap("i7").head shouldBe """{"type":"i7","temperature":62.1,"voltage":103.3,"ts":1482180657020}"""
      messagesMap("i7-i").head shouldBe """{"type":"i7-i","temperature":64.5,"voltage":101.1,"ts":1482180657030}"""
    }
  }
} 
Example 30
Source File: DockerRunAction.scala    From berilia   with Apache License 2.0
package com.criteo.dev.cluster.docker

import java.io.{File, PrintWriter}

import com.criteo.dev.cluster.{DevClusterProcess, GeneralConstants, GeneralUtilities}
import org.slf4j.LoggerFactory

import scala.collection.mutable.ListBuffer
import scala.sys.process.{Process, ProcessLogger}


object DockerRunAction {

  private val logger = LoggerFactory.getLogger(classOf[DockerBuildAction])

  private val processLogger = ProcessLogger(
    (e: String) => logger.info("err " + e))

  private val ports = new ListBuffer[String]

  def apply(hosts: Map[String, String],
            image: String,
            mountDir: Option[String] = None,
            command: Option[String] = None,
            ports: Array[PortMeta],
            conf: Map[String, String],
            background: Boolean = false) : Option[String] = {
    val sb = new StringBuilder("docker run -P")
    if (background) {
      sb.append(" -d")
    } else {
      sb.append(" -it")
    }
    hosts.foreach {
      case (ip, name) => sb.append(s" --add-host=$name:$ip")
    }

    ports.foreach(p => {
      if (p.exposedPort.isDefined) {
        sb.append(s" -p ${p.exposedPort.get}:${p.port}")
      } else {
        sb.append(s" -p ${p.port}")
      }
    })

    if (mountDir.isDefined) {
      sb.append(s" -v ${mountDir.get}")
      sb.append(":/mount")
    }

    sb.append(s" $image")

    if (command.isDefined) {
      sb.append(s" ${command.get}")
    }

    val commandString = sb.toString
    println(commandString)

    if (background) {
      val output = DevClusterProcess.process(sb.toString).!!.stripLineEnd
      Some(output)
    } else {
      //write command to execute later (in dev-cluster script)
      DockerUtilities.writeDockerCommand(commandString)
      None
    }
  }
} 
Example 31
Source File: DockerBuildAction.scala    From berilia   with Apache License 2.0
package com.criteo.dev.cluster.docker

import com.criteo.dev.cluster.DevClusterProcess
import org.slf4j.LoggerFactory

import scala.collection.mutable.ListBuffer
import scala.sys.process.{Process, ProcessLogger}



class DockerBuildAction (dockerFile: String, dockerImage: String) {

  private val logger = LoggerFactory.getLogger(classOf[DockerBuildAction])

  private val processLogger = ProcessLogger(
    (e: String) => logger.info("err " + e))

  private val args = new ListBuffer[Pair[String, String]]
  private val ports = new ListBuffer[PortMeta]

  def addArg(key: String, value: String) = {
     args.+=(Pair(key, value))
  }

  def run() : Unit = {
    val sb = new StringBuilder("docker build")
    sb.append(s" -t $dockerImage")
    sb.append(s" -f ./${DockerConstants.dockerBaseDir}/$dockerFile")
    args.foreach(p =>  sb.append(s" --build-arg ${p._1}=${p._2}"))
    sb.append(s" ${DockerConstants.dockerBaseDir}")
    val out = DevClusterProcess.process(sb.toString).!
    if (out != 0) {
      throw new Exception("Failure running docker command.")
    }
  }
}

object DockerBuildAction {
  def apply(dockerFile: String, dockerImage: String) = {
    val dba = new DockerBuildAction(dockerFile, dockerImage)
    dba.run
  }
} 
Example 32
Source File: ShellMultiAction.scala    From berilia   with Apache License 2.0
package com.criteo.dev.cluster.command

import java.io.{File, PrintWriter}

import com.criteo.dev.cluster.{GeneralUtilities, Public}
import org.slf4j.LoggerFactory

import scala.collection.mutable.ListBuffer
import scala.sys.process.ProcessLogger


@Public
case class ShellMultiAction() extends MultiAction {
  private val logger = LoggerFactory.getLogger(this.getClass)
  private val commands = new ListBuffer[String]

  //to allow concurrency
  val filepath = s"${GeneralUtilities.getHomeDir}/${GeneralUtilities.getTempPrefix}.sh"

  def add(command: String): Unit = {
    commands.+=(command)
  }

  def run(returnResult: Boolean = false, ignoreError: Boolean = false): String = {
    val localTmpShellFile = new File(filepath)
    ShellAction(s"rm $filepath", returnResult = false, true)

    //Write a temp shell script
    val writer = new PrintWriter(localTmpShellFile)
    commands.foreach(s => writer.write(s"$s\n"))
    writer.close

    localTmpShellFile.setExecutable(true)
    localTmpShellFile.setReadable(true)
    localTmpShellFile.deleteOnExit()

    commands.foreach(s => logger.info(s))
    val res = ShellAction(filepath, returnResult, ignoreError)
    localTmpShellFile.delete()
    res
  }
}

object ShellMultiAction {
  def apply(
             commands: List[String],
             returnResult: Boolean = false,
             ignoreError: Boolean = false
           ): String = {
    val action = new ShellMultiAction()
    commands.foreach(action.add)
    action.run(returnResult, ignoreError)
  }
} 
Example 33
Source File: ShellHiveAction.scala    From berilia   with Apache License 2.0
package com.criteo.dev.cluster.command

import java.io.{File, PrintWriter}

import com.criteo.dev.cluster.{GeneralUtilities, Public}
import org.slf4j.LoggerFactory

import scala.collection.mutable.ListBuffer


@Public
class ShellHiveAction(ignoreError: Boolean = false) extends HiveAction {
  private val commands = new ListBuffer[String]
  private val logger = LoggerFactory.getLogger(this.getClass)

  private final val filepath = s"${GeneralUtilities.getHomeDir}/${GeneralUtilities.getTempPrefix}-hivequery"

  def add(action: String): Unit = {
    commands += action
  }

  def run(): String = {
    val localQueryFile = new File(filepath)
    val writer = new PrintWriter(localQueryFile)
    commands.foreach(s => {
      writer.write(s"$s;\n")
      logger.info(s)
    })
    writer.close

    localQueryFile.setExecutable(true)
    localQueryFile.setReadable(true)
    localQueryFile.deleteOnExit()

    val ignoreErrorFlag = if (ignoreError) "-hiveconf hive.cli.errors.ignore=true" else ""
    val res = ShellAction(s"hive $ignoreErrorFlag -f $filepath", returnResult = true, ignoreError)
    localQueryFile.delete()
    res
  }
} 
Example 34
Source File: SshMultiAction.scala    From berilia   with Apache License 2.0
package com.criteo.dev.cluster.command

import java.io.{File, PrintWriter}

import com.criteo.dev.cluster._
import org.slf4j.LoggerFactory

import scala.collection.mutable.ListBuffer
import scala.sys.process.ProcessLogger


@Public
case class SshMultiAction(node: Node) extends MultiAction {

  private val logger = LoggerFactory.getLogger(this.getClass)
  private val commands = new ListBuffer[String]

  //to allow concurrency
  val localFilepath = s"${GeneralUtilities.getHomeDir}/${GeneralUtilities.getTempPrefix}.sh"
  val remoteFilePath = s"/tmp/${GeneralUtilities.getTempPrefix}.sh"

  def add(command : String): Unit = {
    commands.+=(command)
  }

  def run(returnResult: Boolean = false, ignoreError: Boolean = false) : String = {
    val localTmpShellFile = new File(localFilepath)
    SshAction(node, " rm " + remoteFilePath, returnResult = false, true)

    //Write a temp shell script
    val writer = new PrintWriter(localTmpShellFile)
    commands.foreach(s => writer.write(s"$s\n"))
    writer.close

    localTmpShellFile.setExecutable(true)
    localTmpShellFile.setReadable(true)
    localTmpShellFile.deleteOnExit()

    commands.foreach(s => logger.info(s))

    ScpAction(None, localFilepath, Some(node), remoteFilePath)
    val res = SshAction(node, s"source $remoteFilePath", returnResult, ignoreError)
    SshAction(node, s"rm $remoteFilePath", returnResult = false, true)
    localTmpShellFile.delete()
    res
  }
}

object SshMultiAction {
  def apply(node: Node,
            commands: List[String],
            returnResult: Boolean = false,
            ignoreError: Boolean = false) : String = {
    val action = new SshMultiAction(node)
    commands.foreach(action.add)
    action.run(returnResult = returnResult, ignoreError = ignoreError)
  }
} 
Example 35
Source File: SshHiveAction.scala    From berilia   with Apache License 2.0
package com.criteo.dev.cluster.command

import java.io.{File, PrintWriter}

import com.criteo.dev.cluster._
import org.slf4j.LoggerFactory

import scala.collection.mutable.ListBuffer
import scala.sys.process._


@Public
class SshHiveAction(node: Node, ignoreError: Boolean = false) extends HiveAction {

  private final val localFilepath = s"${GeneralUtilities.getHomeDir}/${GeneralUtilities.getTempPrefix}-hivequery"
  private final val remoteFilepath = s"/tmp/${GeneralUtilities.getTempPrefix}-hivequery"  //concurrent hive actions on same node not supported for now

  private val commands = new ListBuffer[String]
  private val logger = LoggerFactory.getLogger(classOf[SshHiveAction])

  def add(action: String): Unit = {
    commands.+=(action)
  }

  def run(): String = {
    val localQueryFile = new File(localFilepath)
    val writer = new PrintWriter(localQueryFile)
    commands.foreach(s => {
      writer.write(s"$s;\n")
      logger.info(s)
    })
    writer.close

    localQueryFile.setExecutable(true)
    localQueryFile.setReadable(true)
    localQueryFile.deleteOnExit()

    ScpAction(None, localFilepath, Some(node), remoteFilepath)
    val ignoreErrorFlag = if (ignoreError) "-hiveconf hive.cli.errors.ignore=true" else ""

    val res = SshAction(node, s"hive $ignoreErrorFlag -f $remoteFilepath", returnResult = true, ignoreError)
    SshAction(node, s"rm $remoteFilepath")
    localQueryFile.delete()
    res
  }

  override def toString = {
    commands.mkString("\n")
  }
}

object SshHiveAction {
  def apply(node: Node, statements: List[String], ignoreError: Boolean = false) = {
    val action = new SshHiveAction(node, ignoreError)
    statements.foreach(action.add)
    action.run
  }
} 
Example 36
Source File: Analyser.scala    From ClassDependenceAnalyser   with GNU General Public License v2.0
package com.github.jllk.analyser

import java.io.File
import java.net.{URLClassLoader, URL}

import scala.collection.mutable
import scala.collection.mutable.ListBuffer


object Analyser {
  def notCareClass(fullClassName: String): Boolean =
    fullClassName.startsWith("java") ||
    fullClassName.startsWith("scala") ||
    fullClassName.startsWith("\"[") ||
    (fullClassName.startsWith("android") && !fullClassName.startsWith("android/support"))
}

class Analyser(private val dependenceJarPath: List[File]) {

  import Analyser._

  def analysis(fullClassName: String): mutable.Set[String] = {
    val dependentClasses = mutable.Set[String]()
    val importDependence = analysisImportDependence(fullClassName)
    importDependence
      .foreach(c => {
        dependentClasses += c
        dependentClasses ++= analysisInheritDependence(c)
      })
    dependentClasses
  }

  private def analysisImportDependence(fullClassName: String): List[String] = {
    val dependentClasses = new ListBuffer[String]()
    val classpath = dependenceJarPath.map(f => s"-classpath ${f.toPath}") mkString " "
    val classReport = ProcessUtils.exec(s"javap -verbose $classpath ${fullClassName.replace('.', '/')}")
    val lines = classReport.split('\n')
    lines
      .filter(l => l.contains("= Class") && !l.contains("\"[Ljava/lang/Object;\""))
      .foreach(l => dependentClasses += l.substring(l.indexOf("//") + 2).replaceAll(" ", "").replaceAll("/", "\\.").trim())
    dependentClasses
      .filter(notCareClass)
      .toList
  }

  private def analysisInheritDependence(fullClassName: String): List[String] = {
    val urls = ListBuffer[URL]()
    dependenceJarPath.foreach(f => urls += f.toURI.toURL)
    val classLoader = new URLClassLoader(urls.toArray)
    doClassInheritSearch(fullClassName, classLoader)
  }

  private def doClassInheritSearch(fullClassName: String, classLoader: URLClassLoader): List[String] = {
    if (notCareClass(fullClassName)) {
      List.empty[String]
    } else {

      val dependentClasses = mutable.Set[String]()
      dependentClasses += fullClassName
      dependentClasses ++= analysisImportDependence(fullClassName)
      dependentClasses.foreach(fullClassName => {
        val targetClass: Either[Class[_], Exception] =
          try
            Left(classLoader.loadClass(fullClassName))
          catch {
            case e: ClassNotFoundException => Right(e)
            case e: Exception => Right(e)
          }

        targetClass match {
          case Left(c) =>
            val superclass = c.getSuperclass
            if (superclass != null) {
              dependentClasses ++= doClassInheritSearch(superclass.getName, classLoader)
            }
            c.getInterfaces.foreach(i => dependentClasses ++= doClassInheritSearch(i.getName, classLoader))
          case Right(e) =>
            println(s"[doClassInheritSearch] exception happened: ${e.getMessage}, please check your dependenceJarPath.")
        }

      })
      dependentClasses.toList
    }
  }
} 
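The inherit-dependence step above only needs the dependency jars as classloader URLs; the ListBuffer collects them before an Array[URL] is handed to URLClassLoader. A standalone sketch of that conversion follows; the jar paths are placeholders, not taken from the project.

import java.io.File
import java.net.{URL, URLClassLoader}

import scala.collection.mutable.ListBuffer

object ClassLoaderFromJars {
  def main(args: Array[String]): Unit = {
    // Hypothetical jar locations; replace with real dependency jars
    val dependenceJarPath = List(new File("lib/a.jar"), new File("lib/b.jar"))

    // Collect the URLs in a ListBuffer, then freeze into the Array the classloader needs
    val urls = ListBuffer[URL]()
    dependenceJarPath.foreach(f => urls += f.toURI.toURL)

    val classLoader = new URLClassLoader(urls.toArray)
    println(classLoader.getURLs.mkString(", "))
  }
}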
Example 37
Source File: PosixFileAttributes.scala    From releaser   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc

import java.nio.file.attribute.PosixFilePermission
import java.nio.file.attribute.PosixFilePermission._

import scala.collection.JavaConversions.setAsJavaSet
import scala.collection.mutable.ListBuffer

object PosixFileAttributes {

  implicit def convertToPermissionsSet(mode: Int): java.util.Set[PosixFilePermission] = {

    def isSet(mode: Int, testbit: Int) = {
      (mode & testbit) == testbit
    }

    val result = ListBuffer[PosixFilePermission]()

    if (isSet(mode, 256)) { // 0400
      result += OWNER_READ
    }
    if (isSet(mode, 128)) { // 0200
      result += OWNER_WRITE
    }
    if (isSet(mode, 64)) { // 0100
      result += OWNER_EXECUTE
    }
    if (isSet(mode, 32)) { // 040
      result += GROUP_READ
    }
    if (isSet(mode, 16)) { // 020
      result += GROUP_WRITE
    }
    if (isSet(mode, 8)) { // 010
      result += GROUP_EXECUTE
    }
    if (isSet(mode, 4)) { // 04
      result += OTHERS_READ
    }
    if (isSet(mode, 2)) { // 02
      result += OTHERS_WRITE
    }
    if (isSet(mode, 1)) { // 01
      result += OTHERS_EXECUTE
    }
    setAsJavaSet(result.toSet)
  }

  implicit def convertToInt(permissions: java.util.Set[PosixFilePermission]): Int = {

    var result = 0
    if (permissions.contains(OWNER_READ)) {
      result = result | 256 //0400
    }
    if (permissions.contains(OWNER_WRITE)) {
      result = result | 128 // 0200
    }
    if (permissions.contains(OWNER_EXECUTE)) {
      result = result | 64 // 0100
    }
    if (permissions.contains(GROUP_READ)) {
      result = result | 32 // 040
    }
    if (permissions.contains(GROUP_WRITE)) {
      result = result | 16 // 020
    }
    if (permissions.contains(GROUP_EXECUTE)) {
      result = result | 8 // 010
    }
    if (permissions.contains(OTHERS_READ)) {
      result = result | 4 // 04
    }
    if (permissions.contains(OTHERS_WRITE)) {
      result = result | 2 // 02
    }
    if (permissions.contains(OTHERS_EXECUTE)) {
      result = result | 1 // 01
    }
    result
  }
} 
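A quick way to see the bit tests above in action: 493 decimal is 0755 octal, i.e. rwxr-xr-x. The following self-contained check uses the same masking logic in table form; it is an illustration, not part of the original object.

import java.nio.file.attribute.PosixFilePermission
import java.nio.file.attribute.PosixFilePermission._

import scala.collection.mutable.ListBuffer

object ModeDecoder {
  def decode(mode: Int): Set[PosixFilePermission] = {
    def isSet(testbit: Int) = (mode & testbit) == testbit
    val result = ListBuffer[PosixFilePermission]()
    // Masks listed owner, group, others, from read down to execute
    val masks = Seq(
      256 -> OWNER_READ, 128 -> OWNER_WRITE, 64 -> OWNER_EXECUTE,
      32 -> GROUP_READ, 16 -> GROUP_WRITE, 8 -> GROUP_EXECUTE,
      4 -> OTHERS_READ, 2 -> OTHERS_WRITE, 1 -> OTHERS_EXECUTE)
    masks.foreach { case (bit, perm) => if (isSet(bit)) result += perm }
    result.toSet
  }

  def main(args: Array[String]): Unit = {
    println(decode(493)) // 0755 -> owner rwx, group r-x, others r-x
  }
}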
Example 38
Source File: TgzTransformerSpec.scala    From releaser   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.releaser

import java.io._
import java.nio.file.{Files, Path}

import org.apache.commons.compress.archivers.tar.{TarArchiveEntry, TarArchiveInputStream}
import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream
import org.apache.commons.io.FileUtils
import org.scalatest._

import scala.collection.mutable.ListBuffer
import scala.util.{Failure, Success}

class TgzTransformerSpec extends WordSpec with Matchers with BeforeAndAfterEach with OptionValues with TryValues{

  val tgzPath = new File(this.getClass.getResource("/help-frontend/uk/gov/hmrc/help-frontend_2.11/1.26.0-3-gd7ed03c/help-frontend_2.11-1.26.0-3-gd7ed03c.tgz").toURI).toPath

  var transformer:TgzTransformer = _
  val candidate_1_26_0_3_gd7ed03c = ReleaseCandidateVersion("1.26.0-3-gd7ed03c")
  val release_1_4_0 = ReleaseVersion("1.4.0")
  var tmpDir:Path = _

  override def beforeEach(){
    tmpDir = Files.createTempDirectory("tmp")
    transformer = new TgzTransformer()
    FileUtils.copyFileToDirectory(tgzPath.toFile, tmpDir.toFile)
  }

  override def afterEach(){
    FileUtils.deleteDirectory(tmpDir.toFile)
  }

  "the transformer" should {

    "decompress the tgz, rename the main folder and compress it back" in {

      val inFile = new File(tmpDir.toFile, tgzPath.getFileName.toString).toPath
      val targetFilePath = tmpDir.resolve("help-frontend-1.4.0.tgz")

      val originalTarEntries = listTgzEntries(inFile)
      assertTarEntry(originalTarEntries, "./help-frontend-1.26.0-3-gd7ed03c/")
      assertTarEntry(originalTarEntries, "./help-frontend-1.4.0/", exists = false)
      assertTarEntry(originalTarEntries, "./start-docker.sh", mode = Some(493))

      val outFileTry = transformer(inFile, "help-frontend", candidate_1_26_0_3_gd7ed03c, release_1_4_0, targetFilePath)
      outFileTry match {
        case Success(outFile) =>
          val tarEntries = listTgzEntries(targetFilePath)
          assertTarEntry(tarEntries, "./help-frontend-1.26.0-3-gd7ed03c/", exists = false)
          assertTarEntry(tarEntries, "./help-frontend-1.4.0/")
          assertTarEntry(tarEntries, "./start-docker.sh", mode = Some(493))
        case Failure(e) => fail("Caught exception: " + e.getMessage, e)
      }


    }
  }

  private def listTgzEntries(localTgzFile: Path) : List[TarArchiveEntry] =  {
    val bytes = new Array[Byte](2048)
    val fin = new BufferedInputStream(new FileInputStream(localTgzFile.toFile))
    val gzIn = new GzipCompressorInputStream(fin)
    val tarIn = new TarArchiveInputStream(gzIn)

    val entries = ListBuffer[TarArchiveEntry]()

    Iterator.continually(tarIn.getNextTarEntry).takeWhile(_ != null).foreach { tarEntry =>
      entries += tarEntry
    }

    tarIn.close()

    entries.toList

  }

  private def assertTarEntry(tarEntries: List[TarArchiveEntry], entryName: String, exists: Boolean = true, mode: Option[Int] = None) = {
    val entryOption = tarEntries.find(_.getName == entryName)
    entryOption match {
      case Some(entry) =>
        exists shouldBe true
        mode.foreach { m => m shouldBe entry.getMode}
      case None => exists shouldBe false
    }

  }

} 
Example 39
Source File: WebSocketMessagePushManager.scala    From ez-framework   with Apache License 2.0 5 votes vote down vote up
package com.ecfront.ez.framework.service.gateway

import com.ecfront.common.JsonHelper
import com.ecfront.ez.framework.core.logger.Logging
import io.vertx.core.Handler
import io.vertx.core.http._

import scala.collection.mutable.ListBuffer


  def remove(path: String, matchAll: Boolean = true): Unit = {
    if (!matchAll) {
      webSocketContainer.keys.filter(_.startsWith(path)).foreach {
        webSocketContainer -= _
      }
    } else {
      webSocketContainer -= path
    }
  }

} 
Example 40
Source File: Predictor.scala    From sona   with Apache License 2.0 5 votes vote down vote up
package com.tencent.angel.sona.ml.common
import com.tencent.angel.mlcore.conf.{MLCoreConf, SharedConf}
import com.tencent.angel.ml.math2.utils.{DataBlock, LabeledData}
import org.apache.spark.broadcast.Broadcast
import com.tencent.angel.sona.ml.common.MathImplicits._
import com.tencent.angel.sona.core.{AngelGraphModel, ExecutorContext}
import com.tencent.angel.sona.data.LocalMemoryDataBlock
import org.apache.spark.linalg
import org.apache.spark.linalg.Vectors
import org.apache.spark.sql.types.{DoubleType, StructField, StructType}
import org.apache.spark.sql.{Row, SPKSQLUtils}

import scala.collection.mutable.ListBuffer

class Predictor(bcValue: Broadcast[ExecutorContext],
                featIdx: Int, predictionCol: String, probabilityCol: String,
                bcConf: Broadcast[SharedConf]) extends Serializable {

  @transient private lazy val executorContext: ExecutorContext = {
    bcValue.value
  }

  @transient private lazy implicit val dim: Long = {
    executorContext.conf.getLong(MLCoreConf.ML_FEATURE_INDEX_RANGE)
  }

  @transient private lazy val appendedSchema: StructType = if (probabilityCol.nonEmpty) {
    new StructType(Array[StructField](StructField(probabilityCol, DoubleType),
      StructField(predictionCol, DoubleType)))
  } else {
    new StructType(Array[StructField](StructField(predictionCol, DoubleType)))
  }

  def predictRDD(data: Iterator[Row]): Iterator[Row] = {
    val localModel = executorContext.borrowModel(bcConf.value)
    val batchSize = 1024
    val storage = new LocalMemoryDataBlock(batchSize, batchSize * 1024 * 1024)

    var count = 0
    val cachedRows: Array[Row] = new Array[Row](batchSize)
    val result: ListBuffer[Row] = ListBuffer[Row]()
    data.foreach {
      case row if count != 0 && count % batchSize == 0 =>
        predictInternal(localModel, storage, cachedRows, result)

        storage.clean()
        storage.put(new LabeledData(row.get(featIdx).asInstanceOf[linalg.Vector], 0.0))
        cachedRows(count % batchSize) = row
        count += 1
      case row =>
        storage.put(new LabeledData(row.get(featIdx).asInstanceOf[linalg.Vector], 0.0))
        cachedRows(count % batchSize) = row
        count += 1
    }

    predictInternal(localModel, storage, cachedRows, result)

    executorContext.returnModel(localModel)

    result.toIterator
  }

  private def predictInternal(model: AngelGraphModel,
                              storage: DataBlock[LabeledData],
                              cachedRows: Array[Row],
                              result: ListBuffer[Row]): Unit = {
    val predicted = model.predict(storage)

    if (appendedSchema.length == 1) {
      predicted.zipWithIndex.foreach {
        case (res, idx) =>
          result.append(SPKSQLUtils.append(cachedRows(idx), appendedSchema, res.pred))
      }
    } else {
      predicted.zipWithIndex.foreach {
        case (res, idx) =>
          result.append(SPKSQLUtils.append(cachedRows(idx), appendedSchema, res.proba, res.predLabel))
      }
    }

  }

  def predictRaw(features: linalg.Vector): linalg.Vector = {
    val localModel = executorContext.borrowModel(bcConf.value)

    val res = localModel.predict(new LabeledData(features, 0.0))

    executorContext.returnModel(localModel)
    Vectors.dense(res.pred, -res.pred)
  }
} 
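The predict loop above flushes a full batch before starting to fill the next one, collecting appended rows in a ListBuffer. The control flow is easier to see in a small standalone sketch that batches plain integers; the batch size and the process step are illustrative only.

import scala.collection.mutable.ListBuffer

object BatchedIterator {
  def main(args: Array[String]): Unit = {
    val batchSize = 3
    val result = ListBuffer[Int]()
    val batch = ListBuffer[Int]()

    // Stand-in for the model call: here we just sum the buffered batch
    def process(): Unit = {
      if (batch.nonEmpty) result += batch.sum
      batch.clear()
    }

    var count = 0
    (1 to 10).iterator.foreach { x =>
      if (count != 0 && count % batchSize == 0) process() // flush the full batch first
      batch += x                                          // then start filling the next one
      count += 1
    }
    process() // flush the trailing partial batch

    println(result.toList) // List(6, 15, 24, 10)
  }
}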
Example 41
Source File: RollingFileWriterSuite.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.atlas.persistence

import java.io.File
import java.nio.file.Files
import java.nio.file.Paths

import com.netflix.atlas.core.model.Datapoint
import com.netflix.spectator.api.NoopRegistry
import org.apache.avro.file.DataFileReader
import org.apache.avro.specific.SpecificDatumReader
import org.scalatest.BeforeAndAfter
import org.scalatest.BeforeAndAfterAll
import org.scalatest.funsuite.AnyFunSuite

import scala.collection.mutable.ListBuffer

class RollingFileWriterSuite extends AnyFunSuite with BeforeAndAfter with BeforeAndAfterAll {

  private val outputDir = "./target/unitTestAvroOutput"
  private val registry = new NoopRegistry

  before {
    listFilesSorted(outputDir).foreach(_.delete()) // Clean up leftover files if they exist
    Files.createDirectories(Paths.get(outputDir))
  }

  after {
    listFilesSorted(outputDir).foreach(_.delete())
    Files.deleteIfExists(Paths.get(outputDir))
  }

  // Write 3 datapoints: the first 2 go to file 1, then the writer rolls over and the 3rd goes to file 2
  test("avro writer rollover by max records") {
    val rollingConf = RollingConfig(2, 12000, 12000)
    val hourStart = 3600000
    val hourEnd = 7200000
    val writer =
      new RollingFileWriter(s"$outputDir/prefix", rollingConf, hourStart, hourEnd, registry)
    writer.initialize()
    createData(hourStart, 0, 1, 2).foreach(writer.write)
    writer.write(Datapoint(Map.empty, hourEnd, 3)) // out of range, should be ignored
    writer.close()

    // Check num of files
    val files = listFilesSorted(outputDir)
    assert(files.size == 2)

    // Check file 1 records
    val file1 = files.head
    assert(file1.getName.endsWith(".0000-0001"))
    val dpArray1 = readAvro(file1)
    assert(dpArray1.size == 2)
    assert(dpArray1(0).getValue == 0)
    assert(dpArray1(0).getTags.get("node") == "0")
    assert(dpArray1(1).getValue == 1)
    assert(dpArray1(1).getTags.get("node") == "1")

    // Check file 2 records
    val file2 = files.last
    assert(file2.getName.endsWith(".0002-0002"))
    val dpArray2 = readAvro(file2)
    assert(dpArray2.size == 1)
    assert(dpArray2(0).getValue == 2)
    assert(dpArray2(0).getTags.get("node") == "2")
  }

  private def createData(startTime: Long, values: Double*): List[Datapoint] = {
    values.toList.zipWithIndex.map {
      case (v, i) =>
        val tags = Map(
          "name" -> "cpu",
          "node" -> s"$i"
        )
        Datapoint(tags, startTime + i * 1000, v, 60000)
    }
  }

  private def listFilesSorted(dir: String): List[File] = {
    val d = new File(dir)
    if (!d.exists()) {
      Nil
    } else {
      new File(dir).listFiles().filter(_.isFile).toList.sortBy(_.getName)
    }
  }

  private def readAvro(file: File): Array[AvroDatapoint] = {
    val userDatumReader = new SpecificDatumReader[AvroDatapoint](classOf[AvroDatapoint])
    val dataFileReader = new DataFileReader[AvroDatapoint](file, userDatumReader)
    val dpListBuf = ListBuffer.empty[AvroDatapoint]
    try {
      while (dataFileReader.hasNext) {
        dpListBuf.addOne(dataFileReader.next)
      }
    } finally {
      dataFileReader.close()
    }
    dpListBuf.toArray
  }
} 
Example 42
Source File: DummyEmailService.scala    From scala-clippy   with Apache License 2.0 5 votes vote down vote up
package util.email

import com.typesafe.scalalogging.StrictLogging

import scala.collection.mutable.ListBuffer
import scala.concurrent.Future

class DummyEmailService extends EmailService with StrictLogging {
  private val sentEmails: ListBuffer[(String, String, String)] = ListBuffer()

  logger.info("Using dummy email service")

  def reset() {
    sentEmails.clear()
  }

  override def send(to: String, subject: String, body: String) = {
    this.synchronized {
      sentEmails += ((to, subject, body))
    }

    logger.info(s"Would send email to $to, with subject: $subject, body: $body")
    Future.successful(())
  }

  def wasEmailSent(to: String, subject: String): Boolean =
    sentEmails.exists(email => email._1.contains(to) && email._2 == subject)
} 
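The service above boils down to a synchronized ListBuffer of (to, subject, body) tuples plus a predicate over it. Here is a dependency-free sketch of that recording pattern, without the EmailService trait or logging from the original.

import scala.collection.mutable.ListBuffer

object SentEmailRecorder {
  private val sentEmails = ListBuffer[(String, String, String)]()

  def record(to: String, subject: String, body: String): Unit =
    this.synchronized { sentEmails += ((to, subject, body)) }

  def wasEmailSent(to: String, subject: String): Boolean =
    this.synchronized { sentEmails.exists(e => e._1.contains(to) && e._2 == subject) }

  def main(args: Array[String]): Unit = {
    record("a@example.com", "hello", "body")
    println(wasEmailSent("a@example.com", "hello")) // true
    println(wasEmailSent("b@example.com", "hello")) // false
  }
}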
Example 43
Source File: ClippySbtPlugin.scala    From scala-clippy   with Apache License 2.0 5 votes vote down vote up
package com.softwaremill.clippy

import sbt._
import sbt.Keys._

import scala.collection.mutable.ListBuffer

object ClippySbtPlugin extends AutoPlugin {
  object ClippyColor extends Enumeration {
    val Black        = Value("black")
    val LightGray    = Value("light-gray")
    val DarkGray     = Value("dark-gray")
    val Red          = Value("red")
    val LightRed     = Value("light-red")
    val Green        = Value("green")
    val LightGreen   = Value("light-green")
    val Yellow       = Value("yellow")
    val LightYellow  = Value("light-yellow")
    val Blue         = Value("blue")
    val LightBlue    = Value("light-blue")
    val Magenta      = Value("magenta")
    val LightMagenta = Value("light-magenta")
    val Cyan         = Value("cyan")
    val LightCyan    = Value("light-cyan")
    val White        = Value("white")
    val None         = Value("none")
  }

  object WarningPatterns {
    val NonExhaustiveMatch = "match may not be exhaustive[\\s\\S]*"
  }

  object autoImport {
    val clippyColorsEnabled = settingKey[Boolean]("Should Clippy color type mismatch diffs and highlight syntax")
    val clippyColorDiff     = settingKey[Option[ClippyColor.Value]]("The color to use for diffs, if other than default")
    val clippyColorComment =
      settingKey[Option[ClippyColor.Value]]("The color to use for comments, if other than default")
    val clippyColorType = settingKey[Option[ClippyColor.Value]]("The color to use for types, if other than default")
    val clippyColorLiteral =
      settingKey[Option[ClippyColor.Value]]("The color to use for literals, if other than default")
    val clippyColorKeyword =
      settingKey[Option[ClippyColor.Value]]("The color to use for keywords, if other than default")
    val clippyColorReset =
      settingKey[Option[ClippyColor.Value]]("The color to use for resetting to neutral, if other than default")
    val clippyUrl = settingKey[Option[String]]("Url from which to fetch advice, if other than default")
    val clippyLocalStoreDir =
      settingKey[Option[String]]("Directory where cached advice data should be stored, if other than default")
    val clippyProjectRoot =
      settingKey[Option[String]]("Project root in which project-specific advice is stored, if any")
    val clippyFatalWarnings =
      settingKey[List[String]]("Regular expressions of warning messages which should fail compilation")
    val NonExhaustiveMatch = "match may not be exhaustive[\\s\\S]*"
  }

  // in ~/.sbt auto import doesn't work, so providing aliases here for convenience
  val clippyColorsEnabled = autoImport.clippyColorsEnabled
  val clippyColorDiff     = autoImport.clippyColorDiff
  val clippyColorComment  = autoImport.clippyColorComment
  val clippyColorType     = autoImport.clippyColorType
  val clippyColorLiteral  = autoImport.clippyColorLiteral
  val clippyColorKeyword  = autoImport.clippyColorKeyword
  val clippyColorReset    = autoImport.clippyColorReset
  val clippyUrl           = autoImport.clippyUrl
  val clippyLocalStoreDir = autoImport.clippyLocalStoreDir
  val clippyProjectRoot   = autoImport.clippyProjectRoot
  val clippyFatalWarnings = autoImport.clippyFatalWarnings

  override def projectSettings = Seq(
    clippyColorsEnabled := false,
    clippyColorDiff := None,
    clippyColorComment := None,
    clippyColorType := None,
    clippyColorLiteral := None,
    clippyColorKeyword := None,
    clippyColorReset := None,
    clippyUrl := None,
    clippyLocalStoreDir := None,
    clippyProjectRoot := None,
    clippyFatalWarnings := Nil,
    addCompilerPlugin("com.softwaremill.clippy" %% "plugin" % ClippyBuildInfo.version classifier "bundle"),
    scalacOptions := {
      val result = ListBuffer(scalacOptions.value: _*)
      if (clippyColorsEnabled.value) result += "-P:clippy:colors=true"
      clippyColorDiff.value.foreach(c => result += s"-P:clippy:colors-diff=$c")
      clippyColorComment.value.foreach(c => result += s"-P:clippy:colors-comment=$c")
      clippyColorType.value.foreach(c => result += s"-P:clippy:colors-type=$c")
      clippyColorLiteral.value.foreach(c => result += s"-P:clippy:colors-literal=$c")
      clippyColorKeyword.value.foreach(c => result += s"-P:clippy:colors-keyword=$c")
      clippyColorReset.value.foreach(c => result += s"-P:clippy:colors-reset=$c")
      clippyUrl.value.foreach(c => result += s"-P:clippy:url=$c")
      clippyLocalStoreDir.value.foreach(c => result += s"-P:clippy:store=$c")
      clippyProjectRoot.value.foreach(c => result += s"-P:clippy:projectRoot=$c")
      if (clippyFatalWarnings.value.nonEmpty)
        result += s"-P:clippy:fatalWarnings=${clippyFatalWarnings.value.mkString("|")}"
      result.toList
    }
  )

  override def trigger = allRequirements
} 
Example 44
Source File: ScalaMetrics.scala    From sonar-scala   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.buransky.plugins.scoverage.measure

import org.sonar.api.measures.{CoreMetrics, Metric, Metrics}
import org.sonar.api.measures.Metric.ValueType
import scala.collection.JavaConversions._
import scala.collection.mutable.ListBuffer


class ScalaMetrics extends Metrics {
  override def getMetrics = ListBuffer(ScalaMetrics.statementCoverage, ScalaMetrics.coveredStatements, ScalaMetrics.totalStatements).toList
}

object ScalaMetrics {
  private val STATEMENT_COVERAGE_KEY = "scoverage"
  private val COVERED_STATEMENTS_KEY = "covered_statements"
  private val TOTAL_STATEMENTS_KEY = "total_statements"

  lazy val statementCoverage = new Metric.Builder(STATEMENT_COVERAGE_KEY,
    "Statement coverage", ValueType.PERCENT)
    .setDescription("Statement coverage by tests")
    .setDirection(Metric.DIRECTION_BETTER)
    .setQualitative(true)
    .setDomain(CoreMetrics.DOMAIN_TESTS)
    .setWorstValue(0.0)
    .setBestValue(100.0)
    .create[java.lang.Double]()

  lazy val coveredStatements = new Metric.Builder(COVERED_STATEMENTS_KEY,
    "Covered statements", Metric.ValueType.INT)
    .setDescription("Number of statements covered by tests")
    .setDirection(Metric.DIRECTION_BETTER)
    .setQualitative(false)
    .setDomain(CoreMetrics.DOMAIN_SIZE)
    .create[java.lang.Integer]()
    
  lazy val totalStatements = new Metric.Builder(TOTAL_STATEMENTS_KEY,
    "Total statements", Metric.ValueType.INT)
    .setDescription("Number of all statements covered by tests and uncovered")
    .setDirection(Metric.DIRECTION_BETTER)
    .setQualitative(false)
    .setDomain(CoreMetrics.DOMAIN_SIZE)
    .create[java.lang.Integer]()        
} 
Example 45
Source File: ScalaPlugin.scala    From sonar-scala   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.sagacify.sonar.scala

import scala.collection.JavaConversions._
import scala.collection.mutable.ListBuffer

import com.buransky.plugins.scoverage.measure.ScalaMetrics
import com.buransky.plugins.scoverage.sensor.ScoverageSensor
import com.buransky.plugins.scoverage.widget.ScoverageWidget
import com.ncredinburgh.sonar.scalastyle.ScalastyleQualityProfile
import com.ncredinburgh.sonar.scalastyle.ScalastyleRepository
import com.ncredinburgh.sonar.scalastyle.ScalastyleSensor
import org.sonar.api.config.Settings
import org.sonar.api.Extension
import org.sonar.api.resources.AbstractLanguage
import org.sonar.api.SonarPlugin
import scalariform.lexer.ScalaLexer
import scalariform.lexer.Token


class ScalaPlugin extends SonarPlugin {

  override def getExtensions: java.util.List[Class[_]] =
    ListBuffer[Class[_]] (
      classOf[Scala],
      classOf[ScalaSensor],
      classOf[ScalastyleRepository],
      classOf[ScalastyleQualityProfile],
      classOf[ScalastyleSensor],
      classOf[ScalaMetrics],
      classOf[ScoverageSensor],
      classOf[ScoverageWidget]
    )

  override val toString = getClass.getSimpleName

} 
Example 46
Source File: WorkflowHistory.scala    From flamy   with Apache License 2.0 5 votes vote down vote up
package com.flaminem.flamy.exec.utils

import com.flaminem.flamy.exec.utils.Workflow.Status

import scala.collection.mutable.ListBuffer


trait WorkflowHistory[T] extends Workflow[T]{

  private val history: ListBuffer[(T, Status)] = ListBuffer[(T, Status)]()

  override def todo(v: T): Unit = {
    history += v -> Workflow.Status.TODO
    super.todo(v)
  }

  override def running(v: T): Unit = {
    history += v -> Workflow.Status.RUNNING
    super.running(v)
  }

  override def successful(v: T): Unit = {
    history += v -> Workflow.Status.SUCCESSFUL
    super.successful(v)
  }

  override def failed(v: T): Unit = {
    history += v -> Workflow.Status.FAILED
    super.failed(v)
  }

  override def skipped(v: T): Unit = {
    history += v -> Workflow.Status.SKIPPED
    super.skipped(v)
  }

  override def interrupting(v: T): Unit = {
    history += v -> Workflow.Status.INTERRUPTING
    super.interrupting(v)
  }

  override def interrupted(v: T): Unit = {
    history += v -> Workflow.Status.INTERRUPTED
    super.interrupted(v)
  }

  def getHistory: Seq[(T, Status)] = history

} 
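Stripped of the Workflow trait, the history is just an append-only ListBuffer of (item, status) pairs. A self-contained version of that bookkeeping is sketched below; the Status values are a stand-in for Workflow.Status, which lives in the flamy project.

import scala.collection.mutable.ListBuffer

object HistoryRecorder {
  // Stand-in for Workflow.Status
  sealed trait Status
  case object Todo extends Status
  case object Running extends Status
  case object Successful extends Status

  private val history = ListBuffer[(String, Status)]()

  def record(item: String, status: Status): Unit = history += item -> status

  def main(args: Array[String]): Unit = {
    record("table_a", Todo)
    record("table_a", Running)
    record("table_a", Successful)
    println(history.toList)
  }
}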
Example 47
Source File: AuthServiceJWT.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.auth

import java.util.concurrent.{CompletableFuture, CompletionStage}

import com.daml.lf.data.Ref
import com.daml.jwt.{JwtVerifier, JwtVerifierBase}
import com.daml.ledger.api.auth.AuthServiceJWT.Error
import io.grpc.Metadata
import org.slf4j.{Logger, LoggerFactory}
import spray.json._

import scala.collection.mutable.ListBuffer
import scala.util.Try


class AuthServiceJWT(verifier: JwtVerifierBase) extends AuthService {

  protected val logger: Logger = LoggerFactory.getLogger(AuthServiceJWT.getClass)

  override def decodeMetadata(headers: Metadata): CompletionStage[Claims] = {
    decodeAndParse(headers).fold(
      error => {
        logger.warn("Authorization error: " + error.message)
        CompletableFuture.completedFuture(Claims.empty)
      },
      token => CompletableFuture.completedFuture(payloadToClaims(token))
    )
  }

  private[this] def parsePayload(jwtPayload: String): Either[Error, AuthServiceJWTPayload] = {
    import AuthServiceJWTCodec.JsonImplicits._
    Try(JsonParser(jwtPayload).convertTo[AuthServiceJWTPayload]).toEither.left.map(t =>
      Error("Could not parse JWT token: " + t.getMessage))
  }

  private[this] def decodeAndParse(headers: Metadata): Either[Error, AuthServiceJWTPayload] = {
    val bearerTokenRegex = "Bearer (.*)".r

    for {
      headerValue <- Option
        .apply(headers.get(AUTHORIZATION_KEY))
        .toRight(Error("Authorization header not found"))
      token <- bearerTokenRegex
        .findFirstMatchIn(headerValue)
        .map(_.group(1))
        .toRight(Error("Authorization header does not use Bearer format"))
      decoded <- verifier
        .verify(com.daml.jwt.domain.Jwt(token))
        .toEither
        .left
        .map(e => Error("Could not verify JWT token: " + e.message))
      parsed <- parsePayload(decoded.payload)
    } yield parsed
  }

  private[this] def payloadToClaims(payload: AuthServiceJWTPayload): Claims = {
    val claims = ListBuffer[Claim]()

    // Any valid token authorizes the user to use public services
    claims.append(ClaimPublic)

    if (payload.admin)
      claims.append(ClaimAdmin)

    payload.actAs
      .foreach(party => claims.append(ClaimActAsParty(Ref.Party.assertFromString(party))))

    payload.readAs
      .foreach(party => claims.append(ClaimReadAsParty(Ref.Party.assertFromString(party))))

    Claims(
      claims = claims.toList,
      ledgerId = payload.ledgerId,
      participantId = payload.participantId,
      applicationId = payload.applicationId,
      expiration = payload.exp,
    )
  }
}

object AuthServiceJWT {
  final case class Error(message: String)

  def apply(verifier: com.auth0.jwt.interfaces.JWTVerifier) =
    new AuthServiceJWT(new JwtVerifier(verifier))

  def apply(verifier: JwtVerifierBase) =
    new AuthServiceJWT(verifier)
} 
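payloadToClaims shows a common append-then-freeze pattern: conditionally append claims to a ListBuffer, then expose the immutable toList. A reduced sketch with a hypothetical payload and claim model follows; the real Claim and Claims types live in the daml codebase and differ from these stand-ins.

import scala.collection.mutable.ListBuffer

object ClaimsBuilder {
  // Hypothetical stand-ins for the Claim hierarchy
  sealed trait Claim
  case object ClaimPublic extends Claim
  case object ClaimAdmin extends Claim
  final case class ClaimActAsParty(party: String) extends Claim

  final case class Payload(admin: Boolean, actAs: List[String])

  def payloadToClaims(payload: Payload): List[Claim] = {
    val claims = ListBuffer[Claim]()
    claims.append(ClaimPublic)                   // every valid token gets the public claim
    if (payload.admin) claims.append(ClaimAdmin) // admin flag adds the admin claim
    payload.actAs.foreach(p => claims.append(ClaimActAsParty(p)))
    claims.toList
  }

  def main(args: Array[String]): Unit =
    println(payloadToClaims(Payload(admin = true, actAs = List("Alice"))))
}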
Example 48
Source File: LedgerTestSuite.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testtool.infrastructure

import com.daml.ledger.api.testtool.infrastructure.Allocation.{ParticipantAllocation, Participants}
import com.daml.lf.data.Ref

import scala.collection.mutable.ListBuffer
import scala.concurrent.{ExecutionContext, Future}

private[testtool] abstract class LedgerTestSuite {
  val name: String = getClass.getSimpleName

  private val testCaseBuffer: ListBuffer[LedgerTestCase] = ListBuffer()

  final lazy val tests: Vector[LedgerTestCase] = testCaseBuffer.toVector

  protected final def test(
      shortIdentifier: String,
      description: String,
      participants: ParticipantAllocation,
      timeoutScale: Double = 1.0,
  )(testCase: ExecutionContext => Participants => Future[Unit]): Unit = {
    val shortIdentifierRef = Ref.LedgerString.assertFromString(shortIdentifier)
    testCaseBuffer.append(
      new LedgerTestCase(
        this,
        shortIdentifierRef,
        description,
        timeoutScale,
        participants,
        testCase,
      ),
    )
  }
} 
Example 49
Source File: FileBasedLedgerDataExporter.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.participant.state.kvutils.export

import java.io.DataOutputStream
import java.time.Instant
import java.util.concurrent.locks.StampedLock

import com.daml.ledger.participant.state.v1.ParticipantId
import com.daml.ledger.validator.LedgerStateOperations.{Key, Value}
import com.google.protobuf.ByteString

import scala.collection.mutable
import scala.collection.mutable.ListBuffer


class FileBasedLedgerDataExporter(output: DataOutputStream) extends LedgerDataExporter {
  import FileBasedLedgerDataExporter._

  private val outputLock = new StampedLock

  private[export] val correlationIdMapping = mutable.Map.empty[String, String]
  private[export] val inProgressSubmissions = mutable.Map.empty[String, SubmissionInfo]
  private[export] val bufferedKeyValueDataPerCorrelationId =
    mutable.Map.empty[String, mutable.ListBuffer[(Key, Value)]]

  def addSubmission(
      submissionEnvelope: ByteString,
      correlationId: String,
      recordTimeInstant: Instant,
      participantId: ParticipantId): Unit =
    this.synchronized {
      inProgressSubmissions.put(
        correlationId,
        SubmissionInfo(submissionEnvelope, correlationId, recordTimeInstant, participantId))
      ()
    }

  def addParentChild(parentCorrelationId: String, childCorrelationId: String): Unit =
    this.synchronized {
      correlationIdMapping.put(childCorrelationId, parentCorrelationId)
      ()
    }

  def addToWriteSet(correlationId: String, data: Iterable[(Key, Value)]): Unit =
    this.synchronized {
      correlationIdMapping
        .get(correlationId)
        .foreach { parentCorrelationId =>
          val keyValuePairs = bufferedKeyValueDataPerCorrelationId
            .getOrElseUpdate(parentCorrelationId, ListBuffer.empty)
          keyValuePairs.appendAll(data)
          bufferedKeyValueDataPerCorrelationId.put(parentCorrelationId, keyValuePairs)
        }
    }

  def finishedProcessing(correlationId: String): Unit = {
    val (submissionInfo, bufferedData) = this.synchronized {
      (
        inProgressSubmissions.get(correlationId),
        bufferedKeyValueDataPerCorrelationId.get(correlationId))
    }
    submissionInfo.foreach { submission =>
      bufferedData.foreach(writeSubmissionData(submission, _))
      this.synchronized {
        inProgressSubmissions.remove(correlationId)
        bufferedKeyValueDataPerCorrelationId.remove(correlationId)
        correlationIdMapping
          .collect {
            case (key, value) if value == correlationId => key
          }
          .foreach(correlationIdMapping.remove)
      }
    }
  }

  private def writeSubmissionData(
      submissionInfo: SubmissionInfo,
      writeSet: ListBuffer[(Key, Value)]): Unit = {
    val stamp = outputLock.writeLock()
    try {
      Serialization.serializeEntry(submissionInfo, writeSet, output)
      output.flush()
    } finally {
      outputLock.unlock(stamp)
    }
  }
}

object FileBasedLedgerDataExporter {
  case class SubmissionInfo(
      submissionEnvelope: ByteString,
      correlationId: String,
      recordTimeInstant: Instant,
      participantId: ParticipantId)

  type WriteSet = Seq[(Key, Value)]
} 
Example 50
Source File: WriteRecordingLedgerStateOperations.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.participant.state.kvutils.tools

import com.daml.ledger.participant.state.kvutils.export.FileBasedLedgerDataExporter.WriteSet
import com.daml.ledger.validator.LedgerStateOperations
import com.daml.ledger.validator.LedgerStateOperations.{Key, Value}

import scala.collection.mutable.ListBuffer
import scala.concurrent.Future

class WriteRecordingLedgerStateOperations[LogResult](delegate: LedgerStateOperations[LogResult])
    extends LedgerStateOperations[LogResult]
    with QueryableWriteSet {
  private val recordedWriteSet = ListBuffer.empty[(Key, Value)]

  override def readState(key: Key): Future[Option[Value]] = delegate.readState(key)

  override def readState(keys: Seq[Key]): Future[Seq[Option[Value]]] = delegate.readState(keys)

  override def writeState(key: Key, value: Value): Future[Unit] = {
    this.synchronized(recordedWriteSet.append((key, value)))
    delegate.writeState(key, value)
  }

  override def writeState(keyValuePairs: Seq[(Key, Value)]): Future[Unit] = {
    this.synchronized(recordedWriteSet.appendAll(keyValuePairs))
    delegate.writeState(keyValuePairs)
  }

  override def appendToLog(key: Key, value: Value): Future[LogResult] = {
    this.synchronized(recordedWriteSet.append((key, value)))
    delegate.appendToLog(key, value)
  }

  override def getAndClearRecordedWriteSet(): WriteSet = {
    this.synchronized {
      val result = Seq(recordedWriteSet: _*)
      recordedWriteSet.clear()
      result
    }
  }
} 
Example 51
Source File: LoggerSimulation.scala    From BigData-News   with Apache License 2.0 5 votes vote down vote up
package com.vita.spark.utils

import java.io.PrintWriter
import java.net.ServerSocket

class LoggerSimulation {

}

object LoggerSimulation {

  var numIndex = 0

  /**
    * Generate a letter
    *
    * @param index the index of the letter (0 to 25)
    * @return the generated letter
    */
  def gennerateContent(index: Int): String = {
    import scala.collection.mutable.ListBuffer
    val charList = ListBuffer[Char]()
    for (i <- 65 to 90) {
      charList += i.toChar
    }
    val charArray = charList.toArray
    charArray(index).toString
  }

  def gennerateNumber(): String = {
    //    numIndex += 1
    //    return numIndex.toString
    return "a,b,c,d,e,f"
  }

  /**
    * Generate a random index
    *
    * @return a random index between 0 and 6
    */
  def index = {
    import java.util.Random
    val rdm = new Random()
    rdm.nextInt(7)
  }

  /**
    * Start a main method that creates a ServerSocket and sends messages
    *
    * @param args the port and the sending interval in milliseconds
    */
  def main(args: Array[String]): Unit = {
    if (args.length != 2) {
      System.err.println("Usage:<port><millisecond>")
      System.exit(1);
    }

    val listener = new ServerSocket(args(0).toInt)
    println("已经做好连接的准备-------")
    while (true) {
      val socket = listener.accept()
      new Thread() {
        override def run(): Unit = {
          println("Got client connected from:" + socket.getInetAddress)
          val out = new PrintWriter(socket.getOutputStream, true)
          while (true) {
            Thread.sleep(args(1).toLong)
            //            val content = gennerateContent(index)
            val content = gennerateNumber()
            println(content)
            out.write(content + "\n")
            out.flush()
          }
          socket.close()
        }
      }.start()
    }
  }
} 
Example 52
Source File: NumericParser.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.mllib.util

import java.util.StringTokenizer

import scala.collection.mutable.{ArrayBuilder, ListBuffer}

import org.apache.spark.SparkException


  def parse(s: String): Any = {
    val tokenizer = new StringTokenizer(s, "()[],", true)
    if (tokenizer.hasMoreTokens()) {
      val token = tokenizer.nextToken()
      if (token == "(") {
        parseTuple(tokenizer)
      } else if (token == "[") {
        parseArray(tokenizer)
      } else {
        // expecting a number
        parseDouble(token)
      }
    } else {
      throw new SparkException(s"Cannot find any token from the input string.")
    }
  }

  private def parseArray(tokenizer: StringTokenizer): Array[Double] = {
    val values = ArrayBuilder.make[Double]
    var parsing = true
    var allowComma = false
    var token: String = null
    while (parsing && tokenizer.hasMoreTokens()) {
      token = tokenizer.nextToken()
      if (token == "]") {
        parsing = false
      } else if (token == ",") {
        if (allowComma) {
          allowComma = false
        } else {
          throw new SparkException("Found a ',' at a wrong position.")
        }
      } else {
        // expecting a number
        values += parseDouble(token)
        allowComma = true
      }
    }
    if (parsing) {
      throw new SparkException(s"An array must end with ']'.")
    }
    values.result()
  }

  private def parseTuple(tokenizer: StringTokenizer): Seq[_] = {
    val items = ListBuffer.empty[Any]
    var parsing = true
    var allowComma = false
    var token: String = null
    while (parsing && tokenizer.hasMoreTokens()) {
      token = tokenizer.nextToken()
      if (token == "(") {
        items.append(parseTuple(tokenizer))
        allowComma = true
      } else if (token == "[") {
        items.append(parseArray(tokenizer))
        allowComma = true
      } else if (token == ",") {
        if (allowComma) {
          allowComma = false
        } else {
          throw new SparkException("Found a ',' at a wrong position.")
        }
      } else if (token == ")") {
        parsing = false
      } else if (token.trim.isEmpty) {
          // ignore whitespaces between delim chars, e.g. ", ["
      } else {
        // expecting a number
        items.append(parseDouble(token))
        allowComma = true
      }
    }
    if (parsing) {
      throw new SparkException(s"A tuple must end with ')'.")
    }
    items
  }

  private def parseDouble(s: String): Double = {
    try {
      java.lang.Double.parseDouble(s)
    } catch {
      case e: NumberFormatException =>
        throw new SparkException(s"Cannot parse a double from: $s", e)
    }
  }
} 
Example 53
Source File: ClientArguments.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.deploy

import java.net.{URI, URISyntaxException}

import scala.annotation.tailrec
import scala.collection.mutable.ListBuffer

import org.apache.log4j.Level

import org.apache.spark.util.{IntParam, MemoryParam, Utils}


  private def printUsageAndExit(exitCode: Int) {
    // TODO: It wouldn't be too hard to allow users to submit their app and dependency jars
    //       separately similar to in the YARN client.
    val usage =
     s"""
      |Usage: DriverClient [options] launch <active-master> <jar-url> <main-class> [driver options]
      |Usage: DriverClient kill <active-master> <driver-id>
      |
      |Options:
      |   -c CORES, --cores CORES        Number of cores to request (default: $DEFAULT_CORES)
      |   -m MEMORY, --memory MEMORY     Megabytes of memory to request (default: $DEFAULT_MEMORY)
      |   -s, --supervise                Whether to restart the driver on failure
      |                                  (default: $DEFAULT_SUPERVISE)
      |   -v, --verbose                  Print more debugging output
     """.stripMargin
    // scalastyle:off println
    System.err.println(usage)
    // scalastyle:on println
    System.exit(exitCode)
  }
}

private[deploy] object ClientArguments {
  val DEFAULT_CORES = 1
  val DEFAULT_MEMORY = Utils.DEFAULT_DRIVER_MEM_MB // MB
  val DEFAULT_SUPERVISE = false

  def isValidJarUrl(s: String): Boolean = {
    try {
      val uri = new URI(s)
      uri.getScheme != null && uri.getPath != null && uri.getPath.endsWith(".jar")
    } catch {
      case _: URISyntaxException => false
    }
  }
} 
Example 54
Source File: TaskInfo.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.scheduler

import scala.collection.mutable.ListBuffer

import org.apache.spark.TaskState
import org.apache.spark.TaskState.TaskState
import org.apache.spark.annotation.DeveloperApi


  var finishTime: Long = 0

  var failed = false

  var killed = false

  private[spark] def markGettingResult(time: Long = System.currentTimeMillis) {
    gettingResultTime = time
  }

  private[spark] def markFinished(state: TaskState, time: Long = System.currentTimeMillis) {
    finishTime = time
    if (state == TaskState.FAILED) {
      failed = true
    } else if (state == TaskState.KILLED) {
      killed = true
    }
  }

  def gettingResult: Boolean = gettingResultTime != 0

  def finished: Boolean = finishTime != 0

  def successful: Boolean = finished && !failed && !killed

  def running: Boolean = !finished

  def status: String = {
    if (running) {
      if (gettingResult) {
        "GET RESULT"
      } else {
        "RUNNING"
      }
    } else if (failed) {
      "FAILED"
    } else if (killed) {
      "KILLED"
    } else if (successful) {
      "SUCCESS"
    } else {
      "UNKNOWN"
    }
  }

  def id: String = s"$index.$attemptNumber"

  def duration: Long = {
    if (!finished) {
      throw new UnsupportedOperationException("duration() called on unfinished task")
    } else {
      finishTime - launchTime
    }
  }

  private[spark] def timeRunning(currentTime: Long): Long = currentTime - launchTime
} 
Example 55
Source File: Main.scala    From ros_hadoop   with Apache License 2.0 5 votes vote down vote up
package de.valtech.foss

import scala.io.Source
import scala.collection.mutable.Map
import scala.collection.mutable.ListBuffer
import scala.collection.JavaConverters._
import Console.{GREEN, RED, RESET}
import scala.language.reflectiveCalls

import java.io.File
import java.io.FileInputStream
import java.io.FileOutputStream
import java.nio.channels.FileChannel.MapMode._
import java.nio.ByteOrder._
import java.nio.ByteBuffer

import de.valtech.foss.proto.RosbagIdxOuterClass.RosbagIdx

object Main extends App {
  def help() = {
    Console.err.printf(s"""
${RESET}${GREEN}Usage:
	--file <ros.bag> file to process
	--version print Rosbag version and exit
	--offset <offset> --number <records> Seek at offset < 1073741824 and read the specified number of records
${RESET}By default will just create the protobuf idx file needed for configuration.\n\n""")
    sys.exit(0)
  }

  val pargs = Map[String,AnyRef]()
  def process_cli(args: List[String]) :Boolean = args match {
    case Nil => true // parse success
    case "-v" :: rest => pargs += ("version" -> Some(true)); process_cli(rest)
    case "--version" :: rest => pargs += ("version" -> Some(true)); process_cli(rest)
    case "-f" :: x :: rest => pargs += ("file" -> x); process_cli(rest)
    case "--file" :: x :: rest => pargs += ("file" -> x); process_cli(rest)
    case "-n" :: x :: rest => pargs += ("number" -> Some(x.toInt)); process_cli(rest)
    case "--number" :: x :: rest => pargs += ("number" -> Some(x.toInt)); process_cli(rest)
    case "-o" :: x :: rest => pargs += ("offset" -> Some(x.toInt)); process_cli(rest)
    case "--offset" :: x :: rest => pargs += ("offset" -> Some(x.toInt)); process_cli(rest)
    case "-h" :: rest => help(); false
    case "--help" :: rest => help(); false
    case _ => Console.err.printf(s"${RESET}${RED}Unknown argument " + args.head); false
  }
  process_cli(args.toList)

  def use[T <: { def close() }]
    (resource: T)
    (code: T ⇒ Unit) =
    try
      code(resource)
    finally
      resource.close()

  pargs("file") match {
    case f:String => process()
    case _ => help()
  }

  def process(): Unit = {
    val fin = new File(pargs("file").asInstanceOf[String])
    use(new FileInputStream(fin)) { stream => {
      //printf("min: %s\n", Math.min(1073741824, fin.length) )
      val buffer = stream.getChannel.map(READ_ONLY, 0, Math.min(1073741824, fin.length)).order(LITTLE_ENDIAN)
      val p:RosbagParser = new RosbagParser(buffer)
      val version = p.read_version()
      val h = p.read_record().get
      if(pargs contains "version") {
        printf("%s\n%s\n\n", version, h)
        return
      }
      if(pargs contains "number"){
        buffer position pargs.getOrElse("offset",None).asInstanceOf[Option[Int]].getOrElse(0)
        for(i <- List.range(0,pargs("number").asInstanceOf[Option[Int]].getOrElse(0)))
          println(p.read_record)
        return
      }
      val idxpos = h.header.fields("index_pos").asInstanceOf[Long]
      //printf("idxpos: %s %s\n", idxpos, Math.min(1073741824, fin.length) )
      val b = stream.getChannel.map(READ_ONLY, idxpos, Math.min(1073741824, fin.length - idxpos)).order(LITTLE_ENDIAN)
      val pp:RosbagParser = new RosbagParser(b)
      val c = pp.read_connections(h.header, Nil)
      val chunk_idx = pp.read_chunk_infos(c)
      Console.err.printf(s"""${RESET}${GREEN}Found: """
          + chunk_idx.size
          +s""" chunks\n${RESET}It should be the same number reported by rosbag tool.\nIf you encounter any issues try reindexing your file and submit an issue.
          ${RESET}\n""")
      val fout = new FileOutputStream(pargs("file").asInstanceOf[String] + ".idx.bin")
      val builder = RosbagIdx.newBuilder
      for(i <- chunk_idx) builder.addArray(i)
      builder.build().writeTo(fout)
      fout.close()
      //printf("[%s]\n",chunk_idx.toArray.mkString(","))
    }}
  }
} 
Example 56
Source File: RunProcess.scala    From zorechka-bot   with MIT License 5 votes vote down vote up
package com.wix.zorechka.clients.process

import java.nio.file.Path

import zio.{Task, ZIO}

import scala.collection.mutable.ListBuffer
import scala.sys.process.{Process, ProcessLogger}

case class ClientOutput(value: List[String]) extends AnyVal

object RunProcess {
  def execCmd(command: List[String], workDir: Path, extraEnv: List[(String, String)] = List.empty): Task[ClientOutput] = ZIO.effect {
    val lb = ListBuffer.empty[String]
    val log = new ProcessLogger {
      override def out(s: => String): Unit = {
        println(s)
        lb.append(s)
      }
      override def err(s: => String): Unit = {
        println(s)
        lb.append(s)
      }
      override def buffer[T](f: => T): T = f
    }

    println(command.mkString(" "))
    val exitStatus = Process(command, Some(workDir.toFile), extraEnv: _*).!(log)
    if (exitStatus != 0 && exitStatus != 3)
      throw new IllegalStateException(s"Got status $exitStatus")
    ClientOutput(lb.result())
  }
} 
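The ZIO wrapper aside, the core trick here is a ProcessLogger that appends every stdout and stderr line to a ListBuffer. The same capture works with plain scala.sys.process; the echo command below is only an example and assumes a Unix-like environment.

import scala.collection.mutable.ListBuffer
import scala.sys.process.{Process, ProcessLogger}

object CaptureOutput {
  def main(args: Array[String]): Unit = {
    val lines = ListBuffer.empty[String]
    // Both output streams are appended to the same buffer, as in RunProcess
    val log = ProcessLogger(out => lines.append(out), err => lines.append(err))

    // Example command; assumes `echo` is available on the PATH
    val exitStatus = Process(Seq("echo", "hello world")).!(log)

    println(s"exit=$exitStatus captured=${lines.result()}")
  }
}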
Example 57
Source File: UtilsTest.scala    From sparkMeasure   with Apache License 2.0 5 votes vote down vote up
package ch.cern.sparkmeasure

import java.io.File

import scala.collection.mutable.ListBuffer

import org.scalatest.{FlatSpec, Matchers}

class UtilsTest extends FlatSpec with Matchers {

  val stageVals0 = StageVals(jobId = 1, jobGroup = "test", stageId= 2, name = "stageVal",
    submissionTime = 10, completionTime = 11, stageDuration = 12, numTasks = 13,
    executorRunTime = 14, executorCpuTime = 15,
    executorDeserializeTime = 16, executorDeserializeCpuTime = 17,
    resultSerializationTime = 18, jvmGCTime = 19, resultSize = 20, numUpdatedBlockStatuses = 21,
    diskBytesSpilled = 30, memoryBytesSpilled = 31, peakExecutionMemory = 32, recordsRead = 33,
    bytesRead = 34, recordsWritten = 35, bytesWritten = 36,
    shuffleFetchWaitTime = 40, shuffleTotalBytesRead = 41, shuffleTotalBlocksFetched = 42,
    shuffleLocalBlocksFetched = 43, shuffleRemoteBlocksFetched = 44, shuffleWriteTime = 45,
    shuffleBytesWritten = 46, shuffleRecordsWritten = 47
  )

  val taskVals0 = TaskVals(jobId = 1, jobGroup = "test", stageId = 2, index = 3, launchTime = 4, finishTime = 5,
    duration = 10, schedulerDelay = 11, executorId = "exec0", host = "host0", taskLocality = 12,
    speculative = false, gettingResultTime = 12, successful = true,
    executorRunTime = 14, executorCpuTime = 15,
    executorDeserializeTime = 16, executorDeserializeCpuTime = 17,
    resultSerializationTime = 18, jvmGCTime = 19, resultSize = 20, numUpdatedBlockStatuses = 21,
    diskBytesSpilled = 30, memoryBytesSpilled = 31, peakExecutionMemory = 32, recordsRead = 33,
    bytesRead = 34, recordsWritten = 35, bytesWritten = 36,
    shuffleFetchWaitTime = 40, shuffleTotalBytesRead = 41, shuffleTotalBlocksFetched = 42,
    shuffleLocalBlocksFetched = 43, shuffleRemoteBlocksFetched = 44, shuffleWriteTime = 45,
    shuffleBytesWritten = 46, shuffleRecordsWritten = 47
  )

  it should "write and read back StageVal (Java Serialization)" in {
    val file = File.createTempFile("stageVal", ".tmp")
    try {
      IOUtils.writeSerialized(file.getAbsolutePath, ListBuffer(stageVals0))
      val stageVals = IOUtils.readSerializedStageMetrics(file.getAbsolutePath)
      stageVals should have length 1
      stageVals.head shouldEqual stageVals0
    } finally {
      file.delete()
    }
  }

  it should "write and read back TaskVal (Java Serialization)" in {
    val file = File.createTempFile("taskVal", ".tmp")
    try {
      IOUtils.writeSerialized(file.getAbsolutePath, ListBuffer(taskVals0))
      val taskVals = IOUtils.readSerializedTaskMetrics(file.getAbsolutePath)
      taskVals should have length 1
      taskVals.head shouldEqual taskVals0
    } finally {
      file.delete()
    }
  }

  it should "write and read back StageVal JSON" in {
    val file = File.createTempFile("stageVal", ".json")
    try {
      IOUtils.writeSerializedJSON(file.getAbsolutePath, ListBuffer(stageVals0))
      val stageVals = IOUtils.readSerializedStageMetricsJSON(file.getAbsolutePath)
      stageVals should have length 1
      stageVals.head shouldEqual stageVals0
    } finally {
      file.delete()
    }
  }

  it should "write and read back TaskVal JSON" in {
    val file = File.createTempFile("taskVal", ".json")
    try {
      IOUtils.writeSerializedJSON(file.getAbsolutePath, ListBuffer(taskVals0))
      val taskVals = IOUtils.readSerializedTaskMetricsJSON(file.getAbsolutePath)
      taskVals should have length 1
      taskVals.head shouldEqual taskVals0
    } finally {
      file.delete()
    }
  }

} 
Example 58
Source File: PrettyPrinterSpec.scala    From random-data-generator   with Apache License 2.0 5 votes vote down vote up
package com.danielasfregola.randomdatagenerator.utils

import org.specs2.mock.Mockito
import org.specs2.mutable._
import org.specs2.specification.Scope
import scala.collection.mutable.ListBuffer

class PrettyPrinterSpec extends SpecificationLike with Mockito {

  abstract class PrettyPrinterSpecContext extends Scope {
    val logs: ListBuffer[String] = new ListBuffer
    private def append(log: String): Unit = synchronized { logs += log; () }

    val printer = {
      val mockPrintF = (log: String) => append(log)
      new PrettyPrinter(mockPrintF)
    }
  }

  "PrettyPrinter" should {

    "print an info message" in new PrettyPrinterSpecContext {
      val msg = "This is my info message"
      printer.info(msg)
      logs must haveSize(1)
      logs must_== ListBuffer(s"[info] [RandomDataGenerator] $msg")
    }

    "print an warning message" in new PrettyPrinterSpecContext {
      val msg = "This is my warning message"
      printer.warning(msg)
      logs must haveSize(1)
      logs must_== ListBuffer(s"[warn] [RandomDataGenerator] $msg")
    }

    "print an error message" in new PrettyPrinterSpecContext {
      val msg = "This is my error message"
      printer.error(msg)
      logs must haveSize(1)
      logs must_== ListBuffer(s"[error] [RandomDataGenerator] $msg")
    }

    "print an debug message" in new PrettyPrinterSpecContext {
      val msg = "This is my debug message"
      printer.debug(msg)
      logs must haveSize(1)
      logs must_== ListBuffer(s"[debug] [RandomDataGenerator] $msg")
    }
  }

} 
Example 59
Source File: Dictionary.scala    From topwords   with GNU General Public License v3.0 5 votes vote down vote up
package io.github.qf6101.topwords

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel

import scala.collection.mutable.ListBuffer


  def apply(corpus: RDD[String],
            tauL: Int,
            tauF: Int,
            useProbThld: Double): Dictionary = {
    //enumerate all the possible words: corpus -> words
    val words = corpus.flatMap { text =>
      val permutations = ListBuffer[String]()
      for (i <- 1 to tauL) {
        for (j <- 0 until text.length) {
          if (j + i <= text.length) permutations += text.substring(j, j + i)
        }
      }
      permutations
    }.map(_ -> 1).reduceByKey(_ + _).filter { case (word, freq) =>
      // leave the single characters in dictionary for smoothing reason even if they are low frequency
      word.length == 1 || freq >= tauF
    }.persist(StorageLevel.MEMORY_AND_DISK_SER_2)
    //filter words by the use probability threshold: words -> prunedWords
    val sumWordFreq = words.map(_._2).sum()
    val prunedWords = words.map { case (word, freq) =>
      (word, freq, freq / sumWordFreq)
    }.filter { case (word, _, theta) =>
      // leave the single characters in dictionary for smoothing reason even if they have small theta
      word.length == 1 || theta >= useProbThld
    }
    words.unpersist()
    prunedWords.persist(StorageLevel.MEMORY_AND_DISK_SER_2)
    //normalize the word use probability: prunedWords -> normalizedWords
    val sumPrunedWordFreq = prunedWords.map(_._2).sum()
    val normalizedWords = prunedWords.map { case (word, freq, _) =>
      word -> freq / sumPrunedWordFreq
    }.collectAsMap().toMap
    prunedWords.unpersist()
    //return the overcomplete dictionary: normalizedWords -> dictionary
    new Dictionary(normalizedWords)
  }
} 
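The candidate-word step above slides windows of length 1 to tauL over each text and buffers every substring before counting. The enumeration itself is easy to check locally without Spark, as in this small sketch.

import scala.collection.mutable.ListBuffer

object CandidateWords {
  // Enumerate all substrings of length 1..tauL, mirroring the flatMap above
  def enumerate(text: String, tauL: Int): List[String] = {
    val permutations = ListBuffer[String]()
    for (i <- 1 to tauL) {
      for (j <- 0 until text.length) {
        if (j + i <= text.length) permutations += text.substring(j, j + i)
      }
    }
    permutations.toList
  }

  def main(args: Array[String]): Unit =
    println(enumerate("abcd", 2)) // List(a, b, c, d, ab, bc, cd)
}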
Example 60
Source File: TopElementsAggregator.scala    From salt-core   with Apache License 2.0 5 votes vote down vote up
package software.uncharted.salt.core.analytic.collection

import software.uncharted.salt.core.analytic.Aggregator

import scala.collection.Map
import scala.collection.mutable.HashMap
import scala.collection.mutable.ListBuffer
import scala.collection.mutable.{Map => MutableMap}
import scala.collection.mutable.PriorityQueue
import scala.reflect.ClassTag


class TopElementsAggregator[ET: ClassTag](elementLimit: Int)
extends Aggregator[Seq[ET], Map[ET, Int], List[(ET, Int)]] {

  def default(): Map[ET, Int] = {
    Map[ET, Int]()
  }

  override def add(current: Map[ET, Int], next: Option[Seq[ET]]): Map[ET, Int] = {
    if (next.isDefined) {
      // If our current map is mutable, add new data in directly.
      // If not, convert to a mutable map, and then add data in
      val sum = current match {
        case hm: MutableMap[ET, Int] => hm
        case _ => {
          // The current value isn't itself a mutable hashmap yet; convert to one.
          val hm = new HashMap[ET, Int]()
          hm ++= current
          hm
        }
      }
      next.get.foreach(t => sum.put(t, sum.getOrElse(t, 0) + 1))
      sum
    } else {
      current
    }
  }

  override def merge(left: Map[ET, Int], right: Map[ET, Int]): Map[ET, Int] = {
    // If either input map is mutable, merge the other into it.
    // If neither is, convert one to mutable, and add the other into it.
    val (to, from) = left match {
      case hm: MutableMap[ET, Int] => (hm, right)
      case _ =>
        right match {
          case hm: MutableMap[ET, Int] => (hm, left)
          case _ =>
            val hm = new HashMap[ET, Int]()
            hm ++= left
            (hm, right)
        }
    }
    from.foreach(t => {
      to.put(t._1, to.getOrElse(t._1, 0) + t._2)
    })
    to
  }

  override def finish(intermediate: Map[ET, Int]): List[(ET, Int)] = {
    val x = new PriorityQueue[(ET, Int)]()(Ordering.by(
      a => a._2
    ))
    intermediate.foreach(t => {
      x.enqueue(t)
    })
    val result = new ListBuffer[(ET, Int)]
    for (i <- 0 until Math.min(elementLimit, x.size)) {
      result.append(x.dequeue)
    }
    result.toList
  }
} 
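finish() drains a max-heap into a ListBuffer so that the top elementLimit entries come out in descending count order. The dequeue loop in isolation, using only the standard library, looks like this.

import scala.collection.mutable.{ListBuffer, PriorityQueue}

object TopK {
  def topK[T](counts: Map[T, Int], k: Int): List[(T, Int)] = {
    // Max-heap ordered by count, as in finish() above
    val queue = new PriorityQueue[(T, Int)]()(Ordering.by((e: (T, Int)) => e._2))
    counts.foreach(t => queue.enqueue(t))

    val result = ListBuffer[(T, Int)]()
    for (_ <- 0 until math.min(k, queue.size)) {
      result += queue.dequeue()
    }
    result.toList
  }

  def main(args: Array[String]): Unit =
    println(topK(Map("a" -> 5, "b" -> 2, "c" -> 9), 2)) // List((c,9), (a,5))
}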
Example 61
Source File: S2StreamQueryWriter.scala    From incubator-s2graph   with Apache License 2.0 5 votes vote down vote up
package org.apache.s2graph.spark.sql.streaming

import com.typesafe.config.ConfigFactory
import org.apache.s2graph.core.{GraphElement, JSONParser}
import org.apache.s2graph.s2jobs.S2GraphHelper
import org.apache.s2graph.spark.sql.streaming.S2SinkConfigs._
import org.apache.spark.TaskContext
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.encoders.{ExpressionEncoder, RowEncoder}
import org.apache.spark.sql.types.StructType
import play.api.libs.json.{JsObject, Json}

import scala.collection.mutable.ListBuffer
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.util.Try

private [sql] class S2StreamQueryWriter(
                                         serializedConf:String,
                                         schema: StructType ,
                                         commitProtocol: S2CommitProtocol
                                       ) extends Serializable with Logger {
  private val config = ConfigFactory.parseString(serializedConf)
  private val s2Graph = S2GraphHelper.getS2Graph(config)
  private val encoder: ExpressionEncoder[Row] = RowEncoder(schema).resolveAndBind()
  private val RESERVED_COLUMN = Set("timestamp", "from", "to", "label", "operation", "elem", "direction")


  def run(taskContext: TaskContext, iters: Iterator[InternalRow]): TaskCommit = {
    val taskId = s"stage-${taskContext.stageId()}, partition-${taskContext.partitionId()}, attempt-${taskContext.taskAttemptId()}"
    val partitionId= taskContext.partitionId()

    val groupedSize = getConfigString(config, S2_SINK_GROUPED_SIZE, DEFAULT_GROUPED_SIZE).toInt
    val waitTime = getConfigString(config, S2_SINK_WAIT_TIME, DEFAULT_WAIT_TIME_SECONDS).toInt

    commitProtocol.initTask()
    try {
      var list = new ListBuffer[(String, Int)]()
      val rst = iters.flatMap(rowToEdge).grouped(groupedSize).flatMap{ elements =>
        logger.debug(s"[$taskId][elements] ${elements.size} (${elements.map(e => e.toLogString).mkString(",\n")})")
        elements.groupBy(_.serviceName).foreach{ case (service, elems) =>
          list += ((service, elems.size))
        }

        val mutateF = s2Graph.mutateElements(elements, true)
        Await.result(mutateF, Duration(waitTime, "seconds"))
      }

      val (success, fail) = rst.toSeq.partition(r => r.isSuccess)
      val counter = list.groupBy(_._1).map{ case (service, t) =>
        val sum = t.toList.map(_._2).sum
        (service, sum)
      }
      logger.info(s"[$taskId] success : ${success.size}, fail : ${fail.size} ($counter)")


      commitProtocol.commitTask(TaskState(partitionId, success.size, fail.size, counter))

    } catch {
      case t: Throwable =>
        commitProtocol.abortTask(TaskState(partitionId))
        throw t
    }
  }

  private def rowToEdge(internalRow:InternalRow): Option[GraphElement] =
    S2GraphHelper.sparkSqlRowToGraphElement(s2Graph, encoder.fromRow(internalRow), schema, RESERVED_COLUMN)
} 
Example 62
Source File: VertexDeserializable.scala    From incubator-s2graph   with Apache License 2.0 5 votes vote down vote up
package org.apache.s2graph.core.storage.serde.vertex.wide

import org.apache.s2graph.core.schema.ColumnMeta
import org.apache.s2graph.core.storage.CanSKeyValue
import org.apache.s2graph.core.storage.serde.Deserializable
import org.apache.s2graph.core.storage.serde.StorageDeserializable._
import org.apache.s2graph.core.types.{HBaseType, InnerVal, InnerValLike, VertexId}
import org.apache.s2graph.core.{S2Graph, S2GraphLike, S2Vertex, S2VertexLike}

import scala.collection.mutable.ListBuffer

class VertexDeserializable(graph: S2GraphLike,
                           bytesToInt: (Array[Byte], Int) => Int = bytesToInt) extends Deserializable[S2VertexLike] {
  val builder = graph.elementBuilder
  def fromKeyValues[T: CanSKeyValue](_kvs: Seq[T],
                                          cacheElementOpt: Option[S2VertexLike]): Option[S2VertexLike] = {
    try {
      val kvs = _kvs.map { kv => implicitly[CanSKeyValue[T]].toSKeyValue(kv) }
      val kv = kvs.head
      val version = HBaseType.DEFAULT_VERSION
      val (vertexId, _) = VertexId.fromBytes(kv.row, 0, kv.row.length, version)

      var maxTs = Long.MinValue
      val propsMap = new collection.mutable.HashMap[ColumnMeta, InnerValLike]
      val belongLabelIds = new ListBuffer[Int]

      for {
        kv <- kvs
      } {
        val propKey =
          if (kv.qualifier.length == 1) kv.qualifier.head.toInt
          else bytesToInt(kv.qualifier, 0)

        val ts = kv.timestamp
        if (ts > maxTs) maxTs = ts

        if (S2Vertex.isLabelId(propKey)) {
          belongLabelIds += S2Vertex.toLabelId(propKey)
        } else {
          val v = kv.value
          val (value, _) = InnerVal.fromBytes(v, 0, v.length, version)
          val columnMeta = vertexId.column.metasMap(propKey)
          propsMap += (columnMeta -> value)
        }
      }
      assert(maxTs != Long.MinValue)
      val vertex = builder.newVertex(vertexId, maxTs, S2Vertex.EmptyProps, belongLabelIds = belongLabelIds)
      S2Vertex.fillPropsWithTs(vertex, propsMap.toMap)

      Option(vertex)
    } catch {
      case e: Exception => None
    }
  }
} 
Example 63
Source File: DimensionPropsTest.scala    From incubator-s2graph   with Apache License 2.0 5 votes vote down vote up
package org.apache.s2graph.counter.loader.core

import org.scalatest.{FunSuite, Matchers}

import scala.collection.mutable.ListBuffer

class DimensionPropsTest extends FunSuite with Matchers {
  test("makeRequestBody with Seq") {
    val requestBody =
      """
        |{
        |  "_from" => [[_from]]
        |}
      """.stripMargin
    val requestBodyExpected =
      """
        |{
        |  "_from" => 1
        |}
      """.stripMargin
    val requestBodyResult = DimensionProps.makeRequestBody(requestBody, Seq(("[[_from]]", "1")).toList)

    requestBodyResult shouldEqual requestBodyExpected
  }

  test("makeRequestBody with ListBuffer") {
    val requestBody =
      """
        |{
        |  "_from" => [[_from]]
        |}
      """.stripMargin
    val requestBodyExpected =
      """
        |{
        |  "_from" => 1
        |}
      """.stripMargin
    val requestBodyResult = DimensionProps.makeRequestBody(requestBody, ListBuffer(("[[_from]]", "1")).toList)

    requestBodyResult shouldEqual requestBodyExpected
  }
} 
Example 64
Source File: Entity.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.util


import org.apache.spark.sql.Row

import scala.collection.mutable.ListBuffer

class Entity( id : String,
              label : String,
              prop : Map[String, Array[String]],
              val schema : Seq[String]
            ) extends Serializable {
  var propSeq : Array[AnyRef] = {
    var ret : ListBuffer[AnyRef] = new ListBuffer[AnyRef]()
    ret +:= id
    val l =  for (name <- schema) yield {
      val value : Array[String] = {
        if (prop.contains(name))
          prop(name)
        else
          Array("")
      }
      val str = value
        .map(f => if (f == "") "\"\"" else f)
        .map(_.replaceAll(";", " "))
        .reduce((a,b) => a + ";" + b)

      if (str.contains(";")) str.split(";")
      else str
    }
    ret ++= l
    ret += label
    ret.map {
      case c: String =>
        if (c.contains(","))
          "\"" + c + "\""
        else
          c
      case c: Array[String] => c.asInstanceOf[Array[String]].map(a => if (a.contains(",")) "\"" + a + "\"" else a)
      case _ => ""
    }.toArray
  }

  override def toString: String = {
    val list = this.propSeq
    val ret = for (c <- list) yield {
      c match {
        case c : String => c.asInstanceOf[String]
        case c : Array[String] => c.asInstanceOf[Array[String]].reduce(_ + ";" + _)
        case _ => ""
      }
    }
    ret.reduce(_ + "," + _)
  }

  def getEntityRow : Row = {
    Row(propSeq)
  }
}
object Entity{


  def main(args: Array[String]): Unit = {
//    val m : Map[String, Any] = Map("sc1" -> "test1", "sc2" -> Array("2","1"))
//    val e = new Entity("id1","l1", m, Array("sc1","sc2"))
//    println(e.toString)  //"label1","test1","2;1","id2"
  }
} 
Example 65
Source File: NaiveBayesTest.scala    From AI   with Apache License 2.0 5 votes vote down vote up
package com.bigchange.test

import com.bigchange.datamining.CustomNaiveBayes

import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.io.Source


object NaiveBayesTest { // enclosing object (name assumed from the source file name)
  def main(args: Array[String]) {

    // val Array(dataPath) = args
    val data = Source.fromFile("src/main/resources/nbData/i100-i500").getLines().toList
    // 可实现打完包后读取jar包中对应文件数据
    val data2 = Source.fromInputStream(this.getClass.getResourceAsStream("src/main/resources/nbData/i100-i500")).getLines().toList
    // 十折交叉验证(index,List(item1,item2))
    val splitData  = data.zipWithIndex.map(x => (x._2 % 10,x._1)).groupBy(_._1).mapValues(x => x.map(_._2))
    val modelMap = new mutable.HashMap[Int,String]()

    val model = CustomNaiveBayes.model(0, splitData)
    var list = List((0,model))

    for (id <- 1 until 10) {
      // 训练
      val model = CustomNaiveBayes.model(id, splitData)
      list = list ::: List((id,model))

    }

    // 分类
    val listP = new ListBuffer[(String, Double)]
    list.foreach(x => {
      println("model:" + x)
      val pred = CustomNaiveBayes.predict(Array("health", "moderate", "moderate1", "yes"), x._2)
      listP += pred
    })
    println("tobe:"+ listP)
    println("tobe:"+ listP.max)

  }

} 
Example 66
Source File: SparkRDDTest.scala    From spark-riak-connector   with Apache License 2.0 5 votes vote down vote up
package com.basho.riak.spark.rdd

import com.basho.riak.client.core.query.Location
import com.basho.riak.spark._
import com.basho.riak.spark.rdd.connector.RiakConnector
import org.apache.spark.rdd.RDD
import org.junit.Assert._
import org.junit.experimental.categories.Category
import org.junit.{Before, Test}

import scala.collection.mutable.ListBuffer

case class UserData(timestamp: String, user_id: String)

@Category(Array(classOf[RiakCommonTests]))
class SparkRDDTest extends AbstractRiakSparkTest {
  private val CREATION_INDEX = "creationNo"

  protected override val jsonData = Option(
    """ [
      |   {key: 'key-1', indexes: {creationNo: 1}, value: {timestamp: '2014-11-24T13:14:04.823Z', user_id: 'u1'}},
      |   {key: 'key-2', indexes: {creationNo: 2}, value: {timestamp: '2014-11-24T13:15:04.823Z', user_id: 'u1'}},
      |   {key: 'key-3', indexes: {creationNo: 3}, value: {timestamp: '2014-11-24T13:18:04', user_id: 'u1'}},
      |   {key: 'key-4', indexes: {creationNo: 4}, value: {timestamp: '2014-11-24T13:14:04Z', user_id: 'u2'}},
      |   {key: 'key-5', indexes: {creationNo: 5}, value: {timestamp: '2014-11-24T13:16:04.823Z', user_id: 'u3'}},
      |   {key: 'key-6', indexes: {creationNo: 6}, value: {timestamp: '2014-11-24T13:21:04.823Z', user_id: 'u3'}}
      | ]
    """.stripMargin)

  var rdd: RDD[UserData] = _

  protected override def initSparkConf() = super.initSparkConf()
      .setAppName("RDD tests")

  @Before
  def initializeRDD(): Unit ={
    rdd = sc.riakBucket[UserData](DEFAULT_NAMESPACE.getBucketNameAsString)
      .query2iRange(CREATION_INDEX, 1, 6)
  }

  @Test
  def calculateCount(): Unit = {
    val count = rdd.count()
    assertEquals(6, count) // scalastyle:ignore
  }

  @Test
  def firstElement(): Unit ={
    // Initial implementation fails on this operation
    val first = rdd.first()
  }

  
  private def calculateUserOrderedTotals() = {
    rdd.map(x => (x.user_id, 1))
      .reduceByKey((a, b) => a + b)
      .sortBy(_._1)
  }

  @Test
  def checkActions(): Unit ={
    val perUserTotalRDD = calculateUserOrderedTotals()
    val data = perUserTotalRDD.collect()
    assertEqualsUsingJSON("[['u1',3],['u2',1],['u3',2]]", data)
  }

  @Test
  def storePairRDDWithDefaultMapper(): Unit = {
    val perUserTotalRDD = calculateUserOrderedTotals()
    perUserTotalRDD.saveToRiak(DEFAULT_NAMESPACE_4STORE.getBucketNameAsString)

    // Read data from riak and populate data buffer
    val data =  ListBuffer[(String,Long)]()
    RiakConnector(sc.getConf).withSessionDo { session => {
      foreachKeyInBucket(session.unwrap(), DEFAULT_NAMESPACE_4STORE, (_, l: Location) => {
        val v = readByLocation[Long](session.unwrap(), l)
        data += ((l.getKeyAsString,v))
      })
    }}
    assertEquals(3, data.size)
  }
} 
Example 67
Source File: AbstractRiakSparkTest.scala    From spark-riak-connector   with Apache License 2.0 5 votes vote down vote up
package com.basho.riak.spark.rdd

import com.basho.riak.client.api.RiakClient
import com.basho.riak.client.core.RiakNode
import com.basho.riak.client.core.query.{Location, Namespace, RiakObject}
import com.basho.riak.client.core.util.HostAndPort
import com.basho.riak.test.cluster.DockerRiakCluster
import com.basho.riak.test.rule.DockerRiakClusterRule
import org.apache.spark.SparkContext
import org.junit.After

import scala.collection.mutable.ListBuffer
import scala.reflect.ClassTag
import com.basho.riak.spark.rdd.AbstractRiakSparkTest._
import com.basho.riak.spark.rdd.mapper.ReadValueDataMapper
import org.apache.spark.SparkConf
import org.junit.ClassRule

import scala.collection.JavaConversions._


abstract class AbstractRiakSparkTest extends AbstractRiakTest {
  // SparkContext, created per test case
  protected var sc: SparkContext = _

  protected override def riakHosts: Set[HostAndPort] =  HostAndPort.hostsFromString(
    dockerCluster.enabled() match {
      case true => dockerCluster.getIps.mkString(",")
      case _ => System.getProperty(RIAK_PBCHOST_PROPERTY, RiakNode.Builder.DEFAULT_REMOTE_ADDRESS)
    }, RiakNode.Builder.DEFAULT_REMOTE_PORT).toSet

  protected def initSparkConf(): SparkConf = new SparkConf(false)
    .setMaster("local[2]")
    .setAppName(getClass.getSimpleName)
    .set("spark.riak.write.replicas", "1")
    .set("spark.riak.input.fetch-size", "2")
    .set("spark.riak.connection.host", riakHosts.map(hp => s"${hp.getHost}:${hp.getPort}").mkString(","))

  override def initialize(): Unit = {
    super.initialize()
    sc = createSparkContext(initSparkConf())
  }

  protected def createSparkContext(conf: SparkConf): SparkContext = new SparkContext(conf)

  @After
  def destroySparkContext(): Unit = Option(sc).foreach(x => x.stop())

  protected def fetchAllFromBucket(ns: Namespace): List[(String, String)] = {
    val data = ListBuffer[(String, String)]()
    withRiakDo(session =>
      foreachKeyInBucket(session, ns, (client, l: Location) => {
        val v = readByLocation[String](client, l)
        data += l.getKeyAsString -> v
      })
    )
    data.toList
  }

  protected def readByLocation[T: ClassTag](riakSession: RiakClient, location: Location): T = {
    readByLocation(riakSession, location, (l: Location, ro: RiakObject) => ReadValueDataMapper.mapValue[T](l, ro))
  }

  protected def stringify = (s: Array[String]) => s.mkString("[", ",", "]")
}

object AbstractRiakSparkTest {
  val RIAK_PBCHOST_PROPERTY = "com.basho.riak.pbchost"

  @ClassRule
  def dockerCluster: DockerRiakClusterRule = _dockerCluster

  val _dockerCluster: DockerRiakClusterRule = new DockerRiakClusterRule(DockerRiakCluster.builder()
    .withNodes(1)
    .withTimeout(2)
    .withForcePull(false),
    System.getProperties.containsKey(RIAK_PBCHOST_PROPERTY))
} 
Example 68
Source File: WikipediaToDBpediaClosure.scala    From dbpedia-spotlight-model   with Apache License 2.0 5 votes vote down vote up
package org.dbpedia.spotlight.db

import org.semanticweb.yars.nx.parser.NxParser
import java.io.InputStream
import org.dbpedia.spotlight.log.SpotlightLog
import collection.immutable.ListSet
import scala.Predef._
import org.dbpedia.spotlight.exceptions.NotADBpediaResourceException
import java.net.URLDecoder
import org.dbpedia.spotlight.model.SpotlightConfiguration
import org.dbpedia.extraction.util.WikiUtil
import scala.collection.mutable.ListBuffer
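
// NOTE: this excerpt omits the enclosing class declaration and the members it references
// (linkMap, disambiguationsSet, decodedNameFromURL, decodeURL); only the URI-resolution
// methods from the original source file are shown below.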


  def wikipediaToDBpediaURI(url: String): String = {

    val uri = if(url.startsWith("http:")) {
      getEndOfChainURI(decodedNameFromURL(url))
    } else {
      getEndOfChainURI(decodeURL(url))
    }

    if (disambiguationsSet.contains(uri) || uri == null)
      throw new NotADBpediaResourceException("Resource is a disambiguation page.")
    else
      uri
  }

  def getEndOfChainURI(uri: String): String = getEndOfChainURI(uri, Set(uri))

  private def getEndOfChainURI(uri: String, alreadyTraversed:Set[String]): String = linkMap.get(uri) match {
    case Some(s: String) => if (alreadyTraversed.contains(s)) uri else getEndOfChainURI(s, alreadyTraversed + s)
    case None => uri
  }

} 
Example 69
Source File: Implicit_1_Class.scala    From HadoopLearning   with MIT License 5 votes vote down vote up
package com.c503.scala

import java.io.{BufferedReader, File, FileReader}

import scala.collection.mutable.ListBuffer


object Implicit_1_Class { // enclosing object (name assumed from the source file name)

  implicit class Files(file: File) {
    def lines: List[String] = {
      val fileReader = new FileReader(file)
      val reader = new BufferedReader(fileReader)
      try {
        var lines = ListBuffer[String]()
        var line = reader.readLine()
        while (line != null) {
          lines = lines :+ line
          line = reader.readLine()
        }
        lines.toList
      } finally {
        if (fileReader != null) {
          fileReader.close()
        }
        if (reader != null) {
          reader.close()
        }
      }
    }
  }

  def main(args: Array[String]): Unit = {

    val file = new File("")
    file.lines.foreach(e => {
      println(e)
    })

  }

} 
Example 70
Source File: _10_MutableCollections.scala    From LearningScala   with Apache License 2.0 5 votes vote down vote up
package _020_collections


object _10_MutableCollections {
  def main(args: Array[String]): Unit = {
    println("===== List buffers =====")
    listBufferExample()
    println()

    println("===== Array buffers =====")
    println(arrayBufferExample())
    println()

    println("===== Mutable Sets =====")
    mutableSetExample()
    println()

    println("===== Mutable Maps =====")
    mutableMapExample()
  }

  private def mutableMapExample(): Unit = {
    import scala.collection.mutable
    val map = mutable.Map.empty[String, Int]
    println(map)
    map("hello") = 1
    map("there") = 2
    println(map)
    println(map("hello"))
    println("======")
    val nums = mutable.Map("i" -> 1, "ii" -> 2)
    println(nums)
    nums += ("vi" -> 6)
    println(nums)
    nums -= "ii"
    println(nums)
    nums ++= List("iii" -> 3, "v" -> 5)
    println(nums)
    nums --= List("i", "ii")
    println(nums)
    println("=====")
    println(s"nums.size: ${nums.size}")
    print("nums.contains(\"ii\"): ")
    println(nums.contains("ii"))
    print("nums(\"iii\"): ")
    println(nums("iii"))
    println(s"nums.keys ==> ${nums.keys}")
    println(s"nums.keySet ==> ${nums.keySet}")
    println(s"nums.values ==> ${nums.values}")
    println(s"nums.isEmpty: ${nums.isEmpty}")
  }

  def arrayBufferExample(): List[Int] = {
    import scala.collection.mutable.ArrayBuffer
    val ab = ArrayBuffer[Int](10, 20)
    ab += 30
    ab += 40
    ab.prepend(5)
    ab.toList //return immutable
  }

  private def listBufferExample(): Unit = {
    import scala.collection.mutable.ListBuffer
    val listBuffer = new ListBuffer[Int]
    listBuffer += 1
    listBuffer += 2
    println(listBuffer)
    3 +=: listBuffer
    println(listBuffer)
    val list = listBuffer.toList
    println(list)
  }

  private def mutableSetExample(): Unit = {
    import scala.collection.mutable
    val emptySet = mutable.Set.empty[Int]
    println(emptySet)
    val nums = mutable.Set(1, 2, 3)
    println(nums)
    nums += 5
    println(nums)
    nums -= 3
    println(nums)
    nums ++= List(5, 6)
    println(nums)
    nums --= List(1, 2)
    println(nums)
    println(nums & Set(1, 3, 5, 7)) // intersection of two sets
    nums.clear()
    println(nums)
  }
} 
Example 71
Source File: GameStatsTableGenerator.scala    From avoin-voitto   with MIT License 5 votes vote down vote up
package liigavoitto.journalist.stats

import liigavoitto.journalist.MatchData
import liigavoitto.journalist.text.CommonImplicits
import liigavoitto.journalist.values.MatchDataValues
import liigavoitto.transform._

import scala.collection.mutable.ListBuffer

case class TableBlock(rows: List[TableRow], `type`: String) extends ContentBlock

case class TableRow(cells: List[TableCell], `type`: String = "table-row")

case class TableCell(text: String, `type`: String = "text")

class GameStatsTableGenerator(matchData: MatchData, language: String) extends CommonImplicits {

  val lang = language
  val values = MatchDataValues(matchData, lang)

  private val filePath = "template/stats/stats.edn"

  private def tmpl = getTemplateFn(filePath)

  def getTable: List[ContentBlock] =
    List(heading, table, attendance)

  val homeGroupedPenalties = createPenaltiesText(values.homePenaltyMinutesGrouped)
  val awayGroupedPenalties = createPenaltiesText(values.awayPenaltyMinutesGrouped)

  private def createPenaltiesText(minutes: Map[String, Int]): String = {
    if (!minutes.isEmpty) minutes.map(m => s"${m._2} x ${m._1} min + ").mkString.dropRight(3) + " = "
    else ""
  }

  def heading = HeadingBlock(3, tmpl("heading").head.template, "heading")

  def attendance = TextBlock(attendanceText, "text")

  def table =
    TableBlock(
      List(titleRow) ++
      List(shotsRowOption).flatten ++
      List(penaltyMinutesRow, savesRow) ++
      List(faceoffsRowOption).flatten
    , "table")

  def titleRow = TableRow(List(
    TableCell("**" + values.home.name + "**"),
    TableCell(""),
    TableCell("**" + values.away.name + "**")
  ))

  def shotsRow = TableRow(List(
    TableCell(values.homeShotsOnGoal.toString),
    TableCell("**" + tmpl("shots").head.template + "**"),
    TableCell(values.awayShotsOnGoal.toString)
  ))

  def shotsRowOption: Option[TableRow] = values.homeShotsOnGoal + values.awayShotsOnGoal match {
    case 0 => None
    case _ => Some(shotsRow)
  }

  def penaltyMinutesRow = TableRow(List(
    TableCell(s"${homeGroupedPenalties}${values.homePenaltyMinutesTotal} min"),
    TableCell("**" + tmpl("penalties").head.template + "**"),
    TableCell(s"${awayGroupedPenalties}${values.awayPenaltyMinutesTotal} min")
  ))

  def savesRow = TableRow(List(
    TableCell(values.homeGoalieSavesCombinedByPeriod.mkString(" + ") + " = " + values.homeGoalieSavesTotal.toString),
    TableCell("**" + tmpl("saves").head.template + "**"),
    TableCell(values.awayGoalieSavesCombinedByPeriod.mkString(" + ") + " = " + values.awayGoalieSavesTotal.toString)
  ))

  def faceoffsRow = TableRow(List(
    TableCell(values.homeFaceOffWins.toString + " (" + values.homeFaceOffPercentage.round.toString + " %)"),
    TableCell("**" + tmpl("faceoffs").head.template + "**"),
    TableCell(values.awayFaceOffWins.toString + " (" + values.awayFaceOffPercentage.round.toString + " %)")
  ))

  def faceoffsRowOption: Option[TableRow] = values.homeFaceOffWins + values.awayFaceOffWins match {
    case 0 => None
    case _ => Some(faceoffsRow)
  }

  def attendanceText = "**" + tmpl("attendance").head.template + ":** " + values.attendance.toString + " (" + values.venue.toString + ")"
} 
Example 72
Source File: InputDataFlow.scala    From spark-graphx   with GNU General Public License v3.0 5 votes vote down vote up
package com.github.graphx.pregel.social

import org.apache.spark.graphx.{Edge, VertexId}

import scala.collection.mutable.ListBuffer

object InputDataFlow {

  def parseNames(line: String): Option[(VertexId, String)] = {
    val fields = line.split('\t')
    if (fields.length > 1)
      Some(fields(0).trim().toLong, fields(1))
    else None
  }

  def makeEdges(line: String): List[Edge[Int]] = {
    var edges = new ListBuffer[Edge[Int]]()
    val fields = line.split(" ")
    val origin = fields(0)
    (1 until fields.length)
      .foreach { p =>
        edges += Edge(origin.toLong, fields(p).toLong, 0)
      }
    edges.toList
  }

} 
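
As a sketch of how these two parsers are usually wired together (the file names, master URL, and object name here are assumptions, not part of the original project): read the vertex and edge files with SparkContext.textFile, flatMap through the parsers above, and hand both RDDs to GraphX.

import org.apache.spark.SparkContext
import org.apache.spark.graphx.Graph
import com.github.graphx.pregel.social.InputDataFlow

object SocialGraphSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext("local[*]", "social-graph-sketch")

    // Assumed inputs: a tab-separated "id<TAB>name" file and a space-separated adjacency file
    val vertices = sc.textFile("names.txt").flatMap(InputDataFlow.parseNames)
    val edges    = sc.textFile("graph.txt").flatMap(InputDataFlow.makeEdges)

    val graph = Graph(vertices, edges)
    println(s"vertices: ${graph.vertices.count()}, edges: ${graph.edges.count()}")

    sc.stop()
  }
}
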
Example 73
Source File: TextMessageGenerator.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package alpakka.kafka

import java.util.concurrent.ThreadLocalRandom

import scala.collection.mutable.ListBuffer


object TextMessageGenerator {
  val alphabetSet: Set[Char] = ('a' to 'z').toSet
  val alphabets = alphabetSet.toList
  val vowelSet: Set[Char] = Set('a', 'e', 'i', 'o', 'u')
  val vowels = vowelSet.toList
  val consonantSet: Set[Char] = alphabetSet -- vowelSet
  val consonants = consonantSet.toList

  // Subset of Punct character class """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
  val puncts: String = """.,;?!"""

  def random = ThreadLocalRandom.current

  def randomChar: Char = alphabets(random.nextInt(0, alphabets.length))

  def mostlyVowelChar: Char = {
    // 4/5th chance of vowel
    val isVowel: Boolean = if (random.nextInt(0, 5) > 0) true else false
    if (isVowel) vowels(random.nextInt(0, vowels.length)) else consonants(random.nextInt(0, consonants.length))
  }

  def maybeUpperChar: Char = {
    // 1/5th chance of uppercase
    val isUppercase: Boolean = if (random.nextInt(0, 5) == 0) true else false
    if (isUppercase) Character.toUpperCase(randomChar) else randomChar
  }

  // Generate a word within a range of lengths
  def genRandWord(minLen: Int, maxLen: Int): String = {
    var word = new ListBuffer[Char]()

    val wordLen: Int = random.nextInt(minLen, maxLen + 1)

    for (i <- 1 to wordLen) {
      val char = if (i == 1) maybeUpperChar else if (i % 2 == 0) mostlyVowelChar else randomChar
      word += char
    }

    word.mkString
  }

  def genRandTextWithKeyword(minWordsInText: Int, maxWordsInText: Int,
                  minWordLen: Int = 2, maxWordLen: Int = 8,
                  minWordsInClause: Int = 1, maxWordsInClause: Int = 10, keyword: String
                 ): String = {

    val randomLevel: Double = 0.05
    var text = new ListBuffer[String]()

    val numWordsInText: Int = random.nextInt(minWordsInText, maxWordsInText + 1)

    var wordCount: Int = 0
    var textLen: Int = 0

    while (wordCount < numWordsInText) {
      val numWords = random.nextInt(minWordsInClause, maxWordsInClause + 1)

      val numWordsInClause = if (numWordsInText - wordCount < numWords) numWordsInText - wordCount else
        numWords

      var clauseLen: Int = 0

      // Generate a clause
      for (i <- 1 to numWordsInClause) {
        val word: String = genRandWord(minWordLen, maxWordLen)
        text += word

        if (math.random < randomLevel) text += " " + keyword

        clauseLen += word.length
        wordCount += 1

        if (i < numWordsInClause) {
          text += " "
          clauseLen += 1
        }
      }

      // Add a punctuation
      text += puncts.charAt(random.nextInt(0, puncts.length)).toString
      clauseLen += 1

      if (wordCount < numWordsInText) {
        text += " "
        clauseLen += 1
      }

      textLen += clauseLen
    }

    // println(s"textLen (in chars): is $textLen")
    text.mkString
  }
} 
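
A throwaway driver for the generator above; the word-count bounds and the keyword are arbitrary values chosen for the illustration, not taken from the project.

import alpakka.kafka.TextMessageGenerator

object TextMessageGeneratorSketch {
  def main(args: Array[String]): Unit = {
    // 50-100 words, with the keyword randomly sprinkled in (~5% of word positions)
    val text = TextMessageGenerator.genRandTextWithKeyword(
      minWordsInText = 50, maxWordsInText = 100, keyword = "alpakka")
    println(text)
  }
}
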
Example 74
Source File: Tree.scala    From spatial   with MIT License 5 votes vote down vote up
package utils

import scala.collection.mutable.ListBuffer

object Tree {
  
  def LCAWithPaths[T](x: T, y: T)(parent: T => T): (T, Seq[T], Seq[T]) = {
    getLCAWithPaths(x,y)(parent).getOrElse{
      throw new Exception(s"""No LCA for $x (${parent(x)}) and $y (${parent(y)})""")
    }
  }

  def getLCAWithPaths[T](x: T, y: T)(parent: T => T): Option[(T, Seq[T], Seq[T])] = {
    val pathX = ancestors(x)(parent)
    val pathY = ancestors(y)(parent)
    val lca = pathX.zip(pathY).reverse.find{case (a,b) => a == b}.map(_._1)

    lca match {
      case Some(ctrl) =>
        // Choose last node where paths are the same
        val pathToX = pathX.drop(pathX.indexOf(ctrl))
        val pathToY = pathY.drop(pathY.indexOf(ctrl))
        Some(ctrl,pathToX,pathToY)

      case None => None
    }
  }

  def LCA[T](x: T, y: T)(parent: T => T): T = LCAWithPaths(x,y)(parent)._1

  def getLCA[T](x: T, y: T)(parent: T => T): Option[T] = getLCAWithPaths(x,y)(parent).map(_._1)

} 
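
The LCA code above leans on an ancestors helper (and the ListBuffer import) that the original Tree.scala defines but that was trimmed from this excerpt. Below is a stand-in, not the project's implementation, with the ordering the zip in getLCAWithPaths expects, under the assumption that a root node is its own parent.

object TreeAncestorsSketch {
  import scala.collection.mutable.ListBuffer

  // Stand-in for the helper object Tree relies on: walk `parent` to its fixed point and
  // return the chain ordered root-first down to `x`, as getLCAWithPaths' zip expects.
  def ancestors[T](x: T)(parent: T => T): Seq[T] = {
    val path = ListBuffer[T](x)
    var current = x
    var p = parent(current)
    while (p != current) {   // assumption: a root node is its own parent
      current = p
      current +=: path       // prepend, so the root ends up at the head
      p = parent(current)
    }
    path.toList
  }
}
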
Example 75
Source File: Subprocess.scala    From spatial   with MIT License 5 votes vote down vote up
package utils.process

import java.io._

import scala.collection.mutable.ListBuffer

class Subprocess(args: String*)(react: (String,BufferedReader) => Option[String]) {
  private var reader: BufferedReader = _
  private var writer: BufferedWriter = _
  private var logger: BufferedReader = _
  private var p: Process = _

  private def println(x: String): Unit = {
    writer.write(x)
    writer.newLine()
    writer.flush()
  }

  def isAlive: Boolean = p.isAlive

  
  def errors(): List[String] = {
    val lines = ListBuffer[String]()
    while (logger.ready()) {
      lines += logger.readLine()
    }
    lines.toList
  }
  def stdout(): List[String] = {
    val lines = ListBuffer[String]()
    while (reader.ready()) {
      lines += reader.readLine()
    }
    lines.toList
  }

  def run(dir: String = ""): Unit = if (p eq null) {
    val pb = new ProcessBuilder(args:_*)
    if (dir.nonEmpty) pb.directory(new File(dir))
    p = pb.start()
    reader = new BufferedReader(new InputStreamReader(p.getInputStream))
    writer = new BufferedWriter(new OutputStreamWriter(p.getOutputStream))
    logger = new BufferedReader(new InputStreamReader(p.getErrorStream))
  } else {
    throw new Exception(s"Cannot run process $args while it is already running.")
  }

  def send(line: String): Unit = println(line)

  def block(dir: String = ""): Int = {
    if (p eq null) run(dir)
    var isConnected = true
    while (isConnected) {
      // Otherwise react to the stdout of the subprocess
      val input = reader.readLine()
      if (input ne null) {
        val response = react(input,reader)
        response.foreach{r => println(r) }
      }
      else {
        // TODO[5]: What to do when process ended unexpectedly?
        isConnected = false // Process ended
      }
    }
    p.waitFor() // This is a java.lang.Process
  }

  def blockAndReturnOut(dir: String = ""): (Seq[String],Seq[String]) = {
    if (p eq null) run(dir)
    p.waitFor()
    var lines: Seq[String] = Nil
    var errs: Seq[String] = Nil
    var line = ""
    while (reader.ready && (line ne null)) {
      line = reader.readLine()
      if (line ne null) lines = line +: lines
    }
    line = ""
    while (logger.ready && (line ne null)) {
      line = logger.readLine()
      if (line ne null) errs = line +: errs
    }
    (lines.reverse, errs.reverse)
  }

  def kill(): Unit = if (p eq null) () else p.destroyForcibly()
} 
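
A quick usage sketch for the wrapper above, assuming a Unix-like machine with ls on the PATH; the react callback here ignores stdout and never writes back to the child process.

import utils.process.Subprocess

object SubprocessSketch {
  def main(args: Array[String]): Unit = {
    // The reaction callback receives each stdout line plus the reader; returning None sends nothing back.
    val proc = new Subprocess("ls", "-la")((_, _) => None)

    val (out, err) = proc.blockAndReturnOut()
    out.foreach(line => println(s"[out] $line"))
    err.foreach(line => println(s"[err] $line"))
  }
}
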
Example 76
Source File: WikipediaSuggest.scala    From Principles-of-Reactive-Programming   with GNU General Public License v3.0 5 votes vote down vote up
package suggestions
package gui

import scala.collection.mutable.ListBuffer
import scala.collection.JavaConverters._
import scala.concurrent._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.swing._
import scala.util.{ Try, Success, Failure }
import scala.swing.event._
import swing.Swing._
import javax.swing.UIManager
import Orientation._
import rx.subscriptions.CompositeSubscription
import rx.lang.scala.Observable
import rx.lang.scala.Subscription
import observablex._
import search._

object WikipediaSuggest extends SimpleSwingApplication with ConcreteSwingApi with ConcreteWikipediaApi {

  {
    try {
      UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName())
    } catch {
      case t: Throwable =>
    }
  }

  def top = new MainFrame {
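    // NOTE: the Swing components and layout defined at this point in the original exercise
    // file are not shown in this excerpt; only the observable wiring stubs remain below.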

    

    // TO IMPLEMENT
    val searchTerms: Observable[String] = ???

    // TO IMPLEMENT
    val suggestions: Observable[Try[List[String]]] = ???

    // TO IMPLEMENT
    val suggestionSubscription: Subscription =  suggestions.observeOn(eventScheduler) subscribe {
      x => ???
    }

    // TO IMPLEMENT
    val selections: Observable[String] = ???

    // TO IMPLEMENT
    val pages: Observable[Try[String]] = ???

    // TO IMPLEMENT
    val pageSubscription: Subscription = pages.observeOn(eventScheduler) subscribe {
      x => ???
    }

  }

}


trait ConcreteWikipediaApi extends WikipediaApi {
  def wikipediaSuggestion(term: String) = Search.wikipediaSuggestion(term)
  def wikipediaPage(term: String) = Search.wikipediaPage(term)
}


trait ConcreteSwingApi extends SwingApi {
  type ValueChanged = scala.swing.event.ValueChanged
  object ValueChanged {
    def unapply(x: Event) = x match {
      case vc: ValueChanged => Some(vc.source.asInstanceOf[TextField])
      case _ => None
    }
  }
  type ButtonClicked = scala.swing.event.ButtonClicked
  object ButtonClicked {
    def unapply(x: Event) = x match {
      case bc: ButtonClicked => Some(bc.source.asInstanceOf[Button])
      case _ => None
    }
  }
  type TextField = scala.swing.TextField
  type Button = scala.swing.Button
} 
Example 77
Source File: MainchainSynchronizer.scala    From Sidechains-SDK   with MIT License 5 votes vote down vote up
package com.horizen.forge

import com.horizen.SidechainHistory
import com.horizen.block.MainchainBlockReference
import com.horizen.chain.{MainchainHeaderHash, byteArrayToMainchainHeaderHash}
import com.horizen.utils.BytesUtils
import com.horizen.websocket.MainchainNodeChannel
import com.horizen.utils._

import scala.collection.mutable.ListBuffer
import scala.util.{Failure, Success, Try}

class MainchainSynchronizer(mainchainNodeChannel: MainchainNodeChannel) {
  // Get divergent mainchain suffix between SC Node and MC Node
  // Return last common header with height + divergent suffix
  def getMainchainDivergentSuffix(history: SidechainHistory, limit: Int): Try[(Int, Seq[MainchainHeaderHash])] = Try {
    val (_: Int, commonHashHex: String) = getMainchainCommonBlockHashAndHeight(history).get
    mainchainNodeChannel.getNewBlockHashes(Seq(commonHashHex), limit) match {
      case Success((height, hashes)) => (height, hashes.map(hex => byteArrayToMainchainHeaderHash(BytesUtils.fromHexString(hex))))
      case Failure(ex) => throw ex
    }
  }

  // Return common block height and hash as a hex string.
  def getMainchainCommonBlockHashAndHeight(history: SidechainHistory): Try[(Int, String)] = Try {
    // Bitcoin-style Locator is ordered from tip to genesis
    val locatorHashes: Seq[String] = history.getMainchainHashesLocator.map(baw => BytesUtils.toHexString(baw.data))
    val (commonHeight, commonHashHex) = mainchainNodeChannel.getBestCommonPoint(locatorHashes).get
    val commonHash: MainchainHeaderHash = byteArrayToMainchainHeaderHash(BytesUtils.fromHexString(commonHashHex))

    if(commonHashHex == locatorHashes.head) {
      // No orphan mainchain blocks -> return result as is
      (commonHeight, commonHashHex)
    } else {
      // Orphan mainchain blocks present
      // Check if there is more recent common block, that was not a part of locatorHashes
      val commonHashLocatorIndex: Int = locatorHashes.indexOf(commonHashHex)
      val firstOrphanedMainchainHeaderHash: MainchainHeaderHash = byteArrayToMainchainHeaderHash(BytesUtils.fromHexString(locatorHashes(commonHashLocatorIndex - 1)))
      // Get the list of MainchainHeader Hashes between previously found common point and first orphaned point.
      // Order them from newest to oldest same as bitcoin-style locator.
      val locator: Seq[String] = history.getMainchainHashes(commonHash, firstOrphanedMainchainHeaderHash).map(baw => BytesUtils.toHexString(baw.data)).reverse

      mainchainNodeChannel.getBestCommonPoint(locator) match {
        case Success((height, hash)) => (height, hash)
        case Failure(ex) => throw ex
      }
    }
  }

  def getMainchainBlockReferences(history: SidechainHistory, hashes: Seq[MainchainHeaderHash]): Try[Seq[MainchainBlockReference]] = Try {
    val references = ListBuffer[MainchainBlockReference]()
    for(hash <- hashes) {
      mainchainNodeChannel.getBlockByHash(BytesUtils.toHexString(hash.data)) match {
        case Success(ref) =>
          references.append(ref)
        case Failure(ex) =>
          throw new IllegalStateException(s"Can't retrieve MainchainBlockReference for hash $hash. Connection error.", ex)
      }
    }
    references
  }
}

object MainchainSynchronizer {
  val MAX_BLOCKS_REQUEST: Int = 50
} 
Example 78
Source File: TiRDD.scala    From tispark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.tispark

import com.pingcap.tikv._
import com.pingcap.tikv.exception.TiInternalException
import com.pingcap.tikv.meta.TiDAGRequest
import com.pingcap.tikv.types.Converter
import com.pingcap.tikv.util.RangeSplitter
import com.pingcap.tikv.util.RangeSplitter.RegionTask
import com.pingcap.tispark.{TiPartition, TiTableReference}
import org.apache.spark.Partition
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.InternalRow

import scala.collection.JavaConversions._
import scala.collection.mutable
import scala.collection.mutable.ListBuffer

abstract class TiRDD(
    val dagRequest: TiDAGRequest,
    val physicalId: Long,
    val tiConf: TiConfiguration,
    val tableRef: TiTableReference,
    @transient private val session: TiSession,
    @transient private val sparkSession: SparkSession)
    extends RDD[InternalRow](sparkSession.sparkContext, Nil) {

  private lazy val partitionPerSplit = tiConf.getPartitionPerSplit

  protected def checkTimezone(): Unit = {
    if (!tiConf.getLocalTimeZone.equals(Converter.getLocalTimezone)) {
      throw new TiInternalException(
        "timezone are different! driver: " + tiConf.getLocalTimeZone + " executor:" + Converter.getLocalTimezone +
          " please set user.timezone in spark.driver.extraJavaOptions and spark.executor.extraJavaOptions")
    }
  }

  override protected def getPartitions: Array[Partition] = {
    val keyWithRegionTasks = RangeSplitter
      .newSplitter(session.getRegionManager)
      .splitRangeByRegion(dagRequest.getRangesByPhysicalId(physicalId), dagRequest.getStoreType)

    val hostTasksMap = new mutable.HashMap[String, mutable.Set[RegionTask]]
      with mutable.MultiMap[String, RegionTask]

    var index = 0
    val result = new ListBuffer[TiPartition]
    for (task <- keyWithRegionTasks) {
      hostTasksMap.addBinding(task.getHost, task)
      val tasks = hostTasksMap(task.getHost)
      if (tasks.size >= partitionPerSplit) {
        result.append(new TiPartition(index, tasks.toSeq, sparkContext.applicationId))
        index += 1
        hostTasksMap.remove(task.getHost)
      }

    }
    // add rest
    for (tasks <- hostTasksMap.values) {
      result.append(new TiPartition(index, tasks.toSeq, sparkContext.applicationId))
      index += 1
    }
    result.toArray
  }

  override protected def getPreferredLocations(split: Partition): Seq[String] =
    split.asInstanceOf[TiPartition].tasks.head.getHost :: Nil
} 
Example 79
Source File: Preprocess.scala    From Scala-Machine-Learning-Projects   with MIT License 5 votes vote down vote up
package com.packt.ScalaML.BitCoin

import java.io.{ BufferedWriter, File, FileWriter }
import org.apache.spark.sql.types.{ DoubleType, IntegerType, StructField, StructType }
import org.apache.spark.sql.{ DataFrame, Row, SparkSession }
import scala.collection.mutable.ListBuffer

object Preprocess {
  // Number of leading rows to drop before building the rolling windows
    val dropFirstCount: Int = 612000

    def rollingWindow(data: DataFrame, window: Int, xFilename: String, yFilename: String): Unit = {
      var i = 0
      val xWriter = new BufferedWriter(new FileWriter(new File(xFilename)))
      val yWriter = new BufferedWriter(new FileWriter(new File(yFilename)))

      val zippedData = data.rdd.zipWithIndex().collect()
      System.gc()
      val dataStratified = zippedData.drop(dropFirstCount) // TODO: slice first 614K rows
      while (i < (dataStratified.length - window)) {
        val x = dataStratified
          .slice(i, i + window)
          .map(r => r._1.getAs[Double]("Delta")).toList
        val y = dataStratified.apply(i + window)._1.getAs[Integer]("label")
        val stringToWrite = x.mkString(",")
        xWriter.write(stringToWrite + "\n")
        yWriter.write(y + "\n")

        i += 1
        if (i % 10 == 0) {
          xWriter.flush()
          yWriter.flush()
        }
      }

      xWriter.close()
      yWriter.close()
    }
    
  def main(args: Array[String]): Unit = {
    // TODO: modify these variables to point to the desired input/output files
    val priceDataFileName: String = "C:/Users/admin-karim/Desktop/bitstampUSD_1-min_data_2012-01-01_to_2017-10-20.csv/bitstampUSD_1-min_data_2012-01-01_to_2017-10-20.csv"
    val outputDataFilePath: String = "output/scala_test_x.csv"
    val outputLabelFilePath: String = "output/scala_test_y.csv"

    val spark = SparkSession
      .builder()
      .master("local[*]")
      .config("spark.sql.warehouse.dir", "E:/Exp/")
      .appName("Bitcoin Preprocessing")
      .getOrCreate()

    val data = spark.read.format("com.databricks.spark.csv").option("header", "true").load(priceDataFileName)
    data.show(10)
    println((data.count(), data.columns.size))

    val dataWithDelta = data.withColumn("Delta", data("Close") - data("Open"))

    import org.apache.spark.sql.functions._
    import spark.sqlContext.implicits._

    val dataWithLabels = dataWithDelta.withColumn("label", when($"Close" - $"Open" > 0, 1).otherwise(0))
    rollingWindow(dataWithLabels, 22, outputDataFilePath, outputLabelFilePath)    
    spark.stop()
  }
} 
Example 80
Source File: DefaultUsers.scala    From meteorite-core   with Apache License 2.0 5 votes vote down vote up
package bi.meteorite.core.security.hibernate

import bi.meteorite.core.api.objects.Event
import bi.meteorite.core.api.objects.MeteoriteUser
import bi.meteorite.core.api.objects.MeteoriteRole
import bi.meteorite.core.api.persistence.EventService
import bi.meteorite.core.api.persistence.UserService
import bi.meteorite.objects.EventImpl
import bi.meteorite.objects.RoleImpl
import bi.meteorite.objects.UserImpl
import java.util.Date
import java.util.UUID
import javax.annotation.PostConstruct
import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer


class DefaultUsers {
  private var userService: UserService = null
  private var eventService: EventService = null

  @PostConstruct def insertUsers() {
    if (eventService.getEventByEventName("Start Adding Users") == null) {
      val uuid: String = UUID.randomUUID.toString
      val e: Event = eventService.addEvent(new EventImpl(uuid, this.getClass.getName, "Start Adding users", "Adding users to user list", new Date))
      var u: MeteoriteUser = new UserImpl
      u.setUsername("admin")
      u.setPassword("admin")
      val r: MeteoriteRole = new RoleImpl
      r.setUserId(u)
      r.setRole("ROLE_ADMIN")
      val r2: MeteoriteRole = new RoleImpl
      r2.setUserId(u)
      r2.setRole("ROLE_USER")
      val l = ListBuffer[MeteoriteRole](r, r2)
      u.setRoles(l.asJava)
      u = userService.addUser(u)

      u = new UserImpl
      u.setUsername("smith")
      u.setPassword("smith")
      val s2 = List[MeteoriteRole](new RoleImpl("ROLE_USER", u.asInstanceOf[UserImpl]))
      u.setRoles(s2.asJava)
      userService.addUser(u)
      e.setEndDate(new Date)
      e.setDuration(e.getEndDate.getTime - e.getStartDate.getTime)
      eventService.updateEvent(e)
    }
  }

  def setUserService(userService: UserService) {
    this.userService = userService
  }

  def setEventService(eventService: EventService) {
    this.eventService = eventService
  }
} 
Example 81
Source File: TokenAuthorizingInterceptor.scala    From meteorite-core   with Apache License 2.0 5 votes vote down vote up
package bi.meteorite.core.security.authorization

import java.lang.reflect.Method
import org.apache.cxf.security.SecurityContext

import java.util

import scala.collection.mutable.HashMap
import scala.collection.mutable.ListBuffer

import scala.collection.JavaConversions._
import TokenAuthorizingInterceptor._
import scala.collection.JavaConverters._

object TokenAuthorizingInterceptor {

  private def parseRolesMap(rolesMap: Map[String, String]): scala.collection.mutable.HashMap[String, List[String]] = {
    val map = new scala.collection.mutable.HashMap[String, List[String]]()
    for ((key, value) <- rolesMap) {
      map.put(key, value.split(" ").toList)
    }
    map
  }
}


class TokenAuthorizingInterceptor(uniqueId: Boolean) extends TokenAbstractAutorizingInInterceptor(uniqueId) {

  private val methodRolesMap = new HashMap[String, List[String]]()

  private var userRolesMap = new scala.collection.mutable.HashMap[String, List[String]]

  private var globalRoles =  new scala.collection.mutable.ListBuffer[String]

  private var checkConfiguredRolesOnly: Boolean = _

  def this() {
    this(true)
  }

  protected override def isUserInRole(sc: SecurityContext, roles: util.List[String], deny: Boolean): Boolean = {
    if (!checkConfiguredRolesOnly && !super.isUserInRole(sc, roles, deny)) {
      return false
    }
    if (userRolesMap.nonEmpty) {
      val userRoles = userRolesMap.get(sc.getUserPrincipal.getName)
      if (userRoles == null) {
        return false
      }
      for (role <- roles if userRoles.get.contains(role)) {
        return true
      }
      false
    } else {
      !checkConfiguredRolesOnly
    }
  }

  private def createMethodSig(method: Method): String = {
    val b = new StringBuilder(method.getReturnType.getName)
    b.append(' ').append(method.getName).append('(')
    for (cls <- method.getParameterTypes) {
      b.append(cls.getName)
    }
    b.append(')')
    b.toString
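    // Note: the fully-qualified signature built above is discarded; the interceptor currently
    // keys its role map by the simple method name (getExpectedRoles also falls back to it).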

    method.getName
  }

  protected override def getExpectedRoles(method: Method): util.List[String] = {

    var roles = methodRolesMap.get(createMethodSig(method))

    if(roles.isEmpty) {
      roles = methodRolesMap.get(method.getName)
    }

    if(roles.isEmpty){
      globalRoles.toList
    }
    else{
      roles.get
    }

  }

  def setMethodRolesMap(rolesMap: java.util.Map[String, String]) =
    methodRolesMap.putAll(parseRolesMap(rolesMap.asScala.toMap))

  def setUserRolesMap(rolesMap: java.util.Map[String, String]) = userRolesMap = parseRolesMap(rolesMap.asScala.toMap)

  def setGlobalRoles(roles: String) = globalRoles = roles.split(" ").to[ListBuffer]

  def setCheckConfiguredRolesOnly(checkConfiguredRolesOnly: Boolean) = this.checkConfiguredRolesOnly =
    checkConfiguredRolesOnly

} 
Example 82
Source File: FilterRecursiveMultiDelete.scala    From rokku   with Apache License 2.0 5 votes vote down vote up
package com.ing.wbaa.rokku.proxy.handler

import akka.stream.ActorMaterializer
import akka.stream.alpakka.xml.scaladsl.XmlParsing
import akka.stream.alpakka.xml.{ EndElement, StartElement, TextEvent }
import akka.stream.scaladsl.{ Sink, Source }
import akka.util.ByteString

import scala.collection.immutable
import scala.collection.mutable.ListBuffer
import scala.concurrent.Future

object FilterRecursiveMultiDelete {

  def exctractMultideleteObjectsFlow(source: Source[ByteString, Any])(implicit materializer: ActorMaterializer): Future[Seq[String]] = {
    var isKeyTag = false

    source
      .via(XmlParsing.parser)
      .statefulMapConcat(() => {
        val keys = new ListBuffer[String]
        isKeyTag = false

        parseEvent =>
          parseEvent match {
            case e: StartElement if e.localName.startsWith("Delete") =>
              keys.clear()
              immutable.Seq.empty

            case e: StartElement if e.localName == "Key" =>
              isKeyTag = true
              immutable.Seq.empty

            case e: EndElement if e.localName == "Key" =>
              isKeyTag = false
              immutable.Seq.empty

            case e: TextEvent =>
              if (isKeyTag) keys.append(e.text)
              immutable.Seq.empty

            case e: EndElement if e.localName == "Delete" =>
              immutable.Seq(keys).flatten

            case _ =>
              immutable.Seq.empty
          }
      }).runWith(Sink.seq)
  }
} 
Example 83
Source File: FilterRecursiveListBucketHandler.scala    From rokku   with Apache License 2.0 5 votes vote down vote up
package com.ing.wbaa.rokku.proxy.handler

import java.net.URLDecoder

import akka.NotUsed
import akka.http.scaladsl.model.{ HttpRequest, HttpResponse }
import akka.stream.alpakka.xml.scaladsl.{ XmlParsing, XmlWriting }
import akka.stream.alpakka.xml.{ EndElement, ParseEvent, StartElement, TextEvent }
import akka.stream.scaladsl.Flow
import akka.util.ByteString
import com.ing.wbaa.rokku.proxy.data.{ Read, RequestId, S3Request, User }

import scala.collection.immutable
import scala.collection.mutable.ListBuffer
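
// NOTE: the enclosing handler trait/class declaration and the isUserAuthorizedForRequest
// member it relies on are not part of this excerpt; only the list-objects filtering flow is shown.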


  protected[this] def filterRecursiveListObjects(user: User, requestS3: S3Request)(implicit id: RequestId): Flow[ByteString, ByteString, NotUsed] = {
    def elementResult(allContentsElements: ListBuffer[ParseEvent], isContentsTag: Boolean, element: ParseEvent): immutable.Seq[ParseEvent] = {
      if (isContentsTag) {
        allContentsElements += element
        immutable.Seq.empty
      } else {
        immutable.Seq(element)
      }
    }

    def isPathOkInRangerPolicy(path: String)(implicit id: RequestId): Boolean = {
      val pathToCheck = normalizePath(path)
      val isUserAuthorized = isUserAuthorizedForRequest(requestS3.copy(s3BucketPath = Some(pathToCheck)), user)
      isUserAuthorized
    }

    def normalizePath(path: String): String = {
      val delimiter = "/"
      val decodedPath = URLDecoder.decode(path, "UTF-8")
      val delimiterIndex = decodedPath.lastIndexOf(delimiter)
      val pathToCheckWithoutLastSlash = if (delimiterIndex > 0) delimiter + decodedPath.substring(0, delimiterIndex) else ""
      val s3BucketName = requestS3.s3BucketPath.getOrElse(delimiter)
      val s3pathWithoutLastDelimiter = if (s3BucketName.length > 1 && s3BucketName.endsWith(delimiter)) s3BucketName.substring(0, s3BucketName.length - 1) else s3BucketName
      s3pathWithoutLastDelimiter + pathToCheckWithoutLastSlash
    }

    Flow[ByteString].via(XmlParsing.parser)
      .statefulMapConcat(() => {
        // state
        val keyTagValue = StringBuilder.newBuilder
        val allContentsElements = new ListBuffer[ParseEvent]
        var isContentsTag = false
        var isKeyTag = false

        // aggregation function
        parseEvent =>
          parseEvent match {
            //catch <Contents> to start collecting elements
            case element: StartElement if element.localName == "Contents" =>
              isContentsTag = true
              allContentsElements.clear()
              allContentsElements += element
              immutable.Seq.empty
            //catch end </Contents> to validate the path in ranger
            case element: EndElement if element.localName == "Contents" =>
              isContentsTag = false
              allContentsElements += element
              if (isPathOkInRangerPolicy(keyTagValue.stripMargin)) {
                allContentsElements.toList
              } else {
                immutable.Seq.empty
              }
            // catch <Key> where is the patch name to match in ranger
            case element: StartElement if element.localName == "Key" =>
              keyTagValue.clear()
              isKeyTag = true
              elementResult(allContentsElements, isContentsTag, element)
            //catch end </Key>
            case element: EndElement if element.localName == "Key" =>
              isKeyTag = false
              elementResult(allContentsElements, isContentsTag, element)
            //catch all element text <..>text<\..> but only set the text from <Key>
            case element: TextEvent =>
              if (isKeyTag) keyTagValue.append(element.text)
              elementResult(allContentsElements, isContentsTag, element)
            //just past through the rest of elements
            case element =>
              elementResult(allContentsElements, isContentsTag, element)
          }
      })
      .via(XmlWriting.writer)
  }

} 
Example 84
Source File: FilterRecursiveMultiDeleteSpec.scala    From rokku   with Apache License 2.0 5 votes vote down vote up
package com.ing.wbaa.rokku.proxy.handler

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import akka.util.ByteString
import com.ing.wbaa.rokku.proxy.handler.FilterRecursiveMultiDelete._
import org.scalatest.diagrams.Diagrams
import org.scalatest.wordspec.AsyncWordSpec

import scala.collection.mutable.ListBuffer
import scala.concurrent.ExecutionContext
import scala.util.Random

class FilterRecursiveMultiDeleteSpec extends AsyncWordSpec with Diagrams {

  implicit val system: ActorSystem = ActorSystem.create("test-system")
  override implicit val executionContext: ExecutionContext = system.dispatcher

  implicit def materializer: ActorMaterializer = ActorMaterializer()(system)

  val multiDeleteRequestXml: String = scala.io.Source.fromResource("multiDeleteRequest.xml").mkString.stripMargin.trim
  val multiDeleteRequestV4Xml: String = scala.io.Source.fromResource("multiDeleteRequestV4.xml").mkString.stripMargin.trim
  val multiPartComplete: String = scala.io.Source.fromResource("multipartUploadComplete.xml").mkString.stripMargin.trim
  val data: Source[ByteString, NotUsed] = Source.single(ByteString.fromString(multiDeleteRequestXml))
  val dataV4: Source[ByteString, NotUsed] = Source.single(ByteString.fromString(multiDeleteRequestV4Xml))
  val otherData: Source[ByteString, NotUsed] = Source.single(ByteString.fromString(multiPartComplete))

  val numberOfObjects = 1000

  "multiDelete request" should {
    "should be parsed to objects list" in {
      exctractMultideleteObjectsFlow(data).map { r =>
        assert(r.contains("testuser/file1"))
        assert(r.contains("testuser/file2"))
        assert(r.contains("testuser/file3"))
      }
    }
    "v4 should be parsed to objects list" in {
      exctractMultideleteObjectsFlow(dataV4).map { r =>
        assert(r.contains("testuser/issue"))
        assert(!r.contains("true"))
      }
    }

    "should return empty list" in {
      exctractMultideleteObjectsFlow(otherData).map(r => assert(r == Vector()))
    }

    "should return correct size for large xml objects" in {
      val rand = new Random()
      val doc = new ListBuffer[String]()
      for (c <- 1 to numberOfObjects) doc +=
        s"<Object><Key>testuser/one/two/three/four/five/six/seven/eight/nine/ten/eleven/twelve/sub$c/${rand.alphanumeric.take(32).mkString}=${rand.alphanumeric.take(12).mkString}.txt</Key></Object>"

      exctractMultideleteObjectsFlow(Source.single(ByteString("<Delete>" + doc.mkString + "</Delete>"))).map { r =>
        assert(r.length == numberOfObjects)
      }
    }
  }
} 
Example 85
Source File: KernelBlockLinearMapper.scala    From keystone   with Apache License 2.0 5 votes vote down vote up
package keystoneml.nodes.learning

import scala.reflect.ClassTag
import scala.collection.mutable.ListBuffer

import breeze.linalg._

import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD

import keystoneml.nodes.stats.{StandardScalerModel, StandardScaler}
import keystoneml.nodes.util.{VectorSplitter, Identity}

import keystoneml.utils.{MatrixUtils, Stats}
import keystoneml.workflow.{Transformer, LabelEstimator}


class KernelBlockLinearMapper[T: ClassTag](
    val model: Seq[DenseMatrix[Double]],
    blockSize: Int,
    kernelTransformer: KernelTransformer[T],
    nTrain: Long,
    blocksBeforeCheckpoint: Int = 25)
  extends Transformer[T, DenseVector[Double]] {

  val numClasses = model(0).cols
  val numBlocks = model.size

  override def apply(in: RDD[T]): RDD[DenseVector[Double]] = {
    val testKernelMat = kernelTransformer(in)
    // Initially all predictions are 0
    var predictions = in.mapPartitions { iter =>
      if (iter.hasNext) {
        val out = DenseMatrix.zeros[Double](iter.size, numClasses)
        Iterator.single(out)
      } else {
        Iterator.empty
      }
    }.cache()

    val modelBCs = new ListBuffer[Broadcast[DenseMatrix[Double]]]

    (0 until numBlocks).foreach { block =>
      val blockIdxs = (blockSize * block) until (math.min(nTrain.toInt, (block + 1) * blockSize))
      val testKernelBlock = testKernelMat(blockIdxs.toSeq)
      val modelBlockBC = in.context.broadcast(model(block))
      modelBCs += modelBlockBC

      // Update predictions
      var predictionsNew = predictions.zip(testKernelBlock).map { case(pred, testKernelBB) =>
        pred :+ (testKernelBB * modelBlockBC.value)
      }

      predictionsNew.cache()
      predictionsNew.count()
      predictions.unpersist(true)

      testKernelMat.unpersist(blockIdxs.toSeq)
      modelBlockBC.unpersist(true)

      // If we are checkpointing update our cache
      if (in.context.getCheckpointDir.isDefined &&
          block % blocksBeforeCheckpoint == (blocksBeforeCheckpoint - 1)) {
        predictionsNew = MatrixUtils.truncateLineage(predictionsNew, false)
      }
      predictions = predictionsNew
    }
    predictions.flatMap(x => MatrixUtils.matrixToRowArray(x))
  }

  def apply(in: T): DenseVector[Double]  = {
    val testKernelRow = kernelTransformer(in)
    val predictions = DenseVector.zeros[Double](numClasses)
    (0 until numBlocks).foreach { block =>
      val blockIdxs = (blockSize * block) until (math.min(nTrain.toInt, (block + 1) * blockSize))
      predictions += (testKernelRow(blockIdxs) * model(block)).toDenseVector
    }
    predictions
  }
} 
Example 86
Source File: TruckAndTrafficJoinBolt.scala    From trucking-iot   with Apache License 2.0 5 votes vote down vote up
package com.orendainx.trucking.storm.bolts

import java.util

import com.orendainx.trucking.commons.models.{EnrichedTruckAndTrafficData, EnrichedTruckData, TrafficData}
import com.typesafe.scalalogging.Logger
import org.apache.storm.task.{OutputCollector, TopologyContext}
import org.apache.storm.topology.OutputFieldsDeclarer
import org.apache.storm.topology.base.BaseWindowedBolt
import org.apache.storm.tuple.{Fields, Values}
import org.apache.storm.windowing.TupleWindow

import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer
import scala.collection.{Map, mutable}
import scala.language.implicitConversions


  private def processAndEmitData(truckDataPerRoute: Map[Int, ListBuffer[EnrichedTruckData]],
                                 trafficDataPerRoute: Map[Int, ListBuffer[TrafficData]]) {

    // For each EnrichedTruckData object, find the TrafficData object with the closest timestamp
    truckDataPerRoute.foreach { case (routeId, truckDataList) =>
      trafficDataPerRoute.get(routeId) match {
        case None => // No traffic data for this routeId, so drop/ignore truck data
        case Some(trafficDataList) =>
          truckDataList foreach { truckData =>
            trafficDataList.sortBy(data => math.abs(data.eventTime - truckData.eventTime)).headOption match {
              case None => // Window didn't capture any traffic data for this truck's route
              case Some(trafficData) =>

                val joinedData = EnrichedTruckAndTrafficData(truckData.eventTime, truckData.truckId, truckData.driverId, truckData.driverName,
                  truckData.routeId, truckData.routeName, truckData.latitude, truckData.longitude, truckData.speed,
                  truckData.eventType, truckData.foggy, truckData.rainy, truckData.windy, trafficData.congestionLevel)

                outputCollector.emit(new Values("EnrichedTruckAndTrafficData", joinedData))
            }
          }
      }
    }
  }

  override def declareOutputFields(declarer: OutputFieldsDeclarer): Unit = declarer.declare(new Fields("dataType", "data"))
} 
Example 87
Source File: Connection.scala    From finagle-postgres   with Apache License 2.0
package com.twitter.finagle.postgres.connection

import java.util.concurrent.atomic.AtomicInteger

import com.twitter.finagle.postgres.messages._
import com.twitter.logging.Logger
import scala.collection.mutable.ListBuffer


class Connection(startState: State = AuthenticationRequired) {
  val id = Connection.nextId()
  private[this] val logger = Logger(s"${getClass.getName}.connection-$id")
  private[this] val stateMachine = new ConnectionStateMachine(startState, id)


  def send(msg: FrontendMessage) = {
    logger.ifDebug("Sent frontend message of type: %s".format(msg.getClass.getName))

    msg match {
      case q: Query =>
        logger.ifDebug("Query: %s".format(q.str))
      case _ =>
    }

    stateMachine.onEvent(msg)
  }

  def receive(msg: BackendMessage): Option[PgResponse] = {
    logger.ifDebug("Received backend message of type: %s".format(msg.getClass.getName))

    val result = stateMachine.onEvent(msg)
    result foreach {
      r => logger.ifDebug(s"Emitting result ${r.getClass.getName}")
    }
    result
  }
}

object Connection {
  private[this] val currentId = new AtomicInteger(0)
  private def nextId() = currentId.getAndIncrement()
} 
Example 88
Source File: AbstractInMemAntlrGrammar.scala    From rug   with GNU General Public License v3.0
package com.atomist.tree.content.text.grammar.antlr

import com.atomist.rug.RugRuntimeException
import com.typesafe.scalalogging.LazyLogging
import org.antlr.v4.Tool
import org.antlr.v4.tool.{ANTLRMessage, ANTLRToolListener}
import org.snt.inmemantlr.GenericParser
import org.snt.inmemantlr.tool.ToolCustomizer
import org.stringtemplate.v4.ST

import scala.collection.mutable.ListBuffer

case class ParserSetup(
                        grammars: Seq[String],
                        parser: GenericParser,
                        production: String
                      )


  protected def setup: ParserSetup

  private def compileGrammar(parser: GenericParser): Unit = {
    try {
      parser.compile()
    }
    catch {
      case t: Throwable =>
        logger.warn(s"Encountered Antlr exception ${t.getMessage}", t)
    }
    finally {
      if (errorStore.hasErrors)
        throw new RugRuntimeException(null, errorStore.toMessage, null)
    }
  }

  protected val config: ParserSetup = setup

  logger.debug(s"Compiling grammar-----\n$config\n-----")
  compileGrammar(config.parser)

  override def customize(tool: Tool): Unit = {
    errorStore.setTool(tool)
    tool.addListener(errorStore)
  }
}

@FunctionalInterface
trait ToolListenerCreator {

  def createListener(tool: Tool): ANTLRToolListener
}

class ErrorStoringToolListener extends ANTLRToolListener {

  private var tool: Tool = _

  private val _errors = new ListBuffer[String]

  def setTool(t: Tool): Unit = {
    this.tool = t
  }

  def errors: Seq[String] = _errors

  private def toSingleLineIfNecessary(msg: String) =
    if (tool.errMgr.formatWantsSingleLineMessage)
      msg.replace('\n', ' ')
    else msg

  override def info(msg: String) {
    val toShow = toSingleLineIfNecessary(msg)
  }

  override def error(msg: ANTLRMessage) {
    val msgST: ST = tool.errMgr.getMessageTemplate(msg)
    val outputMsg: String = msgST.render
    _errors.append(toSingleLineIfNecessary(outputMsg))
  }

  override def warning(msg: ANTLRMessage) {
    val msgST: ST = tool.errMgr.getMessageTemplate(msg)
    val outputMsg: String = msgST.render
  }

  def hasErrors: Boolean = _errors.nonEmpty

  def toMessage: String = {
    errors.mkString("\n")
  }
} 
Example 89
Source File: OverwritableTextInFile.scala    From rug   with GNU General Public License v3.0
package com.atomist.tree.content.text

import com.atomist.rug.kind.core.FileArtifactBackedMutableView
import com.atomist.source.StringFileArtifact
import com.atomist.tree.{AddressableTreeNode, PaddingTreeNode, ParentAwareTreeNode, TreeNode}
import com.atomist.tree.TreeNode.Noise
import com.atomist.tree.utils.TreeNodeUtils

import scala.collection.mutable.ListBuffer



  private def requireReady[T](result: => T): T = {
    if (state != Ready)
      throw new IllegalStateException(s"This is only valid when the node is Ready but I am in $state")
    result
  }

  private def claimChildren(): Unit = allKids.foreach {
    case ch: OverwritableTextTreeNodeChild =>
      ch.setParent(this, determineLocationStep(visibleChildren, ch), this)
    case _ => // padding, whatever
  }

  private def determineLocationStep(visibleChildren: Seq[TreeNode], forChild: TreeNode): String = {
    if (allKids.size == 1) {
      // there's only one thing here. We parsed the whole file.
      s"$dynamicType()"
    } else {
      // there are multiple matches here. Index
      val childrenBeforeThis = visibleChildren.takeWhile(_ != forChild)
      val thisChildsIndex = childrenBeforeThis.size
      // XPath indexes from 1
      s"$dynamicType()[${thisChildsIndex + 1}]"
    }
  }
} 
Example 90
Source File: ExtractApplicationProperties.scala    From rug   with GNU General Public License v3.0
package com.atomist.rug.kind.java

import com.atomist.tree.content.project.{ConfigValue, Configuration, SimpleConfigValue, SimpleConfiguration}
import com.atomist.source.FileArtifact
import org.apache.commons.lang3.StringUtils

import scala.collection.mutable.ListBuffer
import scala.io.Source


class ExtractApplicationProperties(source: String) extends Function[FileArtifact, Configuration] {

  override def apply(f: FileArtifact): Configuration = {
    val isWhiteSpace: String => Boolean = line => StringUtils.isWhitespace(line)
    val isComment: String => Boolean = line => !isWhiteSpace(line) && line.dropWhile(c => c.isWhitespace).startsWith("#")
    val isContent: String => Boolean = line => !(isWhiteSpace(line) || isComment(line))

    trait State
    object InComment extends State
    object InBlanks extends State

    var state: State = InComment
    var comment = ""
    val configValues = new ListBuffer[ConfigValue]()

    // Strip # and whitespace from comments (respecting multiline comments)
    def extractComment(comment: String): String = {

      def toCommentContentLine(l: String) = {
        val r = l.dropWhile(c => c.isWhitespace || '#'.equals(c))
        r
      }

      val r = comment.lines.map(l => toCommentContentLine(l)).mkString("\n")
      r
    }

    // Return None if not a valid property line
    def parseContentLine(line: String): Option[ConfigValue] = {
      val stripped = line.dropWhile(c => c.isWhitespace)
      val idx = stripped.indexOf("=")
      if (idx == -1) {
        None
      }
      else {
        val (key, value) = stripped.splitAt(idx)
        val profile = ""
        Some(SimpleConfigValue(key, value.substring(1), source, profile, description = extractComment(comment)))
      }
    }

    def appendToComment(l: String): Unit = {
      if ("".equals(comment)) comment = l
      else comment = comment + "\n" + l
    }

    val lines = Source.fromString(f.content).getLines()
    for (line <- lines) {
      if (isContent(line)) {
        parseContentLine(line).foreach(cv => configValues.append(cv))
        comment = ""
      }
      else state match {
        case InBlanks if isComment(line) =>
          state = InComment
          appendToComment(line)
        case InComment if isComment(line) || isWhiteSpace(line) =>
          appendToComment(line)
        case InComment =>
          comment = ""
          state = InBlanks
        case _ =>
      }
    }
    new SimpleConfiguration(configValues)
  }
} 
Example 91
Source File: EventHandlerScenarioWorld.scala    From rug   with GNU General Public License v3.0
package com.atomist.rug.test.gherkin.handler.event

import com.atomist.graph.GraphNode
import com.atomist.project.archive.Rugs
import com.atomist.rug.RugNotFoundException
import com.atomist.rug.runtime.js.{JavaScriptEventHandler, RugContext}
import com.atomist.rug.runtime.js.interop.NashornMapBackedGraphNode
import com.atomist.rug.runtime.{EventHandler, SystemEvent}
import com.atomist.rug.test.gherkin.{Definitions, GherkinExecutionListener, GherkinRunnerConfig, PathExpressionEvaluation}
import com.atomist.rug.test.gherkin.handler.AbstractHandlerScenarioWorld
import com.atomist.tree.TreeMaterializer
import com.atomist.tree.pathexpression.PathExpression

import scala.collection.mutable.ListBuffer


  def sendEvent(e: AnyRef): Unit = {
    val gn = NashornMapBackedGraphNode.toGraphNode(e).getOrElse(
      throw new IllegalArgumentException(s"Cannot make a GraphNode out of $e")
    )
    if (registeredHandlers.isEmpty)
      throw new IllegalStateException("No handler is registered")
    for (h <- registeredHandlers) {
      handleEventNode(gn, h)
    }
  }

  private def handleEventNode(eventNode: GraphNode, h: EventHandler) = {
    if (eventNode.nodeTags.contains(h.rootNodeName)) {
      val tm = new TreeMaterializer {
        override def rootNodeFor(e: SystemEvent, pe: PathExpression) = eventNode

        override def hydrate(teamId: String, rawRootNode: GraphNode, pe: PathExpression) = rawRootNode
      }
      val rugContext: RugContext = createRugContext(tm)

      // Check if it matches, if we can
      notifyListenersOfMatchResult(eventNode, h, rugContext)

      //println("About to handle event")
      val plan = h.handle(rugContext, SystemEvent(rugContext.teamId, h.rootNodeName, 1))
      plan.foreach(recordPlan(h.name, _))
    }
    else {
      notifyListenersOfMatchPossible(eventNode, h)
    }
  }

  private def notifyListenersOfMatchResult(eventNode: GraphNode, h: EventHandler, rugContext: RugContext) = {
    h match {
      case jsh: JavaScriptEventHandler =>
        rugContext.pathExpressionEngine.ee.evaluate(JavaScriptEventHandler.rootNodeFor(eventNode), jsh.pathExpression, rugContext) match {
          case Right(nodes) =>
            //println(s"Results for [${jsh.pathExpressionStr}] were ${nodes}")
            for (l <- listeners)
              l.pathExpressionResult(PathExpressionEvaluation(jsh.pathExpressionStr, eventNode, nodes))
          case Left(_) =>
          // The evaluation failed. Who cares. The test will blow up anyway
        }
      case _ =>
      // We can't find the path expression to check the match for
    }
  }

  private def notifyListenersOfMatchPossible(eventNode: GraphNode, h: EventHandler) = {
    h match {
      case jsh: JavaScriptEventHandler =>
        for (l <- listeners)
          l.pathExpressionResult(PathExpressionEvaluation(jsh.pathExpressionStr, eventNode, Nil))
      case _ =>
      // We can't find the path expression to check the match for
    }
  }

} 
Example 92
Source File: CallLogger.scala    From udash-core   with Apache License 2.0
package io.udash.web.guide.demos.activity

import scala.collection.mutable.ListBuffer

class CallLogger {
  private val _calls = ListBuffer.empty[Call]

  def append(call: Call): Unit = _calls.synchronized {
    _calls += call
    if (_calls.size > 20) _calls.remove(0, _calls.size-20)
  }

  def calls: List[Call] = _calls.synchronized {
    _calls.toList
  }
} 
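
CallLogger above illustrates a compact pattern: a ListBuffer used as a bounded, thread-safe log by synchronizing on the buffer and trimming the oldest entries after each append. A minimal standalone sketch of the same idea, with a plain String payload standing in for the project's Call type so it compiles on its own:

import scala.collection.mutable.ListBuffer

class BoundedLog(maxSize: Int) {
  private val entries = ListBuffer.empty[String]

  def append(entry: String): Unit = entries.synchronized {
    entries += entry
    // drop the oldest entries so at most maxSize remain
    if (entries.size > maxSize) entries.remove(0, entries.size - maxSize)
  }

  def snapshot: List[String] = entries.synchronized {
    entries.toList // toList copies, so callers never observe later mutations
  }
}
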
Example 93
Source File: UrlLoggingTest.scala    From udash-core   with Apache License 2.0
package io.udash.routing

import io.udash._
import io.udash.core.Url
import io.udash.testing._

import scala.collection.mutable.ListBuffer

class UrlLoggingTest extends AsyncUdashFrontendTest with TestRouting {
  "UrlLogging" should {
    "call logging impl on url change" in {
      val urlWithRef = ListBuffer.empty[(String, Option[String])]

      new TestViewFactory[TestState]: ViewFactory[_ <: TestState]

      initTestRouting(default = () => new TestViewFactory[TestState])
      val initUrl = Url("/")
      val urlProvider: TestUrlChangeProvider = new TestUrlChangeProvider(initUrl)
      val app = new Application[TestState](routing, vpRegistry, urlProvider) with UrlLogging[TestState] {
        override protected def log(url: String, referrer: Option[String]): Unit = {
          urlWithRef += ((url, referrer))
        }
      }
      app.run(emptyComponent())

      val urls = Seq("/", "/next", "/abc/1", "/next")
      val expected = (urls.head, Some("")) :: urls.sliding(2).map { case Seq(prev, current) => (current, Some(prev)) }.toList
      urls.foreach(str => app.goTo(routing.matchUrl(Url(str))))
      retrying(urlWithRef.toList shouldBe expected)
    }
  }
} 
Example 94
Source File: Benchmark.scala    From cct-nn   with Apache License 2.0
package toolkit.neuralnetwork.performance

import com.typesafe.scalalogging.StrictLogging
import toolkit.neuralnetwork.examples.networks.CIFAR

import scala.collection.mutable.ListBuffer
import libcog._


object Benchmark extends App with StrictLogging {
  val (net, batchSize) = args.length match {
    case 0 => ("cifar10_quick", 256)
    case 1 => (args(0), 256)
    case 2 => (args(0), args(1).toInt)
    case _ => throw new RuntimeException(s"illegal arguments (${args.toList})")
  }

  require(net == "cifar10_quick", s"network $net isn't supported")

  logger.info(s"net: $net")
  logger.info(s"batch size: $batchSize")

  val cg1 = new ComputeGraph {
    val net = new CIFAR(useRandomData = true, learningEnabled = false, batchSize = batchSize)
  }

  val forward = new ListBuffer[Double]()
  val backward = new ListBuffer[Double]()

  cg1 withRelease {
    logger.info(s"starting compilation (inference)")
    cg1.step
    logger.info(s"compilation finished (inference)")

    for (i <- 1 to 50) {
      val start = System.nanoTime()
      cg1.step
      val stop = System.nanoTime()
      val elapsed = (stop - start).toDouble / 1e6
      logger.info(s"Iteration: $i forward time: $elapsed ms.")
      forward += elapsed
    }
  }

  val cg2 = new ComputeGraph {
    val net = new CIFAR(useRandomData = true, learningEnabled = true, batchSize = batchSize)
  }

  cg2 withRelease {
    logger.info(s"starting compilation (learning)")
    cg2.step
    logger.info(s"compilation finished (learning)")

    for (i <- 1 to 50) {
      val start = System.nanoTime()
      cg2.step
      val stop = System.nanoTime()
      val elapsed = (stop - start).toDouble / 1e6
      logger.info(s"Iteration: $i forward-backward time: $elapsed ms.")
      backward += elapsed
    }
  }

  logger.info(s"Average Forward pass: ${forward.sum / forward.length} ms.")
  logger.info(s"Average Forward-Backward: ${backward.sum / backward.length} ms.")
} 
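
The benchmark above collects per-iteration timings in two ListBuffers and averages them once the loops finish. The timing-and-accumulation pattern on its own, stripped of the Cog compute graphs, might look like this sketch (Thread.sleep stands in for the real workload):

import scala.collection.mutable.ListBuffer

object TimingSketch extends App {
  def timeMillis(block: => Unit): Double = {
    val start = System.nanoTime()
    block
    (System.nanoTime() - start).toDouble / 1e6
  }

  val samples = new ListBuffer[Double]()
  for (i <- 1 to 10) {
    samples += timeMillis { Thread.sleep(5) }
  }
  println(s"Average: ${samples.sum / samples.length} ms over ${samples.length} runs")
}
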
Example 95
Source File: CosmosDBTestSupport.scala    From openwhisk   with Apache License 2.0
package org.apache.openwhisk.core.database.cosmosdb

import com.microsoft.azure.cosmosdb.{Database, SqlParameter, SqlParameterCollection, SqlQuerySpec}
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike}
import pureconfig._
import pureconfig.generic.auto._
import org.apache.openwhisk.core.ConfigKeys
import org.apache.openwhisk.core.database.test.behavior.ArtifactStoreTestUtil.storeAvailable

import scala.collection.mutable.ListBuffer
import scala.util.{Random, Try}

trait CosmosDBTestSupport extends FlatSpecLike with BeforeAndAfterAll with RxObservableImplicits {
  private val dbsToDelete = ListBuffer[Database]()

  lazy val storeConfigTry = Try { loadConfigOrThrow[CosmosDBConfig](ConfigKeys.cosmosdb) }
  lazy val client = storeConfig.createClient()
  val useExistingDB = java.lang.Boolean.getBoolean("whisk.cosmosdb.useExistingDB")

  def storeConfig = storeConfigTry.get

  override protected def withFixture(test: NoArgTest) = {
    assume(storeAvailable(storeConfigTry), "CosmosDB not configured or available")
    super.withFixture(test)
  }

  protected def generateDBName() = {
    s"travis-${getClass.getSimpleName}-${Random.alphanumeric.take(5).mkString}"
  }

  protected def createTestDB() = {
    if (useExistingDB) {
      val db = getOrCreateDatabase()
      println(s"Using existing database ${db.getId}")
      db
    } else {
      val databaseDefinition = new Database
      databaseDefinition.setId(generateDBName())
      val db = client.createDatabase(databaseDefinition, null).blockingResult()
      dbsToDelete += db
      println(s"Created database ${db.getId}")
      db
    }
  }

  private def getOrCreateDatabase(): Database = {
    client
      .queryDatabases(querySpec(storeConfig.db), null)
      .blockingOnlyResult()
      .getOrElse {
        client.createDatabase(newDatabase, null).blockingResult()
      }
  }

  protected def querySpec(id: String) =
    new SqlQuerySpec("SELECT * FROM root r WHERE r.id=@id", new SqlParameterCollection(new SqlParameter("@id", id)))

  private def newDatabase = {
    val databaseDefinition = new Database
    databaseDefinition.setId(storeConfig.db)
    databaseDefinition
  }

  override def afterAll(): Unit = {
    super.afterAll()
    if (!useExistingDB) {
      dbsToDelete.foreach(db => client.deleteDatabase(db.getSelfLink, null).blockingResult())
    }
    client.close()
  }
} 
Example 96
Source File: ActivationStoreBehaviorBase.scala    From openwhisk   with Apache License 2.0
package org.apache.openwhisk.core.database.test.behavior

import java.time.Instant

import akka.stream.ActorMaterializer
import common.{StreamLogging, WskActorSystem}
import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.database.{ActivationStore, CacheChangeNotification, UserContext}
import org.apache.openwhisk.core.database.test.behavior.ArtifactStoreTestUtil.storeAvailable
import org.apache.openwhisk.core.entity._
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import org.scalatest.{BeforeAndAfterEach, FlatSpec, Matchers, Outcome}

import scala.collection.mutable.ListBuffer
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.concurrent.duration.DurationInt
import scala.language.postfixOps
import scala.util.{Random, Try}

trait ActivationStoreBehaviorBase
    extends FlatSpec
    with ScalaFutures
    with Matchers
    with StreamLogging
    with WskActorSystem
    with IntegrationPatience
    with BeforeAndAfterEach {

  protected implicit val materializer: ActorMaterializer = ActorMaterializer()
  protected implicit val notifier: Option[CacheChangeNotification] = None

  def context: UserContext
  def activationStore: ActivationStore
  private val docsToDelete = ListBuffer[(UserContext, ActivationId)]()

  def storeType: String

  protected def transId() = TransactionId(Random.alphanumeric.take(32).mkString)

  override def afterEach(): Unit = {
    cleanup()
    stream.reset()
  }

  override protected def withFixture(test: NoArgTest): Outcome = {
    assume(storeAvailable(storeAvailableCheck), s"$storeType not configured or available")
    val outcome = super.withFixture(test)
    if (outcome.isFailed) {
      println(logLines.mkString("\n"))
    }
    outcome
  }

  protected def storeAvailableCheck: Try[Any] = Try(true)
  //~----------------------------------------< utility methods >

  protected def store(activation: WhiskActivation, context: UserContext)(
    implicit transid: TransactionId,
    notifier: Option[CacheChangeNotification]): DocInfo = {
    val doc = activationStore.store(activation, context).futureValue
    docsToDelete.append((context, ActivationId(activation.docid.asString)))
    doc
  }

  protected def newActivation(ns: String, actionName: String, start: Long): WhiskActivation = {
    WhiskActivation(
      EntityPath(ns),
      EntityName(actionName),
      Subject(),
      ActivationId.generate(),
      Instant.ofEpochMilli(start),
      Instant.ofEpochMilli(start + 1000))
  }

  
  def cleanup()(implicit timeout: Duration = 10 seconds): Unit = {
    implicit val tid: TransactionId = transId()
    docsToDelete.map { e =>
      Try {
        Await.result(activationStore.delete(e._2, e._1), timeout)
      }
    }
    docsToDelete.clear()
  }

} 
Example 97
Source File: WorkflowTools.scala    From regressr   with Apache License 2.0
package org.ebayopensource.regression.internal.workflow

import java.net.URI

import org.ebayopensource.regression.internal.http.{BaseHttpClient, HTTPRequest, HTTPResponse}
import org.ebayopensource.regression.internal.reader.{RequestEntry, TestStrategy}

import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.util.{Failure, Success, Try}


object WorkflowTools {

  def performRequest(testIdentifier: String, strategy: TestStrategy, request: RequestEntry, httpClient: BaseHttpClient): Try[HTTPResponse] = Try {
    // scalastyle:off
    val tryHTTPResponse = for {
      httpRequest <- convertRequestEntryToRequest(request, strategy)
      httpResponse <- httpClient.execute(httpRequest)
    } yield (httpResponse)
    // scalastyle:on

    tryHTTPResponse match {
      case Success(r) => r
      case Failure(t) => throw t
    }
  }

  def convertRequestEntryToRequest(request: RequestEntry, strategy: TestStrategy): Try[HTTPRequest] = Try {
    val body = if (request.requestBuilder.isEmpty) None else Some(request.requestBuilder.get.buildRequest(request.dataInput).get)
    HTTPRequest(new URI(strategy.service.baseURL.toString + request.path), strategy.headers ++ request.extraHeaders, request.method, body)
  }

  // scalastyle:off
  def performContinuations(testIdentifier: String, testStrategy: TestStrategy, requestEntry: RequestEntry, httpClient: BaseHttpClient): Try[Seq[HTTPResponse]] = Try {
    val queue = mutable.Queue[HTTPRequest]()
    val httpResponses = ListBuffer[HTTPResponse]()

    val firstRequest = convertRequestEntryToRequest(requestEntry, testStrategy)
    if (firstRequest.isFailure) throw firstRequest.failed.get

    queue.enqueue(firstRequest.get)

    while (!queue.isEmpty) {
      httpClient.execute(queue.dequeue()) match {
        case Success(httpResponse) => {
          if (requestEntry.progressPrinter.isDefined) requestEntry.progressPrinter.get.printProgress(httpResponse)
          httpResponses += httpResponse
          val continuationRequests = requestEntry.continuation.get.getContinuations(httpResponse)
          continuationRequests match {
            case Success(requests) => {
              requests.foreach(request => {
                queue.enqueue(request)
              })
            }
            case Failure(t) => {
              throw t
            }
          }
        }
        case Failure(t) => {
          throw t
        }
      }
    }
    httpResponses
  }
  // scalastyle:on

} 
Example 98
Source File: SimpleHTTPJsonComparator.scala    From regressr   with Apache License 2.0
package org.ebayopensource.regression.internal.components.comparator

import org.ebayopensource.regression.internal.common.Util
import org.ebayopensource.regression.internal.components.comparator.common.{Comparator, CompareMessage}
import org.ebayopensource.regression.internal.components.recorder.common.{RecordingEntry, RequestRecordingEntry, RequestRecordingEntryTypes}
import org.ebayopensource.regression.json.CustomJsonAssert

import scala.collection.mutable.ListBuffer
import scala.util.Try



class SimpleHTTPJsonComparator extends Comparator {

  override def compare(recorded: RequestRecordingEntry, replayed: RequestRecordingEntry): Try[Seq[CompareMessage]] = Try {
    if (recorded.entries.size != replayed.entries.size) {
      Seq(CompareMessage(s"Recorded entries and replayed entries were not equal ${recorded.entries.size} vs ${replayed.entries.size}.",
        recorded.entries.size.toString, replayed.entries.size.toString))
    }
    else {
      val entries: Seq[(RecordingEntry, RecordingEntry)] = {
        for ((recordedEntry, replayedEntry) <- recorded.entries zip replayed.entries) yield (recordedEntry, replayedEntry)
      }

      val compareDataSeq = ListBuffer[CompareMessage]()
      entries.foreach {
        case (recorded, replayed) => {
          if (recorded.entryType != replayed.entryType) {
            compareDataSeq += CompareMessage(s"Recorded entry type and replayed entry type do not match ${recorded.entryType} vs ${replayed.entryType}",
              recorded.entryType.toString, replayed.entryType.toString)
          }
          else {
            recorded.entryType match {
              case RequestRecordingEntryTypes.JSON => {
                getJSONComparisonErrors(recorded.data, replayed.data).foreach {
                  entry => {
                    entry.split(" ; ").foreach {
                      token => compareDataSeq += CompareMessage(token, recorded.data.replace("\\", ""), replayed.data.replace("\\", ""))
                    }
                  }
                }
              }
              case RequestRecordingEntryTypes.STRING => {
                if (!recorded.data.equals(replayed.data)) {
                  compareDataSeq += CompareMessage(s"The ${recorded.description} did not match between recorded and replayed. " +
                    s"${recorded.data} vs ${replayed.data}", recorded.data, replayed.data)
                }
              }
            }
          }
        }
      }

      compareDataSeq
    }
  }

  val mapper = Util.getMapper()

  def getJSONComparisonErrors(prev: String, newOne: String): Option[String] = {
    var cleansedOne = prev.replaceAll("\\\\","")
    var cleansedTwo = newOne.replaceAll("\\\\","")
    cleansedOne = if (cleansedOne.startsWith("\"")) cleansedOne.substring(1, cleansedOne.length-1) else cleansedOne
    cleansedTwo = if (cleansedTwo.startsWith("\"")) cleansedTwo.substring(1, cleansedTwo.length-1) else cleansedTwo
    val tryingComparison = Try(CustomJsonAssert.assertEquals(cleansedOne, cleansedTwo, false))
    if (tryingComparison.isFailure) Some(tryingComparison.failed.get.getMessage) else None
  }
} 
Example 99
package com.tomekl007.chapter_2

import java.util.concurrent.{CountDownLatch, Executors}

import org.scalatest.FunSuite

import scala.collection.mutable.ListBuffer

class MultithreadedImmutabilityTest extends FunSuite {

  test("warning: race condition with mutability") {
    //given
    val listMutable = new ListBuffer[String]()
    val executors = Executors.newFixedThreadPool(2)
    val latch = new CountDownLatch(2)

    //when
    executors.submit(new Runnable {
      override def run(): Unit = {
        latch.countDown()
        listMutable += "A"
      }
    })

    executors.submit(new Runnable {
      override def run(): Unit = {
        latch.countDown()
        if(!listMutable.contains("A")) {
          listMutable += "A"
        }
      }
    })

    latch.await()

    //then
    //listMutable can have ("A") or ("A","A")

  }

} 
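
The test above leaves the race in place on purpose: both tasks read and append to the same ListBuffer without coordination, so the buffer may end up with one "A" or two. One way to make the check-then-append atomic is to synchronize every access on the buffer; a hedged sketch of that fix, separate from the original test:

import scala.collection.mutable.ListBuffer

object SynchronizedAppend {
  private val list = new ListBuffer[String]()

  // the contains check and the append happen under one lock, so no duplicate can slip in
  def addIfAbsent(value: String): Unit = list.synchronized {
    if (!list.contains(value)) list += value
  }

  def current: List[String] = list.synchronized(list.toList)
}
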
Example 100
Source File: HmLabeledPoint.scala    From hivemall-spark   with Apache License 2.0
package org.apache.spark.ml.feature

import java.util.StringTokenizer

import scala.collection.mutable.ListBuffer

import hivemall.HivemallException

// Used for DataFrame#explode
case class HmFeature(feature: String)


case class HmLabeledPoint(label: Float = 0.0f, features: Seq[String]) {
  override def toString: String = {
    "%s,%s".format(label, features.mkString("[", ",", "]"))
  }
}

object HmLabeledPoint {

  // Simple parser for HivemallLabeledPoint
  def parse(s: String) = {
    val (label, features) = s.indexOf(',') match {
      case d if d > 0 => (s.substring(0, d), s.substring(d + 1))
      case _ => ("0.0", "[]") // Dummy
    }
    HmLabeledPoint(label.toFloat, parseTuple(new StringTokenizer(features, "[],", true)))
  }

  // TODO: Support to parse rows without labels
  private[this] def parseTuple(tokenizer: StringTokenizer): Seq[String] = {
    val items = ListBuffer.empty[String]
    var parsing = true
    var allowDelim = false
    while (parsing && tokenizer.hasMoreTokens()) {
      val token = tokenizer.nextToken()
      if (token == "[") {
        items ++= parseTuple(tokenizer)
        parsing = false
        allowDelim = true
      } else if (token == ",") {
        if (allowDelim) {
          allowDelim = false
        } else {
          throw new HivemallException("Found ',' at a wrong position.")
        }
      } else if (token == "]") {
        parsing = false
      } else {
        items.append(token)
        allowDelim = true
      }
    }
    if (parsing) {
      throw new HivemallException(s"A tuple must end with ']'.")
    }
    items
  }
} 
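
A short usage sketch for the parser above, assuming it is compiled together with the HmLabeledPoint definitions (they live in package org.apache.spark.ml.feature); the feature strings here are arbitrary placeholders in Hivemall's index:value text format:

object HmLabeledPointUsage extends App {
  val point = HmLabeledPoint.parse("1.0,[1:0.5,2:0.3]")
  println(point.label)    // 1.0
  println(point.features) // ListBuffer(1:0.5, 2:0.3)
  println(point)          // 1.0,[1:0.5,2:0.3]
}
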
Example 101
Source File: Application.scala    From spring-scala-examples   with Apache License 2.0
package hello

import org.slf4j.Logger
import org.slf4j.LoggerFactory
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.boot.CommandLineRunner
import org.springframework.boot.SpringApplication
import org.springframework.boot.autoconfigure.SpringBootApplication
import org.springframework.jdbc.core.JdbcTemplate

import collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.{ArrayBuffer, ListBuffer}

@SpringBootApplication
class Application extends CommandLineRunner {

    @Autowired
    var jdbcTemplate: JdbcTemplate = _


    val log: Logger = LoggerFactory.getLogger(classOf[Application])

    override def run(args: String*): Unit = {
        log.info("Creating tables")
        jdbcTemplate.execute("DROP TABLE customers IF EXISTS")
        jdbcTemplate.execute("CREATE TABLE customers(" +
                "id SERIAL, first_name VARCHAR(255), last_name VARCHAR(255))")

        // Split up the array of whole names into an array of first/last names
        val splitUpNames: mutable.Buffer[Array[AnyRef]] = ListBuffer("John Woo", "Jeff Dean", "Josh Bloch", "Josh Long").map(_.split(" ")).asInstanceOf[mutable.Buffer[Array[AnyRef]]]

        // Log each first/last name pair before inserting
        splitUpNames.foreach(name => log.info("Inserting customer record for %s %s".format(name(0), name(1))))

        // Uses JdbcTemplate's batchUpdate operation to bulk load data
        jdbcTemplate.batchUpdate("INSERT INTO customers(first_name, last_name) VALUES (?,?)", splitUpNames.asJava)

        log.info("Querying for customer records where first_name = 'Josh':")
//        jdbcTemplate.query(
//                "SELECT id, first_name, last_name FROM customers WHERE first_name = ?", new Object[] { "Josh" },
//                (rs, rowNum) -> new Customer(rs.getLong("id"), rs.getString("first_name"), rs.getString("last_name"))
//        ).forEach(customer -> log.info(customer.toString()))

    }
}

object Application extends App {
  SpringApplication.run(classOf[Application], args:_*)
} 
Example 102
Source File: Application.scala    From spring-scala-examples   with Apache License 2.0
package hello

import java.sql.ResultSet

import org.slf4j.Logger
import org.slf4j.LoggerFactory
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.boot.CommandLineRunner
import org.springframework.boot.SpringApplication
import org.springframework.boot.autoconfigure.SpringBootApplication
import org.springframework.jdbc.core.{JdbcTemplate, RowMapper}

import collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ListBuffer

@SpringBootApplication
class Application extends CommandLineRunner {

  @Autowired
  var jdbcTemplate: JdbcTemplate = _


  val log: Logger = LoggerFactory.getLogger(classOf[Application])

  override def run(args: String*): Unit = {
    log.info("Creating tables")
    jdbcTemplate.execute("DROP TABLE customers IF EXISTS")
    jdbcTemplate.execute("CREATE TABLE customers(" +
      "id SERIAL, first_name VARCHAR(255), last_name VARCHAR(255))")

    val splitUpNames = ListBuffer("John Woo", "Jeff Dean", "Josh Bloch", "Josh Long").map(_.split(" "))
    splitUpNames.foreach(name => log.info("Inserting customer record for %s %s".format(name(0), name(1))))

    jdbcTemplate.batchUpdate("INSERT INTO customers(first_name, last_name) VALUES (?,?)", splitUpNames.asInstanceOf[mutable.Buffer[Array[AnyRef]]].asJava)

    log.info("Querying for customer records where first_name = 'Josh':")
    jdbcTemplate.query(
      "SELECT id, first_name, last_name FROM customers WHERE first_name = ?",
      Array("Josh").asInstanceOf[Array[AnyRef]],
      // no Java 8 Lambda support in Scala pre 2.12
      new RowMapper[Customer]{
        override def mapRow(rs: ResultSet, rowNum: Int): Customer = new Customer(rs.getLong("id"), rs.getString("first_name"), rs.getString("last_name"))
      })
      // Works in Scala 2.12
      // (rs: ResultSet, rowNum: Int) => new Customer(rs.getLong("id"), rs.getString("first_name"), rs.getString("last_name"))    )
      .asScala.foreach((customer:Customer) => log.info(customer.toString))
  }
}

object Application extends App {
  SpringApplication.run(classOf[Application], args:_*)
} 
Example 103
Source File: ConnectedComponentsSpec.scala    From gemini   with GNU General Public License v3.0
package tech.sourced.gemini

import java.nio.ByteBuffer

import org.slf4j.{Logger => Slf4jLogger}
import org.scalatest.{FlatSpec, Matchers}
import tech.sourced.gemini.util.Logger

import scala.collection.mutable.ListBuffer

class TestConnectedComponents(log: Slf4jLogger) extends ConnectedComponents(log) {
  def getHashtables(): List[Byte] = List(0, 1, 2)

  def intToHash(x: Byte): ByteBuffer = ByteBuffer.wrap(Array[Byte](x))

  // emulate database, restrictions:
  // - each hashtable must have all elements
  // - results must be sorted by (hash values, key)
  def getHashValues(hashtable: Byte): Iterable[FileHash] = {
    hashtable match {
      // bucket for a&b and d&3
      case 0 => List(
        FileHash("a", intToHash(1)),
        FileHash("b", intToHash(1)),
        FileHash("c", intToHash(2)),
        FileHash("d", intToHash(3)),
        FileHash("e", intToHash(3))
      )
      // bucket for b&c
      case 1 => List(
        FileHash("a", intToHash(1)),
        FileHash("b", intToHash(2)),
        FileHash("c", intToHash(2)),
        FileHash("d", intToHash(3)),
        FileHash("e", intToHash(4))
      )
      // no bucket
      case 2 => List(
        FileHash("a", intToHash(1)),
        FileHash("b", intToHash(2)),
        FileHash("c", intToHash(3)),
        FileHash("d", intToHash(4)),
        FileHash("e", intToHash(5))
      )
    }
  }
}

class ConnectedComponentsSpec extends FlatSpec
  with Matchers {

    val logger = Logger("gemini")
    val cc = new TestConnectedComponents(logger)

    "makeBuckets" should "correctly create buckets" in {
      cc.makeBuckets()._1 shouldEqual List[List[Int]](
        // buckets from hashtable 0
        List(0, 1),
        List(3, 4),
        // bucket from hashtable 1
        List(1, 2)
      )
    }

    "elementsToBuckets" should "create correct map" in {
      val (buckets, _) = cc.makeBuckets()
      cc.elementsToBuckets(buckets) shouldEqual Map[Int, List[Int]](
        0 -> List(0),
        1 -> List(0, 2),
        2 -> List(2),
        3 -> List(1),
        4 -> List(1)
      )
    }

  "findInBuckets" should "return connected components" in {
    val (buckets, _) = cc.makeBuckets()
    val elementToBuckets = cc.elementsToBuckets(buckets)
    cc.findInBuckets(buckets, elementToBuckets) shouldEqual Map[Int, Set[Int]](
      0 -> Set(1, 2, 0),
      1 -> Set(3, 4)
    )
  }
} 
Example 104
Source File: Structure.scala    From schedoscope   with Apache License 2.0
package org.schedoscope.dsl

import scala.Array.canBuildFrom
import scala.collection.mutable.ListBuffer

abstract class Structure extends StructureDsl {
  override def namingBase = this.getClass().getSimpleName()

  private val fieldOrder = ListBuffer[Field[_]]()

  def registerField(f: Field[_]) {
    fieldOrder += f
    f.assignTo(this)
  }

  
  def fields = {
    val fieldsWithWeightsAndPosition = ListBuffer[(Long, Int, Field[_])]()

    for (i <- 0 until fieldOrder.length) {
      val field = fieldOrder(i)
      fieldsWithWeightsAndPosition.append((field.orderWeight, i, field))
    }

    fieldsWithWeightsAndPosition
      .sortWith { case ((w1, i1, _), (w2, i2, _)) => (w1 > w2) || ((w1 == w2) && (i1 < i2)) }
      .map { case (_, _, f) => f }.toSeq
  }

  lazy val fieldLikeGetters =
    this.getClass
      .getMethods()
      .filter {
        _.getParameterTypes().length == 0
      }
      .filter {
        !_.getName().contains("$")
      }
      .filter { m => classOf[FieldLike[_]].isAssignableFrom(m.getReturnType()) }

  def nameOf[F <: FieldLike[_]](f: F) =
    fieldLikeGetters
      .filter {
        _.getReturnType().isAssignableFrom(f.getClass())
      }
      .filter {
        _.invoke(this) eq f
      }
      .map {
        _.getName()
      }
      .headOption

} 
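
The fields method above relies on a small ListBuffer trick to get a deterministic ordering: each field is paired with its weight and its registration index, then sorted by weight descending and index ascending so that equally weighted fields keep their original order. The same idea on plain data, as an illustrative sketch:

import scala.collection.mutable.ListBuffer

object WeightedOrdering extends App {
  val items = ListBuffer(("a", 10L), ("b", 20L), ("c", 10L))

  val ordered = items.zipWithIndex
    .sortWith { case (((_, w1), i1), ((_, w2), i2)) => (w1 > w2) || ((w1 == w2) && (i1 < i2)) }
    .map { case ((name, _), _) => name }

  println(ordered) // b first (highest weight), then a before c (tie broken by position)
}
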
Example 105
Source File: Database.scala    From schedoscope   with Apache License 2.0
package org.schedoscope.test

import java.sql.{Connection, ResultSet, Statement}

import org.schedoscope.dsl.{FieldLike, View}
import org.schedoscope.schema.ddl.HiveQl

import scala.collection.mutable.{HashMap, ListBuffer}

class Database(conn: Connection, url: String) {

  def selectForViewByQuery(v: View, query: String, orderByField: Option[FieldLike[_]]): List[Map[String, Any]] = {
    val res = ListBuffer[Map[String, Any]]()
    var statement: Statement = null
    var rs: ResultSet = null

    try {
      statement = conn.createStatement()
      rs = statement.executeQuery(query)

      while (rs.next()) {
        val row = HashMap[String, Any]()
        v.fields.view.zipWithIndex.foreach(f => {
          row.put(f._1.n, ViewSerDe.deserializeField(f._1.t, rs.getString(f._2 + 1)))
        })
        res.append(row.toMap)
      }
    }
    finally {
      if (rs != null) try {
        rs.close()
      } catch {
        case _: Throwable =>
      }

      if (statement != null) try {
        statement.close()
      } catch {
        case _: Throwable =>
      }
    }

    orderByField match {
      case Some(f) => res.sortBy {
        _ (f.n) match {
          case null => ""
          case other => other.toString
        }
      } toList
      case None => res.toList
    }
  }

  def selectView(v: View, orderByField: Option[FieldLike[_]]): List[Map[String, Any]] =
    selectForViewByQuery(v, HiveQl.selectAll(v), orderByField)

} 
Example 106
Source File: ShapeInputFormat.scala    From magellan   with Apache License 2.0
package magellan.mapreduce

import com.google.common.base.Stopwatch
import magellan.io.{ShapeKey, ShapeWritable}
import org.apache.commons.logging.LogFactory
import org.apache.hadoop.fs.{LocatedFileStatus, Path}
import org.apache.hadoop.mapreduce.lib.input._
import org.apache.hadoop.mapreduce.{InputSplit, JobContext, TaskAttemptContext}

import scala.collection.JavaConversions._
import scala.collection.mutable.ListBuffer

private[magellan] class ShapeInputFormat
  extends FileInputFormat[ShapeKey, ShapeWritable] {

  private val log = LogFactory.getLog(classOf[ShapeInputFormat])

  override def createRecordReader(inputSplit: InputSplit,
    taskAttemptContext: TaskAttemptContext) = {
    new ShapefileReader
  }

  override def isSplitable(context: JobContext, filename: Path): Boolean = true

  override def getSplits(job: JobContext): java.util.List[InputSplit] = {
    val splitInfos = SplitInfos.SPLIT_INFO_MAP.get()
    computeSplits(job, splitInfos)
  }

  private def computeSplits(
       job: JobContext,
       splitInfos: scala.collection.Map[String, Array[Long]]) = {

    val sw = new Stopwatch().start
    val splits = ListBuffer[InputSplit]()
    val files = listStatus(job)
    for (file <- files) {
      val path = file.getPath
      val length = file.getLen
      val blkLocations = if (file.isInstanceOf[LocatedFileStatus]) {
        file.asInstanceOf[LocatedFileStatus].getBlockLocations
      } else {
        val fs = path.getFileSystem(job.getConfiguration)
        fs.getFileBlockLocations(file, 0, length)
      }
      val key = path.getName.split("\\.shp$")(0)
      if (splitInfos == null || !splitInfos.containsKey(key)) {
        val blkIndex = getBlockIndex(blkLocations, 0)
        splits.+= (makeSplit(path, 0, length, blkLocations(blkIndex).getHosts,
          blkLocations(blkIndex).getCachedHosts))
      } else {
        val s = splitInfos(key).toSeq
        val start = s
        val end = s.drop(1) ++ Seq(length)
        start.zip(end).foreach { case (startOffset: Long, endOffset: Long) =>
          val blkIndex = getBlockIndex(blkLocations, startOffset)
          splits.+=(makeSplit(path, startOffset, endOffset - startOffset, blkLocations(blkIndex).getHosts,
            blkLocations(blkIndex).getCachedHosts))
        }
      }
    }
    sw.stop
    if (log.isDebugEnabled) {
      log.debug("Total # of splits generated by getSplits: " + splits.size + ", TimeTaken: " + sw.elapsedMillis)
    }
    splits
  }
}

object SplitInfos {

  // TODO: Can we get rid of this hack to pass split calculation to the Shapefile Reader?
  val SPLIT_INFO_MAP = new ThreadLocal[scala.collection.Map[String, Array[Long]]]

} 
Example 107
Source File: DataQueryFrame.scala    From Squerall   with Apache License 2.0
package org.squerall.model

import scala.collection.mutable.ListBuffer

class DataQueryFrame {

    private var _selects : ListBuffer[(String, String, String)] = ListBuffer()
    private var _filters : ListBuffer[String] = ListBuffer()
    private var _joins : ListBuffer[(String, String, String, String)] = ListBuffer()
    private var _project : (Seq[String], Boolean) = (null,false)
    private var _orderBy : (String, Int) = ("",0)
    private var _groupBy : ListBuffer[String] = ListBuffer()
    private var _aggregate : List[(String, String)] = List()
    private var _limit : Int = 0
    private var _transform : Map[String,Array[String]] = Map()

    def addSelect(cols_table: (String, String, String)): Unit = {
        _selects += cols_table
    }

    def addFilter(condition: String) : Unit = {
        _filters += condition
    }

    def addJoin(join: (String, String, String, String)): Unit = {
        _joins += join
    }

    def addProject(p: (Seq[String], Boolean)) : Unit = {
        _project = p
    }

    def addOrderBy(ob: (String, Int)) : Unit = {
        _orderBy = ob
    }

    def addGroupBy(cols: ListBuffer[String]) : Unit = {
        _groupBy = cols
    }

    def addAggregate(agg: List[(String, String)]) : Unit = {
        _aggregate = agg
    }

    def addLimit(limitValue: Int) : Unit = {
        _limit = limitValue
    }

    // Add a single transformation
    def addTransform(col: String, transformations: Array[String]) : Unit = {
        _transform += (col -> transformations)
    }

    // Append new transformations to any previously registered ones
    def addTransformations(transformations: Map[String, Array[String]]) : Unit = {
        for (t <- transformations) {
            val col = t._1
            var trans = t._2
            if (_transform.contains(col)) {
                val oldTrans = _transform(col)
                trans = oldTrans ++ trans
            }
            _transform += (col -> trans) // overwrite the old entry with the merged transformations
        }
    }


    
    def getSelects : ListBuffer[(String, String, String)] = _selects

    def getFilters : ListBuffer[String] = _filters

    def getJoins : ListBuffer[(String, String, String, String)] = _joins

    def getProject : (Seq[String], Boolean) = _project

    def getOrderBy : (String, Int) = _orderBy

    def getGroupBy : ListBuffer[String] = _groupBy

    def getAggregate : List[(String, String)] = _aggregate

    def getLimit : Int = _limit

    def getTransform : Map[String, Array[String]] = _transform

} 
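
A brief usage sketch for the frame above, assuming the class compiles as shown; the column name and the transformation labels ("toInt", "scl") are made up for illustration. addTransform registers an initial pipeline for a column, and addTransformations merges further steps into it, per the comments above:

object DataQueryFrameUsage extends App {
  val frame = new DataQueryFrame()
  frame.addTransform("price", Array("toInt"))
  frame.addTransformations(Map("price" -> Array("scl", "5")))
  // prints the accumulated transformation list for the price column
  println(frame.getTransform("price").mkString(", "))
}
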
Example 108
Source File: P90.scala    From S99   with MIT License
package jp.co.dwango.s99

import scala.collection.mutable.ListBuffer

object P90 {
  def solve: List[List[Int]] = getPatterns(8)()
  def getPatterns(boardSize: Int)(
      col: Int = 0,
      restRow: Set[Int] = (0 until boardSize).toSet,
      limitations: List[Equation] = Nil
  ): List[List[Int]] =
    if (col == boardSize) List(Nil)
    else {
      val patterns = ListBuffer.empty[List[Int]]
      for (
        row <- restRow
        if limitations.forall(equation => !equation.check(col, row))
      ) {
        val f1 = Equation(Add(X, Y), Const(col + row)) // x + y = col + row
        val f2 = Equation(Y, Add(X, Const(row - col))) // y = x + row - col
        val restPatterns = getPatterns(boardSize)(
          col + 1,
          restRow - row,
          f1 :: f2 :: limitations
        )
        patterns ++= restPatterns.map(restPattern => row :: restPattern)
      }
      patterns.toList
    }

  sealed trait Formula {
    def calc(x: Int, y: Int): Int
  }
  case object X extends Formula {
    override def calc(x: Int, y: Int): Int = x
  }
  case object Y extends Formula {
    override def calc(x: Int, y: Int): Int = y
  }
  case class Const(i: Int) extends Formula {
    override def calc(x: Int, y: Int): Int = i
  }
  case class Add(v1: Formula, v2: Formula) extends Formula {
    override def calc(x: Int, y: Int): Int = v1.calc(x, y) + v2.calc(x, y)
  }
  case class Equation(v1: Formula, v2: Formula) {
    def check(x: Int, y: Int): Boolean = v1.calc(x, y) == v2.calc(x, y)
  }
} 
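
A tiny driver for the solver above, assuming the P90 object is available on the classpath. Each solution is a list of row indices, one per column; for the classic 8x8 board the puzzle is known to have 92 solutions:

object P90Usage extends App {
  val solutions = P90.solve
  println(s"Found ${solutions.size} solutions") // expected 92 for the 8-queens puzzle
  println(solutions.head)                       // one concrete placement: a List of 8 distinct row indices
}
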
Example 109
Source File: ReaderMacros.scala    From grafter   with MIT License
package org.zalando.grafter.macros

import scala.collection.mutable.ListBuffer
import scala.reflect.macros.whitebox

object ReaderMacros {

  
  def typeParameter(name: String)(c: whitebox.Context) = {
    import c.universe._
    val traverser = new Traverser {
      val types: ListBuffer[c.universe.TypeName] = new ListBuffer[TypeName]()

      override def traverse(tree: Tree): Unit = tree match {
        case New(AppliedTypeTree(Ident(TypeName(_)), typeIds)) =>
          types.appendAll(typeIds.collect {  case Ident(typeName: TypeName) => typeName })

        case _ =>
          super.traverse(tree)
      }
    }

    traverser.traverse(c.macroApplication)
    traverser.types.headOption match {
      case Some(t) => t
      case None    => c.abort(c.enclosingPosition, s"the @$name annotation requires a type parameter")
    }
  }

} 
Example 110
Source File: KMFeature.scala    From CMAK   with Apache License 2.0
package kafka.manager.features

import grizzled.slf4j.Logging
import kafka.manager.model.{Kafka_0_8_1_1, ClusterConfig}

import scala.collection.mutable.ListBuffer
import scala.util.{Success, Failure, Try}



trait KMFeature

sealed trait ClusterFeature extends KMFeature

case object KMLogKafkaFeature extends ClusterFeature
case object KMDeleteTopicFeature extends ClusterFeature
case object KMJMXMetricsFeature extends ClusterFeature
case object KMDisplaySizeFeature extends ClusterFeature
case object KMPollConsumersFeature extends ClusterFeature

object ClusterFeature extends Logging {
  import scala.reflect.runtime.universe

  val runtimeMirror = universe.runtimeMirror(getClass.getClassLoader)

  def from(s: String) : Option[ClusterFeature] = {
    Try {
          val clazz = s"features.$s"
          val module = runtimeMirror.staticModule(clazz)
          val obj = runtimeMirror.reflectModule(module)
          obj.instance match {
            case f: ClusterFeature =>
              f
            case _ =>
              throw new IllegalArgumentException(s"Unknown application feature $s")
          }
        } match {
      case Failure(t) =>
        error(s"Unknown application feature $s")
        None
      case Success(f) => Option(f)
    }
  }

}

case class ClusterFeatures(features: Set[ClusterFeature])

object ClusterFeatures {
  val default = ClusterFeatures(Set())
  
  def from(clusterConfig: ClusterConfig) : ClusterFeatures = {
    val buffer = new ListBuffer[ClusterFeature]
    
    if(clusterConfig.logkafkaEnabled)
      buffer+=KMLogKafkaFeature

    if(clusterConfig.jmxEnabled)
      buffer+=KMJMXMetricsFeature

    if(clusterConfig.displaySizeEnabled)
      buffer+=KMDisplaySizeFeature
    
    if(clusterConfig.version != Kafka_0_8_1_1)
      buffer+=KMDeleteTopicFeature

    if(clusterConfig.pollConsumers)
      buffer+=KMPollConsumersFeature

    ClusterFeatures(buffer.toSet)
  }
} 
Example 111
Source File: HttpClientTestSupport.scala    From wix-http-testkit   with MIT License
package com.wix.e2e.http.drivers

import java.io.DataOutputStream
import java.net.{HttpURLConnection, URL}

import akka.http.scaladsl.model.HttpMethods.GET
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers.`Transfer-Encoding`
import akka.stream.scaladsl.Source
import com.wix.e2e.http.client.extractors._
import com.wix.e2e.http.info.HttpTestkitVersion
import com.wix.e2e.http.matchers.{RequestMatcher, ResponseMatcher}
import com.wix.e2e.http.{BaseUri, HttpRequest, RequestHandler}
import com.wix.test.random._

import scala.collection.immutable
import scala.collection.mutable.ListBuffer

trait HttpClientTestSupport {
  val parameter = randomStrPair
  val header = randomStrPair
  val formData = randomStrPair
  val userAgent = randomStr
  val cookie = randomStrPair
  val path = s"$randomStr/$randomStr"
  val anotherPath = s"$randomStr/$randomStr"
  val someObject = SomeCaseClass(randomStr, randomInt)

  val somePort = randomPort
  val content = randomStr
  val anotherContent = randomStr

  val requestData = ListBuffer.empty[String]


  val bigResponse = 1024 * 1024

  def issueChunkedPostRequestWith(content: String, toPath: String)(implicit baseUri: BaseUri) = {
    val serverUrl = new URL(s"http://localhost:${baseUri.port}/$toPath")
    val conn = serverUrl.openConnection.asInstanceOf[HttpURLConnection]
    conn.setRequestMethod("POST")
    conn.setRequestProperty("Content-Type", "text/plain")
    conn.setChunkedStreamingMode(0)
    conn.setDoOutput(true)
    conn.setDoInput(true)
    conn.setUseCaches(false)
    conn.connect()

    val out = new DataOutputStream(conn.getOutputStream)
    out.writeBytes(content)
    out.flush()
    out.close()
    conn.disconnect()
  }
}

object HttpClientTestResponseHandlers {
  def handlerFor(path: String, returnsBody: String): RequestHandler = {
    case r: HttpRequest if r.uri.path.toString.endsWith(path) => HttpResponse(entity = returnsBody)
  }

  def unmarshallingAndStoringHandlerFor(path: String, storeTo: ListBuffer[String]): RequestHandler = {
    case r: HttpRequest if r.uri.path.toString.endsWith(path) =>
      storeTo.append( r.extractAsString )
      HttpResponse()
  }

  def bigResponseWith(size: Int): RequestHandler = {
    case HttpRequest(GET, uri, _, _, _) if uri.path.toString().contains("big-response") =>
      HttpResponse(entity = HttpEntity(randomStrWith(size)))
  }

  def chunkedResponseFor(path: String): RequestHandler = {
    case r: HttpRequest if r.uri.path.toString.endsWith(path) =>
      HttpResponse(entity = HttpEntity.Chunked(ContentTypes.`text/plain(UTF-8)`, Source.single(randomStr)))
  }

  def alwaysRespondWith(transferEncoding: TransferEncoding, toPath: String): RequestHandler = {
    case r: HttpRequest if r.uri.path.toString.endsWith(toPath) =>
      HttpResponse().withHeaders(immutable.Seq(`Transfer-Encoding`(transferEncoding)))
  }

  val slowRespondingServer: RequestHandler = { case _ => Thread.sleep(500); HttpResponse() }
}

case class SomeCaseClass(s: String, i: Int)

object HttpClientMatchers {
  import com.wix.e2e.http.matchers.RequestMatchers._

  def haveClientHttpTestkitUserAgentWithLibraryVersion: RequestMatcher =
    haveAnyHeadersOf("User-Agent" -> s"client-http-testkit/$HttpTestkitVersion")
}

object HttpServerMatchers {
  import com.wix.e2e.http.matchers.ResponseMatchers._

  def haveServerHttpTestkitHeaderWithLibraryVersion: ResponseMatcher =
    haveAnyHeadersOf("Server" -> s"server-http-testkit/$HttpTestkitVersion")
} 
Example 112
package com.wix.e2e.http.marshaller

import com.wix.e2e.http.api.{Marshaller, NopMarshaller}
import org.specs2.mutable.Spec

import scala.collection.mutable.ListBuffer

class HttpClientMalformedMarshallerContractTest extends Spec {

  "RequestTransformers with malformed marshaller" should {
    "try to create all malformed marshallers and fallback to NopMarshaller when all is failing" in {
      Marshaller.Implicits.marshaller must beAnInstanceOf[NopMarshaller]

      MarshallerCalled.contractorsCalled must containTheSameElementsAs(Seq(classOf[MalformedCustomMarshaller], classOf[MalformedCustomMarshaller2]))
    }
  }
}

class MalformedCustomMarshaller(dummy: Int) extends BaseMalformedCustomMarshaller {
  def this() = {
    this(5)
    markConstractorCalledAndExplode
  }
}

class MalformedCustomMarshaller2(dummy: Int) extends BaseMalformedCustomMarshaller {
  def this() = {
    this(5)
    markConstractorCalledAndExplode
  }
}

abstract class BaseMalformedCustomMarshaller extends Marshaller {
  def markConstractorCalledAndExplode = {
    MarshallerCalled.markConstructorCalled(getClass)
    throw new RuntimeException("whatever")
  }

  def unmarshall[T : Manifest](jsonStr: String): T = ???
  def marshall[T](t: T): String = ???
}

object MarshallerCalled {
  private val called = ListBuffer.empty[Class[_]]

  def markConstructorCalled(clazz: Class[_]) = this.synchronized {
    called.append(clazz)
  }

  def contractorsCalled: Seq[Class[_]] = this.synchronized {
    called
  }
} 
Example 113
Source File: StubAkkaHttpMockWebServer.scala    From wix-http-testkit   with MIT License
package com.wix.e2e.http.server.internals

import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
import com.wix.e2e.http._
import com.wix.e2e.http.api.{AdjustableServerBehavior, MockWebServer, StubWebServer}

import scala.collection.mutable.ListBuffer

class StubAkkaHttpMockWebServer(initialHandlers: Seq[RequestHandler], specificPort: Option[Int])
  extends AkkaHttpMockWebServer(specificPort, initialHandlers)
  with StubWebServer {


  def recordedRequests: Seq[HttpRequest] = this.synchronized {
    requests.toSeq
  }

  def clearRecordedRequests() = this.synchronized {
    requests.clear()
  }

  private val requests = ListBuffer.empty[HttpRequest]

  private val SuccessfulHandler: RequestHandler = { case _ => HttpResponse(status = StatusCodes.OK) }
  private def StubServerHandlers = (currentHandlers :+ SuccessfulHandler).reduce(_ orElse _)
  private val RequestRecorderHandler: RequestHandler = { case r =>
    this.synchronized {
      requests.append(r)
    }
    StubServerHandlers.apply(r)
  }

  protected val serverBehavior = RequestRecorderHandler
}

class MockAkkaHttpWebServer(initialHandlers: Seq[RequestHandler], specificPort: Option[Int])
  extends AkkaHttpMockWebServer(specificPort, initialHandlers)
  with MockWebServer {

  private val NotFoundHandler: RequestHandler = { case _ => HttpResponse(status = StatusCodes.NotFound) }
  private def MockServerHandlers = (currentHandlers :+ NotFoundHandler).reduce(_ orElse _)
  private val AdjustableHandler: RequestHandler = { case r =>
    MockServerHandlers.apply(r)
  }

  protected val serverBehavior = AdjustableHandler
}

trait AdjustableServerBehaviorSupport extends AdjustableServerBehavior {
  private val localHandlers: ListBuffer[RequestHandler] = ListBuffer(initialHandlers:_*)

  def initialHandlers: Seq[RequestHandler]

  def currentHandlers: Seq[RequestHandler] = this.synchronized {
    localHandlers.toSeq
  }

  def appendAll(handlers: RequestHandler*) = this.synchronized {
    localHandlers.appendAll(handlers)
  }

  def replaceWith(handlers: RequestHandler*) = this.synchronized {
    localHandlers.clear()
    appendAll(handlers:_*)
  }
} 
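AdjustableServerBehaviorSupport above keeps its handlers in a ListBuffer and wraps every read and write in this.synchronized, while the stub and mock servers fold currentHandlers plus a fallback into a single handler with reduce(_ orElse _). Below is a minimal, self-contained sketch of the same pattern, using a hypothetical Handler type in place of the testkit's RequestHandler:

import scala.collection.mutable.ListBuffer

object AdjustableBehaviorSketch {
  // Hypothetical stand-in for the testkit's RequestHandler, for illustration only.
  type Handler = PartialFunction[String, String]

  private val handlers = ListBuffer.empty[Handler]

  def currentHandlers: Seq[Handler] = this.synchronized { handlers.toList }

  def appendAll(hs: Handler*): Unit = this.synchronized { handlers.appendAll(hs) }

  def replaceWith(hs: Handler*): Unit = this.synchronized {
    handlers.clear()
    handlers.appendAll(hs)
  }

  // Fold the registered handlers into one, ending with a fallback, as the servers above do.
  def combined(fallback: Handler): Handler =
    (currentHandlers :+ fallback).reduce(_ orElse _)
}

// Usage sketch:
//   AdjustableBehaviorSketch.replaceWith({ case "ping" => "pong" })
//   AdjustableBehaviorSketch.combined({ case _ => "not found" })("ping")   // "pong"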
Example 114
Source File: ColumnarShuffledHashJoinExec.scala    From OAP   with Apache License 2.0 5 votes vote down vote up
package com.intel.sparkColumnarPlugin.execution

import java.util.concurrent.TimeUnit._

import com.intel.sparkColumnarPlugin.vectorized._

import org.apache.spark.TaskContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.execution.{BinaryExecNode, CodegenSupport, SparkPlan}
import org.apache.spark.sql.execution.metric.SQLMetrics

import scala.collection.JavaConverters._
import org.apache.spark.internal.Logging
import org.apache.spark.sql.vectorized.{ColumnarBatch, ColumnVector}
import scala.collection.mutable.ListBuffer
import org.apache.arrow.vector.ipc.message.ArrowFieldNode
import org.apache.arrow.vector.ipc.message.ArrowRecordBatch
import org.apache.arrow.vector.types.pojo.ArrowType
import org.apache.arrow.vector.types.pojo.Field
import org.apache.arrow.vector.types.pojo.Schema
import org.apache.arrow.gandiva.expression._
import org.apache.arrow.gandiva.evaluator._

import io.netty.buffer.ArrowBuf
import com.google.common.collect.Lists;

import com.intel.sparkColumnarPlugin.expression._
import com.intel.sparkColumnarPlugin.vectorized.ExpressionEvaluator
import org.apache.spark.sql.execution.joins.ShuffledHashJoinExec
import org.apache.spark.sql.execution.joins.{BuildLeft, BuildRight, BuildSide}


class ColumnarShuffledHashJoinExec(
    leftKeys: Seq[Expression],
    rightKeys: Seq[Expression],
    joinType: JoinType,
    buildSide: BuildSide,
    condition: Option[Expression],
    left: SparkPlan,
    right: SparkPlan) extends ShuffledHashJoinExec(
    leftKeys,
    rightKeys,
    joinType,
    buildSide,
    condition,
    left,
    right) {

  override lazy val metrics = Map(
    "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"),
    "joinTime" -> SQLMetrics.createTimingMetric(sparkContext, "join time"),
    "buildTime" -> SQLMetrics.createTimingMetric(sparkContext, "time to build hash map"))

  override def supportsColumnar = true

  //TODO() Disable code generation
  //override def supportCodegen: Boolean = false

  override def doExecuteColumnar(): RDD[ColumnarBatch] = {
    val numOutputRows = longMetric("numOutputRows")
    val joinTime = longMetric("joinTime")
    val buildTime = longMetric("buildTime")
    val resultSchema = this.schema
    streamedPlan.executeColumnar().zipPartitions(buildPlan.executeColumnar()) { (streamIter, buildIter) =>
      //val hashed = buildHashedRelation(buildIter)
      //join(streamIter, hashed, numOutputRows)
      val vjoin = ColumnarShuffledHashJoin.create(leftKeys, rightKeys, resultSchema, joinType, buildSide, condition, left, right, buildTime, joinTime, numOutputRows)
      val vjoinResult = vjoin.columnarInnerJoin(streamIter, buildIter)
      TaskContext.get().addTaskCompletionListener[Unit](_ => {
        vjoin.close()
      })
      new CloseableColumnBatchIterator(vjoinResult)
    }
  }
} 
Example 115
Source File: ColumnarInOperator.scala    From OAP   with Apache License 2.0 5 votes vote down vote up
package com.intel.sparkColumnarPlugin.expression

import com.google.common.collect.Lists
import com.google.common.collect.Sets

import org.apache.arrow.gandiva.evaluator._
import org.apache.arrow.gandiva.exceptions.GandivaException
import org.apache.arrow.gandiva.expression._
import org.apache.arrow.vector.types.pojo.ArrowType
import org.apache.arrow.vector.types.pojo.Field

import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.types._

import scala.collection.mutable.ListBuffer


class ColumnarIn(value: Expression, list: Seq[Expression], original: Expression)
    extends In(value: Expression, list: Seq[Expression])
    with ColumnarExpression
    with Logging {
  override def doColumnarCodeGen(args: java.lang.Object): (TreeNode, ArrowType) = {
    val (value_node, valueType): (TreeNode, ArrowType) =
      value.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)

    val resultType = new ArrowType.Bool()

    if (value.dataType == StringType) {
      val newlist :List[String]= list.toList.map (expr => {
        expr.asInstanceOf[Literal].value.toString
      });
      val tlist = Lists.newArrayList(newlist:_*);

      val funcNode = TreeBuilder.makeInExpressionString(value_node, Sets.newHashSet(tlist))
      (funcNode, resultType)
    } else if (value.dataType == IntegerType) {
      val newlist :List[Integer]= list.toList.map (expr => {
        expr.asInstanceOf[Literal].value.asInstanceOf[Integer]
      });
      val tlist = Lists.newArrayList(newlist:_*);

      val funcNode = TreeBuilder.makeInExpressionInt32(value_node, Sets.newHashSet(tlist))
      (funcNode, resultType)
    } else if (value.dataType == LongType) {
      val newlist :List[java.lang.Long]= list.toList.map (expr => {
        expr.asInstanceOf[Literal].value.asInstanceOf[java.lang.Long]
      });
      val tlist = Lists.newArrayList(newlist:_*);

      val funcNode = TreeBuilder.makeInExpressionBigInt(value_node, Sets.newHashSet(tlist))
      (funcNode, resultType)
    } else if (value.dataType == DateType) {
      val newlist :List[Integer]= list.toList.map (expr => {
        expr.asInstanceOf[Literal].value.asInstanceOf[Integer]
      });
      val tlist = Lists.newArrayList(newlist:_*);
      val cast_func = TreeBuilder.makeFunction("castINT", Lists.newArrayList(value_node), new ArrowType.Int(32, true))

      val funcNode = TreeBuilder.makeInExpressionInt32(cast_func, Sets.newHashSet(tlist))
      (funcNode, resultType)
    } else {
      throw new UnsupportedOperationException(s"not currently supported: ${value.dataType}.")
    }
  }
}

object ColumnarInOperator {

  def create(value: Expression, list: Seq[Expression], original: Expression): Expression = original match {
    case i: In =>
      new ColumnarIn(value, list, i)
    case other =>
      throw new UnsupportedOperationException(s"not currently supported: $other.")
  }
} 
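Each branch above performs the same conversion before calling TreeBuilder.makeInExpression*: the Scala list of literal values is turned into a Guava ArrayList and then into a java.util.HashSet. A small sketch of just that conversion, assuming only Guava on the classpath:

import com.google.common.collect.{Lists, Sets}

object InSetConversionSketch {
  // Mirrors the pattern above: Scala values -> boxed Java values -> Guava ArrayList -> java.util.HashSet.
  def toJavaHashSet(values: List[Int]): java.util.HashSet[Integer] = {
    val boxed: List[Integer] = values.map(Int.box)   // the IN builders expect boxed Java types
    val asList = Lists.newArrayList(boxed: _*)       // java.util.ArrayList[Integer]
    Sets.newHashSet(asList)                          // java.util.HashSet[Integer]
  }
}

// toJavaHashSet(List(1, 2, 3)) is what would then be handed to e.g. TreeBuilder.makeInExpressionInt32.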
Example 116
Source File: ColumnarCoalesceOperator.scala    From OAP   with Apache License 2.0 5 votes vote down vote up
package com.intel.sparkColumnarPlugin.expression

import com.google.common.collect.Lists
import com.google.common.collect.Sets
import org.apache.arrow.gandiva.evaluator._
import org.apache.arrow.gandiva.exceptions.GandivaException
import org.apache.arrow.gandiva.expression._
import org.apache.arrow.vector.types.pojo.ArrowType
import org.apache.arrow.vector.types.pojo.Field
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.types._

import scala.collection.mutable.ListBuffer



//TODO(): coalesce(null, null, null) => null

class ColumnarCoalesce(exps: Seq[Expression], original: Expression)
    extends Coalesce(exps: Seq[Expression])
    with ColumnarExpression
    with Logging {
  override def doColumnarCodeGen(args: java.lang.Object): (TreeNode, ArrowType) = {
    val iter: Iterator[Expression] = exps.iterator
    val exp = exps.head

    val (exp_node, expType): (TreeNode, ArrowType) =
      exp.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
    val isnotnullNode =
      TreeBuilder.makeFunction("isnotnull", Lists.newArrayList(exp_node), new ArrowType.Bool())

    val funcNode = TreeBuilder.makeIf(isnotnullNode, exp_node, innerIf(args, exps, iter), expType)
    (funcNode, expType)
  }

  def innerIf(args: java.lang.Object, exps: Seq[Expression], iter: Iterator[Expression]): TreeNode = {
    if (!iter.hasNext) {
      // Return the last element, whether or not it is null
      val (exp_node, expType): (TreeNode, ArrowType) =
        exps.last.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
      val isnotnullNode =
        TreeBuilder.makeFunction("isnotnull", Lists.newArrayList(exp_node), new ArrowType.Bool())
      val funcNode = TreeBuilder.makeIf(isnotnullNode, exp_node, exp_node, expType)
      funcNode
    } else {
      val exp = iter.next()
      val (exp_node, expType): (TreeNode, ArrowType) =
        exp.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
      val isnotnullNode =
        TreeBuilder.makeFunction("isnotnull", Lists.newArrayList(exp_node), new ArrowType.Bool())
      val funcNode = TreeBuilder.makeIf(isnotnullNode, exp_node, innerIf(args, exps, iter), expType)
      funcNode
    }
  }
}

object ColumnarCoalesceOperator {

  def create(exps: Seq[Expression], original: Expression): Expression = original match {
    case c: Coalesce =>
      new ColumnarCoalesce(exps, original)
    case other =>
      throw new UnsupportedOperationException(s"not currently supported: $other.")
  }
} 
Example 117
Source File: ColumnarNamedExpressions.scala    From OAP   with Apache License 2.0 5 votes vote down vote up
package com.intel.sparkColumnarPlugin.expression

import org.apache.arrow.gandiva.evaluator._
import org.apache.arrow.gandiva.exceptions.GandivaException
import org.apache.arrow.gandiva.expression._
import org.apache.arrow.vector.types.pojo.ArrowType
import org.apache.arrow.vector.types.pojo.Field

import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.types._

import scala.collection.mutable.ListBuffer

class ColumnarAlias(child: Expression, name: String)(
    override val exprId: ExprId,
    override val qualifier: Seq[String],
    override val explicitMetadata: Option[Metadata])
    extends Alias(child, name)(exprId, qualifier, explicitMetadata)
    with ColumnarExpression {

  override def doColumnarCodeGen(args: java.lang.Object): (TreeNode, ArrowType) = {
    child.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
  }

}

class ColumnarAttributeReference(
    name: String,
    dataType: DataType,
    nullable: Boolean = true,
    override val metadata: Metadata = Metadata.empty)(
    override val exprId: ExprId,
    override val qualifier: Seq[String])
    extends AttributeReference(name, dataType, nullable, metadata)(exprId, qualifier)
    with ColumnarExpression {

  override def doColumnarCodeGen(args: java.lang.Object): (TreeNode, ArrowType) = {
    val resultType = CodeGeneration.getResultType(dataType)
    val field = Field.nullable(s"${name}#${exprId.id}", resultType)
    (TreeBuilder.makeField(field), resultType)
  }

} 
Example 118
Source File: ColumnarTernaryOperator.scala    From OAP   with Apache License 2.0 5 votes vote down vote up
package com.intel.sparkColumnarPlugin.expression

import com.google.common.collect.Lists

import org.apache.arrow.gandiva.evaluator._
import org.apache.arrow.gandiva.exceptions.GandivaException
import org.apache.arrow.gandiva.expression._
import org.apache.arrow.vector.types.pojo.ArrowType
import org.apache.arrow.vector.types.pojo.Field

import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.types._

import scala.collection.mutable.ListBuffer


class ColumnarSubString(str: Expression, pos: Expression, len: Expression, original: Expression)
    extends Substring(str: Expression, pos: Expression, len: Expression)
    with ColumnarExpression
    with Logging {
  override def doColumnarCodeGen(args: java.lang.Object): (TreeNode, ArrowType) = {
    val (str_node, strType): (TreeNode, ArrowType) =
      str.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
    val (pos_node, posType): (TreeNode, ArrowType) =
      pos.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
    val (len_node, lenType): (TreeNode, ArrowType) =
      len.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)

    //FIXME(): gandiva only support pos and len with int64 type
    val lit_pos :ColumnarLiteral= pos.asInstanceOf[ColumnarLiteral]
    val lit_pos_val = lit_pos.value
    val long_pos_node = TreeBuilder.makeLiteral(lit_pos_val.asInstanceOf[Integer].longValue() :java.lang.Long)

    val lit_len :ColumnarLiteral= len.asInstanceOf[ColumnarLiteral]
    val lit_len_val = lit_len.value
    val long_len_node = TreeBuilder.makeLiteral(lit_len_val.asInstanceOf[Integer].longValue() :java.lang.Long)

    val resultType = new ArrowType.Utf8()
    val funcNode =
      TreeBuilder.makeFunction("substr", Lists.newArrayList(str_node, long_pos_node, long_len_node), resultType)
    (funcNode, resultType)
  }
}

object ColumnarTernaryOperator {

  def create(str: Expression, pos: Expression, len: Expression, original: Expression): Expression = original match {
    case ss: Substring =>
      new ColumnarSubString(str, pos, len, ss)
    case other =>
      throw new UnsupportedOperationException(s"not currently supported: $other.")
  }
} 
Example 119
Source File: ColumnarInSetOperator.scala    From OAP   with Apache License 2.0 5 votes vote down vote up
package com.intel.sparkColumnarPlugin.expression

import com.google.common.collect.Lists
import com.google.common.collect.Sets

import org.apache.arrow.gandiva.evaluator._
import org.apache.arrow.gandiva.exceptions.GandivaException
import org.apache.arrow.gandiva.expression._
import org.apache.arrow.vector.types.pojo.ArrowType
import org.apache.arrow.vector.types.pojo.Field

import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.types._

import scala.collection.mutable.ListBuffer

class ColumnarInSet(value: Expression, hset: Set[Any], original: Expression)
    extends InSet(value: Expression, hset: Set[Any])
    with ColumnarExpression
    with Logging {
  override def doColumnarCodeGen(args: java.lang.Object): (TreeNode, ArrowType) = {
    val (value_node, valueType): (TreeNode, ArrowType) =
      value.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)

    val resultType = new ArrowType.Bool()
    if (value.dataType == StringType) {
      val newlist = hset.toList.map (expr => {
        expr.toString
      });
      val tlist = Lists.newArrayList(newlist:_*);
      val funcNode = TreeBuilder.makeInExpressionString(value_node, Sets.newHashSet(tlist))
      (funcNode, resultType)
    } else if (value.dataType == IntegerType) {
      val newlist = hset.toList.map (expr => {
        expr.asInstanceOf[Integer]
      });
      val tlist = Lists.newArrayList(newlist:_*);
      val funcNode = TreeBuilder.makeInExpressionInt32(value_node: TreeNode, Sets.newHashSet(tlist))
      (funcNode, resultType)
    } else if (value.dataType == LongType) {
      val newlist = hset.toList.map (expr => {
        expr.asInstanceOf[java.lang.Long]
      });
      val tlist = Lists.newArrayList(newlist:_*);
      val funcNode = TreeBuilder.makeInExpressionBigInt(value_node, Sets.newHashSet(tlist))
      (funcNode, resultType)
    } else {
      throw new UnsupportedOperationException(s"not currently supported: ${value.dataType}.")
    }
  }
}

object ColumnarInSetOperator {

  def create(value: Expression, hset: Set[Any], original: Expression): Expression = original match {
    case i: InSet =>
      new ColumnarInSet(value, hset, i)
    case other =>
      throw new UnsupportedOperationException(s"not currently supported: $other.")
  }
} 
Example 120
Source File: ColumnarLiterals.scala    From OAP   with Apache License 2.0 5 votes vote down vote up
package com.intel.sparkColumnarPlugin.expression

import com.google.common.collect.Lists
import org.apache.arrow.gandiva.evaluator._
import org.apache.arrow.gandiva.exceptions.GandivaException
import org.apache.arrow.gandiva.expression._
import org.apache.arrow.vector.types.pojo.ArrowType
import org.apache.arrow.vector.types.pojo.Field
import org.apache.arrow.vector.types.DateUnit

import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.types._

import scala.collection.mutable.ListBuffer

class ColumnarLiteral(lit: Literal)
    extends Literal(lit.value, lit.dataType)
    with ColumnarExpression {

  override def doColumnarCodeGen(args: java.lang.Object): (TreeNode, ArrowType) = {
    val resultType = CodeGeneration.getResultType(dataType)
    dataType match {
      case t: StringType =>
        (TreeBuilder.makeStringLiteral(value.toString().asInstanceOf[String]), resultType)
      case t: IntegerType =>
        (TreeBuilder.makeLiteral(value.asInstanceOf[Integer]), resultType)
      case t: LongType =>
        (TreeBuilder.makeLiteral(value.asInstanceOf[java.lang.Long]), resultType)
      case t: DoubleType =>
        (TreeBuilder.makeLiteral(value.asInstanceOf[java.lang.Double]), resultType)
      case d: DecimalType =>
        val v = value.asInstanceOf[Decimal]
        (TreeBuilder.makeDecimalLiteral(v.toString, v.precision, v.scale), resultType)
      case d: DateType =>
        val origIntNode = TreeBuilder.makeLiteral(value.asInstanceOf[Integer])
        val dateNode = TreeBuilder.makeFunction("castDATE", Lists.newArrayList(origIntNode), new ArrowType.Date(DateUnit.DAY))
        (dateNode, new ArrowType.Date(DateUnit.DAY))
      case b: BooleanType =>
        (TreeBuilder.makeLiteral(value.asInstanceOf[java.lang.Boolean]), resultType)
    }
  }
} 
Example 121
Source File: ColumnarCaseWhenOperator.scala    From OAP   with Apache License 2.0 5 votes vote down vote up
package com.intel.sparkColumnarPlugin.expression

import com.google.common.collect.Lists
import com.google.common.collect.Sets

import org.apache.arrow.gandiva.evaluator._
import org.apache.arrow.gandiva.exceptions.GandivaException
import org.apache.arrow.gandiva.expression._
import org.apache.arrow.vector.types.pojo.ArrowType
import org.apache.arrow.vector.types.pojo.Field

import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.types._

import scala.collection.mutable.ListBuffer


class ColumnarCaseWhen(branches: Seq[(Expression, Expression)], elseValue: Option[Expression], original: Expression)
    extends CaseWhen(branches: Seq[(Expression, Expression)] ,elseValue: Option[Expression])
    with ColumnarExpression
    with Logging {
  override def doColumnarCodeGen(args: java.lang.Object): (TreeNode, ArrowType) = {
    logInfo(s"children: ${branches.flatMap(b => b._1 :: b._2 :: Nil) ++ elseValue}")
    logInfo(s"branches: $branches")
    logInfo(s"else: $elseValue")
    var i = 0
    val size = branches.size
    //TODO(): handle leveled branches

    val exprs = branches.flatMap(b => b._1 :: b._2 :: Nil) ++ elseValue
    val exprList = { exprs.filter(expr => !expr.isInstanceOf[Literal]) }
    val inputAttributes = exprList.toList.zipWithIndex.map{case (expr, i) =>
      ConverterUtils.getResultAttrFromExpr(expr)
    }

    var colCondExpr = branches(i)._1
    val (cond_node, condType): (TreeNode, ArrowType) =
      colCondExpr.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)

    var colRetExpr = branches(i)._2
    if (colRetExpr.isInstanceOf[AttributeReference]) {
      colRetExpr = new ColumnarBoundReference(inputAttributes.indexOf(colRetExpr),
                                              colRetExpr.dataType, colRetExpr.nullable)
    }
    val (ret_node, retType): (TreeNode, ArrowType) =
      colRetExpr.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)

    var else_node :TreeNode= null
    if (elseValue.isDefined) {
      val elseValueExpr = elseValue.getOrElse(null)
      var colElseValueExpr = ColumnarExpressionConverter.replaceWithColumnarExpression(elseValueExpr)
      if (colElseValueExpr.isInstanceOf[AttributeReference]) {
        colElseValueExpr = new ColumnarBoundReference(inputAttributes.indexOf(colElseValueExpr),
                                                      colElseValueExpr.dataType, colElseValueExpr.nullable)
      }
      val (else_node_, elseType): (TreeNode, ArrowType) =
        colElseValueExpr.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
      else_node = else_node_
    }

    val funcNode = TreeBuilder.makeIf(cond_node, ret_node, else_node, retType)
    (funcNode, retType)

  }
}

object ColumnarCaseWhenOperator {

  def create(branches: Seq[(Expression, Expression)], elseValue: Option[Expression],
             original: Expression): Expression = original match {
    case i: CaseWhen =>
      new ColumnarCaseWhen(branches, elseValue, i)
    case other =>
      throw new UnsupportedOperationException(s"not currently supported: $other.")
  }
} 
Example 122
Source File: ColumnarBoundAttribute.scala    From OAP   with Apache License 2.0 5 votes vote down vote up
package com.intel.sparkColumnarPlugin.expression

import com.google.common.collect.Lists
import org.apache.arrow.gandiva.evaluator._
import org.apache.arrow.gandiva.exceptions.GandivaException
import org.apache.arrow.gandiva.expression._
import org.apache.arrow.vector.types.pojo.ArrowType
import org.apache.arrow.vector.types.pojo.Field

import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.internal.Logging
import org.apache.spark.sql.types._
import org.apache.spark.sql.vectorized.{ColumnarBatch, ColumnVector}

import scala.collection.mutable.ListBuffer

class ColumnarBoundReference(ordinal: Int, dataType: DataType, nullable: Boolean)
    extends BoundReference(ordinal, dataType, nullable)
    with ColumnarExpression with Logging {

  override def doColumnarCodeGen(args: java.lang.Object): (TreeNode, ArrowType) = {
    val resultType = CodeGeneration.getResultType(dataType)
    val field = Field.nullable(s"c_$ordinal", resultType)
    val fieldTypes = args.asInstanceOf[java.util.List[Field]]
    fieldTypes.add(field)
    (TreeBuilder.makeField(field), resultType)
  }

} 
Example 123
Source File: ColumnarSubquery.scala    From OAP   with Apache License 2.0 5 votes vote down vote up
package com.intel.sparkColumnarPlugin.expression

import org.apache.arrow.gandiva.evaluator._
import org.apache.arrow.gandiva.exceptions.GandivaException
import org.apache.arrow.gandiva.expression._
import org.apache.arrow.vector.types.pojo.ArrowType
import org.apache.arrow.vector.types.pojo.Field

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.{expressions, InternalRow}
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode}
import org.apache.spark.sql.execution.BaseSubqueryExec
import org.apache.spark.sql.execution.ExecSubqueryExpression
import org.apache.spark.sql.execution.ScalarSubquery
import org.apache.spark.sql.types._

import scala.collection.mutable.ListBuffer

class ColumnarScalarSubquery(
  query: ScalarSubquery)
  extends Expression with ColumnarExpression {

  override def dataType: DataType = query.dataType
  override def children: Seq[Expression] = Nil
  override def nullable: Boolean = true
  override def toString: String = query.toString
  override def eval(input: InternalRow): Any = query.eval(input)
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = query.doGenCode(ctx, ev)
  override def canEqual(that: Any): Boolean = query.canEqual(that)
  override def productArity: Int = query.productArity
  override def productElement(n: Int): Any = query.productElement(n)
  override def doColumnarCodeGen(args: java.lang.Object): (TreeNode, ArrowType) = {
    val value = query.eval(null)
    val resultType = CodeGeneration.getResultType(query.dataType)
    query.dataType match {
      case t: StringType =>
        (TreeBuilder.makeStringLiteral(value.toString().asInstanceOf[String]), resultType)
      case t: IntegerType =>
        (TreeBuilder.makeLiteral(value.asInstanceOf[Integer]), resultType)
      case t: LongType =>
        (TreeBuilder.makeLiteral(value.asInstanceOf[java.lang.Long]), resultType)
      case t: DoubleType =>
        (TreeBuilder.makeLiteral(value.asInstanceOf[java.lang.Double]), resultType)
      case d: DecimalType =>
        val v = value.asInstanceOf[Decimal]
        (TreeBuilder.makeDecimalLiteral(v.toString, v.precision, v.scale), resultType)
      case d: DateType =>
        throw new UnsupportedOperationException(s"DateType is not supported yet.")
    }
  }
} 
Example 124
Source File: KerasParser.scala    From jigg   with Apache License 2.0 5 votes vote down vote up
package jigg.ml.keras


  private val tagset:Map[Int, String] = Map(0 -> "B", 1 -> "I", 2 -> "O")

  def parsing(str: String): Array[(Int, Int)] = {
    // For dummy input to indicate boundaries of sentence.
    val s = "\n" + str + "\n\n"
    val inputData = table.encodeCharacter(s)
    val outputData = model.convert(inputData)

    val tags = for {
      i <- 1 until outputData.rows - 2
      maxID = argmax(outputData(i, ::))
    } yield maxID

    getOffsets(tags.toArray)
  }

  def parsing(tokens: Node): Array[Array[String]] = {
    // For dummy input to indicate boundaries of sentence.
    val words = Array("\n").union(
      (tokens \\ "tokens").flatMap(x => x \\ "@lemma").toArray.map(x => x.toString)).union(Array("\n\n"))
    val ids = (tokens \\ "tokens").flatMap(x => x \\ "@id").toArray.map(x => x.toString)

    val inputData = table.encodeWords(words)
    val outputData = model.convert(inputData)

    val tags = for {
      i <- 1 until outputData.rows - 2
      maxID = argmax(outputData(i, ::))
    } yield maxID

    val ranges = getOffsets(tags.toArray)

    ranges.map(x => ids.slice(x._1, x._2))
  }

  def getOffsets(data: Array[Int]): Array[(Int, Int)]= {
    val ranges = ArrayBuffer[(Int, Int)]()
    var bpos = -1

    for(i <- data.indices){
      tagset(data(i)) match{
        case "B" =>
          if(bpos >= 0)
            ranges += ((bpos, i))
          bpos = i
        case "I" if i == 0 || bpos == -2 =>
          bpos = i
        case "O" =>
          if (bpos >= 0)
            ranges += ((bpos, i))
          bpos = -2
        case _ if i == data.indices.last =>
          ranges += ((bpos, i + 1))
        case _ =>
      }
    }
    ranges.toArray
  }
} 
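getOffsets above converts a sequence of tag ids (0 -> "B", 1 -> "I", 2 -> "O") into (begin, end) spans. A standalone sketch of the same conversion with a worked example; the logic mirrors getOffsets, only the surrounding object is hypothetical:

import scala.collection.mutable.ArrayBuffer

object BioSpansSketch {
  private val tagset: Map[Int, String] = Map(0 -> "B", 1 -> "I", 2 -> "O")

  // A "B" opens a span, the next "B" or "O" closes it, a lone "I" (at the start or
  // right after an "O") also opens one, and a span still open at the last position
  // is closed at data.length, as in getOffsets above.
  def spans(data: Array[Int]): Array[(Int, Int)] = {
    val ranges = ArrayBuffer[(Int, Int)]()
    var bpos = -1
    for (i <- data.indices) {
      tagset(data(i)) match {
        case "B" =>
          if (bpos >= 0) ranges += ((bpos, i))
          bpos = i
        case "I" if i == 0 || bpos == -2 =>
          bpos = i
        case "O" =>
          if (bpos >= 0) ranges += ((bpos, i))
          bpos = -2
        case _ if i == data.indices.last =>
          ranges += ((bpos, i + 1))
        case _ =>
      }
    }
    ranges.toArray
  }
}

// BioSpansSketch.spans(Array(0, 1, 1, 2, 0, 1)) == Array((0, 3), (4, 6))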
Example 125
Source File: KernelOutputStream.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.protocol.v5.stream

import java.io.OutputStream
import java.nio.charset.Charset

import org.apache.toree.kernel.protocol.v5.content.StreamContent
import org.apache.toree.kernel.protocol.v5.{SystemActorType, MessageType, KMBuilder}
import org.apache.toree.kernel.protocol.v5.kernel.ActorLoader
import org.apache.toree.utils.{LogLike, ScheduledTaskManager}
import scala.collection.mutable.ListBuffer
import KernelOutputStream._

object KernelOutputStream {
  val DefaultStreamType = "stdout"
  val DefaultSendEmptyOutput = false
}


  override def write(b: Int): Unit = internalBytes.synchronized {
    // Begin periodic flushing if this is a new set of bytes
    enableAutoFlush()

    internalBytes += b.toByte
  }
} 
Example 126
Source File: KernelInputStream.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.protocol.v5.stream

import java.io.InputStream
import java.nio.charset.Charset

import akka.pattern.ask
import org.apache.toree.kernel.protocol.v5.content.InputRequest
import org.apache.toree.kernel.protocol.v5.kernel.ActorLoader
import org.apache.toree.kernel.protocol.v5.kernel.Utilities.timeout
import org.apache.toree.kernel.protocol.v5.{KMBuilder, MessageType}

import scala.collection.mutable.ListBuffer
import scala.concurrent.{Await, Future}

import KernelInputStream._

object KernelInputStream {
  val DefaultPrompt = ""
  val DefaultPassword = false
}


  override def read(): Int = {
    if (!this.hasByte) this.requestBytes()

    this.nextByte()
  }

  private def hasByte: Boolean = internalBytes.nonEmpty

  private def nextByte(): Int = {
    val byte = internalBytes.head

    internalBytes = internalBytes.tail

    byte
  }

  private def requestBytes(): Unit = {
    val inputRequest = InputRequest(prompt, password)
    // NOTE: Assuming already provided parent header and correct ids
    val kernelMessage = kmBuilder
      .withHeader(MessageType.Outgoing.InputRequest)
      .withContentString(inputRequest)
      .build

    // NOTE: The same handler is being used in both request and reply
    val responseFuture: Future[String] =
      (actorLoader.load(MessageType.Incoming.InputReply) ? kernelMessage)
      .mapTo[String]

    // Block until we get a response
    import scala.concurrent.duration._
    internalBytes ++=
      Await.result(responseFuture, Duration.Inf).getBytes(EncodingType)
  }
} 
Example 127
Source File: CustomSinkSuite.scala    From spark-structured-streaming-ml   with Apache License 2.0 5 votes vote down vote up
package com.highperformancespark.examples.structuredstreaming

import com.holdenkarau.spark.testing.DataFrameSuiteBase

import scala.collection.mutable.ListBuffer

import org.scalatest.FunSuite

import org.apache.spark._
import org.apache.spark.sql.{Dataset, DataFrame, Encoder, SQLContext}
import org.apache.spark.sql.execution.streaming.MemoryStream

class CustomSinkSuite extends FunSuite with DataFrameSuiteBase {

  test("really simple test of the custom sink") {
    import spark.implicits._
    val input = MemoryStream[String]
    val doubled = input.toDS().map(x => x + " " + x)
    val formatName = ("com.highperformancespark.examples." +
      "structuredstreaming.CustomSinkCollectorProvider")
    val query = doubled.writeStream
      .queryName("testCustomSinkBasic")
      .format(formatName)
      .start()
    val inputData = List("hi", "holden", "bye", "pandas")
    input.addData(inputData)
    assert(query.isActive === true)
    query.processAllAvailable()
    assert(query.exception === None)
    assert(Pandas.results(0) === inputData.map(x => x + " " + x))
  }
}

object Pandas{
  val results = new ListBuffer[Seq[String]]()
}

class CustomSinkCollectorProvider extends ForeachDatasetSinkProvider {
  override def func(df: DataFrame) {
    val spark = df.sparkSession
    import spark.implicits._
    Pandas.results += df.as[String].rdd.collect()
  }
} 
Example 128
Source File: package.scala    From scala-steward   with Apache License 2.0 5 votes vote down vote up
package org.scalasteward.core

import cats._
import cats.effect.Bracket
import cats.implicits._
import fs2.Pipe
import scala.collection.mutable.ListBuffer

package object util {
  final type Nel[+A] = cats.data.NonEmptyList[A]
  final val Nel = cats.data.NonEmptyList

  type ApplicativeThrowable[F[_]] = ApplicativeError[F, Throwable]

  type MonadThrowable[F[_]] = MonadError[F, Throwable]

  type BracketThrowable[F[_]] = Bracket[F, Throwable]

  
  def takeUntil[F[_], A, N](limit: N)(weight: A => N)(implicit N: Numeric[N]): Pipe[F, A, A] = {
    import N._
    _.map(a => (a, weight(a)))
      .scan1[(A, N)] { case ((_, total), (a, i)) => (a, total + i) }
      .takeThrough { case (_, total) => total < limit }
      .map { case (a, _) => a }
  }
} 
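takeUntil above weighs each element, keeps a running total with scan1, and emits elements while the total stays below the limit, takeThrough also letting through the first element that crosses it. A hedged usage sketch on a pure fs2 stream (the numbers are illustrative):

import fs2.{Pure, Stream}
import org.scalasteward.core.util.takeUntil

object TakeUntilSketch {
  // Weigh each element by its own value; the running totals are 1, 3, 6, 10, 15,
  // so the element that crosses the limit of 6 (the value 3) is still emitted.
  val taken: List[Int] =
    Stream(1, 2, 3, 4, 5).through(takeUntil[Pure, Int, Int](6)(identity)).toList
  // taken == List(1, 2, 3)
}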
Example 129
Source File: process.scala    From scala-steward   with Apache License 2.0 5 votes vote down vote up
package org.scalasteward.core.io

import cats.effect._
import cats.implicits._
import fs2.Stream
import java.io.{File, IOException, InputStream}
import org.scalasteward.core.util._
import scala.collection.mutable.ListBuffer
import scala.concurrent.TimeoutException
import scala.concurrent.duration.FiniteDuration

object process {
  def slurp[F[_]](
      cmd: Nel[String],
      cwd: Option[File],
      extraEnv: Map[String, String],
      timeout: FiniteDuration,
      log: String => F[Unit],
      blocker: Blocker
  )(implicit contextShift: ContextShift[F], timer: Timer[F], F: Concurrent[F]): F[List[String]] =
    createProcess(cmd, cwd, extraEnv).flatMap { process =>
      F.delay(new ListBuffer[String]).flatMap { buffer =>
        val readOut = {
          val out = readInputStream[F](process.getInputStream, blocker)
          out.evalMap(line => F.delay(appendBounded(buffer, line, 4096)) >> log(line)).compile.drain
        }

        val showCmd = (extraEnv.map { case (k, v) => s"$k=$v" }.toList ++ cmd.toList).mkString_(" ")
        val result = readOut >> F.delay(process.waitFor()) >>= { exitValue =>
          if (exitValue === 0) F.pure(buffer.toList)
          else {
            val msg = s"'$showCmd' exited with code $exitValue"
            F.raiseError[List[String]](new IOException(makeMessage(msg, buffer.toList)))
          }
        }

        val fallback = F.delay(process.destroyForcibly()) >> {
          val msg = s"'$showCmd' timed out after ${timeout.toString}"
          F.raiseError[List[String]](new TimeoutException(makeMessage(msg, buffer.toList)))
        }

        Concurrent.timeoutTo(result, timeout, fallback)
      }
    }

  private def createProcess[F[_]](
      cmd: Nel[String],
      cwd: Option[File],
      extraEnv: Map[String, String]
  )(implicit F: Sync[F]): F[Process] =
    F.delay {
      val pb = new ProcessBuilder(cmd.toList: _*)
      val env = pb.environment()
      cwd.foreach(pb.directory)
      extraEnv.foreach { case (key, value) => env.put(key, value) }
      pb.redirectErrorStream(true)
      pb.start()
    }

  private def readInputStream[F[_]](is: InputStream, blocker: Blocker)(implicit
      F: Sync[F],
      cs: ContextShift[F]
  ): Stream[F, String] =
    fs2.io
      .readInputStream(F.pure(is), chunkSize = 4096, blocker)
      .through(fs2.text.utf8Decode)
      .through(fs2.text.lines)

  private def makeMessage(prefix: String, output: List[String]): String =
    (prefix :: output).mkString("\n")
} 
Example 130
Source File: utilTest.scala    From scala-steward   with Apache License 2.0 5 votes vote down vote up
package org.scalasteward.core.util

import cats.implicits._
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks
import scala.collection.mutable.ListBuffer

class utilTest extends AnyFunSuite with Matchers with ScalaCheckPropertyChecks {
  test("appendBounded") {
    val lb = new ListBuffer[Int]
    lb.appendAll(List(1, 2, 3))

    appendBounded(lb, 4, 4)
    lb.toList shouldBe List(1, 2, 3, 4)

    appendBounded(lb, 5, 4)
    lb.toList shouldBe List(3, 4, 5)

    appendBounded(lb, 6, 4)
    lb.toList shouldBe List(3, 4, 5, 6)

    appendBounded(lb, 7, 6)
    lb.toList shouldBe List(3, 4, 5, 6, 7)

    appendBounded(lb, 8, 6)
    lb.toList shouldBe List(3, 4, 5, 6, 7, 8)

    appendBounded(lb, 9, 6)
    lb.toList shouldBe List(6, 7, 8, 9)
  }

  test("bindUntilTrue: empty list") {
    bindUntilTrue(List.empty[Option[Boolean]]) shouldBe Some(false)
  }

  test("intersects") {
    intersects(List(1, 3, 5), Vector(2, 4, 6)) shouldBe false
    intersects(List(1, 3, 5), Vector(2, 3, 6)) shouldBe true
  }
} 
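The test above pins down the behaviour of appendBounded: the buffer grows freely until it reaches maxSize, after which the older half is dropped before the new element is appended. The real implementation lives elsewhere in scala-steward; the following is a minimal sketch that satisfies exactly the assertions above:

import scala.collection.mutable.ListBuffer

object AppendBoundedSketch {
  // Once the buffer has reached maxSize, drop the first maxSize / 2 elements, then append.
  // This reproduces every expectation in utilTest above.
  def appendBounded[A](buffer: ListBuffer[A], elem: A, maxSize: Int): Unit = {
    if (buffer.size >= maxSize) buffer.remove(0, maxSize / 2)
    buffer += elem
  }
}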
Example 131
Source File: ScribeMacros.scala    From scribe   with MIT License 5 votes vote down vote up
package scribe

import scala.annotation.compileTimeOnly
import scala.collection.mutable.ListBuffer
import scala.language.experimental.macros
import scala.reflect.macros.blackbox

@compileTimeOnly("Enable macros to expand")
object ScribeMacros {
  def formatter(c: blackbox.Context)(args: c.Tree*): c.Tree = {
    import c.universe._

    c.prefix.tree match {
      case Apply(_, List(Apply(_, rawParts))) => {
        val parts = rawParts map { case t @ Literal(Constant(const: String)) => (const, t.pos) }
        val list = ListBuffer.empty[c.Tree]
        val argsVector = args.toVector
        parts.zipWithIndex.foreach {
          case ((raw, _), index) => {
            if (raw.nonEmpty) {
              list += q"_root_.scribe.format.FormatBlock.RawString($raw)"
            }
            if (index < argsVector.size) {
              list += argsVector(index)
            }
          }
        }
        q"_root_.scribe.format.Formatter.fromBlocks(..$list)"
      }
      case _ => c.abort(c.enclosingPosition, "Bad usage of formatter interpolation.")
    }
  }
} 
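The macro above walks the literal parts of the interpolated string, turns each non-empty chunk into a FormatBlock.RawString, splices the interpolated arguments in between, and hands the resulting list to Formatter.fromBlocks. A hedged illustration of the expansion, assuming import scribe.format._ provides the formatter interpolator and blocks such as date and message:

import scribe.format._   // assumed to bring the `formatter` interpolator and blocks like `date` and `message` into scope

object FormatterSketch {
  // Per the macro body above, the literal parts ("", " - ", "") and the args (date, message)
  // are interleaved, and empty literal parts are skipped by the `raw.nonEmpty` check, so
  //   formatter"$date - $message"
  // is rewritten to roughly:
  //   Formatter.fromBlocks(date, FormatBlock.RawString(" - "), message)
  val fmt: Formatter = formatter"$date - $message"
}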
Example 132
Source File: explore_movies.scala    From Machine-Learning-with-Spark-Second-Edition   with MIT License 5 votes vote down vote up
package org.sparksamples.exploredataset

import breeze.linalg.CSCMatrix
import org.apache.spark.SparkContext
import org.sparksamples.Util
import org.apache.spark.mllib.feature.Word2Vec
import scala.collection.mutable.ListBuffer


object explore_movies {

  def processRegex(input:String):String= {
    val pattern = "^[^(]*".r
    val output = pattern.findFirstIn(input)
    return output.get

  }

  def main(args: Array[String]) {
    val sc = new SparkContext("local[2]", "Explore Users in Movie Dataset")

    val raw_title = org.sparksamples.Util.getMovieDataDF().select("name")
    raw_title.show()

    raw_title.createOrReplaceTempView("titles")
    Util.spark.udf.register("processRegex", processRegex _)
    val processed_titles = Util.spark.sql("select processRegex(name) from titles")
    processed_titles.show()
    val titles_rdd = processed_titles.rdd.map(r => r(0).toString)
    val y = titles_rdd.take(5)
    titles_rdd.take(5).foreach(println)
    println(titles_rdd.first())

    //val title_terms = null
    val title_terms = titles_rdd.map(x => x.split(" "))
    title_terms.take(5).foreach(_.foreach(println))
    println(title_terms.count())

    val all_terms_dic = new ListBuffer[String]()
    val all_terms = title_terms.flatMap(title_terms => title_terms).distinct().collect()
    for (term <- all_terms){
      all_terms_dic += term
    }

    println(all_terms_dic.length)
    println(all_terms_dic.indexOf("Dead"))
    println(all_terms_dic.indexOf("Rooms"))

    val all_terms_withZip = title_terms.flatMap(title_terms => title_terms).distinct().zipWithIndex().collectAsMap()
    println(all_terms_withZip.get("Dead"))
    println(all_terms_withZip.get("Rooms"))

    val word2vec = new Word2Vec()
    val rdd_terms = titles_rdd.map(title => title.split(" ").toSeq)
    val model = word2vec.fit(rdd_terms)
    println(model.findSynonyms("Dead", 40))

    val term_vectors = title_terms.map(title_terms => create_vector(title_terms, all_terms_dic))
    term_vectors.take(5).foreach(println)

    sc.stop()
  }

  def create_vector(title_terms:Array[String], all_terms_dic:ListBuffer[String]): CSCMatrix[Int] = {
    var idx = 0
    val x = CSCMatrix.zeros[Int](1, all_terms_dic.length)
    title_terms.foreach(i => {
      if (all_terms_dic.contains(i)) {
        idx = all_terms_dic.indexOf(i)
        x.update(0, idx, 1)
      }
    })
    return x
  }

  def convert(year:String): String = {
    try{
      val mod_year = year.substring(year.length - 4,year.length)
      return mod_year
    }catch {
      case e : Exception => return "1900"
    }
  }

} 
Example 133
import org.apache.spark.SparkContext
import org.apache.spark.mllib.fpm.FPGrowth
import org.apache.spark.mllib.recommendation.Rating

import scala.collection.mutable.ListBuffer


    val rawRatings = rawData.map(_.split("\t").take(3))
    rawRatings.first()
    // 14/03/30 13:22:44 INFO SparkContext: Job finished: first at <console>:21, took 0.003703 s
    // res25: Array[String] = Array(196, 242, 3)

    val ratings = rawRatings.map { case Array(user, movie, rating) => Rating(user.toInt, movie.toInt, rating.toDouble) }
    val ratingsFirst = ratings.first()
    println(ratingsFirst)

    val userId = 789
    val K = 10

    val movies = sc.textFile(PATH + "/ml-100k/u.item")
    val titles = movies.map(line => line.split("\\|").take(2)).map(array => (array(0).toInt, array(1))).collectAsMap()
    titles(123)

    var eRDD = sc.emptyRDD
    var z = Seq[String]()

    val l = ListBuffer()
    val aj = new Array[String](100)
    var i = 0
    for( a <- 801 to 900) {
      val moviesForUserX = ratings.keyBy(_.user).lookup(a)
      val moviesForUserX_10 = moviesForUserX.sortBy(-_.rating).take(10)
      val moviesForUserX_10_1 = moviesForUserX_10.map(r => r.product)
      var temp = ""
      for( x <- moviesForUserX_10_1){
        temp = temp + " " + x
        println(temp)

      }

      aj(i) = temp
      i += 1
    }
    z = aj
    val transaction2 = z.map(_.split(" "))

    val rddx = sc.parallelize(transaction2, 2).cache()

    val fpg = new FPGrowth()
    val model6 = fpg
      .setMinSupport(0.1)
      .setNumPartitions(1)
      .run(rddx)

    model6.freqItemsets.collect().foreach { itemset =>
      println(itemset.items.mkString("[", ",", "]") + ", " + itemset.freq)
    }
    sc.stop()
  }

} 
Example 134
package com.sparksample

import org.apache.spark.mllib.fpm.FPGrowth
import org.apache.spark.mllib.recommendation.Rating

import scala.collection.mutable.ListBuffer


    val rawRatings = rawData.map(_.split("\t").take(3))
    rawRatings.first()
    val ratings = rawRatings.map { case Array(user, movie, rating) => Rating(user.toInt, movie.toInt, rating.toDouble) }
    val ratingsFirst = ratings.first()
    println(ratingsFirst)

    val movies = Util.getMovieData()
    val titles = movies.map(line => line.split("\\|").take(2)).map(array => (array(0).toInt, array(1))).collectAsMap()
    titles(123)

    var eRDD = sc.emptyRDD
    var z = Seq[String]()

    val l = ListBuffer()
    val aj = new Array[String](400)
    var i = 0
    for( a <- 501 to 900) {
      val moviesForUserX = ratings.keyBy(_.user).lookup(a)
      val moviesForUserX_10 = moviesForUserX.sortBy(-_.rating).take(10)
      val moviesForUserX_10_1 = moviesForUserX_10.map(r => r.product)
      var temp = ""
      for( x <- moviesForUserX_10_1){
        if(temp.equals(""))
          temp = x.toString
        else {
          temp =  temp + " " + x
        }
      }

      aj(i) = temp
      i += 1
    }
    z = aj

    val transaction = z.map(_.split(" "))
    val rddx = sc.parallelize(transaction, 2).cache()

    val fpg = new FPGrowth()
    val model = fpg
      .setMinSupport(0.1)
      .setNumPartitions(1)
      .run(rddx)

    model.freqItemsets.collect().foreach { itemset =>
      println(itemset.items.mkString("[", ",", "]") + ", " + itemset.freq)
    }
    sc.stop()
  }

} 
Example 135
package org.sparksamples.linearregression

import org.apache.spark.mllib.regression.{LabeledPoint, LinearRegressionWithSGD}
import org.sparksamples.Util

import scala.collection.Map
import scala.collection.mutable.ListBuffer


object LinearRegression{
  def main(args: Array[String]) {
    val recordsArray = Util.getRecords()
    val records = recordsArray._1
    val first = records.first()
    val numData = recordsArray._2

    println(numData.toString())
    records.cache()
    print("Mapping of first categorical feature column: " +  Util.get_mapping(records, 2))
    var list = new ListBuffer[Map[String, Long]]()
    for( i <- 2 to 9){
      val m =  Util.get_mapping(records, i)
      list += m
    }
    val mappings = list.toList
    var catLen = 0
    mappings.foreach( m => (catLen +=m.size))

    val numLen = records.first().slice(11, 15).size
    val totalLen = catLen + numLen

    print("Feature vector length for categorical features:"+ catLen)
    print("Feature vector length for numerical features:" + numLen)
    print("Total feature vector length: " + totalLen)

    val data = {
      records.map(r => LabeledPoint(Util.extractLabel(r), Util.extractFeatures(r, catLen, mappings)))
    }
    val first_point = data.first()
    println("Linear Model feature vector:" + first_point.features.toString)
    println("Linear Model feature vector length: " + first_point.features.size)

    val iterations = 10
    val step = 0.025
    val intercept =true

    //LinearRegressionWithSGD.tr
    val linear_model = LinearRegressionWithSGD.train(data, iterations, step)
    val x = linear_model.predict(data.first().features)
    val true_vs_predicted = data.map(p => (p.label, linear_model.predict(p.features)))
    val true_vs_predicted_csv = data.map(p => p.label + " ,"  + linear_model.predict(p.features))
    val format = new java.text.SimpleDateFormat("dd-MM-yyyy-hh-mm-ss")
    val date = format.format(new java.util.Date())
    val save = true
    if (save){
      true_vs_predicted_csv.saveAsTextFile("./output/linear_model_" + date + ".csv")
    }
    val true_vs_predicted_take5 = true_vs_predicted.take(5)
    for(i <- 0 until 5) {
      println("True vs Predicted: " + "i :" + true_vs_predicted_take5(i))
    }

    Util.calculatePrintMetrics(true_vs_predicted, "LinearRegressioWithSGD")

  }

} 
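The block above follows a pattern repeated throughout these regression examples: Util.get_mapping builds a value -> index dictionary for one categorical column (its distinct values zipped with an index), a ListBuffer collects one mapping per column, and catLen, the width of the one-hot part of the feature vector, is the sum of the mapping sizes. The same idea on plain Scala collections, without Spark (the toy rows are illustrative):

import scala.collection.mutable.ListBuffer

object CategoryMappingSketch {
  // get_mapping, minus the RDD: distinct values of one column, zipped with an index.
  def getMapping(rows: Seq[Array[String]], idx: Int): Map[String, Long] =
    rows.map(_(idx)).distinct.zipWithIndex.map { case (v, i) => v -> i.toLong }.toMap

  def main(args: Array[String]): Unit = {
    val rows = Seq(
      Array("spring", "clear"),
      Array("summer", "rain"),
      Array("spring", "rain"))

    // One mapping per categorical column, collected exactly like the ListBuffer above.
    val mappings = new ListBuffer[Map[String, Long]]()
    for (i <- 0 until 2) mappings += getMapping(rows, i)

    val catLen = mappings.map(_.size).sum   // 2 seasons + 2 weather values = 4
    println(s"mappings = ${mappings.toList}, one-hot feature length = $catLen")
  }
}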
Example 136
package org.sparksamples.linearregression

import org.apache.spark.mllib.regression.{LabeledPoint, LinearRegressionWithSGD}
import org.sparksamples.Util

import scala.collection.Map
import scala.collection.mutable.ListBuffer


object LinearRegressionWithLog{



  def main(args: Array[String]) {

    val recordsArray = Util.getRecords()
    val records = recordsArray._1
    val first = records.first()
    val numData = recordsArray._2

    println(numData.toString())
    records.cache()
    print("Mapping of first categorical feature column: " +  Util.get_mapping(records, 2))
    var list = new ListBuffer[Map[String, Long]]()
    for( i <- 2 to 9){
      val m =  Util.get_mapping(records, i)
      list += m
    }
    val mappings = list.toList
    var catLen = 0
    mappings.foreach( m => (catLen +=m.size))

    val numLen = records.first().slice(11, 15).size
    val totalLen = catLen + numLen

    print("Feature vector length for categorical features:"+ catLen)
    print("Feature vector length for numerical features:" + numLen)
    print("Total feature vector length: " + totalLen)

    val data = {
      records.map(r => LabeledPoint(Math.log(Util.extractLabel(r)), Util.extractFeatures(r, catLen, mappings)))
    }
    val first_point = data.first()
    println("Linear Model feature vector:" + first_point.features.toString)
    println("Linear Model feature vector length: " + first_point.features.size)

    val iterations = 10
    //val step = 0.2
    val step = 0.025
    val intercept =true

    //LinearRegressionWithSGD.tr
    val linear_model = LinearRegressionWithSGD.train(data, iterations, step)
    val x = linear_model.predict(data.first().features)
    val true_vs_predicted = data.map(p => (Math.exp(p.label), Math.exp(linear_model.predict(p.features))))
    val true_vs_predicted_csv = data.map(p => p.label + " ,"  + linear_model.predict(p.features))
    val format = new java.text.SimpleDateFormat("dd-MM-yyyy-hh-mm-ss")
    val date = format.format(new java.util.Date())
    val save = false
    if (save){
      true_vs_predicted_csv.saveAsTextFile("./output/linear_model_" + date + ".csv")
    }
    val true_vs_predicted_take5 = true_vs_predicted.take(5)
    for(i <- 0 until 5) {
      println("True vs Predicted: " + "i :" + true_vs_predicted_take5(i))
    }

    Util.calculatePrintMetrics(true_vs_predicted, "LinearRegressioWithSGD Log")

  }

} 
Example 137
package org.sparksamples.decisiontree

import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.tree.DecisionTree
import org.apache.spark.rdd.RDD
import org.sparksamples.Util

import scala.collection.Map
import scala.collection.mutable.ListBuffer


object DecisionTreeUtil {

  def getTrainTestData(): (RDD[LabeledPoint], RDD[LabeledPoint]) = {
    val recordsArray = Util.getRecords()
    val records = recordsArray._1
    val first = records.first()
    val numData = recordsArray._2

    println(numData.toString())
    records.cache()
    print("Mapping of first categorical feature column: " +  Util.get_mapping(records, 2))
    var list = new ListBuffer[Map[String, Long]]()
    for( i <- 2 to 9){
      val m = Util.get_mapping(records, i)
      list += m
    }
    val mappings = list.toList
    var catLen = 0
    mappings.foreach( m => (catLen +=m.size))

    val numLen = records.first().slice(11, 15).size
    val totalLen = catLen + numLen

    val data = {
      records.map(r => LabeledPoint(Util.extractLabel(r), Util.extractFeatures(r, catLen, mappings)))
    }
    val data_dt = {
      records.map(r => LabeledPoint(Util.extractLabel(r), Util.extract_features_dt(r)))
    }

    val splits = data_dt.randomSplit(Array(0.8, 0.2), seed = 11L)
    val training = splits(0).cache()
    val test = splits(1)
    return (training, test)
  }

  def evaluate(train: RDD[LabeledPoint],test: RDD[LabeledPoint],
               categoricalFeaturesInfo: scala.Predef.Map[Int, Int],
                maxDepth :Int, maxBins: Int): Double = {
    val impurity = "variance"
    val decisionTreeModel = DecisionTree.trainRegressor(train, categoricalFeaturesInfo,
      impurity,maxDepth, maxBins )

    val true_vs_predicted = test.map(p => (p.label, decisionTreeModel.predict(p.features)))
    val rmsle = Math.sqrt(true_vs_predicted.map{ case(t, p) => Util.squaredLogError(t, p)}.mean())
    return rmsle
  }

} 
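evaluate above returns the RMSLE for a given maxDepth and maxBins, which makes it easy to sweep a small parameter grid and collect the scores in a ListBuffer. A minimal sketch of such a sweep (the candidate depths and the empty categorical feature map are illustrative, not from the original project):

import org.sparksamples.decisiontree.DecisionTreeUtil

import scala.collection.mutable.ListBuffer

object DecisionTreeGridSearchSketch {
  def main(args: Array[String]): Unit = {
    val (training, test) = DecisionTreeUtil.getTrainTestData()

    // No categorical feature information for this sketch.
    val categoricalFeaturesInfo = scala.Predef.Map[Int, Int]()
    val maxBins = 32

    // Collect one (maxDepth, rmsle) pair per candidate depth.
    val results = new ListBuffer[(Int, Double)]()
    for (maxDepth <- Seq(2, 4, 6, 8)) {
      val rmsle = DecisionTreeUtil.evaluate(training, test, categoricalFeaturesInfo, maxDepth, maxBins)
      results += ((maxDepth, rmsle))
    }

    results.foreach { case (depth, rmsle) => println(s"maxDepth=$depth -> RMSLE=$rmsle") }
  }
}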
Example 138
package org.sparksamples.decisiontree

import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.tree.DecisionTree
import org.apache.spark.rdd.RDD
import org.sparksamples.Util

import scala.collection.Map
import scala.collection.mutable.ListBuffer


object DecisionTreeCategoricalFeaturesApp{

  def get_mapping(rdd :RDD[Array[String]], idx: Int) : Map[String, Long] = {
    return rdd.map( fields=> fields(idx)).distinct().zipWithIndex().collectAsMap()
  }

  def main(args: Array[String]) {
    val save = true
    //val sc = new SparkContext("local[2]", "First Spark App")
    val sc = Util.sc

    // load the bike sharing 'hour' dataset (CSV without a header row) and
    // split each line into its comma-separated fields
    val rawData = sc.textFile("../data/hour_noheader.csv")
    val numData = rawData.count()

    val records = rawData.map(line => line.split(","))
    val first = records.first()

    println(numData.toInt)
    records.cache()
    print("Mapping of first categorical feature column: " +  get_mapping(records, 2))
    var list = new ListBuffer[Map[String, Long]]()
    for( i <- 2 to 9){
      val m = get_mapping(records, i)
      list += m
    }
    val mappings = list.toList
    var catLen = 0
    mappings.foreach( m => (catLen +=m.size))

    val numLen = records.first().slice(11, 15).size
    val totalLen = catLen + numLen

    println("Feature vector length for categorical features:"+ catLen)
    println("Feature vector length for numerical features:" + numLen)
    println("Total feature vector length: " + totalLen)

    val data = {
      records.map(r => LabeledPoint(Util.extractLabel(r), Util.extractFeatures(r, catLen, mappings)))
    }
    val data_dt = {
      records.map(r => LabeledPoint(Util.extractLabel(r), Util.extract_features_dt(r)))
    }
    val first_point = data_dt.first()
    println("Decision Tree feature vector:" + first_point.features.toString)
    println("Decision Tree feature vector length: " + first_point.features.size)


    def getCatFeatures(): scala.Predef.Map[Int, Int] = {

      var d = scala.Predef.Map[Int, Int]()

      for(a <- 2 until 10){
        d += (a-2 -> (get_mapping(records, a).size + 1))
        //d.put(a-2,get_mapping(records, a).size + 1)
      }
      return d

    }
    val cat_features = getCatFeatures()
    //dict([(i - 2, len(get_mapping(records, i)) + 1) for i in range(2,10)])

    //val categoricalFeaturesInfo = scala.Predef.Map[Int, Int]()
    val impurity = "variance"
    val maxDepth = 5
    val maxBins = 32
    val decisionTreeModel= DecisionTree.trainRegressor(data_dt, cat_features,  impurity, maxDepth, maxBins)
    //val decisionTreeModel = DecisionTree.trainRegressor(data_dt, categoricalFeaturesInfo,
    //  impurity, maxDepth, maxBins )

    val preds = decisionTreeModel.predict(data_dt.map( p=> p.features))
    val actual = data.map( p=> p.label)
    val true_vs_predicted_dt = actual.zip(preds)
    val true_vs_predicted_csv = data.map(p => p.label + " ,"  + decisionTreeModel.predict(p.features))

    val format = new java.text.SimpleDateFormat("dd-MM-yyyy-hh-mm-ss")
    val date = format.format(new java.util.Date())
    if (save){
      true_vs_predicted_csv.saveAsTextFile("./output/decision_tree_categorical_" + date + ".csv")
    }

    print("Decision Tree depth: " + decisionTreeModel.depth)
    print("Decision Tree number of nodes: " + decisionTreeModel.numNodes)
    Util.calculatePrintMetrics(true_vs_predicted_dt, "Decision Tree Categorical Features")
  }

} 
Example 139
package org.sparksamples.decisiontree

import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.tree.DecisionTree
import org.apache.spark.rdd.RDD
import org.sparksamples.Util

import scala.collection.Map
import scala.collection.mutable.ListBuffer


object DecisionTreeWithLog{

  def get_mapping(rdd :RDD[Array[String]], idx: Int) : Map[String, Long] = {
    return rdd.map( fields=> fields(idx)).distinct().zipWithIndex().collectAsMap()
  }

  def main(args: Array[String]) {
    val save = false
    val sc = Util.sc

    // load the bike sharing 'hour' dataset (CSV without a header row) and
    // split each line into its comma-separated fields
    val rawData = sc.textFile("../data/hour_noheader.csv")
    val numData = rawData.count()

    val records = rawData.map(line => line.split(","))
    val first = records.first()

    println(numData.toInt)
    records.cache()
    print("Mapping of first categorical feature column: " +  get_mapping(records, 2))
    var list = new ListBuffer[Map[String, Long]]()
    for( i <- 2 to 9){
      val m = get_mapping(records, i)
      list += m
    }
    val mappings = list.toList
    var catLen = 0
    mappings.foreach( m => (catLen +=m.size))

    val numLen = records.first().slice(11, 15).size
    val totalLen = catLen + numLen

    println("Feature vector length for categorical features:"+ catLen)
    println("Feature vector length for numerical features:" + numLen)
    println("Total feature vector length: " + totalLen)


    val data_dt = {
      records.map(r => LabeledPoint(Math.log(Util.extractLabel(r)), Util.extract_features_dt(r)))
    }
    val first_point = data_dt.first()
    println("Decision Tree feature vector:" + first_point.features.toString)
    println("Decision Tree feature vector length: " + first_point.features.size)

    val categoricalFeaturesInfo = scala.Predef.Map[Int, Int]()
    val impurity = "variance"
    val maxDepth = 5
    val maxBins = 32

    val decisionTreeModel = DecisionTree.trainRegressor(data_dt, categoricalFeaturesInfo,
      impurity, maxDepth, maxBins )

    val preds = decisionTreeModel.predict(data_dt.map( p=> p.features))
    val preds_2 = preds.map(p=> Math.exp(p))
    val actual = data_dt.map( p=> Math.exp(p.label))
    val true_vs_predicted_dt = actual.zip(preds_2) // compare exp(label) with exp(prediction), both on the original scale

    if(save){
      val true_vs_predicted_csv = data_dt.map(p => p.label + " ,"  + decisionTreeModel.predict(p.features))
      val format = new java.text.SimpleDateFormat("dd-MM-yyyy-hh-mm-ss")
      val date = format.format(new java.util.Date())
      true_vs_predicted_csv.saveAsTextFile("./output/decision_tree_" + date + ".csv")
    }

    print("Decision Tree depth: " + decisionTreeModel.depth)
    print("Decision Tree number of nodes: " + decisionTreeModel.numNodes)

    Util.calculatePrintMetrics(true_vs_predicted_dt, "Decision Tree With Log")
    Util.sc.stop()
  }

} 
Example 140
package org.sparksamples

import org.apache.spark.mllib.regression.{LabeledPoint, RidgeRegressionWithSGD}
import org.apache.spark.rdd.RDD

import scala.collection.Map
import scala.collection.mutable.ListBuffer


object RidgeRegressionApp{

  def get_mapping(rdd :RDD[Array[String]], idx: Int) : Map[String, Long] = {
    return rdd.map( fields=> fields(idx)).distinct().zipWithIndex().collectAsMap()
  }

  def main(args: Array[String]) {
    //val sc = new SparkContext("local[2]", "First Spark App")
    val sc = Util.sc

    // load the bike sharing 'hour' dataset (CSV without a header row) and
    // split each line into its comma-separated fields
    val rawData = sc.textFile("../data/hour_noheader.csv")
    val numData = rawData.count()
    val records = rawData.map(line => line.split(","))
    records.cache()
    //print("Mapping of first categorical feature column: " +  get_mapping(records, 2))
    var list = new ListBuffer[Map[String, Long]]()
    for( i <- 2 to 9){
      val m = get_mapping(records, i)
      list += m
    }
    val mappings = list.toList
    var catLen = 0
    mappings.foreach( m => (catLen +=m.size))

    val numLen = records.first().slice(11, 15).size
    val totalLen = catLen + numLen

    print("Feature vector length for categorical features:"+ catLen)
    print("Feature vector length for numerical features:" + numLen)
    print("Total feature vector length: " + totalLen)

    val data = {
      records.map(r => LabeledPoint(Util.extractLabel(r), Util.extractFeatures(r, catLen, mappings)))
    }
    val first_point = data.first()
    println("Linear Model feature vector:" + first_point.features.toString)
    println("Linear Model feature vector length: " + first_point.features.size)

    val iterations = 10
    val step = 0.1
    val intercept =false
    val rr = new RidgeRegressionWithSGD()
    rr.optimizer.setNumIterations(iterations)
    rr.optimizer.setStepSize(0.1)
    val rrModel = rr.run(data)
    val true_vs_predicted = data.map(p => (p.label, rrModel.predict(p.features)))
    val true_vs_predicted_take5 = true_vs_predicted.take(5)
    for(i <- 0 until 5) {
      println("True vs Predicted: " + "i :" + true_vs_predicted_take5(i))
    }
    val mse = true_vs_predicted.map{ case(t, p) => Util.squaredError(t, p)}.mean()
    val mae = true_vs_predicted.map{ case(t, p) => Util.absError(t, p)}.mean()
    val rmsle = Math.sqrt(true_vs_predicted.map{ case(t, p) => Util.squaredLogError(t, p)}.mean())

    println("Ridge Regression - Mean Squared Error: "  + mse)
    println("Ridge Regression  - Mean Absolute Error: " + mae)
    println("Ridge Regression  - Root Mean Squared Log Error:" + rmsle)
  }
} 
Example 141
package org.sparksamples

import org.apache.spark.rdd.RDD

import scala.collection.Map
import scala.collection.mutable.ListBuffer


object GenerateDataFeaturesFile{

  def get_mapping(rdd :RDD[Array[String]], idx: Int) : Map[String, Long] = {
    return rdd.map( fields=> fields(idx)).distinct().zipWithIndex().collectAsMap()
  }

  def main(args: Array[String]) {
    val sc = Util.sc
    // load the raw hour data in CSV format and split each line on commas into its fields
    val rawData = sc.textFile("../data/hour_noheader.csv")
    val numData = rawData.count()

    val records = rawData.map(line => line.split(","))
    val first = records.first()

    println(numData.toInt)
    records.cache()
    print("Mapping of first categorical feature column: " +  get_mapping(records, 2))
    print("Mapping of second categorical feature column: " +  get_mapping(records, 3))
    var list = new ListBuffer[Map[String, Long]]()
    for( i <- 2 to 9){
      val m = get_mapping(records, i)
      list += m
    }
    val mappings = list.toList
    var catLen = 0
    mappings.foreach( m => (catLen +=m.size))

    val numLen = records.first().slice(11, 15).size
    val totalLen = catLen + numLen

    print("Feature vector length for categorical features:"+ catLen)
    print("Feature vector length for numerical features:" + numLen)
    print("Total feature vector length: " + totalLen)

    val data = {
      records.map(r => Util.extractLabel(r) + "," +  Util.extractSumFeature(r, catLen, mappings))
    }

    val data_collection = data.collect()
    val d_iterator = data_collection.iterator
    while(d_iterator.hasNext) {
      val x = d_iterator.next
      println(x)
    }
    val first_point = data.first()
    val format = new java.text.SimpleDateFormat("dd-MM-yyyy-hh-mm-ss")
    val date = format.format(new java.util.Date())
    data.saveAsTextFile("./output/x_features" + date + ".csv")

    sc.stop()
  }

} 
Example 142
package org.sparksamples.gradientboosted

import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.tree.GradientBoostedTrees
import org.apache.spark.mllib.tree.configuration.BoostingStrategy
import org.apache.spark.rdd.RDD
import org.sparksamples.Util

import scala.collection.Map
import scala.collection.mutable.ListBuffer


object GradientBoostedTreesUtil {

  def getTrainTestData(): (RDD[LabeledPoint], RDD[LabeledPoint]) = {
    val recordsArray = Util.getRecords()
    val records = recordsArray._1
    val first = records.first()
    val numData = recordsArray._2

    println(numData.toString())
    records.cache()
    print("Mapping of first categorical feature column: " +  get_mapping(records, 2))
    var list = new ListBuffer[Map[String, Long]]()
    for( i <- 2 to 9){
      val m = get_mapping(records, i)
      list += m
    }
    val mappings = list.toList
    var catLen = 0
    mappings.foreach( m => (catLen +=m.size))

    val numLen = records.first().slice(11, 15).size
    val totalLen = catLen + numLen

    val data = {
      records.map(r => LabeledPoint(Util.extractLabel(r), Util.extractFeatures(r, catLen, mappings)))
    }


    val splits = data.randomSplit(Array(0.8, 0.2), seed = 11L)
    val training = splits(0).cache()
    val test = splits(1)
    return (training, test)
  }

  def get_mapping(rdd :RDD[Array[String]], idx: Int) : Map[String, Long] = {
    return rdd.map( fields=> fields(idx)).distinct().zipWithIndex().collectAsMap()
  }



  def evaluate(train: RDD[LabeledPoint],test: RDD[LabeledPoint], iterations:Int, maxDepth:Int,
               maxBins: Int): Double ={

    var boostingStrategy = BoostingStrategy.defaultParams("Regression")
    boostingStrategy.setNumIterations(iterations)
    boostingStrategy.treeStrategy.setMaxDepth(maxDepth)
    boostingStrategy.treeStrategy.setMaxBins(maxBins)

    val model = GradientBoostedTrees.train(train, boostingStrategy)
//
//    @classmethod
//    @since("1.3.0")
//    def trainRegressor(cls, data, categoricalFeaturesInfo,
//                       loss="leastSquaresError", numIterations=100, learningRate=0.1, maxDepth=3,
//                       maxBins=32):

    val true_vs_predicted = test.map(p => (p.label, model.predict(p.features)))
    val rmsle = Math.sqrt(true_vs_predicted.map{ case(t, p) => Util.squaredLogError(t, p)}.mean())
    return rmsle
  }

} 
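The evaluate method above takes the boosting hyper-parameters as arguments, which suggests it is meant to be driven by a small grid search. A minimal sketch of such a driver, assuming the object above is on the classpath (the parameter grids are illustrative, not taken from the original project):

package org.sparksamples.gradientboosted

object GradientBoostedTreesGridSearch {
  def main(args: Array[String]): Unit = {
    val (train, test) = GradientBoostedTreesUtil.getTrainTestData()
    // Sweep a few illustrative settings and report the RMSLE returned by evaluate.
    for {
      iterations <- Seq(3, 5, 10)
      maxDepth   <- Seq(3, 5)
      maxBins    <- Seq(16, 32)
    } {
      val rmsle = GradientBoostedTreesUtil.evaluate(train, test, iterations, maxDepth, maxBins)
      println(s"iterations=$iterations maxDepth=$maxDepth maxBins=$maxBins -> RMSLE=$rmsle")
    }
  }
}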
Example 143
package org.sparksamples.gradientboosted

import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.tree.GradientBoostedTrees
import org.apache.spark.mllib.tree.configuration.BoostingStrategy
import org.apache.spark.rdd.RDD
import org.sparksamples.Util

import scala.collection.Map
import scala.collection.mutable.ListBuffer


object GradientBoostedTreesApp{

  def get_mapping(rdd :RDD[Array[String]], idx: Int) : Map[String, Long] = {
    return rdd.map( fields=> fields(idx)).distinct().zipWithIndex().collectAsMap()
  }

  def main(args: Array[String]) {
    //val conf = new SparkConf().setMaster("local").setAppName("GradientBoostedTreesRegressionApp")
    val sc = Util.sc

    // load the raw hour data in CSV format and split each line on commas into its fields
    val rawData = sc.textFile("../data/hour_noheader.csv")
    val numData = rawData.count()
    val records = rawData.map(line => line.split(","))
    records.cache()
    var list = new ListBuffer[Map[String, Long]]()
    for( i <- 2 to 9){
      val m = get_mapping(records, i)
      list += m
    }
    val mappings = list.toList
    var catLen = 0
    mappings.foreach( m => (catLen +=m.size))

    val numLen = records.first().slice(11, 15).size
    val totalLen = catLen + numLen

    print("Feature vector length for categorical features:"+ catLen)
    print("Feature vector length for numerical features:" + numLen)
    print("Total feature vector length: " + totalLen)

    val data = {
      records.map(r => LabeledPoint(Util.extractLabel(r), Util.extractFeatures(r, catLen, mappings)))
    }
    val first_point = data.first()
    println("Gradient Boosted Trees Model feature vector:" + first_point.features.toString)
    println("Gradient Boosted Trees Model feature vector length: " + first_point.features.size)


    var boostingStrategy = BoostingStrategy.defaultParams("Regression")
    boostingStrategy.setNumIterations(3)// Note: Use more iterations in practice.
    boostingStrategy.treeStrategy.setMaxDepth(5)


    val model = GradientBoostedTrees.train(data, boostingStrategy)
    val true_vs_predicted = data.map(p => (p.label, model.predict(p.features)))
    val true_vs_predicted_take5 = true_vs_predicted.take(5)
    for(i <- 0 until 5) {
      println("True vs Predicted: " + "i :" + true_vs_predicted_take5(i))
    }
    val save = true
    if(save){
      val true_vs_predicted_csv = data.map(p => p.label + " ,"  + model.predict(p.features))
      val format = new java.text.SimpleDateFormat("dd-MM-yyyy-hh-mm-ss")
      val date = format.format(new java.util.Date())
      true_vs_predicted_csv.saveAsTextFile("./output/gradient_boosted_trees_" + date + ".csv")
    }
    val mse = true_vs_predicted.map{ case(t, p) => Util.squaredError(t, p)}.mean()
    val mae = true_vs_predicted.map{ case(t, p) => Util.absError(t, p)}.mean()
    val rmsle = Math.sqrt(true_vs_predicted.map{ case(t, p) => Util.squaredLogError(t, p)}.mean())

    println("Gradient Boosted Trees - Mean Squared Error: "  + mse)
    println("Gradient Boosted Trees - Mean Absolute Error: " + mae)
    println("Gradient Boosted Trees - Root Mean Squared Log Error:" + rmsle)
  }
} 
Example 144
package org.sparksamples

import org.apache.spark.rdd.RDD

import scala.collection.Map
import scala.collection.mutable.ListBuffer


object CalculateStdDeviation{

  def get_mapping(rdd :RDD[Array[String]], idx: Int) : Map[String, Long] = {
    return rdd.map( fields=> fields(idx)).distinct().zipWithIndex().collectAsMap()
  }

  def main(args: Array[String]) {

    val recordsArray = Util.getRecords()
    val records = recordsArray._1
    val first = records.first()
    val numData = recordsArray._2

    println(numData.toString())
    records.cache()
    print("Mapping of first categorical feature column: " +  get_mapping(records, 2))
    var list = new ListBuffer[Map[String, Long]]()
    for( i <- 2 to 9){
      val m = get_mapping(records, i)
      list += m
    }
    val mappings = list.toList
    var catLen = 0
    mappings.foreach( m => (catLen +=m.size))

    val numLen = records.first().slice(11, 15).size
    val totalLen = catLen + numLen

    print("Feature vector length for categorical features:"+ catLen)
    print("Feature vector length for numerical features:" + numLen)
    print("Total feature vector length: " + totalLen)

    val data = {
      records.map(r => Util.extractFeatures(r, catLen, mappings))
    }
    //data.saveAsTextFile("./output/temp.txt")
    val count_columns = data.first().size

    var a = 0;
    var x = new Array[Double](count_columns)
    // for loop execution with a range
    for( a <- 0 to (count_columns -1) ){
      val stddev = data.map(r => r(a)).stdev()
      //println(a +  ": " +  );
      x.update(a,stddev)

    }
    for( a <- 0 to (count_columns -1) ){
      println(a  + " : " + x(a))
    }

    //val data_1_std_dev = data.map(r => r(1)).stdev()
    //println(data_1_std_dev)


  }

} 
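The loop above launches one Spark job per column. Because Util.extractFeatures already produces MLlib vectors, the same per-column standard deviations can be obtained in a single pass with Statistics.colStats; a minimal sketch of that alternative, assuming data is the RDD[Vector] built above:

import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.mllib.stat.Statistics
import org.apache.spark.rdd.RDD

object ColumnStdDev {
  // Single-pass alternative: colStats aggregates all columns at once and exposes
  // the per-column variance, from which the standard deviation follows.
  def columnStdDevs(data: RDD[Vector]): Array[Double] = {
    val summary = Statistics.colStats(data)
    summary.variance.toArray.map(math.sqrt)
  }
}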
Example 145
package org.sparksamples.linearregression

import org.apache.spark.mllib.regression.{LabeledPoint, LinearRegressionWithSGD}
import org.sparksamples.Util

import scala.collection.Map
import scala.collection.mutable.ListBuffer


object LinearRegressionWithIntercept{

  def main(args: Array[String]) {
    val recordsArray = Util.getRecords()
    val records = recordsArray._1
    val first = records.first()
    val numData = recordsArray._2

    println(numData.toString())
    records.cache()
    print("Mapping of first categorical feature column: " +  Util.get_mapping(records, 2))
    var list = new ListBuffer[Map[String, Long]]()
    for( i <- 2 to 9){
      val m =  Util.get_mapping(records, i)
      list += m
    }
    val mappings = list.toList
    var catLen = 0
    mappings.foreach( m => (catLen +=m.size))

    val numLen = records.first().slice(11, 15).size
    val totalLen = catLen + numLen

    print("Feature vector length for categorical features:"+ catLen)
    print("Feature vector length for numerical features:" + numLen)
    print("Total feature vector length: " + totalLen)

    val data = {
      records.map(r => LabeledPoint(Util.extractLabel(r), Util.extractFeatures(r, catLen, mappings)))
    }
    val data1 = {
      records.map(r => Util.extractFeatures(r, catLen, mappings))
    }
    val first_point = data.first()
    println("Linear Model feature vector:" + first_point.features.toString)
    println("Linear Model feature vector length: " + first_point.features.size)

    val iterations = 10
    val step = 0.025
    val intercept =true

    val linReg = new LinearRegressionWithSGD().setIntercept(intercept)
    linReg.optimizer.setNumIterations(iterations).setStepSize(step)
    val linear_model = linReg.run(data)
    print(data.first());
    val x = linear_model.predict(data.first().features)
    val true_vs_predicted = data.map(p => (p.label, linear_model.predict(p.features)))
    val true_vs_predicted_csv = data.map(p => p.label + " ,"  + linear_model.predict(p.features))
    val format = new java.text.SimpleDateFormat("dd-MM-yyyy-hh-mm-ss")
    val date = format.format(new java.util.Date())
    val save = true
    if (save){
      true_vs_predicted_csv.saveAsTextFile("./output/linear_model_" + date + ".csv")
    }
    val true_vs_predicted_take5 = true_vs_predicted.take(5)
    for(i <- 0 until 5) {
      println("True vs Predicted: " + "i :" + true_vs_predicted_take5(i))
    }
    val mse = true_vs_predicted.map{ case(t, p) => Util.squaredError(t, p)}.mean()
    val mae = true_vs_predicted.map{ case(t, p) => Util.absError(t, p)}.mean()
    val rmsle = Math.sqrt(true_vs_predicted.map{ case(t, p) => Util.squaredLogError(t, p)}.mean())

    println("Linear Model - Mean Squared Error: "  + mse)
    println("Linear Model - Mean Absolute Error: " + mae)
    println("Linear Model - Root Mean Squared Log Error:" + rmsle)

  }

} 
Example 146
package org.sparksamples.linearregression

import org.apache.spark.mllib.regression.{LabeledPoint, LinearRegressionWithSGD}
import org.sparksamples.Util

import scala.collection.Map
import scala.collection.mutable.ListBuffer


object LinearRegression{
  def main(args: Array[String]) {
    val recordsArray = Util.getRecords()
    val records = recordsArray._1
    val first = records.first()
    val numData = recordsArray._2

    println(numData.toString())
    records.cache()
    print("Mapping of first categorical feature column: " +  Util.get_mapping(records, 2))
    var list = new ListBuffer[Map[String, Long]]()
    for( i <- 2 to 9){
      val m =  Util.get_mapping(records, i)
      list += m
    }
    val mappings = list.toList
    var catLen = 0
    mappings.foreach( m => (catLen +=m.size))

    val numLen = records.first().slice(11, 15).size
    val totalLen = catLen + numLen

    print("Feature vector length for categorical features:"+ catLen)
    print("Feature vector length for numerical features:" + numLen)
    print("Total feature vector length: " + totalLen)

    val data = {
      records.map(r => LabeledPoint(Util.extractLabel(r), Util.extractFeatures(r, catLen, mappings)))
    }
    val first_point = data.first()
    println("Linear Model feature vector:" + first_point.features.toString)
    println("Linear Model feature vector length: " + first_point.features.size)

    val iterations = 10
    val step = 0.025
    val intercept =true

    //LinearRegressionWithSGD.tr
    val linear_model = LinearRegressionWithSGD.train(data, iterations, step)
    val x = linear_model.predict(data.first().features)
    val true_vs_predicted = data.map(p => (p.label, linear_model.predict(p.features)))
    val true_vs_predicted_csv = data.map(p => p.label + " ,"  + linear_model.predict(p.features))
    val format = new java.text.SimpleDateFormat("dd-MM-yyyy-hh-mm-ss")
    val date = format.format(new java.util.Date())
    val save = true
    if (save){
      true_vs_predicted_csv.saveAsTextFile("./output/linear_model_" + date + ".csv")
    }
    val true_vs_predicted_take5 = true_vs_predicted.take(5)
    for(i <- 0 until 5) {
      println("True vs Predicted: " + "i :" + true_vs_predicted_take5(i))
    }

    Util.calculatePrintMetrics(true_vs_predicted, "LinearRegressionWithSGD")

  }

} 
Example 147
package org.sparksamples.linearregression

import org.apache.spark.mllib.regression.{LabeledPoint, LinearRegressionWithSGD}
import org.sparksamples.Util

import scala.collection.Map
import scala.collection.mutable.ListBuffer


object LinearRegressionWithLog{



  def main(args: Array[String]) {

    val recordsArray = Util.getRecords()
    val records = recordsArray._1
    val first = records.first()
    val numData = recordsArray._2

    println(numData.toString())
    records.cache()
    print("Mapping of first categorical feature column: " +  Util.get_mapping(records, 2))
    var list = new ListBuffer[Map[String, Long]]()
    for( i <- 2 to 9){
      val m =  Util.get_mapping(records, i)
      list += m
    }
    val mappings = list.toList
    var catLen = 0
    mappings.foreach( m => (catLen +=m.size))

    val numLen = records.first().slice(11, 15).size
    val totalLen = catLen + numLen

    print("Feature vector length for categorical features:"+ catLen)
    print("Feature vector length for numerical features:" + numLen)
    print("Total feature vector length: " + totalLen)

    val data = {
      records.map(r => LabeledPoint(Math.log(Util.extractLabel(r)), Util.extractFeatures(r, catLen, mappings)))
    }
    val first_point = data.first()
    println("Linear Model feature vector:" + first_point.features.toString)
    println("Linear Model feature vector length: " + first_point.features.size)

    val iterations = 10
    //val step = 0.2
    val step = 0.025
    val intercept =true

    //LinearRegressionWithSGD.tr
    val linear_model = LinearRegressionWithSGD.train(data, iterations, step)
    val x = linear_model.predict(data.first().features)
    val true_vs_predicted = data.map(p => (Math.exp(p.label), Math.exp(linear_model.predict(p.features))))
    val true_vs_predicted_csv = data.map(p => p.label + " ,"  + linear_model.predict(p.features))
    val format = new java.text.SimpleDateFormat("dd-MM-yyyy-hh-mm-ss")
    val date = format.format(new java.util.Date())
    val save = false
    if (save){
      true_vs_predicted_csv.saveAsTextFile("./output/linear_model_" + date + ".csv")
    }
    val true_vs_predicted_take5 = true_vs_predicted.take(5)
    for(i <- 0 until 5) {
      println("True vs Predicted: " + "i :" + true_vs_predicted_take5(i))
    }

    Util.calculatePrintMetrics(true_vs_predicted, "LinearRegressionWithSGD Log")

  }

} 
Example 148
package org.sparksamples

import org.apache.spark.mllib.regression.{IsotonicRegression, LabeledPoint}
import org.apache.spark.rdd.RDD

import scala.collection.Map
import scala.collection.mutable.ListBuffer

object IsotonicRegressionApp{

  def get_mapping(rdd :RDD[Array[String]], idx: Int) : Map[String, Long] = {
    return rdd.map( fields=> fields(idx)).distinct().zipWithIndex().collectAsMap()
  }

  def main(args: Array[String]) {
    val sc = Util.sc

    // load the raw hour data in CSV format and split each line on commas into its fields
    val rawData = sc.textFile("../data/hour_noheader_1000.csv")
    val numData = rawData.count()
    val records = rawData.map(line => line.split(","))
    records.cache()

    var list = new ListBuffer[Map[String, Long]]()
    for( i <- 2 to 9){
      val m = get_mapping(records, i)
      list += m
    }
    val mappings = list.toList
    var catLen = 0
    mappings.foreach( m => (catLen +=m.size))

    val numLen = records.first().slice(11, 15).size
    val totalLen = catLen + numLen


    val data = {
      records.map(r => LabeledPoint(Util.extractLabel(r), Util.extractFeatures(r, catLen, mappings)))
    }
    val parsedData = records.map { r =>
      (Util.extractLabel(r), Util.extractSumFeature(r, catLen, mappings), 1.0)
    }

    val iterations = 10
    val step = 0.1
    val intercept =false

    val x = new IsotonicRegression().setIsotonic(false)
    val model = x.run(parsedData)

    val parsedData1: RDD[Double] = parsedData.map(r => r._2)
    //val model = GradientBoostedTrees.train(data, boostingStrategy)
    val true_vs_predicted = parsedData.map(p => (p._1, model.predict(p._2)))

    val save = true
    if(save){
      val true_vs_predicted_csv = parsedData.map(p => ( p._1+ "," + model.predict(p._2)))
      val format = new java.text.SimpleDateFormat("dd-MM-yyyy-hh-mm-ss")
      val date = format.format(new java.util.Date())
      true_vs_predicted_csv.saveAsTextFile("./output/isotonic_regression_" + date + ".csv")
    }
    val true_vs_predicted_take5 = true_vs_predicted.take(5)
    for(i <- 0 until 5) {
      println("True vs Predicted: " + "i :" + true_vs_predicted_take5(i))
    }

    val mse = true_vs_predicted.map{ case(t, p) => Util.squaredError(t, p)}.mean()
    val mae = true_vs_predicted.map{ case(t, p) => Util.absError(t, p)}.mean()
    val rmsle = Math.sqrt(true_vs_predicted.map{ case(t, p) => Util.squaredLogError(t, p)}.mean())

    Util.calculatePrintMetrics(true_vs_predicted, "Isotonic Regression")

  }
} 
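IsotonicRegression consumes (label, feature, weight) triples rather than LabeledPoints, which is why the example builds parsedData with extractSumFeature. A minimal, self-contained sketch of that input shape on toy data (the numbers are illustrative):

import org.apache.spark.SparkContext
import org.apache.spark.mllib.regression.IsotonicRegression

object IsotonicToyExample {
  def run(sc: SparkContext): Unit = {
    // (label, feature, weight) triples; isotonic regression fits a monotone,
    // piecewise-linear function of the single feature.
    val points = sc.parallelize(Seq(
      (1.0, 1.0, 1.0), (2.0, 2.0, 1.0), (2.0, 3.0, 1.0), (4.0, 4.0, 1.0)))
    val model = new IsotonicRegression().setIsotonic(true).run(points)
    println(model.predict(2.5))
  }
}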
Example 149
Source File: Build.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
import play.dev.filewatch.FileWatchService
import play.sbt.run.toLoggerProxy
import sbt._

import javax.net.ssl.SSLContext
import javax.net.ssl.HttpsURLConnection
import javax.net.ssl.TrustManager
import javax.net.ssl.X509TrustManager
import java.security.cert.X509Certificate
import scala.annotation.tailrec
import scala.collection.mutable.ListBuffer
import scala.util.Properties

// This is an almost verbatim copy from Play's
// https://github.com/playframework/playframework/blob/master/framework/src/sbt-plugin/src/sbt-test/play-sbt-plugin/generated-keystore/project/Build.scala
// I think some parts could be trimmed but keeping the (almost) verbatim copy will ease future consolidation.
// Changes compared to Play's version:
//  - had to replace `path` with `url` in `verifyResourceContains`
object DevModeBuild {

  def jdk7WatchService = Def.setting {
    FileWatchService.jdk7(Keys.sLog.value)
  }

  def jnotifyWatchService = Def.setting {
    FileWatchService.jnotify(Keys.target.value)
  }

  // Use up to 30 attempts so that we give the file watcher service more chances.
  // This is relevant when using the default JDK watch service, which uses polling.
  val MaxAttempts = 30
  val WaitTime    = 500L

  val ConnectTimeout = 10000
  val ReadTimeout    = 10000

  private val trustAllManager = {
    val manager = new X509TrustManager() {
      def getAcceptedIssuers: Array[X509Certificate]                                = null
      def checkClientTrusted(certs: Array[X509Certificate], authType: String): Unit = {}
      def checkServerTrusted(certs: Array[X509Certificate], authType: String): Unit = {}
    }
    Array[TrustManager](manager)
  }
  @tailrec
  def verifyResourceContains(
      url: String,
      status: Int,
      assertions: Seq[String],
      attempts: Int,
      headers: (String, String)*
  ): Unit = {
    println(s"Attempt $attempts at $url")
    val messages = ListBuffer.empty[String]
    try {
      val sc = SSLContext.getInstance("SSL")
      sc.init(null, trustAllManager, null)
      HttpsURLConnection.setDefaultSSLSocketFactory(sc.getSocketFactory)

      val jnUrl = new java.net.URL(url)
      val conn  = jnUrl.openConnection().asInstanceOf[java.net.HttpURLConnection]
      conn.setConnectTimeout(ConnectTimeout)
      conn.setReadTimeout(ReadTimeout)

      headers.foreach(h => conn.setRequestProperty(h._1, h._2))

      if (status == conn.getResponseCode) messages += s"Resource at $url returned $status as expected"
      else throw new RuntimeException(s"Resource at $url returned ${conn.getResponseCode} instead of $status")

      val is = if (conn.getResponseCode >= 400) conn.getErrorStream else conn.getInputStream

      // The input stream may be null if there's no body
      val contents = if (is != null) {
        val c = IO.readStream(is)
        is.close()
        c
      } else ""
      conn.disconnect()

      assertions.foreach { assertion =>
        if (contents.contains(assertion)) messages += s"Resource at $url contained $assertion"
        else throw new RuntimeException(s"Resource at $url didn't contain '$assertion':\n$contents")
      }

      messages.foreach(println)
    } catch {
      case e: Exception =>
        println(s"Got exception: $e")
        if (attempts < MaxAttempts) {
          Thread.sleep(WaitTime)
          verifyResourceContains(url, status, assertions, attempts + 1, headers: _*)
        } else {
          messages.foreach(println)
          println(s"After $attempts attempts:")
          throw e
        }
    }
  }
} 
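verifyResourceContains polls an HTTPS endpoint up to MaxAttempts times, sleeping WaitTime between tries, until the response has the expected status and contains every assertion string. A minimal sketch of how a scripted test might invoke it from the same build (the URL and header are illustrative):

object DevModeChecks {
  // Hypothetical helper: poll a dev-mode endpoint until it serves the expected
  // status and contains the given fragment; starts at attempt 0 as in the callers.
  def waitForHello(): Unit =
    DevModeBuild.verifyResourceContains(
      "https://localhost:9443/api/hello", // illustrative URL
      200,
      Seq("Hello"),
      0,
      "Accept" -> "text/plain"
    )
}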
Example 150
Source File: ImportGroup.scala    From sort-imports   with MIT License 5 votes vote down vote up
package fix

import scala.collection.mutable.ListBuffer
import scala.meta.contrib.{ AComments, AssociatedComments }
import scala.meta.inputs.Position
import scala.meta.tokens.Token
import scala.meta.{ Import, Tokens, Traverser, Tree }

object ImportGroupTraverser {
  def retrieveImportGroups(tree: Tree): List[ImportGroup] = {
    val importGroupsBuffer = ListBuffer[ListBuffer[Import]](ListBuffer.empty)
    val importTraverser    = new ImportGroupTraverser(importGroupsBuffer)
    importTraverser(tree)
    importGroupsBuffer.map(importGroupBuffer => ImportGroup(importGroupBuffer.toList)).toList
  }
}

private class ImportGroupTraverser(listBuffer: ListBuffer[ListBuffer[Import]]) extends Traverser {
  override def apply(tree: Tree): Unit =
    tree match {
      case x: Import => listBuffer.last.append(x)
      case node =>
        listBuffer.append(ListBuffer.empty)
        super.apply(node)
    }
}

case class ImportGroup(value: List[Import]) extends Iterable[Import] {

  def sortWith(ordering: Ordering[Import]): ImportGroup = ImportGroup(value.sortWith(ordering.lt))

  def groupByBlock(blocks: List[Block]): Map[Block, ImportGroup] =
    value.groupBy { imp =>
      blocks
        .find(_.matches(imp.children.head.syntax))
        .getOrElse(Block.Default)
    }.mapValues(ImportGroup(_)).toMap

  def containPosition(pos: Position): Boolean =
    pos.start > value.head.pos.start && pos.end < value.last.pos.end

  def trailedBy(pos: Position): Boolean =
    pos.start == value.last.pos.end

  def trailingComment(tokens: Tokens, comments: AssociatedComments): Map[Import, Token.Comment] = {
    val trailingMap = AComments.trailingMap(comments)

    value.flatMap { currentImport =>
      val sc                                  = ImportGroup.semicolons(tokens, currentImport)
      val cs: IndexedSeq[List[Token.Comment]] = sc.flatMap(s => trailingMap.get(s))

      (currentImport -> comments.trailing(currentImport).headOption) +: cs.map(c => currentImport -> c.headOption)
    }.collect {
      case (imp, Some(comment)) => (imp, comment)
    }.toMap
  }

  def trailingSemicolon(tokens: Tokens): Set[Import] =
    value.filter(imp => ImportGroup.semicolons(tokens, imp).nonEmpty).toSet

  override def isEmpty: Boolean = value.isEmpty

  override def foreach[U](f: Import => U): Unit = value.foreach(f)

  override def iterator: Iterator[scala.meta.Import] = value.iterator
}

object ImportGroup {
  def semicolons(tokens: Tokens, imp: Import) =
    tokens.collect { case t: Token.Semicolon if imp.pos.end == t.start => t }
} 
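ImportGroupTraverser starts a new group whenever a non-import node interrupts a run of imports, so a parsed source yields one ImportGroup per contiguous import block (plus empty groups that can be filtered out). A minimal usage sketch, assuming the scalameta parse API is available on the classpath:

import scala.meta._
import fix.{ImportGroup, ImportGroupTraverser}

object ImportGroupDemo extends App {
  val source: Source =
    """import scala.collection.mutable.ListBuffer
      |import scala.util.Try
      |object X { val xs = ListBuffer(Try(1)) }
      |""".stripMargin.parse[Source].get

  // The traversal also records empty groups for non-import nodes, so keep the
  // non-empty ones before inspecting them.
  val groups: List[ImportGroup] = ImportGroupTraverser.retrieveImportGroups(source)
  groups.filter(_.nonEmpty).foreach(g => println(g.map(_.syntax).mkString(" | ")))
}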
Example 151
Source File: NumericParser.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.mllib.util

import java.util.StringTokenizer

import scala.collection.mutable.{ArrayBuilder, ListBuffer}

import org.apache.spark.SparkException


  def parse(s: String): Any = {
    val tokenizer = new StringTokenizer(s, "()[],", true)
    if (tokenizer.hasMoreTokens()) {
      val token = tokenizer.nextToken()
      if (token == "(") {
        parseTuple(tokenizer)
      } else if (token == "[") {
        parseArray(tokenizer)
      } else {
        // expecting a number
        parseDouble(token)
      }
    } else {
      throw new SparkException(s"Cannot find any token from the input string.")
    }
  }

  private def parseArray(tokenizer: StringTokenizer): Array[Double] = {
    val values = ArrayBuilder.make[Double]
    var parsing = true
    var allowComma = false
    var token: String = null
    while (parsing && tokenizer.hasMoreTokens()) {
      token = tokenizer.nextToken()
      if (token == "]") {
        parsing = false
      } else if (token == ",") {
        if (allowComma) {
          allowComma = false
        } else {
          throw new SparkException("Found a ',' at a wrong position.")
        }
      } else {
        // expecting a number
        values += parseDouble(token)
        allowComma = true
      }
    }
    if (parsing) {
      throw new SparkException(s"An array must end with ']'.")
    }
    values.result()
  }

  private def parseTuple(tokenizer: StringTokenizer): Seq[_] = {
    val items = ListBuffer.empty[Any]
    var parsing = true
    var allowComma = false
    var token: String = null
    while (parsing && tokenizer.hasMoreTokens()) {
      token = tokenizer.nextToken()
      if (token == "(") {
        items.append(parseTuple(tokenizer))
        allowComma = true
      } else if (token == "[") {
        items.append(parseArray(tokenizer))
        allowComma = true
      } else if (token == ",") {
        if (allowComma) {
          allowComma = false
        } else {
          throw new SparkException("Found a ',' at a wrong position.")
        }
      } else if (token == ")") {
        parsing = false
      } else if (token.trim.isEmpty) {
          // ignore whitespaces between delim chars, e.g. ", ["
      } else {
        // expecting a number
        items.append(parseDouble(token))
        allowComma = true
      }
    }
    if (parsing) {
      throw new SparkException(s"A tuple must end with ')'.")
    }
    items
  }

  private def parseDouble(s: String): Double = {
    try {
      java.lang.Double.parseDouble(s)
    } catch {
      case e: NumberFormatException =>
        throw new SparkException(s"Cannot parse a double from: $s", e)
    }
  }
} 
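parse returns a Double for a bare number, an Array[Double] for a bracketed list, and a Seq for a parenthesised tuple, recursing for nested structures. An illustrative call (in Spark the real object is package-private to mllib, so this assumes it is invoked from code within that package):

// Each tuple element keeps the shape produced by the corresponding branch above.
val parsed = NumericParser.parse("(1.0,[2.0,3.0],4)")
parsed match {
  case items: Seq[_] =>
    val label = items(0).asInstanceOf[Double]           // 1.0
    val vector = items(1).asInstanceOf[Array[Double]]   // Array(2.0, 3.0)
    println(s"label=$label vector=${vector.mkString(",")} tail=${items(2)}")
  case other =>
    println(s"unexpected: $other")
}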
Example 152
Source File: ClientArguments.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.deploy

import java.net.{URI, URISyntaxException}

import scala.annotation.tailrec
import scala.collection.mutable.ListBuffer

import org.apache.log4j.Level

import org.apache.spark.util.{IntParam, MemoryParam, Utils}


  private def printUsageAndExit(exitCode: Int) {
    // TODO: It wouldn't be too hard to allow users to submit their app and dependency jars
    //       separately similar to in the YARN client.
    val usage =
     s"""
      |Usage: DriverClient [options] launch <active-master> <jar-url> <main-class> [driver options]
      |Usage: DriverClient kill <active-master> <driver-id>
      |
      |Options:
      |   -c CORES, --cores CORES        Number of cores to request (default: $DEFAULT_CORES)
      |   -m MEMORY, --memory MEMORY     Megabytes of memory to request (default: $DEFAULT_MEMORY)
      |   -s, --supervise                Whether to restart the driver on failure
      |                                  (default: $DEFAULT_SUPERVISE)
      |   -v, --verbose                  Print more debugging output
     """.stripMargin
    // scalastyle:off println
    System.err.println(usage)
    // scalastyle:on println
    System.exit(exitCode)
  }
}

private[deploy] object ClientArguments {
  val DEFAULT_CORES = 1
  val DEFAULT_MEMORY = Utils.DEFAULT_DRIVER_MEM_MB // MB
  val DEFAULT_SUPERVISE = false

  def isValidJarUrl(s: String): Boolean = {
    try {
      val uri = new URI(s)
      uri.getScheme != null && uri.getPath != null && uri.getPath.endsWith(".jar")
    } catch {
      case _: URISyntaxException => false
    }
  }
} 
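isValidJarUrl only checks that the string parses as a URI with a non-null scheme and a path ending in .jar; it never touches the network. A few illustrative checks (ClientArguments is private[deploy], so these assume they run from code within org.apache.spark.deploy):

// Accepted: any scheme, as long as the path component ends in ".jar".
assert(ClientArguments.isValidJarUrl("hdfs://namenode:8020/apps/app.jar"))
assert(ClientArguments.isValidJarUrl("file:///tmp/app.jar"))
// Rejected: missing scheme, or a path that does not end in ".jar".
assert(!ClientArguments.isValidJarUrl("/tmp/app.jar"))
assert(!ClientArguments.isValidJarUrl("http://host/app.war"))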
Example 153
Source File: TaskInfo.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.scheduler

import scala.collection.mutable.ListBuffer

import org.apache.spark.TaskState
import org.apache.spark.TaskState.TaskState
import org.apache.spark.annotation.DeveloperApi


  var finishTime: Long = 0

  var failed = false

  var killed = false

  private[spark] def markGettingResult(time: Long = System.currentTimeMillis) {
    gettingResultTime = time
  }

  private[spark] def markFinished(state: TaskState, time: Long = System.currentTimeMillis) {
    finishTime = time
    if (state == TaskState.FAILED) {
      failed = true
    } else if (state == TaskState.KILLED) {
      killed = true
    }
  }

  def gettingResult: Boolean = gettingResultTime != 0

  def finished: Boolean = finishTime != 0

  def successful: Boolean = finished && !failed && !killed

  def running: Boolean = !finished

  def status: String = {
    if (running) {
      if (gettingResult) {
        "GET RESULT"
      } else {
        "RUNNING"
      }
    } else if (failed) {
      "FAILED"
    } else if (killed) {
      "KILLED"
    } else if (successful) {
      "SUCCESS"
    } else {
      "UNKNOWN"
    }
  }

  def id: String = s"$index.$attemptNumber"

  def duration: Long = {
    if (!finished) {
      throw new UnsupportedOperationException("duration() called on unfinished task")
    } else {
      finishTime - launchTime
    }
  }

  private[spark] def timeRunning(currentTime: Long): Long = currentTime - launchTime
} 
Example 154
Source File: HDFSExecutorMetricsReplayListenerBus.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.scheduler

import java.io.InputStream

import scala.collection.immutable
import scala.collection.mutable.ListBuffer
import scala.io.Source
import scala.util.parsing.json._

import org.apache.spark.internal.Logging

private[spark] class HDFSExecutorMetricsReplayListenerBus extends SparkListenerBus with Logging {

  
  def replay(
              logDataList: ListBuffer[(InputStream, String)],
              sourceName: String,
              maybeTruncated: Boolean = false): Unit = {

    logDataList.foreach(logData => {
      try {
        for (line <- Source.fromInputStream(logData._1).getLines()) {
          val hashMapParsed = JSON.parseFull(line)
          val hashMap = {
            hashMapParsed match {
              case Some(m: Map[String, Any]) => m
              case _ => new immutable.HashMap[String, Any]
            }
          }
          val hdfsExecutorMetrics = new HDFSExecutorMetrics(
            hashMap("values").asInstanceOf[Map[String, Any]],
            hashMap("host").asInstanceOf[String],
            hashMap("timestamp").asInstanceOf[Double].toLong)
          postToAll(hdfsExecutorMetrics)
        }
      } catch {
        case ex: Exception =>
          ex.printStackTrace();
          logError(ex.toString)
          logWarning(s"Got JsonParseException from log file $logData")
      }
    })
  }
} 
Example 155
Source File: exercise02.scala    From scala-for-the-Impatient   with MIT License 5 votes vote down vote up
import scala.collection.mutable.{ListBuffer, HashMap}
def mapStrIndex(str:String)={
  var indexMap = new HashMap[Char,ListBuffer[Int]]()
  var i = 0
  str.toCharArray.foreach {
    c =>
      indexMap.get(c) match {
        case Some(result) => result += i
        case None => indexMap += (c -> ListBuffer {
          i
        })
      }
      i += 1
  }
  indexMap

}
println(mapStrIndex("Mississippi")) 
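For reference, the map printed for "Mississippi" records these index lists (HashMap iteration order may differ):

// M -> ListBuffer(0)
// i -> ListBuffer(1, 4, 7, 10)
// s -> ListBuffer(2, 3, 5, 6)
// p -> ListBuffer(8, 9)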
Example 156
Source File: Generator.scala    From donut   with MIT License 5 votes vote down vote up
package report.donut

import org.apache.commons.lang3.StringUtils
import org.joda.time.DateTime
import org.joda.time.format.{DateTimeFormat, DateTimeFormatter}
import report.donut.gherkin.model._
import report.donut.log.Log
import report.donut.performance.PerformanceSupport
import report.donut.template.TemplateEngine
import report.donut.transformers.cucumber.{CucumberTransformer, Feature => CucumberFeature}

import scala.collection.mutable.ListBuffer
import scala.util.Try

object Generator extends Log with PerformanceSupport {

  val formatter: DateTimeFormatter = DateTimeFormat.forPattern("yyyy-MM-dd-HHmm")

  //this wrapper is currently used to help the java maven plugin
  def apply(resultSources: String,
            outputPath: String = "donut",
            filePrefix: String = "",
            dateTime: String,
            template: String = "default",
            countSkippedAsFailure: Boolean = false,
            countPendingAsFailure: Boolean = false,
            countUndefinedAsFailure: Boolean = false,
            countMissingAsFailure: Boolean = false,
            projectName: String,
            projectVersion: String,
            customAttributes: scala.collection.mutable.Map[String, String]): ReportConsole = {

    createReport(resultSources, outputPath, filePrefix, dateTime, template, countSkippedAsFailure, countPendingAsFailure,
      countUndefinedAsFailure, countMissingAsFailure, projectName, projectVersion, customAttributes.toMap) match {
      case Right(report) => ReportConsole(report)
      case Left(error) => throw DonutException(s"An error occurred while generating donut report. $error")
    }
  }

  private[donut] def createReport(resultSources: String,
                                  outputPath: String = "donut",
                                  filePrefix: String = "",
                                  datetime: String = formatter.print(DateTime.now),
                                  template: String = "default",
                                  countSkippedAsFailure: Boolean = false,
                                  countPendingAsFailure: Boolean = false,
                                  countUndefinedAsFailure: Boolean = false,
                                  countMissingAsFailure: Boolean = false,
                                  projectName: String,
                                  projectVersion: String,
                                  customAttributes: Map[String, String] = Map()): Either[String, Report] = {

    //Prepare objects
    val statusConf = StatusConfiguration(countSkippedAsFailure, countPendingAsFailure, countUndefinedAsFailure, countMissingAsFailure)
    val projectMetadata = ProjectMetadata(projectName, projectVersion, customAttributes)
    val reportStartedTimestamp = Try(formatter.parseDateTime(datetime)).getOrElse(DateTime.now)

    for {
      resultSourceList <- if (!StringUtils.isBlank(resultSources)) Right(resultSources.split(",").map(_.trim).toList).right else Left("Unable to extract the paths to the result sources. Please use this format:- cucumber:/my/path/cucumber-reports,cucumber:/my/other/path/adapted-reports").right
      features <- timed("step1", "Loaded result sources") {
        loadResultSources(resultSourceList, statusConf).right
      }
      report <- timed("step2", "Produced report") {
        Right(Report(features, reportStartedTimestamp, projectMetadata)).right
      }
      _ <- TemplateEngine(report, s"/templates/$template/index.html").renderToHTML(outputPath, filePrefix).right
    } yield report
  }

  
  def loadResultSources(resultSourceList: List[String], statusConf: StatusConfiguration): Either[String, List[Feature]] = {
    var features = new ListBuffer[CucumberFeature]
    for (resultSource <- resultSourceList) {
      val result = ResultLoader(resultSource).load
      if (result.isLeft) return Left(result.left.get)
      features ++= result.right.get
    }
    val donutFeatures = CucumberTransformer.transform(features.toList, statusConf).right.get
    Try(donutFeatures.toList).toEither(_.getMessage)
  }
}

case class DonutException(mgs: String) extends Exception 
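The wrapper apply above exists mainly so Java callers (such as the Maven plugin) can trigger report generation with named settings. A minimal invocation sketch; the paths, project name, version, and custom attribute below are placeholders, not values from the original project:

import scala.collection.mutable
import report.donut.Generator

object GenerateDonutReport {
  def main(args: Array[String]): Unit = {
    // Generator.apply throws DonutException if the report cannot be produced.
    val report = Generator(
      resultSources = "cucumber:target/cucumber-reports", // placeholder result source
      outputPath = "target/donut",
      filePrefix = "",
      dateTime = "2020-01-01-0000", // must match the yyyy-MM-dd-HHmm pattern above
      projectName = "my-project",
      projectVersion = "1.0.0",
      customAttributes = mutable.Map("Sprint" -> "42")
    )
    println(report)
  }
}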
Example 157
Source File: GroupByFeatureNameTest.scala    From donut   with MIT License 5 votes vote down vote up
package report.donut.transformers.cucumber

import java.io.File

import org.json4s.DefaultFormats
import org.scalatest.{FlatSpec, Matchers}
import report.donut.DonutTestData
import report.donut.gherkin.model
import report.donut.gherkin.processors.JSONProcessor

import scala.collection.mutable.ListBuffer

class GroupByFeatureNameTest extends FlatSpec with Matchers {

  implicit val formats = DefaultFormats

  // BDD json files for same feature
  private val sample4RootDir = List("src", "test", "resources", "samples-4").mkString("", File.separator, File.separator)
  private val sample4Features = JSONProcessor.loadFrom(new File(sample4RootDir)).right.get.flatMap(f => f.extract[List[Feature]])
  private val sample4DonutFeatures = CucumberTransformer.transform(sample4Features, DonutTestData.statusConfiguration).right.get

  // Unit tests as BDD format json files
  private val sample5RootDir = List("src", "test", "resources", "samples-5").mkString("", File.separator, File.separator)
  private val sample5Features = JSONProcessor.loadFrom(new File(sample5RootDir)).right.get.flatMap(f => f.extract[List[Feature]])

  // BDD and Unit test json files in BDD format, but with different feature names
  private val sample6BDDRootDir = List("src", "test", "resources", "samples-6", "bdd").mkString("", File.separator, File.separator)
  private val sample6BDDFeatures = JSONProcessor.loadFrom(new File(sample6BDDRootDir)).right.get.flatMap(f => f.extract[List[Feature]])
  private val sample6BDDDonutFeatures = CucumberTransformer.transform(sample6BDDFeatures, DonutTestData.statusConfiguration).right.get

  private val sample6UnitTestsRootDir = List("src", "test", "resources", "samples-6", "unit").mkString("", File.separator, File.separator)
  private val sample6UnitTests = JSONProcessor.loadFrom(new File(sample6UnitTestsRootDir)).right.get.flatMap(f => f.extract[List[Feature]])

  behavior of "Cucumber transformer - Group by feature name"

  it should "group donut features by feature name while transforming the list of cucumber features" in {
    sample4DonutFeatures.size shouldBe 1
    sample4DonutFeatures.head.name shouldBe "Add numbers"

    val expectedScenarioNames = List("Add two numbers: 1 and 2", "Only 1 number is provided", "Add four numbers: 1,2,5,10")
    val scenarios = sample4DonutFeatures.head.scenarios
    scenarios.size shouldBe 3
    scenarios.map(s => s.name).sorted shouldBe expectedScenarioNames.sorted
  }

  it should "mapToDonutFeatures if a feature is split across multiple BDD json files" in {
    val donutFeatures = CucumberTransformer.mapToDonutFeatures(sample4Features, new ListBuffer[model.Feature], DonutTestData.statusConfiguration)
    val scenarios = donutFeatures.head.scenarios

    sample4Features.size shouldBe 3
    donutFeatures.size shouldBe 1
    scenarios.size shouldBe 3
    donutFeatures.head.index.toInt shouldBe 10000

    for (o <- sample4Features) {
      o.name == donutFeatures.head.name
    }
  }

  it should "mapToDonutFeatures if 1 feature is split across few BDD and unit test json files" in {
    val generatedFeatures = CucumberTransformer.mapToDonutFeatures(sample5Features, sample4DonutFeatures, DonutTestData.statusConfiguration)
    val scenarios = generatedFeatures.head.scenarios

    generatedFeatures.size shouldBe 1
    scenarios.size shouldBe 4
    scenarios(3).keyword shouldBe "Unit Test"
    scenarios(3).name should equal(sample5Features.head.elements.head.name)
  }

  it should "mapToDonutFeatures when there are few bdd json files and few unit test json files with a different feature name" in {
    val generatedFeatures = CucumberTransformer.mapToDonutFeatures(sample6UnitTests, sample6BDDDonutFeatures, DonutTestData.statusConfiguration)
    val nonBDDFeature = generatedFeatures(1)
    val nonBDDScenario = nonBDDFeature.scenarios.head
    val bddFeature = generatedFeatures.head

    generatedFeatures.size shouldBe 2
    bddFeature.name shouldBe "Add numbers"
    bddFeature.index shouldBe "10000"
    nonBDDFeature.name shouldBe "Without feature"
    nonBDDFeature.index shouldBe "10001"
    nonBDDScenario.name shouldBe "Add four numbers: 1,2,5,10"
    nonBDDScenario.keyword shouldBe "Unit Test"
  }
} 
Example 158
Source File: DonutTestData.scala    From donut   with MIT License 5 votes vote down vote up
package report.donut

import java.io.File

import report.donut.gherkin.model.{Embedding, Feature, StatusConfiguration}
import report.donut.transformers.cucumber.{CucumberTransformer, Feature => CucumberFeature}

import scala.collection.mutable.ListBuffer
import scala.util.Try

object DonutTestData {

  val statusConfiguration = StatusConfiguration()

  val features_sample_2: Either[String, List[Feature]] = {
    val sourcePath = List("cucumber:src", "test", "resources", "samples-2").mkString("", File.separator, File.separator)
    val loader = ResultLoader(sourcePath)
    val donutFeatures = CucumberTransformer.transform(loader.load().right.get, DonutTestData.statusConfiguration).right.get
    Try(donutFeatures.toList).toEither(_.getMessage)

  }

  val features_sample_3: Either[String, List[Feature]] = {
    val sourcePath = List("cucumber:src", "test", "resources", "samples-3").mkString("", File.separator, File.separator)
    val loader = ResultLoader(sourcePath)
    val donutFeatures = CucumberTransformer.transform(loader.load().right.get, DonutTestData.statusConfiguration).right.get
    Try(donutFeatures.toList).toEither(_.getMessage)
  }

  val featuresWithOnlyUnits: Either[String, List[Feature]] = {
    val sourcePath = List("cucumber:src", "test", "resources", "cuke-and-unit", "unit").mkString("", File.separator, File.separator)
    val loader = ResultLoader(sourcePath)
    val donutFeatures = CucumberTransformer.transform(loader.load().right.get, DonutTestData.statusConfiguration).right.get
    Try(donutFeatures.toList).toEither(_.getMessage)
  }

  val featuresWithCukeAndOrphanedUnits: Either[String, List[Feature]] = {
    val sourcePaths = List(List("cucumber:src", "test", "resources", "samples-6", "bdd").mkString("", File.separator, File.separator),
      List("cucumber:src", "test", "resources", "samples-6", "unit").mkString("", File.separator, File.separator))
    val features = new ListBuffer[CucumberFeature]
    for (sourcePath <- sourcePaths) {
      val loader = ResultLoader(sourcePath)
      features ++= loader.load().right.get
    }
    val donutFeatures = CucumberTransformer.transform(features.toList, DonutTestData.statusConfiguration).right.get
    Try(donutFeatures.toList).toEither(_.getMessage)
  }

  val embedding = Embedding("image/png",
    "iVBORw0KGgoAAAANSUhEUgAABBoAAAKyCAIAAAD0ISqxAAAgAElEQVR4nOzde3zddZ3g//f3nNMkbQOEFiEolHIRAoptQTGoA+moGFydieOF1NGh7LizZS4/UmdnaP156c66WHZmJMzub83+ZkfKqEM6zmhmBteM4lJkHYLgmKJCRC6Rayi3tKT0pDk53/0jaZteaT80LZfn8+HjPJJvvuf7/ZzTEL+v871lGzduDAAAgANXONwDAAAAXq7kBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkKh0uAfw6vWx64443EMAgOmyefPzh3iNrzki/+0L4q0nVw/xeuFVTk4cTv94RXa4hwAA0+KdVx3qNT75XNZ9e/7Wkw/1euFVzsFOAMArxOBTPqeDQ01OAAAAieQEAACQSE4AAACJDs6p2GPj8Ysn4pdPZ5u2ZNU8P3JmnHh0fkZj1M44KIsHAIBD6snnYsXfFZ7Z/LI/IadhZv65Xx+ff8x0vZAXmxOPPhv/0F/4wc8Lz1fybdMmx1pbqL7jjOyD51aPb3iRK5le9296qOvnaye+/tO3/NHhHQwAAC8Ff/T14sbnZ2SFUkQekb18H4e3VP7TP4391WXj0/RGpedEZTz+5vbsmz+KyCbGuqvRauF798Qt92SXNGcfPLeavVTT7rmx5//1qXsO9ygAAHgJ2fh8FArFPPIs4mX9WCgUn9k8Nn1vVGJOjJTzq75Vuufx7bsiorZQPe34wjGz80Ihf/K5wr1DsXU8IqISha/1xfDzhU9c4LYyAPDyMF6Njc9XI+KoWYWiEy15tcrzPLLI83jZP06nlJwYrcTnbyz+fGjy26Prq0veEr9yel43Y/s+lPHRStx6b/a122N4c2Fuff7+BVoCAF4enh7J/+72sb77xyOi+dTCh5tr5sx+qR5jANMpyyLP8yzLXv6P0/gupeTEl28t/Hxo8s/KuSdVl19UnV2761+Z2lK866y8+ZT8+n/JPnhu9bijXuxA4cWY+M/pcI8C4GWgUo2v3z72dz+s5JFFxMNPj2fZ2L9tqSnZR8GrT55Xs8heAY/T+i4dcE7c81h852eT8bDghFjx3rxU3OsnFvV12e/9qv0SrwTlcjki6urqDvdAAJhez23J++4byyOL6nhE5IXCbb+ofPitM46eth0Ui+YXf/3cGWefWDhyZrZpS37XQ9WeO8fWPzRdp43C/nsJ7FV4Je6d+Pqdk39N6uuy5RdVSsWDPSImlbvbZy6JG7Z0t0/dhC8PdM88c8kN92xpb6qL8sDSmWeWb7inu71p1ycPdM88c8nUKQsWXLiwpb1jxdKFjQeYBOXB1c0nr1wfEfGlH29ZtlBRALySVcZjy9Ys8jwmLqKS51u2ZpXp2bYvZPHJ99ZevGDH1kjDrOyCpuIFTcUbf1y5tne0ar8y23z605/e/vXnP//5/ZzyIh32vQqvwL0TT41E/+Dkve/ev6B61KyDM4h8+Nl88P54fnPMmp2dfFp21AtcWTYb21B4/qfZ+HN58YjqrLPzGa/Zx8xj4zHweDw2nBWyOOU1+SnHxgF8upLn+WOPVB95KCqVQuPx2cmnReHQ7usd2vPk8pQvhsp7niciFlx4SVPj5NcDa9dev/6W669d9+Mt3QdUBEN9XSvXx4JLv9DR3Liw8YXnB+BlbfJKjHkW2cS2fJZHfiD/33kAJlqiPBZf+8HWm35aeXJTfuyR2bvOLn30bTXvW1SKiGu+PTotK4b9M5HUr4DHaQ3zA8uJux7O8m3b0xeecRBCpzrws/HuNdW7frxjSpaV3rio+JuXFU4/c/f5CyM/nPHo6uKm7++Yv5pVGy6onPCp6uxzd5k5z+Nbd2Vfv7OwacuOiaceW136tvwz35z8u/jNP9jr2zved2u1+7rqQw/tWODcuTM+8lvFd703XrJXvd3Ziq417U3b0qF7zbrOpYuXr+3sWbVmt70Z+1AeGoyI1atXtGoJgFePKf9Hl0eeT1wg5qBaNL840RLLv7Ll3qHJjYonNuVf+8HYnQ+Mf/FjM9+3qPS9n43d9ZCjpjl88nx/bu3w5sUzLj+7MHH8x9Zyfscdlav6qpEXrvr3M86szb/zra1fuj/91hG//dGa9x2X3X/X2H/4XjX95hPT6cA+a//l05N/ShpmV4878sWuu/LNtaP/b8fUloiIQp5Xf/Kvo1deMf6Pf7fL/KXH/6LmZ/9maktERKGQlzbdUnPPxaWh/77Twqvxp73Fv7p1p5aIiPs3FP7kW4XItv1vj/K8cl1X5b/8x6ktERHZ009XvnRN5UtfjGneZ3Sw7Lzfoq6lrS0iypNTy/09nS1ZlmVZtrB9zbqBbc8Z7Gxf2r2ud0VLlmXZ7/zBvz15ydqIWLOifenqdeWIKA92r166cPJ5K3oHhrc9r7e9fXXvuu6WLMuy9t6f/7yzvX11d0/36qUT867u7i+XB7smn9rS2bttjVHu6+5sb5mYnrUsXd03ODHE8rrOpUs7e9Z1r97xoyn7YgbXdS+dfNbCFWvWDW+bPtTfs8fpAKQpRJZNw/bIr587IyK+9oOt21tiu58/Xr3htq0R0fbmmoO+XjgAWcTk3rm9Pp56fs0Xzi/Oq89qSllNKauvLyxeXPNnzRFZdtzcrL6+cFzxBZaw78fjjirU12Un1mfJS5imvYvbHdjeiY3PT34xd/Ze5/nAf9vX6RQXnJEvf3c1Isa/+63xr/zlxOZ8tZiVzj43jm2MDUOV/jsLEYWsWlnTFUccWVx80cQTixv+uuaRP5nIn2peqjZcGDNOiLGHC5tuKeTjhbxa8/BnozSnckz7xPzX/0vhtvsnV3rcEfGWU/NZM/KBocJdD8fY2Au8qeM3fmP8nyZjJp9ZX3rbO+LIo/L7fl79SX9EjN/07Wh8Xek32ve9kJeCXY5pGhzoj8njp8rrVjQvvnp9LLj0mmUL+7qWX7Z4be91P+5eujCi3Lf2+rVrr594yqNZ7YKI9RERdQ11MXG2xvURCy79wpcWDl2+/OqL1159zW3PdjQ3RHl47dqVaydvL752OFb2r117/dq1ERdeceUV666+duWSRSsjIi688sorrr762uUXn9l4z5b2prq+zubzl6+PBZdec92yod6uq69fef71Qw/mnfMjhvquv37t9ddHXHLFlY1Dvddev/L86+PxfEVjxGDvipMvvjpiwRe+9KWh7suvvmzx1UM35ytaBtetPnnxyogFV15zTfStufqyxVf33vBs98vgHwvgJSvLsoO+ayIizj6xEBE3/bSyx5/e9JPKZRfUvOlE15Ni0u7nQuzPlBcrr0aW7fvxfWdlERGV6j/fWrl/dvG3zivWRyxYVIrbxu66t/Bsbb5+434tZ2+P9zxQmTM3e/j+yn6OZ8+P0+nAciLLJvd1Jv9R2fR8HhH58LPjf9U1MaUw7+SaK1dlx79uckCPPjy2+nP5ow9FxNhf/tfCm5uzI47MxjbMeOQzEzPks84YPe2ree3Jk0Mq31d338ezLb+IiNLDnxpvuCgvzRl8Kr71r3kUsohY3F
T93cX5tlPGx//lvvjTb2d73S8RkW8aHv/qdZNjO3th6T98NjtickfM+HdurHR1RkT1hr/OL3hndsy+ztl4Kejr620cmjgRpTzY133ZyusjoqNlfgyuW3z1+gVXfLOvs60uIpYtbW0/+rLLOjra1jXXTUTIJbc9291cVy5H3dDbnj15Saxas6Ypon9N+/URl95wz8ThUkvbWptPvnj5+avb8tWTR0Jdct2z3UvryuWIwZ6IiEt//OyahQ1Rbm+auejyWHDlg32r59fFirb5R5+/vBwR5YHO5esjrnm2v6MhIpYua2tuOX/50HB5Rwxd9+Nnly5siFjV2th88bX9Q+VojIFVF1+9feGxrL1p4dGXr1w10LGma/HKiCvv2bK6qS4iOtpbly66bMmajtZD+bYDvBJsP7gpi4g8m4ajfI+cmUXEk5v2vN9jw6Y8Io6a9fI4uphXqm33ndjXY/3E1nQlNo9Ue/oqUV/XPi975uk8ywonvrZwZl08PHtrlkXbr9d+4qxCTUSUq/eVs9Ma4p4fjl7x4IzuS0o1T48P1RZOq88i4qFfjv3218amLr9xbvHMk6Lm6UJsLq5tL9U8Nf5EXeHUbTN/4m/GXnCE2TQf73RgOdGw7dzrJzcW8hhP+E98YjO+etO3861bIiKfVVf6zBeyucfsmOF1J5Y+c1XlDz6Rj5Wz8pbx73271HZJ6amvFsY3R0S1cMTo6/8urzl++/x53Wnl1/9t7c/eXhh/vlDZVHzya5Xj/+DbP508x+OEOfG7v5pPvVT2206LD70l/u7OvY6westN+Vg5IrKGo0p/vCqbXR8RUa1Wf3R7te//TK50fOv4t/+h9PFPHPgbcEhde9kHrt1pwoXX3bambX7dYHdvRERjDPT3T/ygrnFBxC0Dg+XmpihHLPhCR3NDRNTVbT9iqhxRVx5Yszbi0hVtk6de1M1vXXPNhYuWDw6XYyInrulom3xeOSJiwTXLFjZERNQ1NV8Y0dixdH5dRERdw7bzMOqaurdsWRMRw8ODQ0NDQwM9fcMRUy4+teBLbROLiLr5zQvj2nJdRJSHByMu/FLH5E+iYem6x1ujoXGo59qIWNAwPNjfX46IGK5riIiegb2c0g7ApGo+uiEisppjJw+EzrZvfeQREVmWj4/nw89GFtlRR2fFg3Bhx01b8oZZ2bFHZk/sqSiOOyqLiI3Pu7QTkw7TlZ3yiGzfj/98f7VlbjHqCr/x/rrfeH+MDFfvuGP0qtuqEYWj67KaUj67FHlz7e+eVYiIKOdb6wqn1UVEzK7P8jzqI2rmFk+LfKQS9aWYd1LpU6eMXfXAjuXPnh01kc2ui4ioj6g5plg/ZeaVJ2+96oEXGOF033rrwHJi/rbN/o2j8eizccLRe5jnb35nD3str/tB8bs/yyKiYWZExPj6H02u/sL3TG2JCYVjGwstvzr+3f8VEfn6f422S4qbbpn4UfWYj0xtiQl57Yn53A/Ghq9ERGHjLXH8H/zkkcmAePeZ1d1vu3Px2fm+cuJnd00O48KLstn1+abh6vd6q/98Y3XDjk3SwhsWFs46e6+LOIhu6Rss7zibOiLK5QM4F+AL3/5xe1NDTNw1oq5h/vzJzfRyDEfE+pUfmDj8aMfCt33RNH8vF9eaHxGt86eMp65xfkR5+4TGhp0OsGpqbNgx416uUzU00NOxaMnanaYt3PFlw9QlbhtguTwc0TTlJhh1DY3zI8oTK1i/8vwzd3phw+W9X/0KgIjY+nT5Rx+K8ednvrmnkM2bVRORZVGtRkRkxdm1USxE/sRjG//jimx2/RGf/s/FObv+f3eCux6qXtBUfNfZpa/9YGz3n77rjaWIuMvdJzissoh88spIe3380U3lz0XtFeeU5pQiIuobCosX1y04Zaz9a+NbJxeT/9HZhYiIkfFlf1F+IGq6PzVjzrblT8xw4/98/i82lP76U7WNEVHadS07y2/8n89fu6H0lU/VNkZkpYgXGuF07+M7sJxYcGKeZ9UsL0TE/76n8Ftv28ORWDNr9jDmnz8+OXHe3IiIfOjxiW+z1+/5EkPZ68+MiZx44vGIyEYHJ6ZXZ5+zx/krs84txlciolh+MCKeHK5OfLhy0tw91Nic2XHErHju+d1/EhERT27b6B0drVy7evzWW6I6+Wcun1VXamktvuf92Ykn7eXJB92ul4Ed6l+3/0+eP79p/vw9XhS2HBFX3vzgquaG7Vvaw8Plxm17BfZ68dnBiFv6h6J9/vYFlQcjGqdeuHZfo9/dcF/roiXr48JrbljVsnD+/PmNQz1Lz1zyAk8rl4fWR+xUWUMDfQPl+UdHRCy48ua+Vc3l7aecDw/XNTYeffkLjQTg1SyrybLi+Mg948/95Ig58952eumR28erhWJEFLI4//XFI2pj7IH7qg//svSGN2UzDs7p0T13jl3QVPzo22rufGD854/vtEXR9NrCkrfVRMQ37thDacAhs22LfB+XTSp8pHXGabPz7jUjPfWlf3tW6W2nl+bVxZyTiu+OyrYDBvMZ25b3QOR5VJ6tzJhTiog8n9j7V8nv3JDnUd1ciShFFvmeLsuUZ9tmvmNDnkV1pBJR2p8RTu5hnL536cDOcGqYFc0nT47mf/XnT2zar2fdvyEeemby67NfN/GKtn3YsLdjMbdNz6rjERHbb2Ozt3Metk3Pigdtb8547z+O33LTREsU5p0yY1lH7V/+bekTv3+oWqKuqeXCiLWru/t3TBvu77psbcSC+Q37deeIvW2Vz1/YGhF9/UN1dQ0T+rvaTj75+J7BfW/H1zW2Loi4uqdv2x6S8kDnZbfEbud877/y0MD6iEuu6+pob1nYNL+hrtzXs3YfI982joWXRqxd3bt9d8e61WcuXrxo8LimSyLW9/YPb3th5f6u408+eVnPYOoAAV4dZhxZmHNhMUYrj91QKmz+4Hk1H35r8aS5cdLc+Mhbi79xXk1x9PnRm7+bjY3NOHvR5GHAL9r6h8Zv/HGlbkZ88WMzP/aOGY1HZYUsjm/IPv6OGV/82MzaUtx8d+Wnj7w8LqXIK9V+3NUhW3zOjJYzZnyirTYeGPvyjeW1D0z80k7dIs2fnbyBSnZK5NnppeMmP8+fut9g96+nPk6V7zit6QDujDGNDviu2B9+c/WH92V5oTBaLfxZb/VP2qp73B2xXWU8/v9bJrf1TzomTjk2IqJwzPHVp56OiPyB++LCd+3+rPzB+ya/Ou74iMhrT8zGHouIwvM/ibkf3n3+4ubJI5TGZ5wUEcccWRjaFBHxwFPZgnm7BsbTI3vfNRERr2mMBx+Y+LJaKpXOv6B48a8VznjDob/XxML2VQsuX7z2skVr11x6zdKWuqH+y1deGxELvtDZ8uJuAVHX1PalBZddvvz8lsEvrWprGurrWrLyllhwTWtT3b435FuWdsbKxcvPP7p8w7ebG4fXdCy5PuLSG1a90PP2PpLG+Qsi1l62qm3+iqaGoe6Oi6++JSLW9g+uWbiPe2PUNXV86cLrL19+fHv528uah/u6llwbcckNzY0L6665cO3y5ccvHPxmZ1vdU
N/FS1ZGLOhobVq794UBEJGVXvuR0cduqD5249bRG45+00d/+8JZH3prRMRRdVEcfX7z399Que3WeO0JNRf8anbw7uh6be9oRLxvUemyC2ouu2CnnR43311ZfFbprNcVl39lyx5PruDV5rBc2SnPq1mW7fMxv/mX1VNPKtQcM+M7f1waqUR9XRYRMVL9Tp7/xrblfOnn47/xulLUF7v+uD5K2ZTlR0REKY/J8zT2tN6dZo0oRZ5Xt3+CnufViH2PMJuY/+C+M1Md8F+EU4+NX992v7j7NhRW/UPxqZG9zvxcOb/qW9m9T0y+a5e8ZfLs7exNiyamjH/v2/mmjbs8Kx9+tnLzTZPjO3tRRIwfecHEt9lTN2SVp3eZPxvbUHjm6xNfV498R0QsOmnyHf/OzwpbdzuV48b1+wqDwht3HLhfu/q/zlj+qULTG7e3RD5a3n7ix7RraOm75+YrL1kQt1y//LLLLl95bcSCK6+7uW9Fy/ZZ6nY6bXlXe/9Jw7K+B6+74sJbrr188eLFS1auXXDpNfes62jY0zLrpi6msWXLgzdfuiBWLrl48eIl169fcOUNP+6aclO8qWvcfWw7vt1+2kNDS/e3vxCxdsniRYsWXXx14zXfvOELEbGuf9v+hCnhNHUkC5f13nzdFbF25cWLFy9ZufbCK657cE17XURzR+9t110R66/9wOLFFy9ZGQsu/eY965pf4DbrAESh/g0zXv/Zyn1Nm//0K8/9+VXjt9/aMPxww/DD433ff+6LV2352nVRWzvzo5cVTzrlIK60msc13x5d/tUttwyMP7s5r+bx7Ob8lnsqV/z1ls/3jD6xMT/uqOyLH5953JGu78ThkWURk1dG2uvj335ty7pHqxERpWyiJbaOVP702i07PojO86zv+VX/WhmpRJSyrSPVke3TJ+apTPk6dlvL5FAmD4yKSkydvj8jnO6PxLONG3fdmn9BlWr853/K+h+eTJG62uwDC6vvPLM6d8rOz5Fy/oP7in97R/7M5m130T49v+Ki6sTLyZ/aMHb50nx8a0QUmt5YunJVdtTkFl/+7DOVqz9XvfeeiMhqZ8z471/Njp6bjT5a+9PzCtXRiKjWv3X0tDX5jMmLtGZjT9T+4rcKm38UEdWYObrgjrym8ZFn4vdvyCbO8XjrKfn/867xWTUTt/GI7/4s6/pebL+39zd/f7z/6YE//OF/mfj2exd/OUaeKy9bkj1fjohC0xtmfOrzUX/E5NhGnqt88T9X++8stlxUvOzy7IgjDvStm+pj1x3xj1fs1z9vuVwul8sRdXUNdcmHFe1jyXV1dXUHuOByebhcPnjjmVzcAQ9jr08sl4fL5alvWJZN+2UNAF7u8mpl9H//85a//nL+2MPVmprC7CMiorp5pLB1NF534qyPLq19Z2tWmvGCy5nwzqs2v8jxHHdk9sWPz2w8Knt8OP/kV7ds2L99FN/8fWdvvzId+is7feC/FYv5nu+Lsield78pi4hnnxi784ldf/bmi+uvOqcwMlL9l5tH/mykrntJ7ZzI131901X3vsgx7q/xrDR9/2kc8MFOEVEqxIr35td8N7/9gSwiyqP5DbdnN9xePP7IOPbIyCMf3pI99FQ1ptz15ryT8997Z3X7tnN2zLHFj11Wuf5/RER14Kejl3+89PYLs+OOz4cerfR9f2JTPiKKH/ud7Oi5EZHXvq7yuk/VPPy5iCiM3F7707fmDf+mWju/MPpg9sy3CtXnJuavzPtMXtMYESfMiQ+dE3//o4iI2x/IfmdN6dyT8rqafODxwkPPvNAumfojZvzWssn7Swz8bOvlH8/e3lI45jXVJzfk/3JLvnkkIsbXfSd7w9nFd16c8O4lSNnOnuYlH+SySV7c3p548MsL4JUvK5Tq3vne0ilnbP3+98rr79zw0M8j4timM2vPPqeu5V2F+adOx90n9uGJTfknv7Llzz828/iG7Isfm7n/RQEHzf6c5jz5WPnuXXv96Z2PVuOcYn198aL3HzV5h+ZK9Y57D2j5L+5xOqXkRETUzogrL67+U3/2tb7C1m2p8/imeHxTTA55+znTefVDb4klb80LO7+S4q99KDYOV3rWRkRW3jL+vd6J6dvnKn6gvfjetu3zVxp/t1B5qvT4f42IQmVTPHXDLoeAjR1/ReW4f7f9248258PP59+7J4uIzVvj+7/Y0TZHzoxNW/b16orv/jf5M0+P/+1XIiLfPJJ/58ZdTgQrLVl6yFoCAA6dLCudclrplNM2P/Xe//Tdz0VE50WfnT33hMM1nCc25X/4VUXB4TNxaMPkaQov4vGuzatmz/r4m0rH1WdRyZ8ZqX735pHvHpQl7+fjdBZFYk5ERJbFry3Kf+WM8W+tz265N556btfP/GeVsvNfX21blJ8wZ8/PL/7Wv8vOOnv8b75cHXxg6k8KJ59S/M1PFM45b5cnbD3hc+P1zTUPfz4r3zP1B/mss7ae8Nnxo3Y6pbuQxe+9s/rGE7LuHxaemHI817knVdvfGn/0t/vcQ5FlpfZLC2ecNf7Vv6w+uNvYPv7vCwvP3dtTeWlypBPAAanOnv3UrEJEVGfNesGZp9UTm/JPfnXLFz828/AOg1enbfdtyF7847/ctvm223aaEgdpyfvzOL3vUsK5E7vLIx4fjsGnYlM5q1ajvi5e2xDzj8l3v4Xcnp6c548+XL3/3tg8ErNmF047I3vdifu8jFKebflF8fn1URmO0lHjsxbmM1+/j+bK83jwqXh8Y1bM4pTX5MceGY8Nx+99tRgR9XXZVz6xz0Pi8jx/5KHJsc2uz055feHEkw7WJZ72/9wJADjEni4PL//+FyLimgtWzq1LuaLFiz93Yqpjj8wiYn92TTh3goPlA/+tWKyOTpx++XJ/HC/UvrTOndhdFvHahnhtQ+x8kd39fHKWnTCveMK8/X9CPvP0yszT9z1THvHTR2JmTXbasfkpr4lTXrNjYPdtmNyIP+6oF7qadZZlJ55UPHQ3rQOAl4SG2iP/9B1/PPHF4R5LxP6FBBx0WRZ55K+Ax2l1cHLipWb9Q/GX3y8+OhwnzImrP1iZVbvjXayMR0//5LcLXudvEwDsQTErHDdr7uEeBRxmeZ5nWeR5vNwfp/Vwp4N2J5qXlKNnx9CmakQ88kx8pqdwz2OTNzF/4Mn4TzcWHtyQRUSxUH33G+QEAAB7tr93dXjpP06nV+beiXlzY+nb469ujYh44MnCp74RtYVqRIxWd+TTR96SNR4lJwAA2LM8z7NXxKO9EynetyD/nQurxWzy7IjRamFqS3zgnOqH3/JCJ04AAPBq1TArz6MQMXH/6ZfxYx6Fo2dN42for8y9ExMuPjtfNC++dVd+54PZU5uqEXHMUYU3vi5vfWP11GMP9+AAAHgJ+9z7q39yY+2zB/MSZYdHw6xY/aFpvOLZKzknIqLxqPjtX6n+9q9sn+DicQAAvLD5r4kvX2bT8YW9Yg92AgAAppucAABeIU6a6yIrcKi9wg92eon7tWv91QOAg+M19fmS5sM9CHj1yTZu3Hi4xwAAALwsOdgJAABIJCcA
AIBEcgIAAEgkJwAAgERyAgAASCQnAACARHICAABIJCcAAIBEcgIAAEhUGhkZOdxjAAAAXpayPM8P9xgAAICXJQc7AQAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAGjYxwkAACAASURBVACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAAC
J5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkEhOAAAAieQEAACQSE4AAACJ5AQAAJBITgAAAInkBAAAkKj06KOPHu4xAAAAL0tZnueHewwAAMDLkoOdAACARHICAABIJCcAAIBEcgIAAEgkJwAAgERyAgAASCQnAACARHICAABIJCcAAIBEcgIAAEgkJwAAgERyAgAASCQnAACARHICAABIJCcAAIBEcgIAAEgkJwAAgERyAgAASCQnAACARHICAABIJCcAAIBEcgIAAEhUOtwDiIjYWon7nqjefHfluz+tvPuNpcVnlU47rlDzkhgaAACwV1me54dlxXnEMyP5jwfHe+4cu+ex6u4znPnaQtubZyyaX5xTn2WHfnwAAMALOdQ5MTYeDz5ZvflnlX/6cWXL1v1a9cya7P2LSovfUDr5NYUZxekeIAAAsL8OUU48uzlf/9D4P/yoctdD4y9mOW+aV/z1c0sL5hWPnm2PBQAAHGbTmBOVavzyqer376n0/KgyUj7Ia6mvy9rOLV1wZumkYwol55MDAMDhMF05ceUN5TsffFE7Ivbfm08uXr2k7tCsCwAA2G66Lp90yFriEK8LAIBD7PsDY1/5P2MPbDg8FxB6ZTv2yLj8XTUXNM1IXoLjhAAAeOkaHx//798d1RLTZMOmuP77o+Pj6Z/OywkAAF66KpXKk8+5Bs80GnwqkxMAALwyHa6bpL2qVKt7uAvcfnpJ3Ho6z/O8Wsmr45FXIytkhWJWKGVuXgcA8KonJw6BF/MmH+acqIyObHnml5Xyxt1/VKo7auack0q19Yd+VAAAwP44bDlRHR8beeKe8dGRvc1QKW987rG7irX19cedWSimn2wOAABMk8OTE5XR55577CdTp7xh/pHvOa/x+Lkzn944+r9/vOGH9zwzMX18dGTjQ3cc8dqzS7VHHI6RAgAAe3UYcmKXlsiy+OSHT//Nd5+0fcpHFp/4nTuGPvPln41VJk8Kee6xn7x0iqK/u7N7MBqGy83LVrTMP0QrHerv6ezua2hqX7F04SFa5ctff3dnd//QwvaO9oWNB2Fx5YGuVT2DUdfe0XFQlrdXQ/2dq1d19Q5MfLe0s3dF66H6PTtwQwP963q7u9f1DEyON5qaW1vb2lpbWuY3HNaRbTPQ27Wmf7BhfvuKdv/tAMDBd6hzIh+v7LJfYsmvzpvaEhMuekvjUxu3/tnan2+f8txjP2mYd15WPNznjpf7V63qmthwWhPNA6tbDtFqh9b19PRGc8uKpYdmha8EQ71dPX0x1Lzs4GxGDg109qyJiOZlHdO5WTq4oqW9Z8r38+e/VFtisK+jdWnvbpMH+noH+no7I9o6e1a3Nh2Gge1ssLuzpy+iuWVF++EeCgC8Eh3qC8WObBiY+m2hkH3ifafscc5LfvXEhvqdTpnY5bmHxdC67h2D6OnsLx+qFZeHIyKGD9n6Xgm2vWcH602rO0jL2afB/omWaGpd1tXd3dXZ1fzSrImhvqYpLdG6dEVnV3d3d3dX54rtBdHT0baid/AwjW+Kl8ZOEgB4pTqkH/aPb32+Ut40dcopx8/epRm2Kxays09puPWuJ7dPqZQ3jW99vlgza3pHuS/l3q6pHxwPrOkd6Gw7FJ+/zm/r6msp19XZMjoAbd39LeVyXcPL602bjJalHR2H7FC6AzfU2bJtN1nrqnWd7VMO/lrY0rp0sK+rdWlnRPR0rFo6sObw76EAAKbNIc2J8vAju0ypqynuY/66ml13npSHH5l97OkHeVj7b3Dd6oGIiGVd3fN72lf0Ru+KnqG2FQd8IH15eGi4HBENjY37+MR7Yq5t89Q1NKR/Ol4uDw9P7Nmoa2jc53KGh4bK8YJDSxvE8NBweff1b3sz9med5aGh4d1nHBoa2vPz6+oa6va2xPLQ0FBE3b7fkMmxTcyzr7FNDiyirrFxX/Wy87/pnmeJiIjm+Xv+rSoPDQ3vYczl8tDwcMQL//se8Hu4J0O9nV0TX7Wu7u9s2/0J85uX9a4eaF3RG9G3Zt3Q6pZdX8z29+sF1refr2v//oPaxxOn5RcemGYjvyz3P5EtPK82Hh3t/2W+8Ly6+pHRH96dz3tTXePOF5kfub/c/3S28Lx9XXt+6O7yQztfbLJ+buGsU2v29tOIqKkvLDyrZtt34w/dNfZQJTvvnNqaHbNU7v5hZcfzSjHnqOJpp+7+Ker40N1juy8/IhpPnzFnc2Xy1e3YZKvc/cPK1rnFhafOiJGtP7yrOu9NMxrrJzaoKvf9cGz9Q/kzkc17bfaWc+rm7P63bXjP79LeX8jOr2KKs86pi0dH735it/sVlLKzzqmtH9naf3d165TJNXVZ4ymlbUOdFu3vqz2zdqcpmzdW//6msfsjIuLtLTUXzd3ttmbj+V13529aVHj2oUrnbTvdGfqid9Uunht33Dw2520zTqxUP3fjWETxj9tKs3d7BZsfq/TPLi0+Kr/x77f+YHJa6dMfLM4Yz/+6Z+vE2uOUGZ9+S2Hk7rHOn+y4YdzbW2red1x2x82j39iwfVrhd9tmzBkZ//xNlckJx5Y+/e7SqbOziPyZp6vf2LaKU8+u+cRZ2fo7RrsfmHziJ9pmnFqMm28d/c7k0oodHyzVb5yyqOkxXTmRRez6y5XnWzc/tcu0Bx/fXBnPS8U937HuF4/s+tu7dfNTs/PXx853uCseqiO2+nsnNqKaWpoXNkZb9PZErOnt71i68IU3RHo7mjp6o21VZ/NQ94quvu3Tm5d1ru5o3b61Ndi7orWjp211d8vgqo7JczRiWde6tnJna0dPtHYOdLZOzBPR1N3Xs3DnbdeB7o62Vb0RrT0DnU0REeW+7s7Vq9bscpRY24o1q5Y27zTo8mBPV+eKrqlHwjd1dK5e1toUMbS6qWVNRDR3Dqxp3XlJQ51NLV0RTcu6e/Z4QsFgb1NrR7R1drcMtHdMboLG0jUDK5ojojywbtWKZT1TB9e8rHv1Tic6T7xvSzu7m4fWLFvdO2XGzjUdreWB3va2jh0LaFra3bVi+9Mnntu6urezbf7297ZpWdfqluG29hVTX+mq7q5dTtce6u9Z0b5ix79TU9uK1j0eNFXuW7Nq6eqp+6xiWWd3R+uOd2Nv/6Ydu2xkD/U2tXRs+6avfWFTRLR29na2xoqm1p5Y2t29cFX79he7bN1AR2NEDPV3rmif8gsVEc2ru1e3TXk5L+Y93ONL3raPrql71R5aYsL8tmVNK3oHoqlp51/R/t7OHb8J24Yx9T+Bbe/GC7+uPc62tLO7tdzdvqIn2rr2dWpTeWDNqhWrd/rla+3sXtE6vWfZAwfTfbeNf+6B+POzauPWyuceiD9/U5z20PjKdfn76saWn7PTJvt9d0zOuXCvPVH5+57xb+w6cTxmV75+xaw5e/5pRFT/4ayayUUOj/3OjeNjEZ+YXVxyxraNq5HKZ28af3aXZRbH/sfvzTptp5Hke1l+XFQqvufeyVe3Y/
Ajlc/eND4yL3pPnTHyQGXluvx9dcXl5xSjPHr1Fyvf2bbMuDdi3eZP/2bt4pN22tgb2cu7FHt7ISOVT940Pran4X3hrKi5rbLygT386M9Prz3tgcof3rT7ndEq7a01/26Pqz4Iiu95U2neblPfc07p//svW74Rsfj0Ge84Zg9PG9taeceppXhd9k+3jd+/Y3JpyXmleZHfv7by9rNK86J60Y1j3zm99J6z9rTxfFxEpXTeMbH17K0/+ElExFtaZyw+oxARY3dv/fy9EREXvam0+NTCQ6OVmHIS8eLTZ5x3TJx3XPaNv9i+mZG95azSvOGYaICLWuuWn1Pc3qjzjiku/FSp/4ejf3jTeOOpxfNOLZxWGe9+oBIRcUrpg2eVaiIax8e/01OJiDi79P4zSvFUfD5enjlxzJHZk5t2+h3K8z3cu3tzufK/bn/819722t1/9KN7nx0c2rz79DyvZtlOVTi3/tDcP3twTedARETzsoV1Ec3tTdEzELF6zbqlna0v9NxJPas6Jjc8m5piYCAi+ro6Wrq2bRpun23FTifjNjY2xMQh6MMREfObWiJ6IgbWrBuc2FDeZqh3VW9ERPPEYTLD3W3Nq/Z0vknP6qU9w90DOwJgoGNh227n1A50drR1Luse6FjYsjTWrIno6+ofbt0pYAbWTWwYtu77uJyejqmvp3l+Q0QM9q5u7Viz65x9Xe0tXat7+tuadtpMXdPRvmbXGTuaumJXA2vaW+p2eTN3naVrWduuTxxY1d4SPf3t21Y6tG51y7KdVzjQs3oP7+Tg6qbW3V5DdHW0d7V29ne2Tn0Ne/g33cWeamXKyTJr2qeeSdzU2BAx3N/V3N6525P6VrS39KzoXrPzRcAO2ns4PDC5Eb50xcJ97Ylp2nGxp0lDa9padn8b+7o6Wrqau/vWbF/a/r6uoXVNLct2mWnHyxza+zkzE5W7q96O9t7W1T2H5vBF4MWrKe36xcRGTc1umza7T9ndnNqISvb1K2fNqYxHRFSq/6dn6+ceyK//17Hl52RzaiNGs699snZOJd/+cXtNKdu+kXffrZMb3NffWtmRE6Woj4h5xb9pL20tR1Sqd99aWfmT/N//j+f/4Q9nTQmK0u99Mi6txP9t7+6j2jrvPIF/JV1hyQYHaII1Ygqs5Q2KTRI5Ax7RFA9kWqLaCWiKu1Y2zKmyY+9AO05lT44rJ5OEZFKP1pM6mvq00JputB18Vt41PjJNugrbGh3YDBrDxkpiHMkn8gIdGIVpgAK2ZHQl7R/3CuuNNxn5pf19Tk4OSFdXz/NcCf9+93nLksBlvXFkFK99c92O+zAPZEmYT6/G3t+/eVq+syKmyp++w3YDuq+I9+/IAjDn8f9VZ/gN63zFd5ns+Jcv0SYpKpIt6XqenQeymfBb/xB8B4JT3+HbITsblxkANwu8IDsbcwwAPPWVrIM7hFw0OzkSfON0yGoPPvOYOEO7FM+zABP+4VH/hwVCAMgW7vtK1o77hQ1a5qyNnUcEEFzoCZz9HDGdSHj/SiRPyagkwmceBBf6A1BUM0XA/GdsO1DBAgyuAWAxD2RNs8YzbFbMP4Lz0yE8JnjyftEWBYOPWQA1RXx0+tDDDK6wAHbIhUDkw4/iOkC4IiFb9LaWec4WE/RzP1as++5jIiBy4YP5I3YWEH5dk7X/MZFqx7rXfht49aPQ/FZh/iYRd/TjW/mso0jOP1KrEAL4dDSzuQQyNxU7OcSPREIpj/z+ac+VX88mPPjZZOCV/3op5fHJ55Hl3o7uiWmXjQu4mwxqAJCoDFoAgN3g9K30JEoAULfZXW6bze12mPg0pM1oSxGrNpnarG0tGrWhOj62RomaC4XsRvt07ONuJxcc6pvUEiDgsvK5hLbF7nRz7FYTHyu1WRcmydqN0VxCa7K73G6322W3RovW4gpArTVxb2Bxxs2sddlauJdplowrOeomk9XapldrmqqV8DmiuYSyxWrnymZr4yM8o7Yl1QRejcXmcLvdDqs5NtxrMltdbrfbaeMuC9DmXMHsX/5VbofFwFe0xRK9xR1wGaO5hJEvm8OsV4K/fDc5TdFcItpuN4thN7SkmoW86DUFUKJxu90OK9ekaqvD7Xa5LNr4PE3d1Ga1tujVhpZqCdyGaMytN3HVudk4TpPOkmKhgLVow2kf97HSKFd3I99tNUZzCb3VwX3O+IYFnDq9NVrcFdYrYGnicwmNkT9s4SO0JJ8pmktoW6zR69bGXzej9m6YPU4IWZWiBwUAstdixGIWAEYERgSJ+Ms1QgCfXI3eDBUhXyLKymayo/9lSRZubs53fgzIhd9/GMGJkCth0BSDLIbJzmayc7N2PL3+x9uBG5H//lF8hCdhsrOZLIbhapF9H8O9UYqUCQAjyAKSRx/NsQDw6IN8nJxdKj38sODRQkFW0pGLS10RrjCQCGV8fsG3w806foGJbZm4pyQCQMS1ar5C8vUCAJHRDK8s8xngnQh7J8Leq+yRi2EA+V9YiBUj1z4LDVwJvR/zHxA+ezUMCB6tuNlt8syDQgAXLiSlc8A8GxmYiDvDwAQGroTnAFkB96lgHrpfMP+b0CgLWSH/iCIXCIR/nqozB0DR1qzDiYsTCb9fyQBw9Vw/Yuc+MOGz9sArH4QAwZcrxLgaGmWBXGEtAKCmSAg2fPk3EeQKuTuQfALzQeoIfA1lKhAvuT/pzKl6JwDMXmf/038Z+MnPr/7Lv/nn2fBnk4H//qvRZ9/453/9fJHPWtJ5ipKHwWWAgx8urlkIndXRATMLWwQsyw2Y7ZbqEu5vnkxrdnABkdPYlhDD6NscBm21qlpntjQlRW25GjOXyphjwz6XnQtuNVq1DIDTwsVkSptJt7ADQIlKa+aTmOiCRz4H32OiNrlNWq5okhKV2c6nEGb7MJRqrpz2NkfMVXFbLACgNOiWnzOsbbMYtCpVtdFiVsvgsvLxoslm1an4Vyurm5xWLs6zWRNTNGWbw6xWygDIVJoWAx8Ma0x2g0YlAZCrbGoxLVsKjr7Nwb8KMnWTycidLHoze9jBJxZNVqeeL5tMY7SZNYi7zAGXiWtvtckVbTfkKpssTu6ENoMtoQ5LXlOejJ84npsrAxInfujtFkO1SqUzWppUsmEbP8ZHa7Ybtfx4O5lKY7NH29Zsi/8KrU0bBgJ8DqtM2lci4BtOxk+SgLutxcnXwm1U8ROCZBqjrY37LLtbbO4AgJXWy23nkhOlwWqODjdUVjc5LIn9FYmFdNn474nJZtKpotet2uK0ci1isLqWbQRCyF0lezPz/aeYLbc+3iKEuUBofo6dm2Pnxm6cPBMGULFVtPDs5ZH5Ue8N/j/PjcnoH9n5y6Fu4OuPiVU7hAB+1ncj9qwJcX9RsRD8PelV6OkLuC4ELnD/9YS8QPLd/S0PCgAc+dE1Y4f//3xwwzfNbn16/fFG6crTiaUrsoRfX7nhW2gZ7w3fZzeTpbm5MALs3Bw7Nxf89J+uvzEBiIRbMjxb7aHNwooHRY8/KKqtzHq7SgTA99lCSC3Y9EjWvq9kfSv6374KEYD3bewkkF8oepw/T
Pzo/QIEQj/7OMX5syTCb1WKv3XzJOIKgA/u7xc+DqBCVAT4RoMfTkeQLdwH4GFRETD3WexgqgWRC54QIHjyKYki7nFhfjaA8Nn+uEcH7MFRALnCxxH68PMIINjxIADmoVxgOmQdjQDCikosJDDvTSDTMjXYSbEpOVFZNHW5fiPU1uVt60rVwikknifVe621aSc3jl5t1C/c1pWoNE0wtQFOk2VYb1rRMjxac/yOZDKdpalN3wYM+3yImX2r0SbNXo1VUs0PtWpzuDT82A+3hRuXr+dLWG12O32+aeTGF2x6eJiP7/jvcjStMBrih2yVaKyWEklurkxZAkBj0rQZ7XCbHD49N8592mXnu2s0y2/DYGqKHcXuc/BTCFo08Tfpc1XVapidgMXpM6pjWkDTFNseuSVKwA2o9bGtWaLUAMnbICRJaFuJUs0PPOMMO/hq6eK7XDRNJthvzriYdju41xiNmvi/irnVerXJ6AQcw9OGmDFNy1zT5cttikvb3Hxvmd6QsMldicaih94COJ3D0N3shVijNpwe5lPYkqQls+xNGmNyZq00uW1aDLv54rbpE74p1YY22JoA2N3DxGPksgAAIABJREFUOqVyhfWSDDu4s7fE7yoiU+ua0JY8hmuBm7++Sn3CoKZclV4NI//hoykUhNxTsrNUj6zJiSLPHo+7D6MoEj6zlYmOO4n89am4GQS1GuF3HxMDuPBPYUDQ8AgDCL61IfCji6HJryF/kffI2iz4ItBzOfTtR1Yxf+DsQOrJFbGyH1v/9m+vv9IfGRgND4yGAVa8bt7UuE61aaVTn1dVkVjmd+I7W+SRX+n5CPMdR/AdR9yTh7Xi1XSYpEH4jE76TMzv89Oh9ndu3qHfulW8NfbwOUH7QAgIDnwmfnKT6KkKvD+Ax7VMPjB6lU0dm2aLGmriWnXTaHDgSuiTzyJbCwWPF2DDZiEQ+eSD0PtM5On7hY9WomKTEMCnV1N3FEz1BXo2bajJFb3ewDzbuZBqRuZZgI0kD4KZZ/kQ/r2RcMMmkeJBEdaJZMDoeOj9K4L5x4RbHhTjM+HiCcway1Q68aAsMcQXCNcm6E8+z5YVf0/S5rZbuR+cJrtdNsxF4BLJdDRqsdlcRsMKBvxoqxOHZctk3CPu+JHeiy5IFH1e1aSBwQ63ye7Tq2RAwMVHSSbtzegqVybL9Q077S738LDb7XLbncnBns/N3YtVqxOH30hU6punUlbruDjTandr9EoATgtXe8NKNkZIWIco2qfSolK2pH6BK/67k3rDjYTFrpZrtEUOk6m4wDpaOO6tkofyKFWxofa0j6+ESata5Ja+2zcdu+nBCou3uITXc2dONXdYptICtsRXrFEbypT8p8I17NPErz+V+rV8xw3/pCp5ySpZCVfcXO6Y1dUrOamRLZ0S+VzctXbrlIvNkXBPA5ROEPL7yVAtyrsR6fkg7LgByEU/aYz9wyb4foPoZp9ACNlFQgAIBH42ASDSecafhYj3GoBI52V2f8oJuwA+j/waqC5aXVD0N3uzKr6A+RvAOkHW58E9p1OP+CiqWW+pCc2NsZevhN/7KOy4FvnrnwbePryhaCXh3morEuNbGqZiQ3TuRAhZ990MzBSbhfs2Cz4bDZuvRAD83bc27Mj4+u2Ry97QHCPYUijKZ4A59ms/iutm+fRy8MNAdO4Eg/nP+RD/2EehJ7/KqB7JwsD81zcLgfD7tkVmHQRCP/8ovBBEZwHvXwG44L5QpHiEydskBBvqmcDA5dD8I8KizVk1EgEQHuhPfeGycvHGj4IVL4plpVmGghsx83MAIB9ImRJsALwfhed2iGSbRLoNQiDyyeUQrmKUZbZ8QaTbKgDw6ZWUs+jXWKbSiS9+ISmdEKQI+rOlzI6H8h/ZnLtZvuH++7I2bhDPXWcnZ+ev/uu1y/9vpv/y51OziUPWks/zxfxMD3by2VoWwhOLIdXw7Dazo8my6Co3C2yOadNKpm0vM7UZAKr1RthNgMXhNuqUcPBDmwwxCcu0zaA2Lne7PsDN74ZsmUAyV21Ww+CE02Tz6Y2ygItbBUpj1qzgz4JGGV+f5QPr+NhXvYabKy/XtjKVGnYnplM8JVnit1TiNtBbwTVdWnV8I8qUatidsPsCK9tgby3bEABgcfmM8f0tGqvLGa2zRDJt0WnMbn79gIUi2n3TmqRRUrFWWC833ze26p0dSzQaOJf+VtDkCUJ+X4mET35JkgV8uWY+//vBs+OhH34Q/PbCGkQibClNsdSsrz/MhXpnr4QBcEdb+4KLReG+KxEAsuzVpRN5m8TZC+8tCckQTpo7Eexsn79cyLz8tXXZhaIdhdhRg2f+17W/vIgPR9gixfLx3morEkuxdV3RIn+yKx4R79jKYAcev3D9G7+MHPnx9f/53fUr6fG4BeGfnb4xAADitw9nFWUzb2tDMbOcI7/+aP5HKScwDLCf1jBbNolqH8zaKsH8b0Lti7zB/FzY/MsUcyq8jtDcl0SyzYwsG/OfhQfAjYASb9kkqmAEmAtZlyr2/LGPRK8/Inxal+WLFnaeBRjh4wUYiB2wtFlUxABzES+ACfbTgFh1n+jJ+4RgQz1XAYQ++TyyZZPwyc0Awu8PLPWWayVT6cTG9QKREKHYHEwgYKT3sf7fcr8VFazf/9Tm2opNYib+G/UFAFBv/QKAcCTS++Fv2t+9enmY3/yOkd6XvEpstjSz6UTA7eBn3Co1enXCXctpp8XmBuA0OnzapNUuE2mqFw+kYsOiVLFsAomqWguTDWixu3RKCR/cm24G9y6zfiGX0DYZq1VKmUymVJYM2wzauCSDe2NfYLmwVG1ogq4NsLiGjaphbndwpT6tIJmvq9bsMKoSVjSSSCQAAvFFWXz7iNVbrm19Diewkq2U+XKbbQ51bqpKBAKS2M/DCq7pqvjcTgDQrHS7hDVrwxKlHrAAaLO4DXFTuuM3+sgtKYnp9Yk2kGbJfTmw4nopNdxKzaveLWKYz0O0NodxketGXROE3Nuy1qWOCrKW/3sRmedmYyPr27rQz/9b+Kx9/slHxNFZGSnnNAd//kEEEJx6cX30T0fovR8Hjn0evjCNHdy8aiamPIEbJ/sjAGoevJUALPW8C98EHBPsMzXrFmYmZEsEQCRbkqJBklppyYos92/iEmtnzbN8afN3rH/to2uvTkTe+PmN40+vW/QFa2DhSgWf6xP9qkZUtDXL8E+sORqRz6fcRAMAQu95w1tKhfu/IsgCLlxMkTAsJ/TpHFT3iwBcHuE6PUIffh7ZskmYD/hGl5kS/f47/gubN+zIFsrAdfWEesbCW4qFT+5Z9/Mf3Yh2UIi+/5Q4C5j8nBvFFB74LKwqFhYBc2NhLnd4fyT89CZRUTYwvfwYuTWRwX0nvvYo887FuE4iaV7RrP9jAF/f+YfffaY0MZFIIhQIqlUP/MmjD5x85+qPf+6NRCDNS1xNuO4xcab7JpwWfkBOW5s5efS7T53LLSraZnNpmpaZRWB3+RA/InzYzY0oVJesOiwv0RnVNpMTDru9BEnB/TA/lQJ6m8sYO45JEh9WlpRU
A3bA6XQHlHEbaAQsWpXJDWjNbpMGQK5Ko0GbHbA77AG3DQA0TSvYciNZNMz2TctyExt02OWaBnJld2yxzlwl4ATsruGkSxW70qskWolpJFVietjlnkZu7iqXPlolLj9JLicw7OBLmpmVM0o0BqXF7AbsWpPDbVx8b4c4fFkc7sQhUvANx23bsbJ6SfgT2tzDpvjvTrTfYhH89YVv8esmo3SCkHta5y/nsz/mI8Z5VtCglXKPt1uvV0j4idHz64T7n5bEpgeJEWah9PiD1w5cgdEaONPIzAEIhX94xp8dE8rPQ/DMY7DewBcfFMX83RA9XinAO5H2X97Y8RQA/NrLnjwTAjA3F3lvHEGgukJ8S3ORUw/AET+9ff7sRfzl8Wtff1i4NRu+sXD7KADBQ4UphockttIfLVmRPXz0v1gc/jPr9UclN5+dZwWP12RtSTrsy42M4jj74cfshR3rdmxaUV1XKzHl6w+8t3X9k5sET+9ZZ+aHPAke3yP9MRt7ZMR7cf6NgTCAs32hb5YK83MFYENnY+7rcwdvWPg1l3n7L0SIbddr4TdO3fAiPPB5WJUtBCKfRBeE5aY3AJFPLi+/YOsRa/Dcvpur6FpPzT9+SLI1l/nJYaHLG56C4FGFKJ8BAqFj0Zk81quR/cUAMDrCn3/go/DcDlE2MDqe8TWdOBmcxPxk0hwjZl2OSCz98sP3/82fP7RsLrFAIMB/fnrzN/7kiyKxlFmXk/DsV8oyvLF3wGXh9+xqSeyZAADIqrXcEpNuc+JucSlY9I64SQHDbfyySso09rxWaXQA4LYYjBYAUDfFTt/gz9eUMNvZHX3HaHwXjZ1MlvipUj5+C3C1eiGyV+qblADsJoPRBgAGvXrVhQYAmUYPAHC2tLnibtoHXG0anU6n02nMd2x1HZmKW2nI0ha/YqgjfhFSmYoPo1uMlviOh0CbWqPT63RajWuteyRiKXWalOXEsL2JX6prrYc3Ran0Jv7CW5r0bc5USUvAZTUZYsP6EiVXXJvBnDCWyG7m12KqVpVgxfUqUfPbcBharLHN7LKYlh7JFN2qwmlsi/+MBVx6/rpZVrzyMyHkLpAUBQR/G2m/ErFeiVivRM5eDS+MG/lwNObx+N2awW3jwMTdn9y6R/yUCFOjofdGwvkMALxzJWyNnsF6JXL2SrjvQhjANyvjCpH9CFMLeL2hSe42eQjc8e+MI3sDvvUV8ctfXXQqcvITKQ5lBPkL+05AACCLEQAo+tqGE9sFecDZj8Nv9IfbR5F3n+DHz69PeYskoZX++f8uXRFePpPY4FzxBmLaljvhwOeRrJiy8STrXq8WAHjF5l+sBW7RPAA2rvfm2E+DPgC5zPerhfMhAMjOFW65X1h08z+RYlO0kBPzA7+JAPB52YGk014DuC4sMIKiTbFnEBYVCrh2tl4JA8DczfWUvL8MTQJgIz1XkEII4AY1RQvww8uxY3tCB47fuPCbMBihqpSpKRXlM5j8jH3leOBm8fpDPgAIf+iIvnCC/XQOQMS7ggRmTWQwFn/wD1IkDNmyrU9VptF5hKe+9AfveQsSHhQI8O+T5nyvLZ/Dyi9b2VS9SMCvbNLDaQFgtzkSR5Mna6rWt9nM1cpc+FymJn5rM70lcbmbFZFVtyixsFFdkyEuuOeDvDazQ2OuVuYCmB52td3cUM7uHoayBMhVW7TQ2wC7QWs0tbVoZRJMux0tWj56boqZaq3SNuFmVL2i7SZSUunNsBgAmHVqtNn01UoJMOyyLWw40GZIL1FZAyWaJi1sNsBm0OSabUaNEpi2mw0GW/xxueo2LZpsgNuk1gdsJr1SJkFg2NZi4OugbVNncrZZiaZJDbsTsBk0MFlbtKpoG/LLTxkNK+w3WD2Jss3aotK1AHCa9Soz9EazRl2SK5EEpqfdTqvRHNNY/HAkZVOL2t7iBGwaZa7VblCVSBAYtpn51BTKFq4zYqX1ylVHF3pqUWuHLSZdiWTa0dbSsrCFyyKNn6vWatFmA9xmnR5tpqZqGRAYdpk1Or47r01PvROE3BO27tnwq/hHsreu/9XWVIcmHZmEeeY7zDOJD2Yd/G7WQe7H72Q1LPLK+hSPZX33xazvAgB+8uLqRvVsSSpq8iOQrDsePW32Vmlslbd+bf2Zr2F+LjjPAhJhtiTVtNVFWmnpigAAmIbvMAntkKJ4Mccnv5HsS+t/9aVFX3DLQn95LHkH5OCzR6PTkR3XkzdJTfDGT66/scRpr87XH10yjh248acDCQvsBr9xdNH50Md+ev1Y/CPdNn93XMjBHvkJC4hqHxYAEe/HySs1xVSQF/7rH6TYCTpzMphOMEI07BB3XoiroZBZ98G/oLZi1Wfr9USETOJ38j/8sViU2Wxi2t7GBzu6xfMEtdYEixGAxWxvqtYvF0M6m7TxsbLGbEzZ8bE8SbVBj6bodhNxwX1JNHrj3i5u8SKOb5pfeEhtsmltWhvgthmrbcbYY7Rme1zRStQGgN/PYiXbTUQl3sCWaWxmvdZgAWBu0iZ8t5VG6wqXVA0s+Wu6SlpsJpvWCMBi0CZver2gOtpucJq11QmVUFtbbimaDyzycwyl2dqi1rWA23I77rpBbbTqVzYQLb02lKh0Diuqdfw4QIspeYdzANCbrMboUmNKnbnFrm5xArDoEjcTV1utumhxV1ovtdFhdFebnIDbok++UItO0i5psZltWgMAp7kp8bopjYZbW8+XEELuuKzsTK/ESm6/UHeqHTDuEpkNxr9ekWJN5V9+su6sM3Eb7KWddc6e/SDFqf4s1fnXUmCYv9epXXKSgLKa3xPMbRtecnyLsslkalLHPWC2us1Jiz0tlpEkPb4w1EppSOzfUOosVpM++ps7+v5Wl9vO70nnWBhMojS5neaEfSegMVsdpsTh67nVRr78K9luIirFLGClxuiyWRKW/odSY7babfqYM8etMrpwOq4hEqfhJh6b6rUraVuJUuuyW+NHcqnNVgu3o0fM+ZQmt8ti1COepslsd1kSPzCr66mIa7GU9QCQq9K5HVZDwpgmpdZstVvWqg0XJ1Pp3G6n1dySYkyVUmMwWRwut1Eb+yHJ1VncVnPiymgaQ2JzrbRekOktbqv5Zk+WUmOwOhzcHo/q6Dwifh5O3PXVuF22loSsHsoms9Vl02d4byVCCCHkd40gElndvoyr9Z1/DFz6dYqJIJqHI9/+qnR9qm64WNcDoR/+b7/94xTTrR8rEf39f7xn/um3G5QGOzRmu1lTAgSmfdOQ5OamMWFi9aZ9vgC4d1v27QLTvumlD3aalHoLAIPT3bQ2Y3n494QkV3Zb2mM1AgHf9DQA2XKzc7lGhkQiS9oH4Xbgy7myi5yZEkSv4oo+1tzRyx+7VL18LpdPkitTJm5kMWxQauyAxmQ3a5ftPwv4fNMrLjUhhJA7w+/3P/VW6h0byFp556BQKpWm99oMz2MGDmqy/uJkigk39o8FTm9g747wru3SbGmKYsz52V9c9P+PC8Kp66mXbnr+yXuwKy+6KP/tXDxmNe+1XMF8TpMFWOl2E2vznneSRLJsIsG5w3V
YcTkzV4JVtcBKj16iXsNOnc4IQGtxmGIG5PkcVm4qdsmKlpC94+1GCCGE3PMynk6UPCD8o38n+r//L0UHxfR1/Ngh/GnfjUf+cP4heWTTfYJ1DG6w+Oy3kU/+VfDRryNsaNGxWJX/XpS8Ux7JGJ/d4gjkTtuMZm61p6bEQVCE3F4l/CblNn11idmqUZVI4HPZLQYTN9lJo0lzPhIhhBBCVifj6QQAY926b/zD9cWeZUP4YCTywQjit2VZZgjWC7szugFKBkwv/O9eNG01tTijv2jMLXdsVwhCeCWGNr29yQLAvLAiWFSLzUQfUUIIIeT2uB03+PM3CP6iei0HJjX9aVbu+kxvXrfGZJomrVarKblHx2fnLkyAbTJZzdQ1Qe4CJdVGl8Nq1McsIaBU641mu9OtU96jXzRCCCHk3pPxqdicSATf/LF/bHINptEU3y/86X6p4B7LJgghhBBCSDpoKvZtcCtTsW/T9AOBAP/w52twv1AkxFuNEsolCCGEEEIIuRvcvtnMeRsEx5+91YzirUbJfffaMCdCCCGEEEJ+V93WxZEeLRb9VW36kygMmnXb/nCZfSoIIYQQQsjvEoFA8IX1KdYIJWvlD3NZwS0M/rnda63+Wbn4uT9JJ6P4i+qspx+7HetQEUIIIYSQu4dAIPjzyvnC+4J3uiC/m/Kl7J4/CgqF6ScFt2kqdgJrf/Bkz/zKj2/606xv/LE4c+UhhBBCCCF3p1AodO3atWvXrgWDlFGsPbFYvGHDhg0bNohEaQ4CujPpBICey+wbthsrObKlYV1VKfVLEEIIIYT8ngqFQqFQKBwO36nA9XeYUChkGCbtXAJ3MJ0A8OvPw3/13wJzgUULcN96wYlvSgrzaPdrQgghhBBC7kZ3Mp0AMM/i79+9cX6ITX7qq2XMX+9eJ6ap14QQQgghhNyt7nA6wbniC794OjB1jS/J/TmCo3sligLqlCCEEEIIIeSudlekEwAiEVy4GvrR/57/9lezKjaLaKM6QgghhBBC7n53SzpBCCGEEEIIuefQgCJCCCGEEEJImiidIIQQQgghhKSJ0glCCCGEEEJImiidIIQQQgghhKSJ0glCCCGEEEJImiidIIQQQgghhKSJ0glCCCGEEEJImiidIIQQQgghhKSJ0glCCCGEEEJImiidIIQQQgghhKSJ0glCCCGEEEJImiidIIQQQgghhKSJ0glCCCGEEEJImiidIIQQQgghhKSJ0glCCCGEEEJImiidIIQQQgghhKSJ0glCCCGEEEJImiidIIQQQgghhKSJ0glCCCGEEEJImiidIIQQQgghhKSJ0glCCCGEEEJImiidIIQQQgghhKSJ0glCCCGEEEJImpixsbE7XQZCCCGEEELIPUkwMzNzp8tACCGEEEIIuSfRYCdCCCGEEEJImiidIIQQQgghhKSJ0glCCCGEEEJImiidIIQQQgghhKSJ0glCCCGEEEJImiidIIQQQgghhKSJ0glCCCGEEEJImiidIIQQQgghhKSJ0glCCCGEEEJImiidIIQQQgghhKSJ0glCCCGEEEJImiidIIQQQgghhKSJ0glCCCGEEEJImpjb8B7e3o53h7Bb36iQpnp6cuBkh6e2ubFYnOb5g5Mj40xx8cbggLXdI9/VuLM4xUETA61Wz65beJc1Fhyxtv9iluHbn5HmqapqtxdvXM0pxs60dhXs0u8sTtmsdzu/t9fy7tC23fqdCx+LtbhGkyNeplixqnYkhBBC7kHBfqvFNbsQyTE5BYrKmp1JocS9Fy0Ex/otnS5I42JU1g9Vg76y8C4J41Zlsv/Muf6h0ckg8guLymvqdyrzlzg6GtYCCI54J4oVhXddEJvkNvROeM91uUa9rnPnR1I+7R/3eEYvTbLpnj7oPnr0RLd3BmA9g16XZzL1u0x6vKOu9N9lzbGzHq/X4/GOjIyMjHiHXM5TJ14/5/Wv4gzBqUGv1zMZzFgRMyp4/lyXd9TbdW5g4SH/hMd7K58EIOg9c/RE66pakRBCCLk3sSOXPF4+kBgZ8Q4N9nWdeP24O+EfwXsxWghMjIyOjIyMeD0c78jIyMjoyMTs3RPGrdzMme8d7XQOTeYUKBQFk96hrvajbw+kDlaB2LAW3jNHT7R2z9yFQWySjPdOTA70jgMAxvvOT+5+Ljkdk+ZIACb9dEuMICCVSgFx45tvNi5ylFTZ+ObfNeLuyeoYBAHVvlcblVIAmLz4vaOnBnrd9YrtKz2DuMz05puZK2Bmzbj6+Y9Fd//kzkruY8Ew/JVMHwtIxLejy40QQgi58yQq/RuNSu7nsf633+ocGvTOKMtieijuwWhBrKh/4816AEG39Uj7YPm+V3XKe6ZrJVFwdGgKeVXNL9UrACDo/cGR1qHuvmBFfeqY9GZYC7BBMFIpIL7bgtgkmY68gn3dQ5CU76tn20+7ut3+6AciOHCm9bRzFGByGBbIAfznjr92Sd74kq4MAILuH7xsketf3KNEb0d7l2scACNX7W9uVEgx0nuytXs8B7NTAchzEABc7Ucmq/arxv9xUN54qF7pH+ltPdk1HgDAlFY17q8vC468+1qrt/HV56UDra09QUXehGc0ADDb6pqf21kMYKzf2to5GAAkckX+7Ji0fH/z7lSDptYUs9D++WUKCVwAEOz9wWuDxY2H6pUAxnpbTwzKXz1UL/V7O1rfdo0HAOQpqvY31xcEva0vv/2A/qU9iomTr530FxdPeTyzAJOn2n+IG1c2k9x0SD4PMNLbcbLLFQDA5FU1NteXLdUHtya8Pd0B5Oxtrv1Fa2f3eXflHmX0mdnu1uPt3vGFCwcAk5dOtnZ4plgAclXdvsadG1O20oHS462DACxHjFXNr9anHlpHCCGE/C65ecu6sHJnTufQxMhUMMd1tLWbycHUVED+eJ30n7sf0L+03dN68lLRiy/t2QjwQVdR40t7ino7LF2uUQCARFX3XONOBYDJS+9aOnrGWQCS8ob9DQWul1sv1b/4Enf7b6z35Ike6eFXGzMdLnB1Y9kgsPBveorYJjYsfPzrX/uo6/8UKHK8nnGA2Va3t2yi57RzHGBKa/T7dytTBkKZdnPcllhRX1fePSHnfvP2drzNB2DyuubmncU49712Lqz9zbat/zIUAAaPGCf271f840lv46vPKyZ6j7b2yBV5Hs8oAPm2uubndkqB4Fh/a2vnaDSKnZDWvNi8k00KhjNXwQwPdpoc7J+CvLJKWVElBwa7+ZEt3nMnTjtHS2uePbBvF8N/EaSlcunUYDc3Imqiv2eUZRTyjf2tR7tck+q9zQeb9xZPuFpf65gE/OPjbGB2qkBVvk31x19VAyhSN9RX/cH4SGBilgXGLCe6JovrDr54+NkquafP0uH2s/6pADvhB/zjE+zsqAcqfbNeXYShLsulIILec291DopLa5oP7CvDyPhsYGJqNrMtA4gBT1/nu++eO3fuzMnjrw0GmF1PlAHs+HhgoTtvanSMnZgF0Nva6ppU6A8ePvCsesrbd9zqBusfYwMBNgh2djwQGPV4Shv0+xrKMeXqOOcGkLLpUpxn7N
0TXa7iOv2Lhw9Uyaf6LMcvZbxHdOJ8/xTk5RWKynI5Zp09YzHPjY4we5sP7FUXe/osJ/snEfQeP2rxzBY3NB/YV1c+4ep6vbU3dSuJFbvUcoApb2isKqJcghBCyO8+Nhj9Nzs4c/Fc9ywgVxSw/vFZNjA1VaAu31ZaLOGiBXmpnJ1y9o0EAWBioG+cLVYUec8c63KNljfsO3iwuWab2NX19kU/MNl7zNIzUVC+7+CBum3iwc4T3UxpDqa6u90AgMnuX3hYeWnGbz2msmxYWCz5t1l21uuR7t23r6ZUOtR16vRQzt59enUR4+npdAdTBUIZJZZvy8N4T+sLxu+d7DjTe9Gdt1O3f0+FGJjoP9na5SpQNxw82FxVPNl14nsDM9LKXXxY+2e7/1QtB1DUoK//gyAfxLL+8Vl21uNBw77mBnXR+FDXObcfQe+JtzpHxaXPHjjQUMZ4x2dnx8b9qYLhzNUys+mE9/x5FkxlZSFQXLVNgtHugRkA/oH+cWbb3v27txcrdx5uVgPBIKCsrWUwfv7iDOA/3+1FUe126aVuL8uU7q4te0D6QFld/TawrvNe7ptTdPD5Rt1zjY9/qSwHyC8rL84XR+/2s0EgMOsdHfcravUHDxxuUEq5fphoN1HRgef3lCnK9uh2AcEgC0//AJhtB/fvVhQrdYf2y4HbMMaQAWY9rv7+gf7+Qc94AGC94xPcEwtJLMNwBwan/EBgyjs6IVXsPnzgwOF6ZcxBAKCoO6yrLFNW6nYVwe9nEUzZdNeTzxNkWQCzXu+4X1qrP3zg4KGyDPemBb29HhalVRUAKqu2Ad7uizcHEdYder5CUVyxp7kqD57u85Oe/nEwDYebKxXFyp0pFwGfAAAJ7ElEQVS6AzVyeHvcwZStJC7bLgeYsvKy/Lu4Q5AQQghZE4wY7NCpFzhHXj/V580prWuIjguqO/j8Ht1zux9ez/0qVT5RCvSdvwTAfb4HUNRu3/hAha7h2QO6SmV+/gOKggJALAbc3T0sipoP6ZSFxTufa64pr1LkKHZtk8wO9kwAGOkbYlFTq7oDFU4d28SFhQ+t5+reXKFU7n5iG4CaffsrlGV7dpcBfj+bKqDKrPw9L/3dvoYqRQHrcTm7TrW//sILHQMTgP/8ux4w2xp2q6TSB2rqdksQ6O7xFmyPhrUFxdvlOWAKypXF4rggFnWHn69UKir36IoAPxv0e/rHwTx7eP/24uJK3fM1eQAgTh0MZ0pGBztNnB+cAtB59IXO6EPd3d6KPXKGARvgHxEXlfHDfPLLK/M6+3pcwWLxYADltRWAlwFYT+frLy+cAKzfDwQhKeDT4iAbBMD6Y9q5uFFf09rR02kZ6gSYvNLG5v2Km6UKQlLAd2zl5UsAAAwjjuktLCqSYGIt2yE1f+zcCfjf/cFrPZ0Wd/mhVMeK6w/oJ0509HVa+joBJq9Gf2B3XJWQU5DD/VyQL+GSoVRNx+5JPo+yXl8z0dHTZxnqA5BXWtO8f3dGbzkMdrsAeE4fe+E0/8jQL/qC2+sBAEwOXw+Uleb0DQZYhgHYhc9pgbIIPS7/IrORWO5PCou7eXwhIYQQsibYIFBUtW93KYJgWeTIi4vzpeDHCOXkJw7iKaityjvR1zcWLOwbnM1R7y0AkINLHa2dpxb+Wc0BIJUyYKKREgp26+oBoLb21FBX74hfeb4fjKrqDq0xtHxYCAASru7+IAvkFPM/A4A4ZUClzOR6kDMT3n+DsrJeWVkPYHLsYmf7Kdfpk+WqQ4wYCAy99fLLC8dK/X4EsRDW8j8mnlGSz0dKOQUM/AAjlQBswM+PCFOW5vQMImUwnLnB7BlMJ/zu8x4WpXX76hU5fhZS6Wx3a7vL2T2259kAC0YSfevxSwE+/BPX7NrWd6q7vQNAaa1SCj/8gLzu4KGdhQDgHxsYGC0o2ui/lNS6jDTmoZkJv3y/6c2CmYlLQ/2dnX0d5y6+WhlzcNKVYdnYCzY+HrgtC+gCMe0vLSsu6BmdXCxQHhsNVjX/7f58/4h7qPt0Z4/lF0/8bfwYuOjIQv4EbOqmS3GeV6uC8if+9s39/omRof7uzr6ecxerntuesa9W8NJ5b0BSWtdcr2D9LCNlvO+2dw3190/UVwCxH0i3ZxZSCcMGAGbh2sxO/Nuy78DQVGxCCCG/HyT5pUpFyvvrweSba8U1tZK+06fbO8bBNDyhBPzW4xYPSp892FBWmA+39Ui7BwxmZ/1gZ2f56NTf+7ZlvKxOV1FRLuka7OzwjLNFdTV3ZkH2RWKbFGHhzbontkOKQMiky9x9+7G+jtae8b2vvFmxEQDyC7frG4aOWNx+FmwQkNe9eWgnAMDv7nf55UXAKMCFtTwmRdzK4+eW+AMAsxD8jI1yiwenCIZNz614vZ9VyuBgp4GeQaBo105lQWFhcXFhQYFyV20p4O25iPLyPHbo9JmBMf+Mt6PDuXCdN26vVSDgHQ3Ia2rzAUgVFXkY77L0uif8MyNnWt863XUusd+ABYAJV//IZHRMWHC04/Sp4yd7J8R5iiI5A25a/FJKqyqBoeMn3/V6L3Ucbx29LXe3xcDIYP/FgYGBgf7eMydP9I1DUqaQSuX5CLi6B8YmJ9y9HYNcZuPv6zjVfszinkCBvCiHWUEamLrpUpzHP9Jz6lRre69bnCcvknO3JTJY+4n+vimgdtfOwoLC4uLiwoLCnfW1DNju7osAgMCp42fGZma8vR09U1DU7MxXqHLAdrZ2eCf9k97eE51e5FWUpW4lgGWBgKv/0iStFUsIIeT3wmpWD91YUVuEce845FXlCzeqpTmFBfnsxCVLxyAw6x0PllZWAJ6THf0zQf+ldy1dQ14mvwCQPrG7lB33TCHviYrCDFRkBVYSFi5j9QHVrSmsrJQAp1//3rn+iyMjIxd7zxy1uCBRKqTSClUexrve7nXP+CcHzrS2d3a6JtnYsJZlAXa8/9LI0kGNtKxSDvb0sZMXvSP91uNd4wDY4OqD4VuRsVYMuvu8yFE9EfuJy698oqjT4+pzNT5/oG7qra7TbzljR7cAQGGNOs/rZGuquFWVpPWHD8wea+1qP9YFABL1s89XbISbEcfc1ldUFDF9g12t/nXlXBgsLttfp2rt6jr2chcAJq/8gE7JeC9xOYI41Zq04uLdr+jZY5aeVg9yikrz4MkvyvhEfwaYcnWdcvG/5hSVN+7XbQQq99b1vdV1+q2jAORyyfgkAGlDc523tav92OsAAHnD4QYpvNHPBSONPy2ARZpOqko6T34B26Ca6Oxqf7kLAOSqvZkcXefv7fEip1wV97Eor5J39rj6xivlADDrfOt1JwB5+d7mnQVAweHmuuOtXa1HXQCYPNWBw/Xi1K0EaZFKDtdgl8Wf80oGO1gIIYSQuxgjZmLui8YFCRW16q52p6qmSgwA0ifqVa7Tg8eODAKQlyokHu/IyIR4Z/2BuskTXZ2vuzoBKKr0exRiAAXlVfJOz4Siqux2LXfCiBkAkpuR2/JhYWzdU/2cMqDKpPzKlw4EW0929XWe6uNKkqdqPtS4Edi451BDoLWzq32oC
wAU6mef254PSKNhbc6LFSrG5eyytK7/T+VcReKvLBh+rq/i0Cv61mOWU60e5BQp8jCSr9iYKhjOXC0FMzMzmTv7Mvz+mSA2blz+Ogb9M/6geKkjg8kdCsGZGb9YunElt9rHet8+7VE0798pBRC89DdHLAr9K8+V3cl41D8zk9w0/pmZ4AqrFJWy6VKcJ+if8QelGzfeDZMO/DMzSKqlf2YGYmlCz0nKVkrxWSCEEEJIasGZGX+qACA4M+OPCyAmeo3HulT7/vaO7wKxfFi4nDQCqlsV9PtZgEkaApIyAFtNKBMc6z1h8e469JxSCsBv/ZuXXQq96TluSPwqguFbcUfTibvHyLkXTvQhT1GukHgGh2aR9+wrL9HdbUIIIYQQwP9u67Ee7ywY1SumRoqP7jLe4y+0jiNPVa6Y9AyOzqJc/6Iu83uIxaJ0gjfp7j/XMzDlZ3PkpU/s2q2g7wohhBBCCAAEB6ztfVM5tXsbb2+YSlZm0n3mXM/olJ/JkVc+savitkexlE4QQgghhBBC0pThXbEJIYQQQgghv7sonSCEEEIIIYSkidIJQgghhBBCSJoonSCEEEIIIYSkidIJQgghhBBCSJoonSCEEEIIIYSkidIJQgghhBBCSJoonSCEEEIIIYSkidIJQgghhBBCSJoonSCEEEIIIYSkidIJQgghhBBCSJoonSCEEEIIIYSk6f8Dprov8V1LRpQAAAAASUVORK5CYII=",
    578984356)

} 
Example 159
Source File: ExtractFromFile.scala    From eidos   with Apache License 2.0 5 votes vote down vote up
package org.clulab.wm.eidos.apps

import java.io.PrintWriter

import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import org.clulab.odin.Mention
import org.clulab.wm.eidos.EidosSystem
import org.clulab.wm.eidos.utils.DisplayUtils.printMention
import org.clulab.wm.eidos.utils.Closer.AutoCloser
import org.clulab.wm.eidos.utils.FileUtils

object ExtractFromFile extends App {
  val inputDir = args(0)
  val outputFile = args(1)
  val files = FileUtils.findFiles(inputDir, "txt")
  println(s"There are ${files.length} files...")

  FileUtils.printWriterFromFile(outputFile).autoClose { pw =>
    val ieSystem = new EidosSystem()

    for (filename <- files) {
      val text = FileUtils.getTextFromFile(filename)
      println(s"There are ${text.split('\n').length} lines in the file...")
      val annotatedDoc = ieSystem.extractFromText(text)
      val doc = annotatedDoc.document
      pw.println(s"Filename: ${filename.getName}")

      // keep the EidosMentions that are relevant to the CAG
      val cagEdgeMentions = annotatedDoc.odinMentions.filter(m => EidosSystem.CAG_EDGES.contains(m.label))
      val cagEdgeArguments = cagEdgeMentions.flatMap(mention => mention.arguments.values.flatten.toSeq)
      val eidosMentions = annotatedDoc.eidosMentions.filter(em => ieSystem.components.stopwordManager.isCAGRelevant(em.odinMention, cagEdgeMentions, cagEdgeArguments))

      val mentionsBySentence = eidosMentions.groupBy(_.odinMention.sentence).toSeq.sortBy(_._1)
      for ((sentence, sentenceMentions) <- mentionsBySentence) {
        pw.println(s"\nSENTENCE ${sentence}: ${doc.sentences(sentence).getSentenceText}")
        println(s"Number of Eidos mentions found: ${sentenceMentions.length}")
        sentenceMentions.foreach(
          m => {
            pw.println(s"CanonicalName: ${m.canonicalName}")
            pw.println(s"OntologyGrounding: \n\t${m.grounding.values.mkString("\n\t")}")
            printMention(m.odinMention, pw)

          }
        )
        pw.println(s"${"=" * 100}")
      }
    }
  }

  def prettyPrint(mentions:Seq[Mention], pw: PrintWriter): Unit = {
    val events = mentions.filter(_ matches "Event")
    val params = new mutable.HashMap[String, ListBuffer[(String, String, String)]]()
    for(e <- events) {
      val f = formal(e)
      if(f.isDefined) {
        val just = e.text
        val sent = e.sentenceObj.getSentenceText
        val quantifier = e.arguments.get("quantifier") match {
          case Some(quantifierMentions) => quantifierMentions.map(_.text).head
          case None => "None"
        }
        params.getOrElseUpdate(f.get, new ListBuffer[(String, String, String)]) += new Tuple3(just, sent, quantifier)
      }
    }

    if(params.nonEmpty) {
      println("Eidos Parameters:")
      for (k <- params.keySet) {
        val evidence = params.get(k).get
        pw.println(s"$k: ${evidence.size} instances:")
        for (e <- evidence) {
          pw.println(s"\tJustification: [${e._1}]")
          pw.println(s"""\tSentence: "${e._2}"""")
          pw.println(s"\tQuantifier: ${e._3}")
        }
        pw.println()
      }
    }
  }

  def formal(e: Mention): Option[String] = {
    val t =
        if (e matches "Decrease") Some("DECREASE")
        else if (e matches "Increase") Some("INCREASE")
        else None

    t.map(t => s"$t of ${e.arguments.get("theme").get.head.label}")
  }
} 
Example 160
Source File: ClientArguments.scala    From SparkCore   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.deploy

import java.net.{URI, URISyntaxException}

import scala.collection.mutable.ListBuffer

import org.apache.log4j.Level

import org.apache.spark.util.{IntParam, MemoryParam}


  def printUsageAndExit(exitCode: Int) {
    // TODO: It wouldn't be too hard to allow users to submit their app and dependency jars
    //       separately similar to in the YARN client.
    val usage =
     s"""
      |Usage: DriverClient [options] launch <active-master> <jar-url> <main-class> [driver options]
      |Usage: DriverClient kill <active-master> <driver-id>
      |
      |Options:
      |   -c CORES, --cores CORES        Number of cores to request (default: $DEFAULT_CORES)
      |   -m MEMORY, --memory MEMORY     Megabytes of memory to request (default: $DEFAULT_MEMORY)
      |   -s, --supervise                Whether to restart the driver on failure
      |                                  (default: $DEFAULT_SUPERVISE)
      |   -v, --verbose                  Print more debugging output
     """.stripMargin
    System.err.println(usage)
    System.exit(exitCode)
  }
}

object ClientArguments {
  private[spark] val DEFAULT_CORES = 1
  private[spark] val DEFAULT_MEMORY = 512 // MB
  private[spark] val DEFAULT_SUPERVISE = false

  def isValidJarUrl(s: String): Boolean = {
    try {
      val uri = new URI(s)
      uri.getScheme != null && uri.getPath != null && uri.getPath.endsWith(".jar")
    } catch {
      case _: URISyntaxException => false
    }
  }
} 
Example 161
Source File: TaskInfo.scala    From SparkCore   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.scheduler

import scala.collection.mutable.ListBuffer

import org.apache.spark.annotation.DeveloperApi


  var finishTime: Long = 0

  var failed = false

  private[spark] def markGettingResult(time: Long = System.currentTimeMillis) {
    gettingResultTime = time
  }

  private[spark] def markSuccessful(time: Long = System.currentTimeMillis) {
    finishTime = time
  }

  private[spark] def markFailed(time: Long = System.currentTimeMillis) {
    finishTime = time
    failed = true
  }

  def gettingResult: Boolean = gettingResultTime != 0

  def finished: Boolean = finishTime != 0

  def successful: Boolean = finished && !failed

  def running: Boolean = !finished

  def status: String = {
    if (running) {
      if (gettingResult) {
        "GET RESULT"
      } else {
        "RUNNING"
      }
    } else if (failed) {
      "FAILED"
    } else if (successful) {
      "SUCCESS"
    } else {
      "UNKNOWN"
    }
  }

  def id: String = s"$index.$attempt"

  def duration: Long = {
    if (!finished) {
      throw new UnsupportedOperationException("duration() called on unfinished task")
    } else {
      finishTime - launchTime
    }
  }

  private[spark] def timeRunning(currentTime: Long): Long = currentTime - launchTime
} 
Example 162
Source File: ValidatorStack.scala    From sangria   with Apache License 2.0 5 votes vote down vote up
package sangria.validation

import scala.collection.mutable.ListBuffer

class ValidatorStack[T] {
  private val stack: ListBuffer[T] = ListBuffer.empty

  def push(element: T): Unit = stack.prepend(element)
  def pop(): T = stack.remove(0)
  def head = stack.head
  def headOption = stack.headOption
  def head(toDrop: Int) = stack.drop(toDrop).head
  def headOption(toDrop: Int) = stack.drop(toDrop).headOption
  def nonEmpty = stack.nonEmpty
  def toSeq: Seq[T] = stack.toSeq
}

object ValidatorStack {
  def empty[T] = new ValidatorStack[T]
} 
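
A minimal usage sketch for the ValidatorStack above. The element values are made up; only methods defined in the class are used, and since push prepends, the head is always the most recently pushed element.

val stack = ValidatorStack.empty[String]
stack.push("outer")
stack.push("inner")
stack.head        // "inner" -- most recently pushed element
stack.head(1)     // "outer" -- skip one element, then take the head
stack.pop()       // removes and returns "inner"
stack.toSeq       // Seq("outer")
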
Example 163
Source File: NoUnusedVariables.scala    From sangria   with Apache License 2.0 5 votes vote down vote up
package sangria.validation.rules

import sangria.ast
import sangria.ast.AstVisitorCommand
import sangria.validation._

import scala.collection.mutable.ListBuffer


class NoUnusedVariables extends ValidationRule {
  override def visitor(ctx: ValidationContext) = new AstValidatingVisitor {
    val variableDefs = ListBuffer[ast.VariableDefinition]()

    override val onEnter: ValidationVisit = {
      case _: ast.OperationDefinition =>
        variableDefs.clear()
        AstVisitorCommand.RightContinue

      case varDef: ast.VariableDefinition =>
        variableDefs += varDef
        AstVisitorCommand.RightContinue
    }

    override def onLeave: ValidationVisit = {
      case operation: ast.OperationDefinition =>
        val usages = ctx.documentAnalyzer.getRecursiveVariableUsages(operation)
        val variableNameUsed = usages.map(_.node.name).toSet

        val errors = variableDefs.filterNot(vd => variableNameUsed.contains(vd.name)).toVector.map(vd =>
          UnusedVariableViolation(vd.name, operation.name, ctx.sourceMapper, vd.location.toList))

        if (errors.nonEmpty) Left(errors.distinct) else AstVisitorCommand.RightContinue
    }
  }
} 
Example 164
Source File: WorkspaceLoader.scala    From codepropertygraph   with Apache License 2.0 5 votes vote down vote up
package io.shiftleft.console.workspacehandling

import java.nio.file.Path

import better.files.Dsl.mkdirs
import better.files.File
import org.json4s.DefaultFormats
import org.json4s.native.Serialization.{read => jsonRead}

import scala.collection.mutable.ListBuffer
import scala.util.{Failure, Success, Try}


  def load(path: String): Workspace[ProjectType] = {
    val dirFile = File(path)
    val dirPath = dirFile.path.toAbsolutePath

    if (!dirFile.exists) {
      println(s"creating workspace directory: ${dirFile.path.toString}")
      mkdirs(dirFile)
    }
    new Workspace(ListBuffer.from(loadProjectsFromFs(dirPath)))
  }

  private def loadProjectsFromFs(cpgsPath: Path): LazyList[ProjectType] = {
    cpgsPath.toFile.listFiles
      .filter(_.isDirectory)
      .to(LazyList)
      .flatMap(f => loadProject(f.toPath))
  }

  def loadProject(path: Path): Option[ProjectType] = {
    Try {
      val projectFile = readProjectFile(path)
      createProject(projectFile, path)
    } match {
      case Success(v) => Some(v)
      case Failure(e) =>
        System.err.println(s"Error loading project at $path - skipping: ")
        System.err.println(e)
        None
    }
  }

  def createProject(projectFile: ProjectFile, path: Path): ProjectType

  private val PROJECTFILE_NAME = "project.json"
  implicit val formats: DefaultFormats.type = DefaultFormats

  private def readProjectFile(projectDirName: Path): ProjectFile = {
    // TODO see `writeProjectFile`
    val content = File(projectDirName.resolve(PROJECTFILE_NAME)).contentAsString
    val map = jsonRead[Map[String, String]](content)
    ProjectFile(map("inputPath"), map("name"))
  }

} 
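
The enclosing declaration of WorkspaceLoader is elided in the snippet above; assuming it is an abstract type parameterized by the project type, a concrete loader only needs to supply createProject. The sketch below is an assumption, with the Project constructor arguments taken from the usage in Example 166.

object DefaultLoader extends WorkspaceLoader[Project] {
  // build a Project from the parsed project.json and its directory; no CPG is loaded yet (assumed constructor, see Example 166)
  override def createProject(projectFile: ProjectFile, path: Path): Project =
    Project(projectFile, path, None)
}

// hypothetical call: loads every project found under the given workspace directory
val workspace = DefaultLoader.load("/tmp/workspace")
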
Example 165
Source File: Workspace.scala    From codepropertygraph   with Apache License 2.0 5 votes vote down vote up
package io.shiftleft.console.workspacehandling

import overflowdb.traversal.help.Table

import scala.collection.mutable.ListBuffer


  override def toString: String = {
    if (projects.isEmpty) {
      System.err.println("The workpace is empty. Use `importCode` or `importCpg` to populate it")
      "empty"
    } else {
      """
        |Overview of all projects present in your workspace. You can use `open` and `close`
        |to load and unload projects respectively. `cpgs` allows you to query all projects
        |at once. `cpg` points to the Code Property Graph of the *selected* project, which is
        |always the last project in the list. You can select a project by calling `open(name)`
        |on it, even if it is already open.
        |
        | Type `run` to add additional overlays to code property graphs
        |""".stripMargin
      "\n" + Table(
        columnNames = List("name", "overlays", "inputPath", "open"),
        rows = projects.map(_.toTableRow).toList
      ).render
    }

  }

} 
Example 166
Source File: WorkspaceTests.scala    From codepropertygraph   with Apache License 2.0 5 votes vote down vote up
package io.shiftleft.console.workspacehandling

import better.files.Dsl._
import better.files.File
import io.shiftleft.semanticcpg.testing.MockCpg
import org.scalatest.{Matchers, WordSpec}

import scala.collection.mutable.ListBuffer

class WorkspaceTests extends WordSpec with Matchers {

  "toString" should {

    "return an \"empty\" when no projects are present" in {
      val workspace = new Workspace(ListBuffer())
      workspace.toString shouldBe "empty"
    }

    "return a valid row for a project" in {
      File.usingTemporaryDirectory("project") { project =>
        mkdir(project / "overlays")
        val inputPath = "/input/path"
        val projectFile = ProjectFile(inputPath, project.name)
        val cpg = MockCpg().withMetaData("C", List("foo", "bar"), List()).cpg
        val projects = ListBuffer(
          Project(projectFile, project.path, Some(cpg))
        )
        val workspace = new Workspace(projects)
        val output = workspace.toString
        val lines = output.split("\n")
        lines.length shouldBe 5
        lines(4).contains(project.name) shouldBe true
        lines(4).contains(inputPath) shouldBe true
        lines(4).contains("foo,bar") shouldBe true
      }

    }

  }

}

object WorkspaceTests {

  def createFakeProject(workspaceFile: File, projectName: String): File = {
    mkdir(workspaceFile / projectName)
    mkdir(workspaceFile / projectName / "overlays")
    (workspaceFile / projectName / "project.json")
      .write(s"""{"inputPath":"foo","name":"$projectName"}""")
    touch(workspaceFile / projectName / "cpg.bin")
  }

} 
Example 167
Source File: MergeBloomIndexEventListener.scala    From carbondata   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.events

import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer

import org.apache.spark.internal.Logging
import org.apache.spark.sql.SparkSession

import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.index.IndexStoreManager
import org.apache.carbondata.core.metadata.index.IndexType
import org.apache.carbondata.events._
import org.apache.carbondata.index.CarbonMergeBloomIndexFilesRDD

class MergeBloomIndexEventListener extends OperationEventListener with Logging {
  val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)

  override def onEvent(event: Event, operationContext: OperationContext): Unit = {
    event match {
      case indexPostEvent: BuildIndexPostExecutionEvent =>
        LOGGER.info("Load post status event-listener called for merge bloom index")
        val carbonTableIdentifier = indexPostEvent.identifier
        val carbonTable = IndexStoreManager.getInstance().getCarbonTable(carbonTableIdentifier)
        val tableIndexes = IndexStoreManager.getInstance().getAllCGAndFGIndexes(carbonTable)
        val sparkSession = SparkSession.getActiveSession.get

        // keep only the bloom filter index schemas
        var index = tableIndexes.asScala.filter(
          _.getIndexSchema.getProviderName.equalsIgnoreCase(
            IndexType.BLOOMFILTER.getIndexProviderName))

        if (indexPostEvent.isFromRebuild) {
          if (null != indexPostEvent.indexName) {
            // for rebuild process
            index = index.filter(
              _.getIndexSchema.getIndexName.equalsIgnoreCase(indexPostEvent.indexName))
          }
        } else {
          // for load process, skip lazy indexSchema
          index = index.filter(!_.getIndexSchema.isLazy)
        }

        val segmentIds = indexPostEvent.segmentIdList
        if (index.size > 0 && segmentIds.size > 0) {
          // we extract bloom indexSchema name and index columns here
          // because TableIndex is not serializable
          val bloomDMnames = ListBuffer.empty[String]
          val bloomIndexColumns = ListBuffer.empty[Seq[String]]
          index.foreach( dm => {
            bloomDMnames += dm.getIndexSchema.getIndexName
            bloomIndexColumns += dm.getIndexSchema.getIndexColumns.map(_.trim.toLowerCase)
          })
          new CarbonMergeBloomIndexFilesRDD(sparkSession, carbonTable,
            segmentIds, bloomDMnames, bloomIndexColumns).collect()
        }
    }
  }

} 
Example 168
Source File: package.scala    From scredis   with Apache License 2.0 5 votes vote down vote up
package scredis.protocol

import scala.collection.mutable.ListBuffer
import scala.language.higherKinds

package object requests {
  
  private[requests] def generateScanLikeArgs(
    keyOpt: Option[String],
    cursor: Long,
    matchOpt: Option[String],
    countOpt: Option[Int]
  ): List[Any] = {
    val args = ListBuffer[Any]()
    keyOpt.foreach {
      args += _
    }
    args += cursor
    countOpt.foreach {
      args += "COUNT" += _
    }
    matchOpt.foreach {
      args += "MATCH" += _
    }
    args.toList
  }
  
  private[requests] def unpair[A, B, CC[X] <: Iterable[X]](pairs: CC[(A, B)]): List[Any] = {
    val unpaired = ListBuffer[Any]()
    pairs.foreach {
      case (a, b) => unpaired += a += b
    }
    unpaired.toList
  }
  
} 
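
A short sketch of what the two helpers above produce. Because both are private[requests], the calls below only compile from inside the scredis.protocol.requests package; the key, cursor, and pattern values are made up.

// unpair flattens key/value pairs into the flat argument list Redis expects
unpair(List("a" -> 1, "b" -> 2))
// => List(a, 1, b, 2)

// generateScanLikeArgs builds SCAN-style arguments: [key] cursor [COUNT n] [MATCH pattern]
generateScanLikeArgs(Some("myset"), cursor = 0L, matchOpt = Some("user:*"), countOpt = Some(100))
// => List(myset, 0, COUNT, 100, MATCH, user:*)
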
Example 169
Source File: TransactionBuilder.scala    From scredis   with Apache License 2.0 5 votes vote down vote up
package scredis

import scredis.io.{ Connection, NonBlockingConnection }
import scredis.commands._
import scredis.protocol.Request
import scredis.exceptions.RedisTransactionBuilderException

import scala.collection.mutable.ListBuffer
import scala.concurrent.{ ExecutionContext, Future }

final class TransactionBuilder private[scredis] ()(
  implicit val dispatcher: ExecutionContext
) extends Connection with NonBlockingConnection
  with ConnectionCommands
  with HashCommands
  with HyperLogLogCommands
  with KeyCommands
  with ListCommands
  with PubSubCommands
  with ScriptingCommands
  with ServerCommands
  with SetCommands
  with SortedSetCommands
  with StringCommands {
  
  private val requests = ListBuffer[Request[_]]()
  @volatile private var isClosed = false
  
  override protected[scredis] def send[A](request: Request[A]): Future[A] = {
    if (isClosed) {
      throw RedisTransactionBuilderException(
        s"Cannot re-use a closed transaction builder; cannot queue '$request'"
      )
    }
    requests += request
    request.future
  }
  
  private[scredis] def result(): Transaction = {
    isClosed = true
    Transaction(requests.toList)
  }
  
} 
Example 170
Source File: ComposeFreeMonads.scala    From Freasy-Monad   with MIT License 5 votes vote down vote up
package examples.scalaz

import scalaz._
import scalaz.Id.Id
import freasymonad.scalaz.free
import scala.collection.mutable.ListBuffer
import scala.io.StdIn
import scala.language.{higherKinds, reflectiveCalls}

// example based off https://github.com/typelevel/cats/blob/master/docs/src/main/tut/datatypes/freemonad.md#composing-free-monads-adts
object ComposeFreeMonads extends App {

  @free trait Interact {
    type InteractF[A] = Free[Adt, A]
    sealed trait Adt[A]
    def ask(prompt: String): InteractF[String]
    def tell(msg: String): InteractF[Unit]
  }

  @free trait DataSource {
    type DataSourceF[A] = Free[Adt, A]
    sealed trait Adt[A]
    def addCat(a: String): DataSourceF[Unit]
    def getAllCats: DataSourceF[List[String]]
    def addAndGetAllCats(a: String): DataSourceF[List[String]] =
     for {
       _ <- addCat(a)
       c <- getAllCats
     } yield c
  }

  type ScalazApp[A] = Coproduct[DataSource.Adt, Interact.Adt, A]

  // program1 and program2 are the same.
  // This library lets you choose which style you like.

  def program1(implicit I: Interact.Injects[ScalazApp], D : DataSource.Injects[ScalazApp]): Free[ScalazApp, Unit] = {
    import I._, D._
    for {
      cat  <- ask("What's the kitty's name?")
      cats <- addAndGetAllCats(cat)
      _    <- tell(cats.toString)
    } yield ()
  }

  val program2: Free[ScalazApp, Unit] = {
    import Interact.injectOps._, DataSource.injectOps._
    for {
      cat  <- ask[ScalazApp]("What's the kitty's name?")
      cats <- addAndGetAllCats[ScalazApp](cat)
      _    <- tell[ScalazApp](cats.toString)
    } yield ()
  }

  val consoleCats = new Interact.Interp[Id] {
    def ask(prompt: String): Id[String] = {
      println(prompt)
      StdIn.readLine()
    }
    def tell(msg: String): Id[Unit] = println(msg)
  }

  val inMemoryDatasource = new DataSource.Interp[Id] {
    private[this] val memDataSet = new ListBuffer[String]
    def addCat(a: String): Id[Unit] = memDataSet.append(a)
    def getAllCats: Id[List[String]] = memDataSet.toList
  }

  // scalaz lacks a convenient `or` atm
  // https://github.com/scalaz/scalaz/issues/1222
  implicit class NaturalTransformationOps[F[_], G[_]](val self: F ~> G) extends AnyVal {
    def or[H[_]](g: H ~> G): ({type λ[α] = Coproduct[F, H, α]})#λ ~> G =
      new (({type λ[α] = Coproduct[F, H, α]})#λ ~> G) {
        def apply[A](fa: Coproduct[F, H, A]): G[A] = fa.run.fold(self.apply, g.apply)
      }
  }

  val interpreter = inMemoryDatasource or consoleCats

  program1.foldMap(interpreter)
  program2.foldMap(interpreter)
} 
Example 171
Source File: ComposeFreeMonads.scala    From Freasy-Monad   with MIT License 5 votes vote down vote up
package examples.cats

import cats.data._
import cats.free._
import cats._
import freasymonad.cats.free

import scala.collection.mutable.ListBuffer
import scala.io.StdIn

// example based off https://github.com/typelevel/cats/blob/master/docs/src/main/tut/datatypes/freemonad.md#composing-free-monads-adts
object ComposeFreeMonads extends App {

  @free trait Interact {
    type InteractF[A] = Free[Adt, A]
    sealed trait Adt[A]
    def ask(prompt: String): InteractF[String]
    def tell(msg: String): InteractF[Unit]
  }

  @free trait DataSource {
    type DataSourceF[A] = Free[Adt, A]
    sealed trait Adt[A]
    def addCat(a: String): DataSourceF[Unit]
    def getAllCats: DataSourceF[List[String]]
    def addAndGetAllCats(a: String): DataSourceF[List[String]] =
     for {
       _ <- addCat(a)
       c <- getAllCats
     } yield c
  }

  type CatsApp[A] = EitherK[DataSource.Adt, Interact.Adt, A]

  // program1 and program2 are the same.
  // This library lets you choose which style you like.
  def program1(implicit I: Interact.Injects[CatsApp], D : DataSource.Injects[CatsApp]): Free[CatsApp, Unit] = {
    import I._, D._
    for {
      cat  <- ask("What's the kitty's name?")
      cats <- addAndGetAllCats(cat)
      _    <- tell(cats.toString)
    } yield ()
  }

  val program2: Free[CatsApp, Unit] = {
    import Interact.injectOps._, DataSource.injectOps._
    for {
      cat  <- ask[CatsApp]("What's the kitty's name?")
      cats <- addAndGetAllCats[CatsApp](cat)
      _    <- tell[CatsApp](cats.toString)
    } yield ()
  }

  val consoleCats: Interact.Interp[Id] = new Interact.Interp[Id] {
    def ask(prompt: String): Id[String] = {
      println(prompt)
      StdIn.readLine()
    }
    def tell(msg: String): Id[Unit] = println(msg)
  }

  val inMemoryDatasource: DataSource.Interp[Id] = new DataSource.Interp[Id] {
    private[this] val memDataSet = new ListBuffer[String]
    def addCat(a: String): Id[Unit] = memDataSet.append(a)
    def getAllCats: Id[List[String]] = memDataSet.toList
  }

  val interpreter: CatsApp ~> Id = inMemoryDatasource or consoleCats

  program1.foldMap(interpreter)
  program2.foldMap(interpreter)
} 
Example 172
Source File: LoanWordsProcessor.scala    From attic-nlp4l   with Apache License 2.0 5 votes vote down vote up
package org.nlp4l.syn

import org.apache.lucene.search.spell.LuceneLevenshteinDistance
import org.nlp4l.core.RawReader
import org.nlp4l.framework.models._
import org.nlp4l.framework.processors.{Processor, ProcessorFactory, DictionaryAttributeFactory}
import org.nlp4l.lm.{HmmTokenizer, HmmModel}
import org.slf4j.LoggerFactory

import scala.collection.mutable.ListBuffer
import scala.util.matching.Regex

class LoanWordsDictionaryAttributeFactory(settings: Map[String, String]) extends DictionaryAttributeFactory(settings) {
  override def getInstance: DictionaryAttribute = {

    val list = Seq[CellAttribute](
      CellAttribute("word", CellType.StringType, true, true),
      CellAttribute("synonym", CellType.StringType, false, true)
    )
    new DictionaryAttribute("loanWords", list)
  }
}

class LoanWordsProcessorFactory(settings: Map[String, String]) extends ProcessorFactory(settings) {

  val DEF_THRESHOLD = 0.8F
  val DEF_MIN_DOCFREQ = 3

  override def getInstance: Processor = {
    val index = getStrParamRequired("index")
    val field = getStrParamRequired("field")
    val modelIndex = getStrParamRequired("modelIndex")
    val threshold = getFloatParam("threshold", DEF_THRESHOLD)
    val minDocFreq = getIntParam("minDocFreq", DEF_MIN_DOCFREQ)
    new LoanWordsProcessor(index, field, modelIndex, threshold, minDocFreq)
  }
}

class LoanWordsProcessor(val index: String, val field: String, val modelIndex: String,
                          val threshold: Float, val minDocFreq: Int) extends Processor {

  override def execute(data: Option[Dictionary]): Option[Dictionary] = {
    val logger = LoggerFactory.getLogger(this.getClass)
    val reader = RawReader(index)
    val trModel = new TransliterationModelIndex(modelIndex)

    val pattern: Regex = """([a-z]+) ([\u30A0-\u30FF]+)""".r
    val lld = new LuceneLevenshteinDistance()

    val records = ListBuffer.empty[Record]
    try{
      var progress = 0
      val fi = reader.field(field)
      fi match {
        case Some(f) => {
          val len = f.uniqTerms
          f.terms.foreach { t =>
            progress = progress + 1
            if((progress % 10000) == 0){
              val percent = ((progress.toFloat / len) * 100).toInt
              logger.info(s"$percent % done ($progress / $len) term is ${t.text}")
            }
            if (t.docFreq >= minDocFreq) {
              t.text match {
                case pattern(a, b) => {
                  val predWord = trModel.predict(b)
                  if (lld.getDistance(a, predWord) > threshold) {
                    records += Record(Seq(Cell("word", a), Cell("synonym", b)))
                  }
                }
                case _ => {}
              }
            }
          }
          Some(Dictionary(records))
        }
        case _ => throw new RuntimeException(s"""field "$field" you specified in the conf file doesn't exist in the index "$index"""")
      }
    }
    finally{
      if(reader != null) reader.close
    }
  }
}

class TransliterationModelIndex(index: String){

  private val model = HmmModel(index)

  private val tokenizer = HmmTokenizer(model)

  def predict(katakana: String): String = {
    tokenizer.tokens(katakana).map(_.cls).mkString
  }
} 
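
A tiny usage sketch for TransliterationModelIndex above. The index path and the katakana input are hypothetical, and the prediction depends entirely on the HMM model stored in that index.

val translit = new TransliterationModelIndex("/path/to/transliteration-model-index")
val predicted = translit.predict("コンピュータ")  // e.g. "computer", depending on the trained model
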
Example 173
Source File: SynonymRecordsUnifier.scala    From attic-nlp4l   with Apache License 2.0 5 votes vote down vote up
package org.nlp4l.syn

import java.io.PrintWriter

import scala.collection.mutable.ListBuffer

object SynonymRecordsUnifier {

  def main(args: Array[String]): Unit = {
    if(args.length < 1) usage
    
    val synRecsList = ListBuffer[SynonymRecords]()
    for(arg <- args){
      synRecsList += new SynonymRecords(arg, SynonymCommon.readAllRecords(arg))
    }
    
    outputUniqueSynonymRecords(synRecsList.head, synRecsList.tail)
  }
  
  def usage(): Unit = {
    println("Usage: org.nlp4l.syn.SynonymRecordsUnifier synonyms.txt [synonyms-2.txt synonyms-3.txt...]");
    println("\tsynonyms.txt\tsynonyms file to be checked")
    sys.exit
  }
  
  def outputUniqueSynonymRecords(src: SynonymRecords, destList: Seq[SynonymRecords]): Unit = {
    if(destList.isEmpty){
      outputCheckedFile(src.headerComments, src.uniqueRecords, src.outFile)
    }
    else{
      val result = checkAcross(src, destList, List())
      outputCheckedFile(src.headerComments, result._1.uniqueRecords, src.outFile)
      outputUniqueSynonymRecords(result._2.head, result._2.tail)
    }
  }
  
  def checkAcross(src: SynonymRecords, destList: Seq[SynonymRecords], checkedDest: Seq[SynonymRecords]):
    (SynonymRecords, Seq[SynonymRecords]) = {
    if(destList.isEmpty){
      (src, checkedDest)
    }
    else{
      val checkedLists = checkTwoRecordsList(src, destList.head)
      checkAcross(checkedLists._1, destList.tail, checkedDest :+ checkedLists._2)
    }
  }
  
  def checkTwoRecordsList(src: SynonymRecords, dest: SynonymRecords): (SynonymRecords, SynonymRecords) = {
    val result = checkTwoRecordsList(src.uniqueRecords, dest.uniqueRecords, List())
    (new SynonymRecords(src.synFile, src.outFile, src.headerComments, result._1),
      new SynonymRecords(dest.synFile, dest.outFile, dest.headerComments, result._2))
  }
  
  def checkTwoRecordsList(src: Seq[Seq[String]], dest: Seq[Seq[String]], outSrc: Seq[Seq[String]]):
    (Seq[Seq[String]], Seq[Seq[String]]) = {
    if(src.isEmpty){
      (outSrc, dest)
    }
    else{
      val result = checkRecord2List(src.head, dest, List())
      checkTwoRecordsList(src.tail, result._2, outSrc :+ result._1)
    }
  }
  
  def checkRecord2List(srcRecord: Seq[String], destList: Seq[Seq[String]], outDest: Seq[Seq[String]]):
    (Seq[String], Seq[Seq[String]]) = {
    if(destList.isEmpty){
      (srcRecord, outDest)
    }
    else{
      SynonymCommon.unifyRecordsIfNeeded(srcRecord, destList.head) match {
        case Some(unifiedSrcRecord) => checkRecord2List(unifiedSrcRecord, destList.tail, outDest)
        case None => checkRecord2List(srcRecord, destList.tail, outDest :+ destList.head)
      }
    }
  }
  
  def outputCheckedFile(headerComments: Seq[String], records: Seq[Seq[String]], outFile: String): Unit ={
    if(records.nonEmpty){
      val pw = new PrintWriter(outFile)
      try{
        // write header comment lines
        for(headerComment <- headerComments){
          pw.println(headerComment)
        }
        
        // write synonym lines
        for(record <- records){
          pw.println(record.mkString(","))
        }
      }
      finally{
        pw.close
      }
    }
  }
} 
Example 174
Source File: TermsExtractionProcessor.scala    From attic-nlp4l   with Apache License 2.0 5 votes vote down vote up
package org.nlp4l.extract

import org.nlp4l.framework.models._
import org.nlp4l.framework.processors.{Processor, ProcessorFactory, DictionaryAttributeFactory}
import org.nlp4l.lucene.LuceneDocTermVector
import org.nlp4l.lucene.TermsExtractor
import org.nlp4l.lucene.TermsExtractor.Config
import org.slf4j.LoggerFactory

import scala.collection.mutable.ListBuffer

class TermsExtractionDictionaryAttributeFactory(settings: Map[String, String]) extends DictionaryAttributeFactory(settings) {
  override def getInstance: DictionaryAttribute = {
    val outScore = getBoolParam("outScore", true)
    val list = if(outScore){
      Seq[CellAttribute](
        CellAttribute("term", CellType.StringType, true, true),
        // use constant hashCode so that we don't take into account score when calculating hashCode of Records
        CellAttribute("score", CellType.FloatType, false, true, constantHashCode => 0)
      )
    }
    else {
      Seq[CellAttribute](
        CellAttribute("term", CellType.StringType, true, true)
      )
    }
    new DictionaryAttribute("terms", list)
  }
}

class TermsExtractionProcessorFactory(settings: Map[String, String]) extends ProcessorFactory(settings) {

  override def getInstance: Processor = {
    val logger = LoggerFactory.getLogger(this.getClass)
    val config = new Config()
    config.index = getStrParamRequired("index")
    config.outScore = getBoolParam("outScore", true)
    config.fieldCn = getStrParamRequired("field")
    config.fieldLn2 = settings.getOrElse("fieldln2", null)
    config.fieldRn2 = settings.getOrElse("fieldrn2", null)
    config.delimiter = settings.getOrElse("delimiter", "/")
    config.outNum = getIntParam("num", org.nlp4l.lucene.TermsExtractor.DEF_OUT_NUM)
    config.scorer = settings.getOrElse("scorer", "FreqDFLR")
    logger.info(
      """TermsExtractionProcessor starts with parameters
        |    index "{}"
        |    field "{}"
        |    fieldln2 "{}"
        |    fieldrn2 "{}"
        |    delimiter "{}"
        |    num "{}"
        |    scorer "{}"
        |    outScore "{}"""".stripMargin,
      config.index, config.fieldCn, config.fieldLn2, config.fieldRn2, config.delimiter, config.outNum.toString, config.scorer, config.outScore.toString)
    new TermsExtractionProcessor(config)
  }
}

class TermsExtractionProcessor(val config: Config) extends Processor {

  override def execute(data: Option[Dictionary]): Option[Dictionary] = {
    val te = new ProcTermsExtractor(config)
    te.setConfig()
    te.execute()
    Some(Dictionary(te.records))
  }
}

class ProcTermsExtractor(config: Config) extends TermsExtractor(config: Config) {

  val records = ListBuffer.empty[Record]
  val logger = LoggerFactory.getLogger(this.getClass)

  override def printResultEntry(e: java.util.Map.Entry[String, LuceneDocTermVector.TermWeight]): Unit ={
    if(config.outScore){
      records += Record(Seq(Cell("term", getTerm(e)), Cell("score", getScore(e))))
    }
    else{
      records += Record(Seq(Cell("term", getTerm(e))))
    }
  }
} 
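The two factories above are configured entirely through the settings map, so using them outside of an NLP4L job definition is mostly a matter of building that map. A minimal sketch, assuming the imports from the listing above and made-up index/field values:

// Hypothetical wiring: "index" and "field" are the only required keys (see getStrParamRequired above),
// and the path/field values here are placeholders.
val settings = Map(
  "index"    -> "/tmp/nlp4l/index",
  "field"    -> "body",
  "num"      -> "100",      // optional, defaults to TermsExtractor.DEF_OUT_NUM
  "outScore" -> "true"      // optional, defaults to true
)

val dictAttr: DictionaryAttribute = new TermsExtractionDictionaryAttributeFactory(settings).getInstance
val processor: Processor = new TermsExtractionProcessorFactory(settings).getInstance

// The processor ignores its input dictionary and extracts terms directly from the Lucene index.
val extracted: Option[Dictionary] = processor.execute(None)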
Example 175
Source File: BuddyWordsProcessor.scala    From attic-nlp4l   with Apache License 2.0 5 votes vote down vote up
package org.nlp4l.colloc

import org.nlp4l.core.RawReader
import org.nlp4l.framework.processors._
import org.nlp4l.framework.models._
import org.slf4j.LoggerFactory

import scala.collection.mutable.ListBuffer

class BuddyWordsDictionaryAttributeFactory(settings: Map[String, String]) extends DictionaryAttributeFactory(settings) {
  override def getInstance: DictionaryAttribute = {

    val list = Seq[CellAttribute](
      CellAttribute("word", CellType.StringType, true, true),
      CellAttribute("buddies", CellType.StringType, false, true)
    )
    new DictionaryAttribute("buddyWords", list)
  }
}

class BuddyWordsProcessorFactory(settings: Map[String, String]) extends ProcessorFactory(settings) {

  val DEF_MAX_DOCS_TO_ANALYZE: Int = 1000
  val DEF_SLOP: Int = 5
  val DEF_MAX_COI_TERMS_PER_TERM: Int = 20
  val DEF_MAX_BASE_TERMS_PER_DOC: Int = 10 * 1000

  override def getInstance: Processor = {
    val index = getStrParamRequired("index")
    val field = getStrParamRequired("field")
    val srcField = field            // use same field name for source field for now
    val maxDocsToAnalyze = getIntParam("maxDocsToAnalyze", DEF_MAX_DOCS_TO_ANALYZE)
    val slop = getIntParam("slop", DEF_SLOP)
    val maxCoiTermsPerTerm = getIntParam("maxCoiTermsPerTerm", DEF_MAX_COI_TERMS_PER_TERM)
    val maxBaseTermsPerDoc = getIntParam("maxBaseTermsPerDoc", DEF_MAX_BASE_TERMS_PER_DOC)
    new BuddyWordsProcessor(index, field, srcField, maxDocsToAnalyze, slop, maxCoiTermsPerTerm, maxBaseTermsPerDoc)
  }
}

class BuddyWordsProcessor(val index: String, val field: String, val srcField: String, val maxDocsToAnalyze: Int,
                          val slop: Int, val maxCoiTermsPerTerm: Int, val maxBaseTermPerDoc: Int) extends Processor {

  override def execute(data: Option[Dictionary]): Option[Dictionary] = {
    val logger = LoggerFactory.getLogger(this.getClass)
    val reader = RawReader(index)
    val records = ListBuffer.empty[Record]
    try{
      var progress = 0
      val fi = reader.field(field)
      fi match {
        case Some(f) => {
          val finder = BuddyWordsFinder(reader, maxDocsToAnalyze, slop, maxCoiTermsPerTerm, maxBaseTermPerDoc)
          val len = f.uniqTerms
          f.terms.foreach{ t =>
            val result = finder.find(field, t.text)
            progress = progress + 1
            if((progress % 1000) == 0){
              val percent = ((progress.toFloat / len) * 100).toInt
              logger.info(s"$percent % done ($progress / $len) term is ${t.text}")
            }
            if(result.size > 0){
              records += Record(Seq(Cell("word", t.text), Cell("buddies", result.map(_._1).mkString(","))))
            }
          }
          Some(Dictionary(records))
        }
        case _ => throw new RuntimeException(s"""field "$field" specified in the conf file doesn't exist in the index "$index".""")
      }
    }
    finally{
      if(reader != null) reader.close()
    }
  }
} 
Example 176
Source File: PullExample.scala    From Hands-On-Data-Analysis-with-Scala   with MIT License 5 votes vote down vote up
package handson.example.extract

import java.io.{BufferedReader, InputStreamReader}
import java.util.function.Consumer

import scala.collection.mutable.ListBuffer


class DataConsumer extends Consumer[String] {
  val buf = ListBuffer[String]()
  override def accept(t: String): Unit = {
    buf += t
  }
}
object PullExample {
  def main(args: Array[String]): Unit = {
    val reader = new BufferedReader(
      new InputStreamReader(
        new java.net.URL("https://data.lacity.org/api/views/nxs9-385f/rows.csv?accessType=DOWNLOAD").openStream()
      )
    )
    val dataConsumer = new DataConsumer
    reader.lines().forEach(dataConsumer)
    dataConsumer.buf.toList.take(5).foreach(println)
  }

} 
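An alternative to the dedicated Consumer class is to bridge the Java stream into a Scala iterator with JavaConverters and process it lazily. A minimal sketch of the same download (same URL as above), not how the book's example does it:

import java.io.{BufferedReader, InputStreamReader}

import scala.collection.JavaConverters._

object PullExampleIterator {
  def main(args: Array[String]): Unit = {
    val reader = new BufferedReader(
      new InputStreamReader(
        new java.net.URL("https://data.lacity.org/api/views/nxs9-385f/rows.csv?accessType=DOWNLOAD").openStream()
      )
    )
    try {
      // Bridge java.util.stream.Stream[String] to a Scala Iterator and take the first five lines lazily.
      reader.lines().iterator().asScala.take(5).foreach(println)
    } finally {
      reader.close()
    }
  }
}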
Example 177
Source File: CsvParserExample.scala    From Hands-On-Data-Analysis-with-Scala   with MIT License 5 votes vote down vote up
package handson.example.csv

import java.io.{BufferedReader, InputStreamReader}
import java.util.function.Consumer

import org.apache.commons.csv.{CSVFormat, CSVPrinter, CSVRecord}

import scala.collection.mutable.ListBuffer


object CsvParserExample {
  def main(args: Array[String]): Unit = {
    val reader = new BufferedReader(
      new InputStreamReader(
        new java.net.URL("https://data.lacity.org/api/views/nxs9-385f/rows.csv?accessType=DOWNLOAD").openStream()
      )
    )
    val csvParser = CSVFormat.RFC4180.withFirstRecordAsHeader().parse(reader)
    // DataConsumer here is assumed to be a Consumer[CSVRecord] defined alongside this object (not shown above).
    val dataConsumer = new DataConsumer
    csvParser.forEach(dataConsumer)
    val allRecords = dataConsumer.buf.toList
    allRecords.take(3).foreach(println)

    val csvPrinter = new CSVPrinter(System.out, CSVFormat.RFC4180.withHeader("fname", "lname", "age"))
    csvPrinter.printRecord("Jon", "Doe", "21")
    csvPrinter.printRecord("James", "Bond", "39")
    csvPrinter.flush()

  }

} 
Example 178
Source File: RunInOutSpec.scala    From make-your-programs-free   with GNU General Public License v3.0 5 votes vote down vote up
package free

import org.scalatest._
import scala.collection.mutable.ListBuffer
import scala.collection.mutable.Stack

import scalaz._, Scalaz._

class RunInOutSpec extends FreeSpec with Matchers {

  def interpreter(input: Stack[String], output: ListBuffer[String]) = new (InOut ~> Id) {

    def apply[A](inout: InOut[A]): Id[A] = inout match {
      case PrintLine(line) =>
        output += line
        ()
      case GetLine =>
        input.pop
    }
  }

  "A program" - {
    "should ask for a name and greet the user" in {
      // given
      implicit val ops = new InOut.Ops[InOut]
      val input = Stack.empty[String]
      val output = ListBuffer.empty[String]
      input.push("Pawel")
      // when
      OurFirstProgram.program.foldMap(interpreter(input, output))
      // then
      input.size should be(0)
      output should equal(ListBuffer("What is your name", "Nice to meet you Pawel"))
    }
  }
} 
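The spec depends on an InOut algebra and an OurFirstProgram that live elsewhere in the project; only the test-side interpreter is listed here. The shapes below are inferred from the pattern match above (an assumption, not the project's actual source), just to make the interpreter readable in isolation:

// Inferred from the interpreter: PrintLine yields Unit, GetLine yields the next input String.
sealed trait InOut[A]
case class PrintLine(line: String) extends InOut[Unit]
case object GetLine extends InOut[String]

The real definitions also provide the Free-monad plumbing behind the InOut.Ops[InOut] instance used by the test.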
Example 179
Source File: Post.scala    From Scala-Design-Patterns-Second-Edition   with MIT License 5 votes vote down vote up
package com.ivan.nikolov.behavioral.observer

import com.typesafe.scalalogging.LazyLogging

import scala.collection.mutable.ListBuffer

case class Post(user: User, text: String) extends Observable[Post] {
  
  val comments = ListBuffer[Comment]()
  
  def addComment(comment: Comment): Unit = {
    comments.+=:(comment)
    notifyObservers()
  }
}

case class Comment(user: User, text: String)

case class User(name: String) extends Observer[Post] {
  override def handleUpdate(subject: Post): Unit = {
    System.out.println(s"Hey, I'm ${name}. The post got some new comments: ${subject.comments}")
  }
}

object PostExample extends LazyLogging {
  def main(args: Array[String]): Unit = {
    val userIvan = User("Ivan")
    val userMaria = User("Maria")
    val userJohn = User("John")
    
    logger.info("Create a post")
    val post = Post(userIvan, "This is a post about the observer design pattern")
    logger.info("Add a comment")
    post.addComment(Comment(userIvan, "I hope you like the post!"))

    logger.info("John and Maria subscribe to the comments.")
    post.addObserver(userJohn)
    post.addObserver(userMaria)

    logger.info("Add a comment")
    post.addComment(Comment(userIvan, "Why are you so quiet? Do you like it?"))
    logger.info("Add a comment")
    post.addComment(Comment(userMaria, "It is amazing! Thanks!"))
  }
} 
Example 180
Source File: Observer.scala    From Scala-Design-Patterns-Second-Edition   with MIT License 5 votes vote down vote up
package com.ivan.nikolov.behavioral.observer

import scala.collection.mutable.ListBuffer

trait Observer[T] {
  def handleUpdate(subject: T)
}

trait Observable[T] {
  this: T =>
  
  private val observers = ListBuffer[Observer[T]]()
  
  def addObserver(observer: Observer[T]): Unit = {
    observers.+=:(observer)
  }
  
  def notifyObservers(): Unit = {
    observers.foreach(_.handleUpdate(this))
  }
} 
Example 181
Source File: RobotCommand.scala    From Scala-Design-Patterns-Second-Edition   with MIT License 5 votes vote down vote up
package com.ivan.nikolov.behavioral.command

import scala.collection.mutable.ListBuffer

trait RobotCommand {
  def execute(): Unit
}

case class MakeSandwichCommand(robot: Robot) extends RobotCommand {
  override def execute(): Unit = robot.makeSandwich()
}

case class PourJuiceCommand(robot: Robot) extends RobotCommand {
  override def execute(): Unit = robot.pourJuice()
}

case class CleanUpCommand(robot: Robot) extends RobotCommand {
  override def execute(): Unit = robot.cleanUp()
}

class RobotController {
  val history = ListBuffer[RobotCommand]()
  
  def issueCommand(command: RobotCommand): Unit = {
    command +=: history
    command.execute()
  }
  
  def showHistory(): Unit = {
    history.foreach(println)
  }
}

class RobotByNameController {
  val history = ListBuffer[() => Unit]()

  def issueCommand(command: => Unit): Unit = {
    command _ +=: history   // eta-expand the by-name block into a () => Unit before prepending it to the history
    command
  }

  def showHistory(): Unit = {
    history.foreach(println)
  }
}

object RobotExample {
  def main(args: Array[String]): Unit = {
    val robot = Robot()
    val robotController = new RobotController
    
    robotController.issueCommand(MakeSandwichCommand(robot))
    robotController.issueCommand(PourJuiceCommand(robot))
    System.out.println("I'm eating and having some juice.")
    robotController.issueCommand(CleanUpCommand(robot))
    
    System.out.println("Here is what I asked my robot to do:")
    robotController.showHistory()
  }
}

object RobotByNameExample {
  def main(args: Array[String]): Unit = {
    val robot = Robot()
    val robotController = new RobotByNameController

    robotController.issueCommand(MakeSandwichCommand(robot).execute())
    robotController.issueCommand(PourJuiceCommand(robot).execute())
    System.out.println("I'm eating and having some juice.")
    robotController.issueCommand(CleanUpCommand(robot).execute())

    System.out.println("Here is what I asked my robot to do:")
    robotController.showHistory()
  }
} 
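Both runners above call into a Robot that is defined in another file of the chapter. A plausible minimal stand-in, consistent with the three commands (an assumption, not the book's code):

// Hypothetical Robot: each action simply reports what it is doing.
case class Robot() {
  def makeSandwich(): Unit = System.out.println("I'm making a sandwich.")
  def pourJuice(): Unit = System.out.println("I'm pouring some juice.")
  def cleanUp(): Unit = System.out.println("I'm cleaning up.")
}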
Example 182
Source File: Student.scala    From Scala-Design-Patterns-Second-Edition   with MIT License 5 votes vote down vote up
package com.ivan.nikolov.behavioral.iterator

import scala.collection.mutable.ListBuffer

case class Student(name: String, age: Int)

class StudentIterator(students: Array[Student]) extends Iterator[Student] {
  var currentPos = 0
  
  override def hasNext: Boolean = currentPos < students.size

  override def next(): Student = {
    val result = students(currentPos)
    currentPos = currentPos + 1
    result
  }
}

class ClassRoom extends Iterable[Student] {
  
  val students: ListBuffer[Student] = ListBuffer[Student]()
  
  def add(student: Student): Unit = {
    student +=: students
  }

  override def iterator: Iterator[Student] = new StudentIterator(students.toArray)
}

object ClassRoomExample {
  def main(args: Array[String]): Unit = {
    val classRoom = new ClassRoom
    classRoom.add(Student("Ivan", 26))
    classRoom.add(Student("Maria", 26))
    classRoom.add(Student("John", 25))
    classRoom.foreach(println)
  }
} 
Example 187
Source File: Master.scala    From Scala-Design-Patterns-Second-Edition   with MIT License 5 votes vote down vote up
package com.ivan.nikolov.scheduler.actors

import java.time.LocalDateTime
import java.util.concurrent.TimeUnit

import akka.actor.{Props, Cancellable, Actor}
import akka.routing.RoundRobinPool
import com.ivan.nikolov.scheduler.actors.messages.{Work, Schedule, Done}
import com.ivan.nikolov.scheduler.config.job.{Daily, Hourly}
import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.duration.Duration
import scala.collection.mutable.ListBuffer
import scala.concurrent.ExecutionContext.Implicits.global

class Master(numWorkers: Int, actorFactory: ActorFactory) extends Actor with LazyLogging {
  val cancelables = ListBuffer[Cancellable]()
  
  val router = context.actorOf(
    Props(actorFactory.createWorkerActor()).withRouter(RoundRobinPool(numWorkers)),
    "scheduler-master-worker-router"
  )
  
  override def receive: Receive = {
    case Done(name, command, jobType, success) =>
      if (success) {
        logger.info("Successfully completed {} ({}).", name, command)
      } else {
        logger.error("Failure! Command {} ({}) returned a non-zero result code.", name, command)
      }
    case Schedule(configs) => 
      configs.foreach {
        case config =>
          val cancellable = this.context.system.scheduler.schedule(
            config.timeOptions.getInitialDelay(LocalDateTime.now(), config.frequency),
            config.frequency match {
              case Hourly => Duration.create(1, TimeUnit.HOURS)
              case Daily => Duration.create(1, TimeUnit.DAYS)
            },
            router,
            Work(config.name, config.command, config.jobType)
          )
          cancelables += cancellable // keep the handle so postStop() can cancel the scheduled job
          logger.info("Scheduled: {}", config)
      }
  }
  
  override def postStop(): Unit = {
    cancelables.foreach(_.cancel())
  }
} 
Example 197
Source File: Node.scala    From Scala-Design-Patterns-Second-Edition   with MIT License 5 votes vote down vote up
package com.ivan.nikolov.structural.composite

import scala.collection.mutable.ListBuffer

trait Node {
  def print(prefix: String): Unit
}

class Leaf(data: String) extends Node {
  override def print(prefix: String): Unit =
    System.out.println(s"${prefix}${data}")
}

class Tree extends Node {
  private val children = ListBuffer.empty[Node]
  
  override def print(prefix: String): Unit = {
    System.out.println(s"${prefix}(")
    children.foreach(_.print(s"${prefix}${prefix}"))
    System.out.println(s"${prefix})")
  }
  
  def add(child: Node): Unit = {
    children += child
  }
  
  def remove(): Unit = {
    if (children.nonEmpty) {
      children.remove(0)
    }
  }
}


object CompositeExample {
  def main(args: Array[String]): Unit = {
    val tree = new Tree
    
    tree.add(new Leaf("leaf 1"))
    
    val subtree1 = new Tree
    subtree1.add(new Leaf("leaf 2"))
    
    val subtree2 = new Tree
    subtree2.add(new Leaf("leaf 3"))
    subtree2.add(new Leaf("leaf 4"))
    subtree1.add(subtree2)
    
    tree.add(subtree1)
    
    val subtree3 = new Tree
    val subtree4 = new Tree
    subtree4.add(new Leaf("leaf 5"))
    subtree4.add(new Leaf("leaf 6"))
    
    subtree3.add(subtree4)
    tree.add(subtree3)
    
    tree.print("-")
  }
} 
Example 198
Source File: Circle.scala    From Scala-Design-Patterns-Second-Edition   with MIT License 5 votes vote down vote up
package com.ivan.nikolov.structural.flyweight

import scala.collection.mutable.{ListBuffer, Map}

class Circle(color: Color) {
  System.out.println(s"Creating a circle with $color color.")
  
  override def toString(): String =
    s"Circle($color)"
}

object Circle {
  val cache = Map.empty[Color, Circle]
  
  def apply(color: Color): Circle =
    cache.getOrElseUpdate(color, new Circle(color))
  
  def circlesCreated(): Int = cache.size
}

class Graphic {
  val items = ListBuffer.empty[(Int, Int, Double, Circle)]
  
  def addCircle(x: Int, y: Int, radius: Double, circle: Circle): Unit = {
    items += ((x, y, radius, circle))
  }
  
  def draw(): Unit = {
    items.foreach {
      case (x, y, radius, circle) =>
        System.out.println(s"Drawing a circle at ($x, $y) with radius $radius: $circle")
    }
  }
}

object FlyweightExample {
  def main(args: Array[String]): Unit = {
    val graphic = new Graphic
    graphic.addCircle(1, 1, 1.0, Circle(Green))
    graphic.addCircle(1, 2, 1.0, Circle(Red))
    graphic.addCircle(2, 1, 1.0, Circle(Blue))
    graphic.addCircle(2, 2, 1.0, Circle(Green))
    graphic.addCircle(2, 3, 1.0, Circle(Yellow))
    graphic.addCircle(3, 2, 1.0, Circle(Magenta))
    graphic.addCircle(3, 3, 1.0, Circle(Blue))
    graphic.addCircle(4, 3, 1.0, Circle(Blue))
    graphic.addCircle(3, 4, 1.0, Circle(Yellow))
    graphic.addCircle(4, 4, 1.0, Circle(Red))
    
    graphic.draw()
    
    System.out.println(s"Total number of circle objects created: ${Circle.circlesCreated()}")
  }
} 
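The Circle flyweight above relies on a Color hierarchy (Green, Red, Blue, Yellow, Magenta) defined elsewhere in the package. A minimal stand-in consistent with that usage (an assumption, not the book's code):

// Hypothetical Color ADT used as the flyweights' intrinsic state.
sealed abstract class Color
case object Red extends Color
case object Green extends Color
case object Blue extends Color
case object Yellow extends Color
case object Magenta extends Color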