java.io.PrintWriter Scala Examples

The following examples show how to use java.io.PrintWriter. Each example is taken from an open-source project; the project name, source file, and license are noted above the code.
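Before the project examples, here is a minimal, self-contained sketch of the two resource-handling patterns that recur throughout this page: closing the writer in an explicit try/finally block, and (on Scala 2.13+) letting scala.util.Using close it automatically. The object name PrintWriterDemo and the file name demo.txt are purely illustrative.

import java.io.{File, PrintWriter}
import java.nio.charset.StandardCharsets
import scala.util.Using

object PrintWriterDemo extends App {
  // Pattern 1: explicit try/finally, as in most of the examples below.
  val writer = new PrintWriter(new File("demo.txt"), StandardCharsets.UTF_8.name())
  try {
    writer.println("hello")
  } finally {
    writer.close()
  }

  // Pattern 2 (Scala 2.13+): Using closes the writer even if the body throws.
  Using(new PrintWriter("demo.txt")) { w =>
    w.println("hello again")
  }
}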
Example 1
Source File: package.scala    From mantis   with Apache License 2.0
package io.iohk.ethereum

import java.io.{File, PrintWriter}
import java.net.{Inet6Address, InetAddress}
import java.security.SecureRandom

import io.iohk.ethereum.crypto._
import org.spongycastle.crypto.AsymmetricCipherKeyPair
import org.spongycastle.crypto.params.ECPublicKeyParameters
import org.spongycastle.math.ec.ECPoint
import org.spongycastle.util.encoders.Hex

import scala.io.Source

package object network {

  val ProtocolVersion = 4

  implicit class ECPublicKeyParametersNodeId(val pubKey: ECPublicKeyParameters) extends AnyVal {
    def toNodeId: Array[Byte] =
      pubKey.getQ
        .getEncoded(false)
        .drop(1) // drop the first byte: the uncompressed-point indicator
  }

  def publicKeyFromNodeId(nodeId: String): ECPoint = {
    val bytes = ECDSASignature.uncompressedIndicator +: Hex.decode(nodeId)
    curve.getCurve.decodePoint(bytes)
  }

  def loadAsymmetricCipherKeyPair(filePath: String, secureRandom: SecureRandom): AsymmetricCipherKeyPair = {
    val file = new File(filePath)
    if(!file.exists()){
      val keysValuePair = generateKeyPair(secureRandom)

      //Write keys to file
      val (priv, _) = keyPairToByteArrays(keysValuePair)
      require(file.getParentFile.exists() || file.getParentFile.mkdirs(), "Key's file parent directory creation failed")
      val writer = new PrintWriter(filePath)
      try {
        writer.write(Hex.toHexString(priv))
      } finally {
        writer.close()
      }

      keysValuePair
    } else {
      val reader = Source.fromFile(filePath)
      try {
        val privHex = reader.mkString
        keyPairFromPrvKey(Hex.decode(privHex))
      } finally {
        reader.close()
      }
    }
  }

  
  def getHostName(address: InetAddress): String = {
    val hostName = address.getHostAddress
    address match {
      case _: Inet6Address => s"[$hostName]"
      case _ => hostName
    }
  }

} 
Example 2
Source File: GMM.scala    From spark-tda   with Apache License 2.0
import java.io.{File, PrintWriter}
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.clustering.GaussianMixture
import org.apache.spark.sql.functions._

def computeGaussianMixtureModel(
  pathToTextFile: String,
  quantity: Int) {
  case class Point(x: Double, y: Double)

  def save(f: File)(func: PrintWriter => Unit) {
    val p = new PrintWriter(f)
    try {
      func(p)
    } finally {
      p.close()
    }
  }

  val filename = pathToTextFile.split("\\.")(0)

  val outputFilename = s"$filename-GMM-k${quantity}.tsv"

  val points = sc
    .textFile(pathToTextFile)
    .map {
      line => line.trim.split("\\s+")
    }
    .map {
      row => Point(row(0).toDouble, row(1).toDouble)
    }

  val features = points
    .map {
      p => Vectors.dense(p.x, p.y)
    }

  features.cache()

  val gmm = new GaussianMixture()
    .setK(quantity)
    .run(features)

  val predictions = features
    .map {
      f => (f(0), f(1), gmm.predict(f) + 1)
    }
    .collect

  println(s"OUTPUT TO: ${outputFilename}")
  save(new File(outputFilename)) { f =>
    predictions.foreach {
      case (x, y, ccid) => f.println(s"${x}\t${y}\t${ccid}")
    }
  }
} 
Example 3
Source File: LoggerSimulation.scala    From BigData-News   with Apache License 2.0
package com.vita.spark.utils

import java.io.PrintWriter
import java.net.ServerSocket

class LoggerSimulation {

}

object LoggerSimulation {

  var numIndex = 0

  /**
    * Generates a single letter.
    *
    * @param index the index of the letter (0 to 25 for A to Z)
    * @return the generated letter
    */
  def gennerateContent(index: Int): String = {
    import scala.collection.mutable.ListBuffer
    val charList = ListBuffer[Char]();
    for (i <- 65 to 90) {
      charList += i.toChar
    }
    val charArray = charList.toArray
    charArray(index).toString();
  }

  def gennerateNumber(): String = {
    //    numIndex += 1
    //    return numIndex.toString
    return "a,b,c,d,e,f"
  }

  /**
    * Generates a random index.
    *
    * @return an index between 0 and 6
    */
  def index = {
    import java.util.Random
    val rdm = new Random()
    rdm.nextInt(7)
  }

  /**
    * A main method that creates a ServerSocket and sends messages to connected clients.
    *
    * @param args the port and the send interval in milliseconds
    */
  def main(args: Array[String]): Unit = {
    if (args.length != 2) {
      System.err.println("Usage:<port><millisecond>")
      System.exit(1);
    }

    val listener = new ServerSocket(args(0).toInt)
    println("已经做好连接的准备-------")
    while (true) {
      val socket = listener.accept()
      new Thread() {
        override def run(): Unit = {
          println("Got client connected from:" + socket.getInetAddress)
          val out = new PrintWriter(socket.getOutputStream, true)
          while (true) {
            Thread.sleep(args(1).toLong)
            //            val content = gennerateContent(index)
            val content = gennerateNumber()
            println(content)
            out.write(content + "\n")
            out.flush()
          }
          socket.close()
        }
      }.start()
    }
  }
} 
Example 4
Source File: ResourceFileGoldenCodecLaws.scala    From circe-golden   with Apache License 2.0
package io.circe.testing.golden

import cats.instances.list._, cats.instances.try_._
import cats.syntax.apply._, cats.syntax.traverse._
import io.circe.{ Decoder, Encoder, Printer }
import java.io.{ File, PrintWriter }
import org.scalacheck.{ Arbitrary, Gen }
import scala.reflect.runtime.universe.TypeTag
import scala.util.{ Failure, Try }
import scala.util.matching.Regex

abstract class ResourceFileGoldenCodecLaws[A](
  name: String,
  resourceRootDir: File,
  resourcePackage: List[String],
  val size: Int,
  count: Int,
  override protected val printer: Printer
) extends GoldenCodecLaws[A]
    with ExampleGeneration[A] {

  private[this] val resourceRootPath: String = "/" + resourcePackage.mkString("/") + "/"
  private[this] val resourceDir: File = resourcePackage.foldLeft(resourceRootDir) {
    case (acc, p) => new File(acc, p)
  }
  private[this] val GoldenFilePattern: Regex = "^-(.{44})\\.json$".r

  private[this] lazy val loadGoldenFiles: Try[List[(A, String)]] =
    Resources.open(resourceRootPath).flatMap { dirSource =>
      val files = dirSource.getLines.flatMap {
        case fileName if fileName.startsWith(name) =>
          fileName.drop(name.length) match {
            case GoldenFilePattern(seed) => Some((seed, fileName))
            case _                       => None
          }
        case _ => None
      }.toList.traverse[Try, (A, String)] {
        case (seed, name) =>
          val contents = Resources.open(resourceRootPath + name).map { source =>
            val lines = source.getLines.mkString("\n")
            source.close()
            lines
          }
          (getValueFromBase64Seed(seed), contents).tupled
      }

      dirSource.close()

      // Fail unless there are either zero golden files or exactly the required number.
      files.flatMap { values =>
        if (values.size == 0 || values.size == count) files
        else Failure(new IllegalStateException(s"Expected 0 or $count golden files, got ${values.size}"))
      }
    }

  private[this] def generateGoldenFiles: Try[List[(A, String)]] =
    generateRandomGoldenExamples(count).traverse {
      case (seed, value, encoded) =>
        Try {
          resourceDir.mkdirs()
          val file = new File(resourceDir, s"$name-${seed.toBase64}.json")

          val writer = new PrintWriter(file)
          writer.print(encoded)
          writer.close()

          (value, encoded)
        }
    }

  protected lazy val goldenExamples: Try[List[(A, String)]] =
    loadGoldenFiles.flatMap(fs => if (fs.isEmpty) generateGoldenFiles else loadGoldenFiles)
}

object ResourceFileGoldenCodecLaws {
  def apply[A](
    name: String,
    resourceRootDir: File,
    resourcePackage: List[String],
    size: Int,
    count: Int,
    printer: Printer
  )(implicit decodeA: Decoder[A], encodeA: Encoder[A], arbitraryA: Arbitrary[A]): GoldenCodecLaws[A] =
    new ResourceFileGoldenCodecLaws[A](name, resourceRootDir, resourcePackage, size, count, printer) {
      val decode: Decoder[A] = decodeA
      val encode: Encoder[A] = encodeA
      val gen: Gen[A] = arbitraryA.arbitrary
    }

  def apply[A](
    size: Int = 100,
    count: Int = 1,
    printer: Printer = Printer.spaces2
  )(
    implicit decodeA: Decoder[A],
    encodeA: Encoder[A],
    arbitraryA: Arbitrary[A],
    typeTagA: TypeTag[A]
  ): GoldenCodecLaws[A] =
    apply[A](Resources.inferName[A], Resources.inferRootDir, Resources.inferPackage[A], size, count, printer)
} 
Example 5
Source File: ProxyServer.scala    From devbox   with Apache License 2.0
package cmdproxy

import java.io.BufferedReader
import java.io.InputStreamReader
import java.io.OutputStreamWriter
import java.io.PrintWriter
import java.net.InetAddress
import java.net.ServerSocket
import java.net.Socket

import scala.util.Using

import devbox.logger.FileLogger
import os.RelPath
import ujson.ParseException
import upickle.default.{macroRW, ReadWriter}

case class Request(workingDir: String, cmd: Seq[String])
object Request {
  implicit val rw: ReadWriter[Request] = macroRW
}


// NOTE: the enclosing class declaration was dropped from this listing; the
// following header is a plausible reconstruction, inferred from the uses of
// dirMapping, socket, and logger below (the original signature may differ).
class ProxyServer(dirMapping: Seq[(os.Path, os.RelPath)],
                  port: Int = ProxyServer.DEFAULT_PORT)
                 (implicit logger: FileLogger) {

  val socket = new ServerSocket(port, 0, InetAddress.getLoopbackAddress)

  val localDir: Map[os.RelPath, os.Path] = dirMapping.map(_.swap).toMap

  def start(): Unit = {
    logger.info(s"Starting command proxy server, listening at ${socket.getInetAddress}:${socket.getLocalPort}")
    (new Thread("Git Proxy Thread") {
      override def run(): Unit = {
        while (!socket.isClosed) {
          // The specific SocketException case must come before the general
          // Exception case, or it can never match.
          Using(socket.accept()) { handleConnection } recover {
            case e: java.net.SocketException if e.getMessage == "Socket closed" =>
              logger.error(s"Git proxy socket closed")
            case e: Exception =>
              logger.error(s"Error handling request ${e.getMessage}")
          }
        }
      }
    }).start()

  }

  def handleConnection(conn: Socket): Unit = try {
    logger.info(s"Accepting connection from ${conn.getInetAddress}")
    val in = new BufferedReader(new InputStreamReader(conn.getInputStream, ProxyServer.CHARSET_NAME))
    val out = new PrintWriter(new OutputStreamWriter(conn.getOutputStream, ProxyServer.CHARSET_NAME))

    upickle.default.read[Request](in.readLine()) match {
      case Request(dir, args) =>
        val workingDir = localDir
          .collect{case (remote, local) if RelPath(dir).startsWith(remote) =>
            local / RelPath(dir).relativeTo(remote)
          }
          .head

        // Be cautious here: only execute "git" commands
        if (args.headOption.exists(_ == "git")) {
          logger.info(s"Executing `${args.mkString(" ")}` in $workingDir")

          val proc = os.proc(args).call(
            workingDir,
            mergeErrIntoOut = true,
            stdout = os.ProcessOutput.Readlines(str =>
              out.println(upickle.default.write(Left[String, Int](str)))
            ),
            check = false,
            timeout = 10000
          )

          out.println(upickle.default.write(Right[String, Int](proc.exitCode)))
        } else {
          val msg = s"Not executing non-git commend: `${args.mkString(" ")}`."
          logger.info(msg)
          out.println(upickle.default.write(Right[String, Int](1)))
        }

        out.flush()
    }
  } catch {
    case e: ParseException => logger.error(s"Error parsing incoming json request: ${e.getMessage}")
  }
}

object ProxyServer {
  val DEFAULT_PORT = 20280
  val CHARSET_NAME = "UTF-8"
} 
Example 6
Source File: Interpreter.scala    From sjsonnet   with Apache License 2.0
package sjsonnet

import java.io.{PrintWriter, StringWriter}

import fastparse.Parsed
import sjsonnet.Expr.Params


class Interpreter(parseCache: collection.mutable.Map[String, fastparse.Parsed[(Expr, Map[String, Int])]],
                  extVars: Map[String, ujson.Value],
                  tlaVars: Map[String, ujson.Value],
                  wd: Path,
                  importer: (Path, String) => Option[(Path, String)],
                  preserveOrder: Boolean = false) {

  val evaluator = new Evaluator(
    parseCache,
    extVars,
    wd,
    importer,
    preserveOrder
  )

  def interpret(txt: String, path: Path): Either[String, ujson.Value] = {
    interpret0(txt, path, ujson.Value)
  }
  def interpret0[T](txt: String,
                    path: Path,
                    visitor: upickle.core.Visitor[T, T]): Either[String, T] = {
    for{
      res <- parseCache.getOrElseUpdate(txt, fastparse.parse(txt, Parser.document(_))) match{
        case f @ Parsed.Failure(l, i, e) => Left("Parse error: " + f.trace().msg)
        case Parsed.Success(r, index) => Right(r)
      }
      (parsed, nameIndices) = res
      _ = evaluator.loadedFileContents(path) = txt
      res0 <-
        try Right(
          evaluator.visitExpr(parsed)(
            Std.scope(nameIndices.size + 1),
            new FileScope(path, nameIndices)
          )
        )
        catch{case e: Throwable =>
          val s = new StringWriter()
          val p = new PrintWriter(s)
          e.printStackTrace(p)
          p.close()
          Left(s.toString.replace("\t", "    "))
        }
      res = res0 match{
        case f: Val.Func =>
          f.copy(params = Params(f.params.args.map{ case (k, default, i) =>
            (k, tlaVars.get(k) match{
              case None => default
              case Some(v) => Some(Materializer.toExpr(v))
            }, i)
          }))
        case x => x
      }
      json <-
        try Right(Materializer.apply0(res, visitor)(evaluator))
        catch{
          case Error.Delegate(msg) => Left(msg)
          case e: Throwable =>
            val s = new StringWriter()
            val p = new PrintWriter(s)
            e.printStackTrace(p)
            p.close()
            Left(s.toString.replace("\t", "    "))
        }
    } yield json
  }
} 
Example 7
Source File: SynthBenchmark.scala    From drizzle-spark   with Apache License 2.0
// scalastyle:off println
package org.apache.spark.examples.graphx

import java.io.{FileOutputStream, PrintWriter}

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.graphx.{GraphXUtils, PartitionStrategy}
import org.apache.spark.graphx.util.GraphGenerators


object SynthBenchmark {

  def main(args: Array[String]) {
    val options = args.map {
      arg =>
        arg.dropWhile(_ == '-').split('=') match {
          case Array(opt, v) => (opt -> v)
          case _ => throw new IllegalArgumentException("Invalid argument: " + arg)
        }
    }

    var app = "pagerank"
    var niter = 10
    var numVertices = 100000
    var numEPart: Option[Int] = None
    var partitionStrategy: Option[PartitionStrategy] = None
    var mu: Double = 4.0
    var sigma: Double = 1.3
    var degFile: String = ""
    var seed: Int = -1

    options.foreach {
      case ("app", v) => app = v
      case ("niters", v) => niter = v.toInt
      case ("nverts", v) => numVertices = v.toInt
      case ("numEPart", v) => numEPart = Some(v.toInt)
      case ("partStrategy", v) => partitionStrategy = Some(PartitionStrategy.fromString(v))
      case ("mu", v) => mu = v.toDouble
      case ("sigma", v) => sigma = v.toDouble
      case ("degFile", v) => degFile = v
      case ("seed", v) => seed = v.toInt
      case (opt, _) => throw new IllegalArgumentException("Invalid option: " + opt)
    }

    val conf = new SparkConf()
      .setAppName(s"GraphX Synth Benchmark (nverts = $numVertices, app = $app)")
    GraphXUtils.registerKryoClasses(conf)

    val sc = new SparkContext(conf)

    // Create the graph
    println(s"Creating graph...")
    val unpartitionedGraph = GraphGenerators.logNormalGraph(sc, numVertices,
      numEPart.getOrElse(sc.defaultParallelism), mu, sigma, seed)
    // Repartition the graph
    val graph = partitionStrategy.foldLeft(unpartitionedGraph)(_.partitionBy(_)).cache()

    var startTime = System.currentTimeMillis()
    val numEdges = graph.edges.count()
    println(s"Done creating graph. Num Vertices = $numVertices, Num Edges = $numEdges")
    val loadTime = System.currentTimeMillis() - startTime

    // Collect the degree distribution (if desired)
    if (!degFile.isEmpty) {
      val fos = new FileOutputStream(degFile)
      val pos = new PrintWriter(fos)
      val hist = graph.vertices.leftJoin(graph.degrees)((id, _, optDeg) => optDeg.getOrElse(0))
        .map(p => p._2).countByValue()
      hist.foreach {
        case (deg, count) => pos.println(s"$deg \t $count")
      }
      pos.close() // close the writer so the histogram is flushed to disk
    }

    // Run PageRank
    startTime = System.currentTimeMillis()
    if (app == "pagerank") {
      println("Running PageRank")
      val totalPR = graph.staticPageRank(niter).vertices.map(_._2).sum()
      println(s"Total PageRank = $totalPR")
    } else if (app == "cc") {
      println("Running Connected Components")
      val numComponents = graph.connectedComponents.vertices.map(_._2).distinct().count()
      println(s"Number of components = $numComponents")
    }
    val runTime = System.currentTimeMillis() - startTime

    println(s"Num Vertices = $numVertices")
    println(s"Num Edges = $numEdges")
    println(s"Creation time = ${loadTime/1000.0} seconds")
    println(s"Run time = ${runTime/1000.0} seconds")

    sc.stop()
  }
}
// scalastyle:on println 
Example 8
Source File: PageViewGenerator.scala    From drizzle-spark   with Apache License 2.0
// scalastyle:off println
package org.apache.spark.examples.streaming.clickstream

import java.io.PrintWriter
import java.net.ServerSocket
import java.util.Random


// The PageView class was dropped from this listing; a plausible reconstruction
// from its use in getNextClickEvent below:
class PageView(val url: String, val status: Int, val zipCode: Int, val userID: Int)
    extends Serializable {
  override def toString(): String = {
    "%s\t%s\t%s\t%s\n".format(url, status, zipCode, userID)
  }
}
// scalastyle:on
object PageViewGenerator {
  val pages = Map("http://foo.com/" -> .7,
                  "http://foo.com/news" -> 0.2,
                  "http://foo.com/contact" -> .1)
  val httpStatus = Map(200 -> .95,
                       404 -> .05)
  val userZipCode = Map(94709 -> .5,
                        94117 -> .5)
  val userID = Map((1 to 100).map(_ -> .01): _*)

  def pickFromDistribution[T](inputMap: Map[T, Double]): T = {
    val rand = new Random().nextDouble()
    var total = 0.0
    for ((item, prob) <- inputMap) {
      total = total + prob
      if (total > rand) {
        return item
      }
    }
    inputMap.take(1).head._1 // Shouldn't get here if probabilities add up to 1.0
  }

  def getNextClickEvent(): String = {
    val id = pickFromDistribution(userID)
    val page = pickFromDistribution(pages)
    val status = pickFromDistribution(httpStatus)
    val zipCode = pickFromDistribution(userZipCode)
    new PageView(page, status, zipCode, id).toString()
  }

  def main(args: Array[String]) {
    if (args.length != 2) {
      System.err.println("Usage: PageViewGenerator <port> <viewsPerSecond>")
      System.exit(1)
    }
    val port = args(0).toInt
    val viewsPerSecond = args(1).toFloat
    val sleepDelayMs = (1000.0 / viewsPerSecond).toInt
    val listener = new ServerSocket(port)
    println("Listening on port: " + port)

    while (true) {
      val socket = listener.accept()
      new Thread() {
        override def run(): Unit = {
          println("Got client connected from: " + socket.getInetAddress)
          val out = new PrintWriter(socket.getOutputStream(), true)

          while (true) {
            Thread.sleep(sleepDelayMs)
            out.write(getNextClickEvent())
            out.flush()
          }
          socket.close()
        }
      }.start()
    }
  }
}
// scalastyle:on println 
Example 9
Source File: PythonBroadcastSuite.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.api.python

import java.io.{File, PrintWriter}

import scala.io.Source

import org.scalatest.Matchers

import org.apache.spark.{SharedSparkContext, SparkConf, SparkFunSuite}
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.util.Utils

// This test suite uses SharedSparkContext because we need a SparkEnv in order to deserialize
// a PythonBroadcast:
class PythonBroadcastSuite extends SparkFunSuite with Matchers with SharedSparkContext {
  test("PythonBroadcast can be serialized with Kryo (SPARK-4882)") {
    val tempDir = Utils.createTempDir()
    val broadcastedString = "Hello, world!"
    def assertBroadcastIsValid(broadcast: PythonBroadcast): Unit = {
      val source = Source.fromFile(broadcast.path)
      val contents = source.mkString
      source.close()
      contents should be (broadcastedString)
    }
    try {
      val broadcastDataFile: File = {
        val file = new File(tempDir, "broadcastData")
        val printWriter = new PrintWriter(file)
        printWriter.write(broadcastedString)
        printWriter.close()
        file
      }
      val broadcast = new PythonBroadcast(broadcastDataFile.getAbsolutePath)
      assertBroadcastIsValid(broadcast)
      val conf = new SparkConf().set("spark.kryo.registrationRequired", "true")
      val deserializedBroadcast =
        Utils.clone[PythonBroadcast](broadcast, new KryoSerializer(conf).newInstance())
      assertBroadcastIsValid(deserializedBroadcast)
    } finally {
      Utils.deleteRecursively(tempDir)
    }
  }
} 
Example 10
Source File: SentenceTokenizerSpec.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.dataset.text

import java.io.PrintWriter

import com.intel.analytics.bigdl.dataset.DataSet
import com.intel.analytics.bigdl.utils.{Engine, SparkContextLifeCycle}
import org.apache.spark.{SparkConf, SparkContext}
import org.scalatest.{FlatSpec, Matchers}

import scala.io.Source

class SentenceTokenizerSpec extends SparkContextLifeCycle with Matchers {

  override def appName: String = "DocumentTokenizer"

  "SentenceTokenizerSpec" should "tokenizes articles correctly on Spark" in {
    val tmpFile = java.io.File
      .createTempFile("UnitTest", "DocumentTokenizerSpec").getPath

    val sentence1 = "Enter Barnardo and Francisco, two sentinels."
    val sentence2 = "Who’s there?"
    val sentence3 = "I think I hear them. Stand ho! Who is there?"
    val sentence4 = "The Dr. lives in a blue-painted box."

    val sentences = Array(sentence1, sentence2, sentence3, sentence4)
    new PrintWriter(tmpFile) {
      write(sentences.mkString("\n")); close
    }

    val sents = DataSet.rdd(sc.textFile(tmpFile)
      .filter(!_.isEmpty)).transform(SentenceSplitter())
      .toDistributed().data(train = false).flatMap(item => item.iterator).collect()
      .asInstanceOf[Array[String]]
    val tokens = DataSet.rdd(sc.parallelize(sents))
        .transform(SentenceTokenizer())
    val output = tokens.toDistributed().data(train = false).collect()

    var count = 0
    println("tokenized sentences:")
    output.foreach(x => {
      count += x.length
      println(x.mkString(" "))
    })

    val numOfSents = 6
    val numOfWords = 33

    output.length should be (numOfSents)
    count should be (numOfWords)
  }

  "SentenceTokenizerSpec" should "tokenizes articles correctly on local" in {
    val tmpFile = java.io.File
      .createTempFile("UnitTest", "DocumentTokenizerSpec").getPath

    val sentence1 = "Enter Barnardo and Francisco, two sentinels."
    val sentence2 = "Who’s there?"
    val sentence3 = "I think I hear them. Stand ho! Who is there?"
    val sentence4 = "The Dr. lives in a blue-painted box."

    val sentences = Array(sentence1, sentence2, sentence3, sentence4)

    new PrintWriter(tmpFile) {
      write(sentences.mkString("\n")); close
    }

    val sentenceSplitter = SentenceSplitter()
    val sentenceTokenizer = SentenceTokenizer()
    val logData = Source.fromFile(tmpFile).getLines().toArray
    val sents = DataSet.array(logData
      .filter(!_.isEmpty)).transform(sentenceSplitter)
      .toLocal().data(train = false).flatMap(item => item.iterator)
    val tokens = DataSet.array(sents.toArray)
        .transform(sentenceTokenizer)
    val output = tokens.toLocal().data(train = false).toArray

    sentenceSplitter.close()
    sentenceTokenizer.close()

    var count_word = 0
    println("tokenized sentences:")
    output.foreach(x => {
      count_word += x.length
      println(x.mkString(" "))
    })

    val numOfSents = 6
    val numOfWords = 33
    output.length should be (numOfSents)
    count_word should be (numOfWords)
  }
} 
Example 11
Source File: TextToLabeledSentenceSpec.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.dataset.text

import java.io.PrintWriter

import com.intel.analytics.bigdl.dataset.DataSet
import com.intel.analytics.bigdl.utils.{Engine, SparkContextLifeCycle}
import org.apache.spark.SparkContext
import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers}

import scala.io.Source

@com.intel.analytics.bigdl.tags.Serial
class TextToLabeledSentenceSpec extends SparkContextLifeCycle with Matchers {
  override def nodeNumber: Int = 1
  override def coreNumber: Int = 1
  override def appName: String = "TextToLabeledSentence"

  "TextToLabeledSentenceSpec" should "indexes sentences correctly on Spark" in {
    val tmpFile = java.io.File
      .createTempFile("UnitTest", "DocumentTokenizerSpec").getPath

    val sentence1 = "Enter Barnardo and Francisco, two sentinels."
    val sentence2 = "Who’s there?"
    val sentence3 = "I think I hear them. Stand ho! Who is there?"
    val sentence4 = "The Dr. lives in a blue-painted box."

    val sentences = Array(sentence1, sentence2, sentence3, sentence4)

    new PrintWriter(tmpFile) {
      write(sentences.mkString("\n")); close
    }

    val tokens = DataSet.rdd(sc.textFile(tmpFile)
      .filter(!_.isEmpty))
      .transform(SentenceTokenizer())
    val output = tokens.toDistributed().data(train = false)
    val dictionary = Dictionary(output, 100)
    val textToLabeledSentence = TextToLabeledSentence[Float](dictionary)
    val labeledSentences = tokens.transform(textToLabeledSentence)
      .toDistributed().data(false).collect()
    labeledSentences.foreach(x => {
      println("input = " + x.data().mkString(","))
      println("target = " + x.label().mkString(","))
      var i = 1
      while (i < x.dataLength()) {
        x.getData(i) should be (x.getLabel(i - 1))
        i += 1
      }
    })
  }

  "TextToLabeledSentenceSpec" should "indexes sentences correctly on Local" in {
    val tmpFile = java.io.File
      .createTempFile("UnitTest", "DocumentTokenizerSpec").getPath

    val sentence1 = "Enter Barnardo and Francisco, two sentinels."
    val sentence2 = "Who’s there?"
    val sentence3 = "I think I hear them. Stand ho! Who is there?"
    val sentence4 = "The Dr. lives in a blue-painted box."

    val sentences = Array(sentence1, sentence2, sentence3, sentence4)

    new PrintWriter(tmpFile) {
      write(sentences.mkString("\n")); close
    }

    val logData = Source.fromFile(tmpFile).getLines().toArray
    val tokens = DataSet.array(logData
      .filter(!_.isEmpty))
      .transform(SentenceTokenizer())
    val output = tokens.toLocal().data(train = false)

    val dictionary = Dictionary(output, 100)
    val textToLabeledSentence = TextToLabeledSentence[Float](dictionary)
    val labeledSentences = tokens.transform(textToLabeledSentence)
      .toLocal().data(false)
    labeledSentences.foreach(x => {
      println("input = " + x.data().mkString(","))
      println("target = " + x.label().mkString(","))
      var i = 1
      while (i < x.dataLength()) {
        x.getData(i) should be (x.getLabel(i - 1))
        i += 1
      }
    })

  }
} 
Example 12
Source File: DictionarySpec.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.dataset.text

import java.io.PrintWriter

import com.intel.analytics.bigdl.dataset.DataSet
import com.intel.analytics.bigdl.utils.Engine
import com.intel.analytics.bigdl.utils.SparkContextLifeCycle
import org.apache.spark.{SparkConf, SparkContext}
import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers}

import scala.io.Source

class DictionarySpec extends SparkContextLifeCycle with Matchers {
  override def nodeNumber: Int = 1
  override def coreNumber: Int = 1
  override def appName: String = "DictionarySpec"

  "DictionarySpec" should "creates dictionary correctly on Spark" in {
    val tmpFile = java.io.File
      .createTempFile("UnitTest", "DictionarySpec").getPath

    val sentence1 = "Enter Barnardo and Francisco, two sentinels."
    val sentence2 = "Who’s there?"
    val sentence3 = "I think I hear them. Stand ho! Who is there?"

    val sentences = Array(sentence1, sentence2, sentence3)

    new PrintWriter(tmpFile, "UTF-8") {
      write(sentences.mkString("\n")); close
    }

    val tokens = DataSet.rdd(sc.textFile(tmpFile)
      .filter(!_.isEmpty)).transform(SentenceTokenizer())
    val output = tokens.toDistributed().data(train = false)

    val numOfWords = 21

    val dictionary = Dictionary(output, 100)

    dictionary.getVocabSize() should be (numOfWords)
    dictionary.getDiscardSize() should be (0)
    dictionary.print()
    dictionary.printDiscard()
    dictionary.getVocabSize() should be (numOfWords)
    sc.stop()
  }

  "DictionarySpec" should "creates dictionary correctly on local" in {
    val tmpFile = java.io.File
      .createTempFile("UnitTest", "DictionarySpec").getPath

    val sentence1 = "Enter Barnardo and Francisco, two sentinels."
    val sentence2 = "Who’s there?"
    val sentence3 = "I think I hear them. Stand ho! Who is there?"

    val sentences = Array(sentence1, sentence2, sentence3)

    new PrintWriter(tmpFile, "UTF-8") {
      write(sentences.mkString("\n")); close
    }

    val logData = Source.fromFile(tmpFile, "UTF-8").getLines().toArray
    val tokens = DataSet.array(logData
      .filter(!_.isEmpty)).transform(SentenceTokenizer())
    val output = tokens.toLocal().data(train = false)

    val numOfWords = 21

    val dictionary = Dictionary(output, 100)

    dictionary.getVocabSize() should be (numOfWords)
    dictionary.getDiscardSize() should be (0)
    dictionary.print()
    dictionary.printDiscard()
    dictionary.getVocabSize() should be (numOfWords)
  }
} 
Example 13
Source File: SentenceBiPaddingSpec.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.dataset.text

import java.io.PrintWriter

import com.intel.analytics.bigdl.dataset.DataSet
import com.intel.analytics.bigdl.dataset.text.utils.SentenceToken
import com.intel.analytics.bigdl.utils.{Engine, SparkContextLifeCycle}
import org.apache.spark.SparkContext
import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers}

import scala.io.Source

@com.intel.analytics.bigdl.tags.Serial
class SentenceBiPaddingSpec extends SparkContextLifeCycle with Matchers {
  override def nodeNumber: Int = 1
  override def coreNumber: Int = 1
  override def appName: String = "DocumentTokenizer"

  "SentenceBiPaddingSpec" should "pads articles correctly on Spark" in {
    val tmpFile = java.io.File
      .createTempFile("UnitTest", "DocumentTokenizerSpec").getPath

    val sentence1 = "Enter Barnardo and Francisco, two sentinels."
    val sentence2 = "Who’s there?"
    val sentence3 = "I think I hear them. Stand ho! Who is there?"
    val sentence4 = "The Dr. lives in a blue-painted box."

    val sentences = Array(sentence1, sentence2, sentence3, sentence4)
    new PrintWriter(tmpFile) {
      write(sentences.mkString("\n")); close
    }

    val sents = DataSet.rdd(sc.textFile(tmpFile)
      .filter(!_.isEmpty)).transform(SentenceSplitter())
      .toDistributed().data(train = false).flatMap(item => item.iterator).collect()
      .asInstanceOf[Array[String]]
    val tokens = DataSet.rdd(sc.parallelize(sents))
      .transform(SentenceBiPadding())
    val output = tokens.toDistributed().data(train = false).collect()

    var count = 0
    println("padding sentences:")
    output.foreach(x => {
      count += x.length
      println(x)
      val words = x.split(" ")
      val startToken = words(0)
      val endToken = words(words.length - 1)
      startToken should be (SentenceToken.start)
      endToken should be (SentenceToken.end)
    })
    sc.stop()
  }

  "SentenceBiPaddingSpec" should "pads articles correctly on local" in {
    val tmpFile = java.io.File
      .createTempFile("UnitTest", "DocumentTokenizerSpec").getPath

    val sentence1 = "Enter Barnardo and Francisco, two sentinels."
    val sentence2 = "Who’s there?"
    val sentence3 = "I think I hear them. Stand ho! Who is there?"
    val sentence4 = "The Dr. lives in a blue-painted box."

    val sentences = Array(sentence1, sentence2, sentence3, sentence4)

    new PrintWriter(tmpFile) {
      write(sentences.mkString("\n")); close
    }

    val logData = Source.fromFile(tmpFile).getLines().toArray
    val sents = DataSet.array(logData
      .filter(!_.isEmpty)).transform(SentenceSplitter())
      .toLocal().data(train = false).flatMap(item => item.iterator)
    val tokens = DataSet.array(sents.toArray)
      .transform(SentenceBiPadding())
    val output = tokens.toLocal().data(train = false).toArray

    var count_word = 0
    println("padding sentences:")
    output.foreach(x => {
      count_word += x.length
      println(x)
      val words = x.split(" ")
      val startToken = words(0)
      val endToken = words(words.length - 1)
      startToken should be (SentenceToken.start)
      endToken should be (SentenceToken.end)
    })
  }
} 
Example 14
Source File: GenerateFile.scala    From streams-tests   with Apache License 2.0
package com.softwaremill.streams.util

import java.io.PrintWriter

import scala.util.Random

object GenerateFile extends App {
  val sizeMB = 500

  //

  val file = TestFiles.testFile(sizeMB)
  val r = new Random()
  def oneKB = List.fill(1023)(r.nextPrintableChar()).mkString + "\n"
  file.createNewFile()
  val pw = new PrintWriter(file)
  for (i <- 1 to sizeMB) {
    for (j <- 1 to 1024) {
      pw.print(oneKB)
    }
  }
  pw.close()
} 
Example 15
Source File: SolidityPlugin.scala    From mantis   with Apache License 2.0
import java.io.PrintWriter

import sbt.TaskKey
import sbt._
import Keys._

object SolidityPlugin extends AutoPlugin {

  object autoImport {
    lazy val solidityCompile = TaskKey[Unit]("solidityCompile", "Compiles solidity contracts")
  }

  import autoImport._

  override def projectSettings: Seq[Def.Setting[_]] = Seq(
    solidityCompile := {
      import sys.process._

      val contractsDir = baseDirectory.value / "src" / "evmTest" / "resources" / "solidity"
      val outDir = baseDirectory.value / "target" / "contracts"

      (contractsDir ** "*.sol").get.foreach { f =>
        Seq("solc", f.getPath, "--bin", "--overwrite", "-o", outDir.getPath).!!

        // this is a temporary workaround, see: https://github.com/ethereum/solidity/issues/1732
        val abiOut = Seq("solc", f.getPath, "--abi").!!
        val abisLines = abiOut.split("\n").sliding(4, 4)
        abisLines.foreach { abiLines =>
          val contractName = abiLines(1)
            .replace(f.getPath, "")
            .dropWhile(_ != ':').drop(1)
            .takeWhile(_ != ' ')
          new PrintWriter(outDir / s"$contractName.abi") {
            write(abiLines.drop(3).mkString); close()
          }
        }
      }
    }
  )

} 
Example 16
Source File: WikiETL.scala    From CarbonDataLearning   with GNU General Public License v3.0
package org.github.xubo245.carbonDataLearning.etl

import java.io.{File, PrintWriter}
import java.text.SimpleDateFormat
import java.util.Date

import scala.io.Source
import scala.util.Random

object WikiETL {
  def main(args: Array[String]): Unit = {
    val directory = "/root/xubo/data"
    val files = new File(directory)
    val out = new PrintWriter("/root/xubo/data/pageviews-20150505time")
    var flag: Int = 10000000
    var typeMap = Map(("b","wikibooks")
      ,("d","wiktionary")
      ,("m","wikimedia")
      ,("mw","wikipedia mobile")
      ,("n","wikinews")
      ,("q","wikiquote")
      ,("s","wikisource")
      ,("v","wikiversity")
      ,("w","mediawiki"))

    for (file <- files.listFiles().sorted.filter(_.getCanonicalFile.getName.contains("pageviews-20150505-"))) {
      val filePath = file.getCanonicalPath
      println(filePath)
      //            val out = new PrintWriter(filePath + "WithTime")
      val reader = Source.fromFile(filePath)
      val fileName = file.getCanonicalFile.getName
      val delimiter = "\t"
      for (line <- reader.getLines()) {
        val stringBuffer = new StringBuffer()
        val random = new Random()
        val id = flag+random.nextInt(1000000)
        stringBuffer
          .append(id).append(delimiter)
          .append(fileName.substring(10, 14)).append(delimiter)
          .append(fileName.substring(14, 16)).append(delimiter)
          .append(fileName.substring(16, 18)).append(delimiter)
          .append(fileName.substring(19, 21)).append(delimiter)
        val array=line.mkString.split("\\s+")

        if (array.length == 4 && array(2).matches("[0-9]*") && !array(1).contains("\"")) {
          val domain = array(0).split('.')
          stringBuffer.append(domain(0)).append(delimiter)
          if (domain.length > 1) {
            var value: String = typeMap.getOrElse(domain(1), "wiki")
            stringBuffer.append(value).append(delimiter)
          } else {
            stringBuffer.append("wiki").append(delimiter)
          }
          val time = new SimpleDateFormat("yyyyMMddHHmmssSSS").format(new Date());
          val tid= id*10+random.nextInt(5)
          stringBuffer.append(array(1).replace('_',' ')).append(delimiter)
            .append(tid).append(delimiter)
            .append(array(2)).append(delimiter)
            .append(random.nextInt(100000)).append(delimiter)
            .append(time)

          //          for (i <- 0 until array.length-1){
          //            stringBuffer.append(array(i)).append(delimiter)
          //          }
          //          stringBuffer.append(array(array.length-1))

          //        if (array.length == 4 && array(2).matches("[0-9]*")) {
          //          id = id + 1
          out.println(stringBuffer.toString)
        }
      }
      reader.close()
    }
    out.close()
  }
} 
Example 17
Source File: BooleanFile.scala    From CarbonDataLearning   with GNU General Public License v3.0
package org.github.xubo245.carbonDataLearning.booleanDataType

import java.io.{File, PrintWriter}

import scala.util.Random

object BooleanFile {

  val randomNumber = 10000

  def createBooleanFileRandom(path: String, totalLines: Int, rate: Double): Boolean = {
    try {
      val write = new PrintWriter(path)
      var d: Double = 0.0
      val random = new Random()
      for (i <- 0 until totalLines) {
        val eachNum = random.nextInt(randomNumber)
        var flag: Boolean = true
        if (eachNum >= randomNumber * rate) {
          flag = false
        }
        write.println(flag)
        d = d + 1
      }

      write.close()
    } catch {
      case _: Exception => assert(false)
    }
    return true
  }

  def createBooleanFileWithOtherDataType(path: String, trueLines: Int): Boolean = {
    try {
      val write = new PrintWriter(path)
      var d: Double = 0.0
      for (i <- 0 until trueLines) {
        write.println(i + "," + true + ",num" + i + "," + d + "," + false)
        d = d + 1
      }
      for (i <- 0 until trueLines / 10) {
        write.println((trueLines + i) + "," + false + ",num" + (trueLines + i) + "," + d + "," + true)
        d = d + 1
      }
      write.close()
    } catch {
      case _: Exception => assert(false)
    }
    return true
  }

  def deleteFile(path: String): Boolean = {
    try {
      val file = new File(path)
      file.delete()
    } catch {
      case _: Exception => assert(false)
    }
    return true
  }

  def createOnlyBooleanFile(path: String, num: Int): Boolean = {
    try {
      val write = new PrintWriter(path)
      for (i <- 0 until num) {
        write.println(true)
      }
      for (i <- 0 until num / 10) {
        write.println(false)
      }
      write.close()
    } catch {
      case _: Exception => assert(false)
    }
    return true
  }
} 
Example 18
Source File: SerializedException.scala    From ArchiveSpark   with MIT License
package org.archive.archivespark.util

import java.io.{PrintWriter, StringWriter}

case class SerializedException (exception: String, message: String, stackTrace: String) {
  def print(): Unit = {
    println(exception + ": " + message)
    println(stackTrace)
  }
}

object SerializedException {
  def apply(ex: Exception): SerializedException = {
    val stackTrace = new StringWriter()
    ex.printStackTrace(new PrintWriter(stackTrace))
    new SerializedException(ex.getClass.getCanonicalName, ex.getMessage, stackTrace.toString)
  }
} 
Example 19
Source File: Request.scala    From matcher   with MIT License
package com.wavesplatform.dex.load.request

import java.io.PrintWriter

import com.google.common.net.HttpHeaders
import com.wavesplatform.dex.load.request.RequestTag.RequestTag
import com.wavesplatform.dex.load.request.RequestType.RequestType
import com.wavesplatform.dex.load.utils.{mkJson, settings}
import com.wavesplatform.wavesj.ApiJson

case class Request(httpType: RequestType,
                   path: String,
                   tag: RequestTag,
                   jsonBody: ApiJson = null,
                   headers: Map[String, String] = Map.empty,
                   stringBody: String = "") {
  val defaultHeaders = Map(
    HttpHeaders.ACCEPT       -> "application/json",
    HttpHeaders.CONNECTION   -> "close",
    HttpHeaders.CONTENT_TYPE -> "application/json",
    HttpHeaders.HOST         -> settings.hosts.shooted
  )

  def mkGet(path: String, tag: RequestTag, additionalHeaders: Map[String, String] = Map.empty) = {
    val request =
      s"${RequestType.GET} $path HTTP/1.1\r\n${(defaultHeaders ++ additionalHeaders).map { case (k, v) => s"$k: $v" }.mkString("\r\n")}\r\n\r\n"

    s"${request.length} $tag\r\n$request\r\n"
  }

  def mkPost(obj: ApiJson, path: String, tag: RequestTag, stringBody: String = ""): String = {
    val body = if (stringBody.isEmpty) mkJson(obj).replace("\"matcherFeeAssetId\":\"WAVES\",", "") else stringBody

    val headers = defaultHeaders ++ Map(
      HttpHeaders.CONTENT_LENGTH -> body.length.toString,
      "X-API-Key"                -> settings.dexRestApiKey
    )

    val request = s"${RequestType.POST} $path HTTP/1.1\r\n${headers.map { case (k, v) => s"$k: $v" }.mkString("\r\n")}\r\n\r\n$body"

    s"${request.length} $tag\r\n$request\r\n"
  }

  def save(pw: PrintWriter): Unit = {
    pw.println(httpType match {
      case RequestType.POST => mkPost(jsonBody, path, tag, stringBody)
      case RequestType.GET  => mkGet(path, tag, headers)
    })
  }
} 
Example 20
Source File: RequestDeleter.scala    From matcher   with MIT License
package com.wavesplatform.dex.load

import java.io.{File, PrintWriter}
import java.nio.file.Files

import scala.io.Source

object RequestDeleter {

  def delRequests(file: File, deletedCount: Int): Unit = {
    if (Files.exists(file.toPath)) {
      val source = Source.fromFile(file)
      val outputFile = s"requests-after-drop-${System.currentTimeMillis}.txt"
      val output = new PrintWriter(outputFile, "utf-8")

      var i = 0
      var j = 0
      var r = 0

      try {
        // Materialize the lines so the counters below are fully computed
        // before any lines are dropped; a lazily mapped iterator would
        // evaluate drop(i) while i is still 0 and drop nothing.
        val lines = source.getLines().toVector
        lines.foreach { line =>
          if (r < deletedCount)
            i = i + 1
          if (line.isEmpty || line.indexOf("{") == 0) {
            j = j + 1
            if (j % 3 == 0) {
              j = 0
              r = r + 1
            }
          }
        }
        lines
          .drop(i)
          .foreach(line => output.print(s"$line\r\n"))
        println(s"$deletedCount of $r requests have been dropped from ${file.getAbsolutePath}, and saved to $outputFile")
      } finally output.close()
    }
  }
} 
Example 21
Source File: GatlingFeeder.scala    From matcher   with MIT License
package com.wavesplatform.dex.load

import java.io.{File, PrintWriter}
import java.security
import java.security.KeyFactory
import java.security.spec.PKCS8EncodedKeySpec
import java.util.Base64

import com.wavesplatform.dex.api.ws.protocol.WsAddressSubscribe.JwtPayload
import com.wavesplatform.dex.auth.JwtUtils
import com.wavesplatform.dex.domain.account.{AddressScheme, PrivateKey, PublicKey}
import com.wavesplatform.dex.domain.bytes.ByteStr
import com.wavesplatform.wavesj.PrivateKeyAccount
import play.api.libs.json.Json

import scala.concurrent.duration._
import scala.io.Source
import scala.util.Random

object GatlingFeeder {

  def authServiceKeyPair(rawPrivateKey: String): security.PrivateKey = {
    val privateKeyContent = rawPrivateKey
      .replace("-----BEGIN PRIVATE KEY-----", "")
      .replace("-----END PRIVATE KEY-----", "")
      .replaceAll("\\n", "")

    val kf         = KeyFactory.getInstance("RSA")
    val ksPkcs8    = new PKCS8EncodedKeySpec(Base64.getDecoder.decode(privateKeyContent))
    val privateKey = kf.generatePrivate(ksPkcs8)

    privateKey
  }

  private def mkJwtSignedPayload(a: PrivateKeyAccount): JwtPayload = {
    val exp = System.currentTimeMillis() / 1000 + 24.hour.toSeconds
    JwtPayload(
      signature = ByteStr(Array.emptyByteArray),
      publicKey = PublicKey(a.getPublicKey),
      networkByte = AddressScheme.current.chainId.toChar.toString,
      clientId = "test",
      firstTokenExpirationInSeconds = exp,
      activeTokenExpirationInSeconds = exp,
      scope = List("general")
    ).signed(PrivateKey(a.getPrivateKey))
  }

  private def mkAusString(accountPrivateKey: PrivateKeyAccount, authKp: security.PrivateKey): String = {
    val jwt = JwtUtils.mkJwt(authKp, Json.toJsObject(mkJwtSignedPayload(accountPrivateKey)))
    s"""{"T":"aus","S":"${accountPrivateKey.getAddress}","t":"jwt","j":"$jwt"}"""
  }

  private def mkObsStrings(pairsFile: File, numberPerClient: Int): String = {
    val source = Source.fromFile(pairsFile)
    try {
      val pairs = Random.shuffle(source.getLines.toVector)
      require(numberPerClient <= pairs.size, "numberPerClient > available asset pairs in file")
      pairs.take(numberPerClient).map(x => s"""{"T":"obs","S":"$x","d":100}""").mkString(";")
    } finally source.close()
  }

  def mkFile(accountsNumber: Int,
             seedPrefix: String,
             authKp: security.PrivateKey,
             pairsFile: File,
             orderBookNumberPerAccount: Int,
             feederFile: File): Unit = {
    val output = new PrintWriter(feederFile, "utf-8")
    try {
      (0 until accountsNumber).foreach { i =>
        val pk = PrivateKeyAccount.fromSeed(s"$seedPrefix$i", 0, AddressScheme.current.chainId)
        output.println(s"""${pk.getAddress};${mkAusString(pk, authKp)};${mkObsStrings(pairsFile, orderBookNumberPerAccount)}""")
      }
    } finally output.close()
    println(s"Results have been saved to $feederFile")
  }
} 
Example 22
Source File: Main.scala    From jardiff   with Apache License 2.0
package scala.tools.jardiff

import java.io.{ByteArrayOutputStream, File, PrintWriter}
import java.nio.file._

import org.apache.commons.cli
import org.apache.commons.cli.{CommandLine, DefaultParser, HelpFormatter, Options}
import org.eclipse.jgit.util.io.NullOutputStream

import scala.collection.JavaConverters.collectionAsScalaIterableConverter
import scala.util.Try
import scala.util.control.NonFatal

object Main {
  def main(args: Array[String]): Unit = {
    run(args) match {
      case ShowUsage(msg) => System.err.println(msg); sys.exit(-1)
      case Error(err) => err.printStackTrace(System.err); sys.exit(-1)
      case Success(diffFound) => sys.exit(if (diffFound) 1 else 0)
    }
  }

  private object Opts {
    val Help = new cli.Option("h", "help", false, "Display this message")
    val Git = new cli.Option("g", "git", true, "Directory to output a git repository containing the diff")
    Git.setArgName("dir")
    val NoCode = new cli.Option("c", "suppress-code", false, "Suppress method bodies")
    val Raw = new cli.Option("r", "raw", false, "Disable sorting and filtering of classfile contents")
    val NoPrivates = new cli.Option("p", "suppress-privates", false, "Display only non-private members")
    val ContextLines = new cli.Option("U", "unified", true, "Number of context lines in diff")
    val Quiet = new cli.Option("q", "quiet", false, "Don't output diffs to standard out")
    val Ignore = new cli.Option("i", "ignore", true, "File pattern to ignore rendered files in gitignore format")
    Ignore.setArgs(cli.Option.UNLIMITED_VALUES)
    ContextLines.setArgName("n")
    def apply(): Options = {
      new cli.Options().addOption(Help).addOption(Git).addOption(ContextLines).addOption(NoCode).addOption(Raw).addOption(NoPrivates).addOption(Quiet).addOption(Ignore)
    }
  }
  private implicit class RichCommandLine(val self: CommandLine) {
    def has(o: cli.Option): Boolean = self.hasOption(o.getOpt)
    def get(o: cli.Option): String = self.getOptionValue(o.getOpt)
    def getOptInt(o: cli.Option): Option[Int] = Option(self.getOptionValue(o.getOpt)).map(x => Try(x.toInt).getOrElse(throw new cli.ParseException("--" + o.getLongOpt + " requires an integer")))
  }

  private def helpText: String = {
    val formatter = new HelpFormatter
    val baos = new ByteArrayOutputStream()
    val writer = new PrintWriter(baos)
    try {
      val footer = s" VERSION1 [VERSION2 ...]\n\nEach VERSION may designate a single file, a directory, JAR file or a `${File.pathSeparator}`-delimited classpath\n\n"
      formatter.printHelp(writer, 80, "jardiff", footer, Opts(), HelpFormatter.DEFAULT_LEFT_PAD, HelpFormatter.DEFAULT_DESC_PAD, "", true)
      writer.flush()
      baos.toString().replaceFirst("\\n", "")

    } finally {
      writer.close()
    }
  }

  def run(args: Array[String]): RunResult = {
    val parser = new DefaultParser

    try {
      val line = parser.parse(Opts(), args)
      val trailingArgs = line.getArgList
      if (line.has(Opts.Help)) {
        ShowUsage(helpText)
      } else {
        val gitRepo = if (line.has(Opts.Git)) Some(Paths.get(line.get(Opts.Git))) else None
        val diffOutputStream = if (line.has(Opts.Quiet)) NullOutputStream.INSTANCE else System.out
        val config = JarDiff.Config(gitRepo, !line.has(Opts.NoCode), line.has(Opts.Raw),
          !line.has(Opts.NoPrivates), line.getOptInt(Opts.ContextLines), diffOutputStream,
          Option(line.getOptionValues(Opts.Ignore.getOpt)).toList.flatten
        )
        val paths = trailingArgs.asScala.toList.map(JarDiff.expandClassPath)
        paths match {
          case Nil => ShowUsage(helpText)
          case _ =>
            val jarDiff = JarDiff(paths, config)
            val diffFound = jarDiff.diff()
            Success(diffFound)
        }
      }
    } catch {
      case exp: cli.ParseException => ShowUsage(helpText)
      case NonFatal(t) => Error(t)
    }
  }
}

sealed abstract class RunResult
case class ShowUsage(msg: String) extends RunResult
case class Error(err: Throwable) extends RunResult
case class Success(diffFound: Boolean) extends RunResult 
Example 23
Source File: AsmTextifyRenderer.scala    From jardiff   with Apache License 2.0
package scala.tools.jardiff

import java.io.PrintWriter
import java.nio.file.{Files, Path}

import scala.collection.JavaConverters._
import org.objectweb.asm.{ClassReader, Opcodes}
import org.objectweb.asm.tree.{ClassNode, FieldNode, InnerClassNode, MethodNode}
import org.objectweb.asm.util.TraceClassVisitor

class AsmTextifyRenderer(code: Boolean, raw: Boolean, privates: Boolean) extends FileRenderer {
  def outFileExtension: String = ".asm"
  override def render(in: Path, out: Path): Unit = {
    val classBytes = Files.readAllBytes(in)
    val rawNode = classFromBytes(classBytes)
    val node = if (raw) rawNode else zapScalaClassAttrs(sortClassMembers(rawNode))
    if (!code)
      node.methods.forEach(_.instructions.clear())
    if (!privates) {
      node.methods.removeIf((m: MethodNode) => isPrivate(m.access))
      node.fields.removeIf((m: FieldNode) => isPrivate(m.access))
      node.innerClasses.removeIf((m: InnerClassNode) => isPrivate(m.access))
    }
    Files.createDirectories(out.getParent)
    val pw = new PrintWriter(Files.newBufferedWriter(out))
    try {
      val trace = new TraceClassVisitor(pw)
      node.accept(trace)
    } finally {
      pw.close()
    }
  }

  private def isPrivate(access: Int): Boolean =
    (access & Opcodes.ACC_PRIVATE) != 0

  def sortClassMembers(node: ClassNode): node.type = {
    node.fields.sort(_.name compareTo _.name)
    node.methods.sort(_.name compareTo _.name)
    node
  }

  private def isScalaSigAnnot(desc: String) =
    List("Lscala/reflect/ScalaSignature", "Lscala/reflect/ScalaLongSignature").exists(desc.contains)

  // drop ScalaSig annotation and class attributes
  private def zapScalaClassAttrs(node: ClassNode): node.type = {
    if (node.visibleAnnotations != null)
      node.visibleAnnotations = node.visibleAnnotations.asScala.filterNot(a => a == null || isScalaSigAnnot(a.desc)).asJava

    node.attrs = null
    node
  }

  private def classFromBytes(bytes: Array[Byte]): ClassNode = {
    val node = new ClassNode()
    new ClassReader(bytes).accept(node, if (raw) 0 else ClassReader.SKIP_DEBUG | ClassReader.SKIP_FRAMES)
    node
  }

} 
Example 24
Source File: GetUrlTest.scala    From piflow   with BSD 2-Clause "Simplified" License
package cn.piflow.bundle.http

import java.io.{BufferedReader, InputStreamReader, PrintWriter}
import java.net.{HttpURLConnection, InetAddress, URL, URLConnection}

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.http.client.methods.{CloseableHttpResponse, HttpGet}
import org.apache.http.impl.client.HttpClients
import org.apache.http.util.EntityUtils
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class GetUrlTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/http/getUrl.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }

} 
Example 25
Source File: FileUtil.scala    From piflow   with BSD 2-Clause "Simplified" License
package cn.piflow.util

import java.io.{File, PrintWriter}

object FileUtil {

  def getJarFile(file: File): Array[File] = {
    val files = file.listFiles().filter(!_.isDirectory)
      .filter(t => t.toString.endsWith(".jar")) // collect only .jar files here
    files ++ file.listFiles().filter(_.isDirectory).flatMap(getJarFile)
  }


  def writeFile(text: String, path: String) = {

    val file = new File(path)
    if(!file.exists()){
      file.createNewFile()
    }
    val writer = new PrintWriter(new File(path))
    writer.write(text)
    writer.close()
  }

  def main(args: Array[String]): Unit = {
    val classPath = PropertyUtil.getClassPath()

    val path = new File(classPath)
    getJarFile(path).foreach(println)

  }
} 
Example 26
Source File: ScalaExecutorUtil.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.conf.util

import java.io.PrintWriter

import cn.piflow.conf.bean.FlowBean
import cn.piflow.util.{ConfigureUtil, PropertyUtil}
import sys.process._

object ScalaExecutorUtil {

  val userDir = System.getProperty("user.dir")
  //val scalaDir =  s"$userDir/scala"
  val scalaDir = PropertyUtil.getScalaPath()
  var packageName : String = "cn.piflow.bundle.script"
  val className : String = "ScalaFile1"
  def construct(className : String, script : String) : String = {


    val path = s"$scalaDir/$className.scala"
    val code =
      s"""
        |package $packageName
        |import org.apache.spark.sql.{DataFrame, SparkSession}
        |import org.apache.spark.sql._
        |import cn.piflow.{JobContext, JobInputStream, JobOutputStream}
        |
        |
        |object $className {
        |  def perform(in: JobInputStream, out: JobOutputStream, pec: JobContext) : Unit  ={
        |    val spark = pec.get[SparkSession]()
        |    import spark.implicits._
        |    $script
        |  }
        |}
      """.stripMargin
    val out = new PrintWriter(path)
    out.write(code)
    out.close()
    path
  }

  def buildJar(className : String, classPath : String) : String = {

    //val piflowbundle = s"$userDir/lib/piflow-server-0.9.jar"
    //val piflowbundle = "/opt/project/piflow/piflow-server/target/piflow-server-0.9.jar"
    //"-encoding UTF8"
    val piflowbundle = ConfigureUtil.getPiFlowBundlePath()
    val jarFile = s"$scalaDir/$className.jar"
    val command = s"scalac -cp $piflowbundle -d $jarFile $classPath"
    println(s"Build ScalaExecutor jar: $command")
    command.!!
    jarFile
  }

  def buildScalaExcutorJar( flowBean : FlowBean) : List[String] = {
    var scalaPluginList = List[String]()
    flowBean.stops.foreach{s => {
      if(s.bundle.equals("cn.piflow.bundle.script.ExecuteScala")){

        //val plugin = s.flowName + "_" + s.name + "_" + s.uuid
        val plugin = s.properties.getOrElse("plugin", "")
        val script = s.properties.getOrElse("script", "")
        if(!script.equals("")){
          val classFile = construct(plugin, script)
          val jarFile = buildJar(plugin, classFile)
          scalaPluginList = jarFile +: scalaPluginList
        }
      }
    }}
    scalaPluginList
  }

  def main(args: Array[String]): Unit = {
    val script =
      """
        |val df = in.read()
        |df.show()
        |val df1 = df.select("title")
        |out.write(df1)
      """.stripMargin
    val code = construct( "ScalaFile",script)
    println(code)
  }
} 
Example 27
Source File: StreamingModelProducer.scala    From AI   with Apache License 2.0 5 votes vote down vote up
package com.bigchange.streaming

import java.io.PrintWriter
import java.net.ServerSocket

import breeze.linalg.DenseVector

import scala.util.Random


object StreamingModelProducer {

  def main(args: Array[String]) {

    val maxEvent = 100
    val numFeatures = 100
    val random = new Random()
    // helper that generates a dense array of standard-normal random values
    def generateRandomArray(n: Int) = Array.tabulate(n)(_ => random.nextGaussian())
    // a fixed, randomly initialized model weight vector
    val w = new DenseVector(generateRandomArray(numFeatures))
    val intercept = random.nextGaussian() * 10
    // generate a batch of random data events
    def generateNoisyData(n:Int) = {

      (1 to n).map { i  =>
        val x = new DenseVector(generateRandomArray(numFeatures)) // random feature vector
        val y = w.dot(x)
        val noisy = y + intercept // target value
        (noisy, x)
      }
    }

    // create the socket server that streams the generated events
    val listener = new ServerSocket(9999)
    println("listener port:" + listener.getLocalPort)

    while(true) {
      val socket = listener.accept()
      new Thread() {
        override def run() = {
          println("get client from:" + socket.getInetAddress)
          val out = new PrintWriter(socket.getOutputStream, true)

          while (true) {
            Thread.sleep(1000)
            val num = random.nextInt(maxEvent)
            val productEvents = generateNoisyData(num)
            productEvents.foreach { case(y, x) =>
              out.write(y + "\t" + x.data.mkString(","))
              out.write("\n")
            }
            out.flush()
            println(s"created $num events")
          }
          socket.close()
        }

      }.start()
    }

  }
} 
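The producer above writes lines of the form "<target>\t<comma-separated features>" to port 9999. A minimal counterpart consumer, sketched here on the assumption that Spark Streaming is available (the object and app names are hypothetical):

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

object StreamingModelConsumer {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[2]").setAppName("model-consumer")
    val ssc = new StreamingContext(conf, Seconds(10))
    // each line is "<target>\t<comma-separated features>"
    val targets = ssc.socketTextStream("localhost", 9999)
      .map(_.split("\t")(0).toDouble)
    targets.print() // show a sample of parsed target values per batch
    ssc.start()
    ssc.awaitTermination()
  }
}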
Example 28
Source File: ProcessInterpreter.scala    From Linkis   with Apache License 2.0 5 votes vote down vote up
package com.webank.wedatasphere.linkis.engine.Interpreter

import java.io.{BufferedReader, InputStreamReader, PrintWriter}
import java.util.concurrent.TimeUnit

import com.webank.wedatasphere.linkis.common.utils.{Logging, Utils}
import com.webank.wedatasphere.linkis.engine.spark.common.{LineBufferedStream, Starting, State, _}
import com.webank.wedatasphere.linkis.scheduler.executer.{ErrorExecuteResponse, ExecuteResponse, SuccessExecuteResponse}
import org.apache.commons.io.IOUtils
import org.json4s._

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}


abstract class ProcessInterpreter(process: Process) extends Interpreter with Logging {

  implicit val executor: ExecutionContext = ExecutionContext.global

  protected[this] var _state: State = Starting()

  protected[this] val stdin = new PrintWriter(process.getOutputStream)
  protected[this] val stdout = new BufferedReader(new InputStreamReader(process.getInputStream()), 1)
  protected[this] val errOut = new LineBufferedStream(process.getErrorStream())

  override def state: State = _state

  override def execute(code: String): ExecuteResponse = {
    if(code == "sc.cancelAllJobs" || code == "sc.cancelAllJobs()") {
      sendExecuteRequest(code)
    }
    _state match {
      case (Dead() | ShuttingDown() | Error() | Success()) =>
        throw new IllegalStateException("interpreter is not running")
      case Idle() =>
        require(state == Idle())
        code match {
          case "SHUTDOWN" =>
            sendShutdownRequest()
            close()
            ErrorExecuteResponse("shutdown",new Exception("shutdown"))
          case _ =>
            _state = Busy()
            sendExecuteRequest(code) match {
              case Some(rep) =>
                _state = Idle()
               // ExecuteComplete(rep)
                SuccessExecuteResponse()
              case None =>
                _state = Error()
                val errorMsg = errOut.lines.mkString(", ")
                throw new Exception(errorMsg)
            }
        }
      case _ => throw new IllegalStateException(s"interpreter is in ${_state} state, cannot do query.")
    }
  }

  Future {
    val exitCode = process.waitFor()
    if (exitCode != 0) {
      errOut.lines.foreach(println)
      println(getClass.getSimpleName+" has stopped with exit code " + process.exitValue)
      _state = Error()
    } else {
      println(getClass.getSimpleName+" has finished.")
      _state = Success()
    }
  }

  protected def waitUntilReady(): Unit

  protected def sendExecuteRequest(request: String): Option[JValue]

  protected def sendShutdownRequest(): Unit = {}


  override def close(): Unit = {
    val future = Future {
      _state match {
        case (Dead() | ShuttingDown() | Success()) =>
          Future.successful()
        case _ =>
          sendShutdownRequest()
      }
    }
    _state = Dead()
    IOUtils.closeQuietly(stdin)
    IOUtils.closeQuietly(stdout)
    errOut.close

    // Give ourselves 10 seconds to tear down the process.
    Utils.tryFinally(Await.result(future, Duration(10, TimeUnit.SECONDS))){
      process.destroy()}
  }

} 
Example 29
Source File: _02_WithPrintWriter.scala    From LearningScala   with Apache License 2.0 5 votes vote down vote up
package _011_writing_new_control_structures

import java.io.{File, PrintWriter}


object _02_WithPrintWriter {

  // restored from context: the enclosing object and the uncurried helper were
  // elided in this listing; the helper's signature follows from its use in main
  def withPrintWriter_oldSyntax(file: File, op: PrintWriter => Unit): Unit = {
    val writer = new PrintWriter(file)
    try {
      op(writer)
    } finally {
      writer.close()
    }
  }

  def withPrintWriter(file: File)(op: PrintWriter => Unit): Unit = {
    val writer = new PrintWriter(file)
    try {
      op(writer)
    } finally {
      writer.close()
    }
  }

  def main(args: Array[String]): Unit = {
    // old syntax
    withPrintWriter_oldSyntax(
      new File("date.txt"),
      writer => writer.println(new java.util.Date)
    )

    // new syntax
    val file = new File("date.txt")
    withPrintWriter(file) {
      writer => writer.println(new java.util.Date)
    }
  }
} 
Example 30
Source File: Pattern.scala    From Converter   with GNU General Public License v3.0 5 votes vote down vote up
package com.olvind.logging

import java.io.{File, PrintWriter, StringWriter}

import fansi.{Color, EscapeAttr, Str}
import sourcecode.Text

trait Pattern {
  def apply[T: Formatter](t: => Text[T], throwable: Option[Throwable], metadata: Metadata, ctx: Ctx): Str
}

object Pattern {
  def prefixFor(l: LogLevel): String =
    f"[${l.name.value}%-5s]"

  @inline def colorFor(l: LogLevel): EscapeAttr =
    l.level match {
      case LogLevel.trace.level => Color.Reset
      case LogLevel.debug.level => Color.Green
      case LogLevel.info.level  => Color.Blue
      case LogLevel.warn.level  => Color.Yellow
      case LogLevel.error.level => Color.Red
    }

  @inline def subtleColorFor(l: LogLevel): EscapeAttr =
    l.level match {
      case LogLevel.trace.level => Color.Reset
      case LogLevel.debug.level => Color.LightGreen
      case LogLevel.info.level  => Color.LightBlue
      case LogLevel.warn.level  => Color.LightYellow
      case LogLevel.error.level => Color.LightRed
    }

  def formatThrowable(th: Throwable): String = {
    val sw = new StringWriter()
    val pw = new PrintWriter(sw)
    th.printStackTrace(pw)
    sw.toString
  }

  object default extends Pattern {
    override def apply[T: Formatter](t: => Text[T], throwable: Option[Throwable], m: Metadata, ctx: Ctx): Str = {
      val Color  = colorFor(m.logLevel)
      val Subtle = subtleColorFor(m.logLevel)
      val source = if (t.source.startsWith("\"") || t.source.startsWith("s\"")) "" else t.source

      Str.join(
        Color(prefixFor(m.logLevel)),
        " ",
        Subtle(m.instant.toString),
        " ",
        Subtle(Formatter(new File(m.file.value))),
        ":",
        Subtle(Formatter(m.line.value)),
        " ",
        Color(source),
        " ",
        Color(Formatter(t.value)),
        " ",
        Subtle(Formatter(ctx)),
        throwable match {
          case None     => ""
          case Some(th) => Subtle(formatThrowable(th))
        },
      )
    }
  }
} 
Example 31
Source File: PersistentActorWithNotifications.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.akkapersistence.impl

import java.io.{PrintWriter, StringWriter}

import akka.actor.{ActorLogging, ActorRef}
import akka.persistence._

private[akkapersistence] trait PersistentActorWithNotifications { this: AbstractSnapshotter with ActorLogging =>
  override def persistenceId: String = SnapshotsRegistry.persistenceId(persistenceCategory, id)

  protected def persistenceCategory: String

  protected def id: String

  private var listenersForSnapshotSave: Map[Long, RecipientWithMsg] = Map.empty

  protected def deleteSnapshotsLogging(): Unit = {
    deleteSnapshotsLogging(None)
  }

  private def deleteSnapshotsLogging(maxSequenceNr: Option[Long]): Unit = {
    log.debug(s"Deleting all snapshots for $persistenceId until (inclusive): $maxSequenceNr...")
    deleteSnapshots(SnapshotSelectionCriteria(maxSequenceNr = maxSequenceNr.getOrElse(Int.MaxValue)))
  }

  protected def saveSnapshotNotifying(snapshot: Any, sequenceNr: Long, listener: Option[RecipientWithMsg]): Unit = {
    log.debug(s"Saving snapshot for $persistenceId with sequenceNr: $sequenceNr: $snapshot ...")
    listener.foreach { listener =>
      listenersForSnapshotSave += sequenceNr -> listener
    }
    saveSnapshotWithSeqNr(snapshot, sequenceNr)
  }

  protected val handleSnapshotEvents: Receive = {
    case SaveSnapshotSuccess(metadata) =>
      log.debug(s"State saved for: $metadata")
      deleteSnapshotsLogging(Some(metadata.sequenceNr-1))
      replyToListenerForSaveIfWaiting(metadata)
    case DeleteSnapshotsSuccess(criteria) =>
      log.debug(s"Snapshots with criteria: $criteria deleted")
    case SaveSnapshotFailure(metadata, cause) =>
      log.error(cause, s"State save failure for: $metadata")
    case DeleteSnapshotsFailure(criteria, cause) =>
      log.warning(s"Delete snapshots with criteria failure: $criteria.\nError: ${printStackTrace(cause)}")
  }

  private def printStackTrace(cause: Throwable): String = {
    val stringWriter = new StringWriter()
    val printWriter = new PrintWriter(stringWriter)
    cause.printStackTrace(printWriter)
    stringWriter.toString
  }

  private def replyToListenerForSaveIfWaiting(metadata: SnapshotMetadata): Unit = {
    listenersForSnapshotSave.get(metadata.sequenceNr).foreach { listener =>
      listener.reply()
      listenersForSnapshotSave -= metadata.sequenceNr
    }
  }
}

class RecipientWithMsg(recipient: ActorRef, msg: Any) {
  def reply() = recipient ! msg
} 
Example 32
Source File: SlickJdbcMigration.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.transport.amqpjdbc.slick

import java.io.PrintWriter
import java.lang.reflect.{InvocationHandler, Method, Proxy}
import java.sql.Connection
import java.util.logging.Logger

import javax.sql.DataSource
import org.flywaydb.core.api.migration.{BaseJavaMigration, Context}
import slick.jdbc.JdbcProfile

import scala.concurrent.Await
import scala.concurrent.duration._

trait SlickJdbcMigration extends BaseJavaMigration {

  protected val profile: JdbcProfile

  import profile.api._

  def migrateActions: DBIOAction[Any, NoStream, _ <: Effect]

  override final def migrate(context: Context): Unit = {
    val database = Database.forDataSource(new AlwaysUsingSameConnectionDataSource(context.getConnection), None)
    Await.result(database.run(migrateActions), 10 minute)
  }

}

class AlwaysUsingSameConnectionDataSource(conn: Connection) extends DataSource {
  private val notClosingConnection = Proxy.newProxyInstance(
    ClassLoader.getSystemClassLoader,
    Array[Class[_]](classOf[Connection]),
    SuppressCloseHandler
  ).asInstanceOf[Connection]

  object SuppressCloseHandler extends InvocationHandler {
    override def invoke(proxy: AnyRef, method: Method, args: Array[AnyRef]): AnyRef = {
      if (method.getName != "close") {
        method.invoke(conn, args : _*)
      } else {
        null
      }
    }
  }

  override def getConnection: Connection = notClosingConnection
  override def getConnection(username: String, password: String): Connection = notClosingConnection
  override def unwrap[T](iface: Class[T]): T = conn.unwrap(iface)
  override def isWrapperFor(iface: Class[_]): Boolean = conn.isWrapperFor(iface)

  override def setLogWriter(out: PrintWriter): Unit = ???
  override def getLoginTimeout: Int = ???
  override def setLoginTimeout(seconds: Int): Unit = ???
  override def getParentLogger: Logger = ???
  override def getLogWriter: PrintWriter = ???
} 
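For reference, a concrete migration extending SlickJdbcMigration might look like the following minimal sketch; the H2 profile and the messages table are hypothetical, and the V1__ class-name prefix follows Flyway's migration naming convention:

import slick.jdbc.H2Profile

class V1__CreateMessages extends SlickJdbcMigration {
  override protected val profile = H2Profile

  import profile.api._

  // hypothetical table used only to illustrate migrateActions
  class Messages(tag: Tag) extends Table[(Long, String)](tag, "messages") {
    def id = column[Long]("id", O.PrimaryKey, O.AutoInc)
    def body = column[String]("body")
    def * = (id, body)
  }

  override def migrateActions = TableQuery[Messages].schema.create
}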
Example 33
Source File: DatabaseInitializer.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.transport.amqpjdbc.slick.helpers

import java.io.PrintWriter
import java.sql.Connection
import java.util.logging.Logger
import javax.sql.DataSource

import com.typesafe.config.Config
import org.flywaydb.core.Flyway
import slick.jdbc.JdbcBackend

import scala.concurrent.ExecutionContext

class DatabaseInitializer(db: JdbcBackend.Database) {
  def initDatabase()(implicit executionContext: ExecutionContext) = {
    migrateIfNeeded(db)
    db
  }

  private def migrateIfNeeded(db: JdbcBackend.Database) = {
    Flyway.configure
      .dataSource(new DatabaseDataSource(db))
      .baselineOnMigrate(true)
      .load
      .migrate
  }
}

object DatabaseInitializer {
  def apply(config: Config) = {
    val db = JdbcBackend.Database.forConfig("db", config)
    new DatabaseInitializer(db)
  }
}

class DatabaseDataSource(db: JdbcBackend.Database) extends DataSource {
  private val conn = db.createSession().conn

  override def getConnection: Connection = conn
  override def getConnection(username: String, password: String): Connection = conn
  override def unwrap[T](iface: Class[T]): T = conn.unwrap(iface)
  override def isWrapperFor(iface: Class[_]): Boolean = conn.isWrapperFor(iface)

  override def setLogWriter(out: PrintWriter): Unit = ???
  override def getLoginTimeout: Int = ???
  override def setLoginTimeout(seconds: Int): Unit = ???
  override def getParentLogger: Logger = ???
  override def getLogWriter: PrintWriter = ???
} 
Example 34
Source File: CongestionModel.scala    From spatial   with MIT License 5 votes vote down vote up
package models

import java.io.File
import java.io.PrintWriter
import utils.io.files._
import utils.math.{CombinationTree, ReduceTree}

import scala.io.Source

object CongestionModel {

	abstract class FeatureVec[T] {
		def loads: T
		def stores: T
		def gateds: T
		def outerIters: T
		def innerIters: T
		def bitsPerCycle: T
		def toSeq: Seq[T] = Seq(stores, outerIters, loads, innerIters, gateds, bitsPerCycle)
	}
	case class RawFeatureVec(loads: Double, stores: Double, gateds: Double, outerIters: Double, innerIters: Double, bitsPerCycle: Double) extends FeatureVec[Double]
	case class CalibFeatureVec(loads: Double, stores: Double, gateds: Double, outerIters: Double, innerIters: Double, bitsPerCycle: Double) extends FeatureVec[Double]

	// Set up lattice properties
	val feature_dims = 6
	val lattice_rank = 6
	val lattice_size = Seq(3,3,3,3,3,3)
	val num_keypoints = 8
	val num_lattices = 1
	var model: String = ""

	// Derive lattice properties
	val sizes = scala.Array.tabulate(lattice_rank){i => lattice_size(i)}
	val dimensions = sizes.length
	val params_per_lattice = sizes.product
	val strides: scala.Array[Int] = scala.Array.fill(dimensions){1}
	val nparams = num_lattices * params_per_lattice

	// Grab lattice params
	lazy val loads_keypoints_inputs = ModelData.loads_keypoints_inputs(model).map(_.toDouble) //loadCSVNow[Int](s"../data/${model}/CALIBRATOR_INPUT_PARAMS/loads_keypoints_inputs.csv", ","){x => x.toDouble}
	lazy val loads_keypoints_outputs = ModelData.loads_keypoints_outputs(model).map(_.toDouble) //loadCSVNow[Double](s"../data/${model}/CALIBRATOR_OUTPUT_PARAMS/loads_keypoints_outputs.csv", ","){x => x.toDouble}
	lazy val stores_keypoints_inputs = ModelData.stores_keypoints_inputs(model).map(_.toDouble) //loadCSVNow[Int](s"../data/${model}/CALIBRATOR_INPUT_PARAMS/stores_keypoints_inputs.csv", ","){x => x.toDouble}
	lazy val stores_keypoints_outputs = ModelData.stores_keypoints_outputs(model).map(_.toDouble) //loadCSVNow[Double](s"../data/${model}/CALIBRATOR_OUTPUT_PARAMS/stores_keypoints_outputs.csv", ","){x => x.toDouble}
	lazy val gateds_keypoints_inputs = ModelData.gateds_keypoints_inputs(model).map(_.toDouble) //loadCSVNow[Int](s"../data/${model}/CALIBRATOR_INPUT_PARAMS/gateds_keypoints_inputs.csv", ","){x => x.toDouble}
	lazy val gateds_keypoints_outputs = ModelData.gateds_keypoints_outputs(model).map(_.toDouble) //loadCSVNow[Double](s"../data/${model}/CALIBRATOR_OUTPUT_PARAMS/gateds_keypoints_outputs.csv", ","){x => x.toDouble}
	lazy val outerIters_keypoints_inputs = ModelData.outerIters_keypoints_inputs(model).map(_.toDouble) //loadCSVNow[Int](s"../data/${model}/CALIBRATOR_INPUT_PARAMS/outerIters_keypoints_inputs.csv", ","){x => x.toDouble}
	lazy val outerIters_keypoints_outputs = ModelData.outerIters_keypoints_outputs(model).map(_.toDouble) //loadCSVNow[Double](s"../data/${model}/CALIBRATOR_OUTPUT_PARAMS/outerIters_keypoints_outputs.csv", ","){x => x.toDouble}
	lazy val innerIters_keypoints_inputs = ModelData.innerIters_keypoints_inputs(model).map(_.toDouble) //loadCSVNow[Int](s"../data/${model}/CALIBRATOR_INPUT_PARAMS/innerIters_keypoints_inputs.csv", ","){x => x.toDouble}
	lazy val innerIters_keypoints_outputs = ModelData.innerIters_keypoints_outputs(model).map(_.toDouble) //loadCSVNow[Double](s"../data/${model}/CALIBRATOR_OUTPUT_PARAMS/innerIters_keypoints_outputs.csv", ","){x => x.toDouble}
	lazy val bitsPerCycle_keypoints_inputs = ModelData.bitsPerCycle_keypoints_inputs(model).map(_.toDouble) //loadCSVNow[Int](s"../data/${model}/CALIBRATOR_INPUT_PARAMS/bitsPerCycle_keypoints_inputs.csv", ","){x => x.toDouble}
	lazy val bitsPerCycle_keypoints_outputs = ModelData.bitsPerCycle_keypoints_outputs(model).map(_.toDouble) //loadCSVNow[Double](s"../data/${model}/CALIBRATOR_OUTPUT_PARAMS/bitsPerCycle_keypoints_outputs.csv", ","){x => x.toDouble}
    lazy val params = ModelData.params(model).map(_.toDouble) //loadCSVNow[Double](s"../data/${model}/LATTICE_PARAMS.csv", ","){x => x.toDouble}

    
	def evaluate(features: RawFeatureVec, typ: Runtime.CtrlSchedule): Int = {
		model = typ.toString

		val calibrated_features = calibrate_features(features)
		val result = hypercube_features(calibrated_features)
		// TODO: Model is naughty if it returns <170
		// println(s"evaluating $features = ${170 max result.toInt}")
		170 max result.toInt
	}
} 
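The calibrate_features and hypercube_features helpers are elided from this listing. Judging from the keypoint arrays above, the first stage is piecewise-linear calibration of each raw feature through its (input, output) keypoints, as in calibrated-lattice models; a hedged sketch of that stage, assuming inputs outside the keypoint range are clamped:

def calibrate(x: Double, inputs: Seq[Double], outputs: Seq[Double]): Double = {
  if (x <= inputs.head) outputs.head          // clamp below the first keypoint
  else if (x >= inputs.last) outputs.last     // clamp above the last keypoint
  else {
    val i = inputs.lastIndexWhere(_ <= x)     // locate the surrounding keypoints
    val t = (x - inputs(i)) / (inputs(i + 1) - inputs(i))
    outputs(i) + t * (outputs(i + 1) - outputs(i))
  }
}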
Example 35
Source File: globals.scala    From spatial   with MIT License 5 votes vote down vote up
package fringe
import java.io.{File, PrintWriter}

import fringe.targets.DeviceTarget
import fringe.templates.axi4.{AXI4BundleParameters, AXI4StreamParameters}


// enclosing object restored from context (elided in this listing)
object globals {

  private var _tclScript: PrintWriter = {
    val pw = new PrintWriter(new File("bigIP.tcl"))
    pw.flush()
    pw
  }
  def tclScript: PrintWriter = _tclScript
  def tclScript_=(value: PrintWriter): Unit = _tclScript = value


  var regression_testing: String = scala.util.Properties.envOrElse("RUNNING_REGRESSION", "0")

  // Top parameters
  // These are set by the generated Instantiator class
  var numArgIns: Int = 1      // Number of ArgIn registers
  var numArgOuts: Int = 1     // Number of ArgOut registers
  var numArgIOs: Int = 0      // Number of HostIO registers
  var numArgInstrs: Int = 0   // TODO: What is this?
  var argOutLoopbacksMap: Map[Int,Int] = Map.empty // TODO: What is this?

  var loadStreamInfo: List[StreamParInfo] = Nil
  var storeStreamInfo: List[StreamParInfo] = Nil
  var gatherStreamInfo: List[StreamParInfo] = Nil
  var scatterStreamInfo: List[StreamParInfo] = Nil
  var axiStreamInsInfo: List[AXI4StreamParameters] = List(AXI4StreamParameters(64,8,64))
  var axiStreamOutsInfo: List[AXI4StreamParameters] = List(AXI4StreamParameters(64,8,64))

  var numAllocators: Int = 0

  def LOAD_STREAMS: List[StreamParInfo] = if (loadStreamInfo.isEmpty) List(StreamParInfo(DATA_WIDTH, WORDS_PER_STREAM, 0)) else loadStreamInfo
  def STORE_STREAMS: List[StreamParInfo] = if (storeStreamInfo.isEmpty) List(StreamParInfo(DATA_WIDTH, WORDS_PER_STREAM, 0)) else storeStreamInfo
  def GATHER_STREAMS: List[StreamParInfo] = if (gatherStreamInfo.isEmpty) List(StreamParInfo(DATA_WIDTH, WORDS_PER_STREAM, 0)) else gatherStreamInfo
  def SCATTER_STREAMS: List[StreamParInfo] = if (scatterStreamInfo.isEmpty) List(StreamParInfo(DATA_WIDTH, WORDS_PER_STREAM, 0)) else scatterStreamInfo

  def AXI_STREAMS_IN: List[AXI4StreamParameters] = if (axiStreamInsInfo.isEmpty) List(AXI4StreamParameters(256,8,32)) else axiStreamInsInfo
  def AXI_STREAMS_OUT: List[AXI4StreamParameters] = if (axiStreamOutsInfo.isEmpty) List(AXI4StreamParameters(256,8,32)) else axiStreamOutsInfo

  def NUM_LOAD_STREAMS: Int = LOAD_STREAMS.size
  def NUM_STORE_STREAMS: Int = STORE_STREAMS.size

  def NUM_ARG_INS: Int = numArgIns
  def NUM_ARG_OUTS: Int = numArgOuts
  def NUM_ARG_IOS: Int = numArgIOs
  def NUM_ARG_LOOPS: Int = argOutLoopbacksMap.size max 1
  def NUM_ARGS: Int = numArgIns + numArgOuts
  def NUM_STREAMS: Int = LOAD_STREAMS.size + STORE_STREAMS.size
} 
Example 36
Source File: GephiApp.scala    From Mastering-Machine-Learning-with-Spark-2.x   with MIT License 5 votes vote down vote up
package com.github.maxpumperla.ml_spark.graphs

import java.io.PrintWriter

import com.github.maxpumperla.ml_spark.utils.Gephi.toGexf
import org.apache.spark._
import org.apache.spark.graphx._
import org.apache.spark.rdd.RDD

object GephiApp extends App {

    val conf = new SparkConf()
      .setAppName("Gephi Test Writer")
      .setMaster("local[4]")
    val sc = new SparkContext(conf)

    val vertices: RDD[(VertexId, String)] = sc.parallelize(
      Array((1L, "Anne"),
        (2L, "Bernie"),
        (3L, "Chris"),
        (4L, "Don"),
        (5L, "Edgar")))

    val edges: RDD[Edge[String]] = sc.parallelize(
      Array(Edge(1L, 2L, "likes"),
        Edge(2L, 3L, "trusts"),
        Edge(3L, 4L, "believes"),
        Edge(4L, 5L, "worships"),
        Edge(1L, 3L, "loves"),
        Edge(4L, 1L, "dislikes")))

    val graph: Graph[String, String] = Graph(vertices, edges)

    val pw = new PrintWriter("./graph.gexf")
    pw.write(toGexf(graph))
    pw.close()
} 
Example 37
Source File: Utils.scala    From tispark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.test

import java.io.{File, PrintWriter}
import java.nio.file.{Files, Paths}
import java.util.Properties

import org.slf4j.Logger

import scala.collection.JavaConversions._

object Utils {

  def writeFile(content: String, path: String): Unit =
    TryResource(new PrintWriter(path))(_.close()) {
      _.print(content)
    }

  def TryResource[T](res: T)(closeOp: T => Unit)(taskOp: T => Unit): Unit =
    try {
      taskOp(res)
    } finally {
      closeOp(res)
    }

  def readFile(path: String): List[String] =
    Files.readAllLines(Paths.get(path)).toList

  def getOrThrow(prop: Properties, key: String): String = {
    val jvmProp = System.getProperty(key)
    if (jvmProp != null) {
      jvmProp
    } else {
      val v = prop.getProperty(key)
      if (v == null) {
        throw new IllegalArgumentException(key + " is null")
      } else {
        v
      }
    }
  }

  def getFlagOrFalse(prop: Properties, key: String): Boolean =
    getFlag(prop, key, "false")

  private def getFlag(prop: Properties, key: String, defValue: String): Boolean =
    getOrElse(prop, key, defValue).equalsIgnoreCase("true")

  def getOrElse(prop: Properties, key: String, defValue: String): String = {
    val jvmProp = System.getProperty(key)
    if (jvmProp != null) {
      jvmProp
    } else {
      Option(prop.getProperty(key)).getOrElse(defValue)
    }
  }

  def getFlagOrTrue(prop: Properties, key: String): Boolean =
    getFlag(prop, key, "true")

  def time[R](block: => R)(logger: Logger): R = {
    val t0 = System.nanoTime()
    val result = block
    val t1 = System.nanoTime()
    logger.info("Elapsed time: " + (t1 - t0) / 1000.0 / 1000.0 / 1000.0 + "s")
    result
  }

  def ensurePath(basePath: String, paths: String*): Boolean =
    new File(joinPath(basePath, paths: _*)).mkdirs()

  def joinPath(basePath: String, paths: String*): String =
    Paths.get(basePath, paths: _*).toAbsolutePath.toString
} 
Example 38
Source File: RedisBenchmarks.scala    From spark-redis   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.redislabs.provider.redis

import java.io.{File, FileWriter, PrintWriter}
import java.time.{Duration => JDuration}

import com.redislabs.provider.redis.util.Logging


trait RedisBenchmarks extends Logging {

  val benchmarkReportDir = new File("target/reports/benchmarks/")
  benchmarkReportDir.mkdirs()

  def time[R](tag: String)(block: => R): R = {
    val t0 = System.nanoTime()
    val result = block // call-by-name
    val t1 = System.nanoTime()
    new PrintWriter(new FileWriter(s"$benchmarkReportDir/results.txt", true)) {
      // scalastyle:off
      this.println(s"$tag, ${JDuration.ofNanos(t1 - t0)}")
      close()
    }
    result
  }
} 
Example 39
Source File: HydraJsonSupport.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.core.marshallers

import java.io.{PrintWriter, StringWriter}
import java.util.UUID

import akka.actor.ActorPath
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.model.StatusCode
import hydra.common.util.Resource._
import org.joda.time.DateTime
import org.joda.time.format.ISODateTimeFormat
import spray.json.{JsString, _}

import scala.util.{Failure, Success, Try}


trait HydraJsonSupport extends SprayJsonSupport with DefaultJsonProtocol {

  // hedged reconstruction: the trait header and this Throwable writer were elided
  // from the listing; the writer is what uses the StringWriter/PrintWriter imports
  implicit object ThrowableWriter extends RootJsonWriter[Throwable] {
    def write(t: Throwable): JsValue = {
      val sw = new StringWriter()
      val pw = new PrintWriter(sw)
      t.printStackTrace(pw)
      JsObject(
        "message" -> JsString(Option(t.getMessage).getOrElse("")),
        "stackTrace" -> JsString(sw.toString))
    }
  }

  implicit def tryWriter[R: JsonWriter]: RootJsonWriter[Try[R]] =
    new RootJsonWriter[Try[R]] {

      override def write(responseTry: Try[R]): JsValue = {
        responseTry match {
          case Success(r) => JsObject("success" -> r.toJson)
          case Failure(t) => JsObject("failure" -> t.toJson)
        }
      }
    }

  implicit object StreamTypeFormat extends RootJsonFormat[StreamType] {

    def read(json: JsValue): StreamType = json match {
      case JsString("Notification") => Notification
      case JsString("History")      => History
      case JsString("CurrentState") => CurrentState
      case JsString("Telemetry")    => Telemetry
      case _ => {
        import scala.reflect.runtime.{universe => ru}
        val tpe = ru.typeOf[StreamType]
        val clazz = tpe.typeSymbol.asClass
        throw new DeserializationException(
          s"expected a streamType of ${clazz.knownDirectSubclasses}, but got $json"
        )
      }
    }

    def write(obj: StreamType): JsValue = {
      JsString(obj.toString)
    }
  }

  implicit val genericErrorFormat = jsonFormat2(GenericError)

  implicit val topicCreationMetadataFormat = jsonFormat8(TopicMetadataRequest)

  implicit val genericSchemaFormat = jsonFormat2(GenericSchema)

}

case class GenericError(status: Int, errorMessage: String)

case class TopicMetadataRequest(
    schema: JsObject,
    streamType: StreamType,
    derived: Boolean,
    deprecated: Option[Boolean],
    dataClassification: String,
    contact: String,
    additionalDocumentation: Option[String],
    notes: Option[String]
)

case class GenericSchema(name: String, namespace: String) {
  def subject = s"$namespace.$name"
}

sealed trait StreamType
case object Notification extends StreamType
case object CurrentState extends StreamType
case object History extends StreamType
case object Telemetry extends StreamType 
Example 40
Source File: Download.scala    From URI-Potigol   with MIT License 5 votes vote down vote up
import scala.sys.process._

case class Problem(number: Int, name: String, category: String) {
  override def toString = s"${number}\t${name}\t${category}"
}

object Download extends App {
  def get(page: Int = 1) = {
    val ur = Seq("curl", "-sb", "-H", s"https://www.urionlinejudge.com.br/judge/pt/problems/all?page=${page}&limit=100").!!.split("\n")
    val a = ur.map(_.trim).dropWhile(_ != "<tbody>").drop(1).takeWhile(_ != "</tbody>").filter(_ != "").grouped(13)
    val c = for (b <- a if b(1).startsWith("<td class")) yield {
      Problem(
        b(2).drop(33).take(4).toInt,
        b(6).drop(39).takeWhile(_ != '<').trim,
        b(8).drop(37).takeWhile(_ != '<').trim)
    }
    c
  }
  val all = for (i <- 1 to 22) yield {
    println(23-i)
    get(i)
  }
  import java.io.PrintWriter
  new PrintWriter("problems.txt") { write(all.reduce(_ ++ _).mkString("\n")); close }
} 
Example 41
Source File: KMeans.scala    From spark-tda   with Apache License 2.0 5 votes vote down vote up
import java.io.{File, PrintWriter}
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.clustering.KMeans
import org.apache.spark.sql.functions._

def computeKMeans(
  pathToTextFile: String,
  quantity: Int,
  iteration: Int) {
  case class Point(x: Double, y: Double)

  def save(f: File)(func: PrintWriter => Unit) {
    val p = new PrintWriter(f)
    try {
      func(p)
    } finally {
      p.close()
    }
  }

  val filename = pathToTextFile.split("\\.")(0)

  val outputFilename = s"$filename-KMEANS-k${quantity}-i${iteration}.tsv"

  val points = sc
    .textFile(pathToTextFile)
    .map {
      line => line.trim.split("\\s+")
    }
    .map {
      row => Point(row(0).toDouble, row(1).toDouble)
    }

  val features = points
    .map {
      p => Vectors.dense(p.x, p.y)
    }

  features.cache()

  val kmeans = KMeans.train(features, quantity, iteration)

  val predictions = features
    .map {
      f => (f(0), f(1), kmeans.predict(f) + 1)
    }
    .collect

  save(new File(outputFilename)) {
    println(s"OUTPUT TO: ${outputFilename}")
    f => predictions.foreach{
      case (x, y, ccid) => f.println(s"${x}\t${y}\t${ccid}")
    }
  }
} 
Example 42
Source File: ReebDiagram.scala    From spark-tda   with Apache License 2.0 5 votes vote down vote up
import java.io.{File, PrintWriter}
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.feature.{ReebDiagram, VectorAssembler}
import org.apache.spark.sql.functions._

def computeReebDiagram(
  pathToTextFile: String,
  quantity: Int,
  linkThresholdRatio: Double,
  coreThresholdRatio: Double,
  topTreeRatio: Double) {

  def save(f: File)(func: PrintWriter => Unit) {
    val p = new PrintWriter(f)
    try {
      func(p)
    } finally {
      p.close()
    }
  }

  val filename = pathToTextFile.split("\\.")(0)

  val outputFilename = s"$filename-REEB-k${quantity}-l${linkThresholdRatio}-c${coreThresholdRatio}-i${topTreeRatio}.tsv"

  val points = sc.textFile(pathToTextFile)
    .map {
      line => line.trim.split("\\s+")
    }
    .zipWithIndex
    .map { case (row, i) =>
      (i, row(0).toDouble, row(1).toDouble, 0)
    }
    .toDF("id", "x", "y", "cover_id")

  val cardinality = points.count

  val assembler = new VectorAssembler()
    .setInputCols(Array("x", "y"))
    .setOutputCol("feature")

  val features = assembler
    .transform(points)

  val reeb = new ReebDiagram()
    .setK(quantity)
    .setLinkThresholdRatio(linkThresholdRatio)
    .setCoreThresholdRatio(coreThresholdRatio)
    .setTopTreeSize((topTreeRatio * cardinality).toInt)
    .setTopTreeLeafSize(quantity)
    .setIdCol("id")
    .setCoverCol("cover_id")
    .setFeaturesCol("feature")
    .setOutputCol("cluster_id")

  val transformed = reeb
    .fit(features)
    .transform(features)

  val clusters = Map(
    transformed
      .select("cluster_id")
      .rdd
      .map(row => row.getLong(0))
      .distinct
      .zipWithIndex
      .collect(): _*)

  val result = transformed
    .select("x", "y", "cluster_id")
    .rdd
    .map(row => (row.getDouble(0), row.getDouble(1), row.getLong(2)))
    .map { case (x, y, clusterId) => (x, y, clusters(clusterId) + 1)}
    .collect()

  save(new File(outputFilename)) {
    println(s"OUTPUT TO: ${outputFilename}")
    f => result.foreach{
      case (x, y, ccid) => f.println(s"${x}\t${y}\t${ccid}")
    }
  }
} 
Example 43
Source File: CssFileRenderer.scala    From udash-core   with Apache License 2.0 5 votes vote down vote up
package io.udash.css

import java.io.{File, PrintWriter}

import scalacss.internal.Renderer


class CssFileRenderer(dirPath: String, styles: Seq[CssBase], createMain: Boolean) {
  def render()(implicit renderer: Renderer[String]): Unit = {
    val dir = new File(dirPath)
    dir.mkdirs()

    val mainFile: Option[File] = if (createMain) Some(new File(s"${dir.getAbsolutePath}/main.css")) else None
    mainFile.foreach(_.createNewFile())

    val mainWriter = mainFile.map(new PrintWriter(_, "UTF-8"))

    styles.foreach { style =>
      val name = style.getClass.getName
      val f = new File(s"${dir.getAbsolutePath}/$name.css") {
        createNewFile()
      }
      new PrintWriter(f, "UTF-8") {
        write(style.render)
        flush()
        close()
      }

      mainWriter.foreach(_.append(s"""@import "$name.css";\n"""))
    }

    mainWriter.foreach { w =>
      w.flush()
      w.close()
    }
  }
} 
Example 44
Source File: PrintUtils.scala    From random-projections-at-berlinbuzzwords   with Apache License 2.0 5 votes vote down vote up
package com.stefansavev

import java.io.PrintWriter

import scala.io.{Codec, Source}

object PrintUtils {
  def columnVectorToFile(fileName: String, v: Array[Double]): Unit = {
    val writer = new PrintWriter(fileName)
    for (a <- v) {
      writer.println(a.toString)
    }
    writer.close()
  }

  def stringsToFile(fileName: String, v: Array[String]): Unit = {
    val writer = new PrintWriter(fileName)
    for (a <- v) {
      writer.println(a)
    }
    writer.close()
  }

  def withPrintWriter(fileName: String, body: PrintWriter => Unit): Unit = {
    val writer = new PrintWriter(fileName, "UTF-8")
    body(writer)
    writer.close()
  }

}

object FileReadUtils {
  def withLinesIterator[T](fileName: String)(body: Iterator[String] => T): T = {
    val source = Source.fromFile(fileName)(Codec.UTF8)
    val result = body(source.getLines())
    source.close()
    result
  }
} 
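A small usage sketch of the helpers above (the file name is arbitrary): write a few lines with withPrintWriter, then read them back and sum them with withLinesIterator:

object PrintUtilsDemo {
  def main(args: Array[String]): Unit = {
    PrintUtils.withPrintWriter("numbers.txt", w => (1 to 5).foreach(w.println))
    val total = FileReadUtils.withLinesIterator("numbers.txt") { lines =>
      lines.map(_.trim.toInt).sum
    }
    println(total) // prints 15
  }
}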
Example 45
Source File: gen4tests.scala    From tscfg   with Apache License 2.0 5 votes vote down vote up
package tscfg

import java.io.{File, FileWriter, PrintWriter}

import tscfg.generators.java.JavaGen
import tscfg.generators.{GenOpts, Generator}
import tscfg.generators.scala.ScalaGen

object gen4tests {
  def main(args: Array[String]): Unit = {
    val sourceDir = new File("src/main/tscfg/example")
    sourceDir.listFiles().filter {
      _.getName.endsWith(".spec.conf")
    } foreach generate
  }

  private def generate(confFile: File): Unit = {
    //println(s"gen4tests: confFile=$confFile")

    val bufSource = io.Source.fromFile(confFile)
    val source = bufSource.mkString
    bufSource.close

    val opts = {
      val linePat = """\s*//\s*GenOpts:(.*)""".r
      source.split("\n")
        .collect { case linePat(xs) => xs.trim }
        .flatMap(_.split("\\s+"))
    }
    if (opts.contains("--skip-gen4tests")) {
      println(s"gen4tests: skipping $confFile")
      return
    }

    val baseGenOpts: GenOpts = {
      var genOpts = GenOpts("tscfg.example", "?")
      opts foreach {
        case "--scala:2.12"      => genOpts = genOpts.copy(s12 = true)
        case "--scala:bt"        => genOpts = genOpts.copy(useBackticks = true)
        case "--java:getters"    => genOpts = genOpts.copy(genGetters = true)
        case "--java:optionals"  => genOpts = genOpts.copy(useOptionals = true)
        case "--durations"       => genOpts = genOpts.copy(useDurations = true)
        case "--all-required"    => genOpts = genOpts.copy(assumeAllRequired = true)

        // $COVERAGE-OFF$
        case opt => println(s"WARN: $confFile: unrecognized GenOpts argument: `$opt'")
        // $COVERAGE-ON$
      }
      genOpts
    }

    val buildResult = ModelBuilder(source, assumeAllRequired = baseGenOpts.assumeAllRequired)
    val objectType = buildResult.objectType

    val name = confFile.getName
    val (base, _) = name.span(_ != '.')
    val classNameSuffix = util.upperFirst(base.replace('-', '_')) + "Cfg"

    List("Scala", "Java") foreach { lang =>
      val targetScalaDir = new File("src/test/" + lang.toLowerCase + "/tscfg/example")
      targetScalaDir.mkdirs()

      val className = lang + classNameSuffix

      val fileName = className + "." + lang.toLowerCase
      val targetFile = new File(targetScalaDir, fileName)
      // $COVERAGE-OFF$
      if (true||confFile.lastModified >= targetFile.lastModified) {
        val genOpts = baseGenOpts.copy(className = className)
        //println(s"generating for $name -> $fileName")
        val generator: Generator = lang match {
          case "Scala" => new ScalaGen(genOpts)
          case "Java" =>  new JavaGen(genOpts)
        }

        val results = generator.generate(objectType)
        val out = new PrintWriter(new FileWriter(targetFile), true)
        out.println(results.code)
      }
      // $COVERAGE-ON$
    }
  }
} 
Example 46
Source File: StanProgram.scala    From ScalaStan   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.cibo.scalastan.ast

import java.io.PrintWriter

import com.cibo.scalastan.StanType
import com.cibo.scalastan.transform.StanTransform

case class StanProgram(
  data: Seq[StanDataDeclaration[_ <: StanType]] = Seq.empty,
  parameters: Seq[StanParameterDeclaration[_ <: StanType]] = Seq.empty,
  functions: Seq[StanFunctionDeclaration] = Seq.empty,
  transformedData: Seq[StanTransformedData] = Seq.empty,
  transformedParameters: Seq[StanTransformedParameter] = Seq.empty,
  generatedQuantities: Seq[StanGeneratedQuantity] = Seq.empty,
  model: StanStatement = StanBlock(Seq.empty)
) {

  def emit(writer: PrintWriter): Unit = {
    if (functions.nonEmpty) {
      writer.println("functions {")
      functions.foreach(f => f.emit(writer))
      writer.println("}")
    }
    if (data.nonEmpty) {
      writer.println("data {")
      data.foreach(d => writer.println(s"  ${d.emitDeclaration};"))
      writer.println("}")
    }
    if (transformedData.nonEmpty) {
      writer.println("transformed data {")
      transformedData.foreach(d => writer.println(s"  ${d.result.emitDeclaration};"))
      transformedData.foreach(_.emit(writer))
      writer.println("}")
    }
    if (parameters.nonEmpty) {
      writer.println("parameters {")
      parameters.foreach(d => writer.println(s"  ${d.emitDeclaration};"))
      writer.println("}")
    }
    if (transformedParameters.nonEmpty) {
      writer.println("transformed parameters {")
      transformedParameters.foreach(d => writer.println(s"  ${d.result.emitDeclaration};"))
      transformedParameters.foreach(_.emit(writer))
      writer.println("}")
    }
    writer.println("model {")
    model.emitDeclarations(writer, 1)
    model.emit(writer, 1)
    writer.println("}")
    if (generatedQuantities.nonEmpty) {
      writer.println("generated quantities {")
      generatedQuantities.foreach(d => writer.println(s"  ${d.result.emitDeclaration};"))
      generatedQuantities.foreach(_.emit(writer))
      writer.println("}")
    }
  }
}

object StanProgram {
  def getStatements(code: StanStatement): Seq[StanStatement] = {
    val inner = code match {
      case block: StanBlock      => block.children.flatMap(child => getStatements(child))
      case loop: StanLoop        => getStatements(loop.body)
      case cond: StanIfStatement =>
        cond.conds.flatMap(c => getStatements(c._2)) ++ cond.otherwise.map(getStatements).getOrElse(Vector.empty)
      case _                     => Vector.empty
    }
    code +: inner
  }

  def getStatements(program: StanProgram): Seq[StanStatement] = {
    getStatements(program.model) ++
      program.generatedQuantities.flatMap(q => getStatements(q.code)) ++
      program.transformedData.flatMap(d => getStatements(d.code)) ++
      program.transformedParameters.flatMap(p => getStatements(p.code))
  }
} 
Example 47
Source File: StanCodeBlock.scala    From ScalaStan   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.cibo.scalastan

import java.io.PrintWriter

import com.cibo.scalastan.ast._

trait StanCodeBlock extends Implicits {

  implicit val _context: StanContext
  implicit val _code: StanProgramBuilder = new StanProgramBuilder

  object stan extends StanFunctions with StanDistributions

  private def insertLocal[T <: StanType](
    typeConstructor: T,
    valueOpt: Option[StanValue[T]],
    name: sourcecode.Name
  ): StanLocalDeclaration[T] = {
    if (typeConstructor.lower.isDefined || typeConstructor.upper.isDefined) {
      throw new IllegalStateException("local variables may not have constraints")
    }

    val decl = StanLocalDeclaration[T](typeConstructor, _context.fixName(name.value))
    _code.insert(StanInlineDeclaration(decl, valueOpt))
    decl
  }

  def local[T <: StanType](typeConstructor: T)(implicit name: sourcecode.Name): StanLocalDeclaration[T] = {
    insertLocal(typeConstructor, None, name)
  }

  def local[T <: StanType](
    typeConstructor: T,
    value: StanValue[T]
  )(implicit name: sourcecode.Name): StanLocalDeclaration[T] = {
    insertLocal(typeConstructor, Some(value), name)
  }

  case class when(cond: StanValue[StanInt])(block: => Unit) {
    _code.enter()
    block
    _code.leave(code => ast.StanIfStatement(Seq((cond, StanBlock(code))), None))

    def when(cond: StanValue[StanInt])(otherBlock: => Unit): when = {
      _code.enter()
      otherBlock
      _code.handleElseIf(cond)
      this
    }

    def otherwise(otherBlock: => Unit): Unit = {
      _code.enter()
      otherBlock
      _code.handleElse()
    }
  }

  def when[T <: StanType](cond: StanValue[StanInt], ifTrue: StanValue[T], ifFalse: StanValue[T]): StanValue[T] = {
    StanTernaryOperator(cond, ifTrue, ifFalse)
  }

  def range(start: StanValue[StanInt], end: StanValue[StanInt]): StanValueRange = StanValueRange(start, end)

  def loop(cond: StanValue[StanInt])(body: => Unit): Unit = {
    _code.enter()
    body
    _code.leave(children => ast.StanWhileLoop(cond, StanBlock(children)))
  }

  def break: Unit = {
    _code.append(StanBreakStatement())
  }

  def continue: Unit = {
    _code.append(StanContinueStatement())
  }

  private[scalastan] def emitTopLevelLocals(writer: PrintWriter): Unit = {
    // Values have to be declared before code.  Since we treat transformations
    // differently, we need to make a special pass to combine the top-level locals.
    _code.results.children.foreach { child =>
      if (child.isInstanceOf[StanInlineDeclaration]) {
        child.emit(writer, 1)
      }
    }
  }

  private[scalastan] def emitCode(writer: PrintWriter): Unit = {
    _code.results.children.foreach { child =>
      if (!child.isInstanceOf[StanInlineDeclaration]) {
        child.emit(writer, 1)
      }
    }
  }
} 
Example 48
Source File: ScalaStanBaseSpec.scala    From ScalaStan   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.cibo.scalastan

import java.io.{PrintWriter, StringWriter}

import com.cibo.scalastan.ast.StanParameterDeclaration
import com.cibo.scalastan.run.{StanCompiler, StanRunner}
import org.scalatest.{FunSpec, Matchers}

trait ScalaStanBaseSpec extends FunSpec with Matchers {

  case class MockRunner(data: Vector[Map[String, Double]] = Vector.empty) extends StanRunner {

    private val results = scala.collection.mutable.ArrayBuffer[(String, Vector[Vector[String]])]()

    private def set(prefix: String, values: Any, mapping: Map[String, Double]): Map[String, Double] = {
      values match {
        case s: Seq[_] =>
          s.zipWithIndex.foldLeft(mapping) { case (m, (v, i)) =>
            set(s"$prefix.${i + 1}", v, m)
          }
        case _ => mapping + (prefix -> values.asInstanceOf[Double])
      }
    }


    def set[T <: StanType](decl: StanParameterDeclaration[T], values: Seq[T#SCALA_TYPE]): MockRunner = {
      require(data.isEmpty || data.length == values.length)
      val dataBefore = if (data.isEmpty) values.map(_ => Map[String, Double]("lp__" -> 1)) else data
      val prefix = decl.emit
      val newData = values.zip(dataBefore).map { case (v, d) => set(prefix, v, d) }
      copy(data = newData.toVector)
    }

    def run(model: CompiledModel, chains: Int, seed: Int, cache: Boolean, method: RunMethod.Method): StanResults = {
      val mappedData: Map[String, Vector[Vector[Double]]] = data.flatten.groupBy(_._1).mapValues { grouped =>
        val iterations = grouped.map { case (k, v) => v }
        Vector.fill(chains)(iterations)
      }
      StanResults(mappedData, Vector.empty, model, method)
    }
  }

  implicit object MockCompiler extends StanCompiler {
    def compile(model: StanModel): CompiledModel = CompiledModel(
      model = model,
      runner = MockRunner()
    )
  }

  private def removeSpaces(str: String): String = str.replaceAllLiterally(" ", "").replaceAllLiterally("\n", "")

  private def removeNumbers(str: String): String = str.replaceAll("[0-9]+", "#")

  private def compare(actual: String, template: String, originalTemplate: String = ""): Boolean = {
    val fixedActual = removeNumbers(removeSpaces(actual))
    val fixedExpected = removeNumbers(removeSpaces(template))
    fixedActual.contains(fixedExpected)
  }

  def check(actual: String, template: String): Unit = {
    withClue(s"actual:\n$actual\nexpected:\n$template\n") {
      compare(actual, template) shouldBe true
    }
  }

  // Compare the code output of the model with a template.
  // Spaces are ignored and "#" in the template matches any integer.
  def checkCode(model: StanModel, template: String): Unit = {
    val sw = new StringWriter()
    model.emit(new PrintWriter(sw))
    sw.close()
    check(sw.toString, template)
  }
} 
Example 49
Source File: FileDataStore.scala    From regressr   with Apache License 2.0 5 votes vote down vote up
package org.ebayopensource.regression.internal.datastore

import java.io.{File, PrintWriter}

import scala.io.Source
import scala.util.Try


class FileDataStore(path: String) extends BaseDataStore {

  new File(path).mkdir()

  override def put(key: String, value: String): Unit = {
    val pw = new PrintWriter(new File(buildFilePath(key)))
    pw.write(value)
    pw.flush()
    pw.close()
  }

  private def buildFilePath(key: String) = {
    if (key.endsWith(".strategy")) s"${path}${key}" else s"${path}${key}.json"
  }

  override def get(key: String): Option[String] = {
    val file = new File(buildFilePath(key))
    if (!file.exists()) {
      None
    }
    else {
      Some(Source.fromFile(file).mkString)
    }
  }

  override def close(): Unit = () // no-op: nothing to release for a file-backed store

  override def listStrategies(): Seq[String] = {
    new File(path).listFiles().filter {
      file => file.getName.endsWith(".strategy")
    }.map(file => file.getName.replaceFirst(s"${BaseDataStore.strategyPrefix}", "").replaceFirst(".strategy", ""))
  }

  override def remove(key: String): Unit = {
    new File(s"${path}${key}").delete()
  }

  override def deleteRecordingFiles(testIdentifier: String): Try[Unit] = Try {
    new File(path).listFiles().filter {
      file => file.getName.startsWith(s"${BaseDataStore.strategyPrefix}${testIdentifier}.")
    }.map(file => file.delete())
  }
}

object FileDataStore {
  val PATH="./tmp/"
} 
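A brief usage sketch of the store above (the key and payload are arbitrary); put writes ./tmp/run1.json and get reads it back:

object FileDataStoreDemo {
  def main(args: Array[String]): Unit = {
    val store = new FileDataStore(FileDataStore.PATH)
    store.put("run1", """{"status": "ok"}""")
    println(store.get("run1")) // Some({"status": "ok"})
  }
}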
Example 50
Source File: ScannerSpec.scala    From GettingWorkDoneWithExtensibleEffects   with Apache License 2.0 5 votes vote down vote up
package scan

import java.io.PrintWriter
import java.nio.file._

import org.specs2._

import scala.collection.immutable.SortedSet

class ScannerSpec extends mutable.Specification {

  "Report Format" ! {
    val base = deletedOnExit(Files.createTempDirectory("exerciseClassic"))
    val base1 = deletedOnExit(fillFile(base, 1))
    val base2 = deletedOnExit(fillFile(base, 2))
    val subdir = deletedOnExit(Files.createTempDirectory(base, "subdir"))
    val sub1 = deletedOnExit(fillFile(subdir, 1))
    val sub3 = deletedOnExit(fillFile(subdir, 3))

    val actual = Scanner.pathScan(FilePath(base), 2)
    val expected = new PathScan(SortedSet(FileSize(sub3, 3), FileSize(base2, 2)), 7, 4)

    actual.mustEqual(expected)
  }

  def fillFile(dir: Path, size: Int) = {
    val path = dir.resolve(s"$size.txt")
    val w = new PrintWriter(path.toFile)
    try w.write("a" * size)
    finally w.close
    path
  }

  def deletedOnExit(p: Path) = {
    p.toFile.deleteOnExit()
    p
  }

} 
Example 52
Source File: ScannerSpec.scala    From GettingWorkDoneWithExtensibleEffects   with Apache License 2.0 5 votes vote down vote up
package scan

import java.io.PrintWriter
import java.nio.file._

import org.specs2._

import scala.collection.immutable.SortedSet

import scala.concurrent.duration._

import monix.execution.Scheduler.Implicits.global

class ScannerSpec extends mutable.Specification {

  "Report Format" ! {
    val base = deletedOnExit(Files.createTempDirectory("exerciseTask"))
    val base1 = deletedOnExit(fillFile(base, 1))
    val base2 = deletedOnExit(fillFile(base, 2))
    val subdir = deletedOnExit(Files.createTempDirectory(base, "subdir"))
    val sub1 = deletedOnExit(fillFile(subdir, 1))
    val sub3 = deletedOnExit(fillFile(subdir, 3))

    val actual = Scanner.pathScan(FilePath(base), 2).runSyncUnsafe(3.seconds)
    val expected = new PathScan(SortedSet(FileSize(sub3, 3), FileSize(base2, 2)), 7, 4)

    actual.mustEqual(expected)
  }

  def fillFile(dir: Path, size: Int) = {
    val path = dir.resolve(s"$size.txt")
    val w = new PrintWriter(path.toFile)
    try w.write("a" * size)
    finally w.close
    path
  }

  def deletedOnExit(p: Path) = {
    p.toFile.deleteOnExit()
    p
  }

} 
Example 53
Source File: ScannerSpec.scala    From GettingWorkDoneWithExtensibleEffects   with Apache License 2.0 5 votes vote down vote up
package scan

import java.io.PrintWriter
import java.nio.file._

import org.specs2._

import scala.collection.immutable.SortedSet

import cats.effect._

import org.atnos.eff.addon.cats.effect.IOEffect._
import org.atnos.eff.syntax.addon.cats.effect._

class ScannerSpec extends mutable.Specification {

  "Report Format" ! {
    val base = deletedOnExit(Files.createTempDirectory("exercise1"))
    val base1 = deletedOnExit(fillFile(base, 1))
    val base2 = deletedOnExit(fillFile(base, 2))
    val subdir = deletedOnExit(Files.createTempDirectory(base, "subdir"))
    val sub1 = deletedOnExit(fillFile(subdir, 1))
    val sub3 = deletedOnExit(fillFile(subdir, 3))

    val scanProgram = Scanner.pathScan(base, 2)
    val actual = scanProgram.unsafeRunSync
    val expected = new PathScan(SortedSet(FileSize(sub3, 3), FileSize(base2, 2)), 7, 4)

    actual.mustEqual(expected)
  }

  def fillFile(dir: Path, size: Int) = {
    val path = dir.resolve(s"$size.txt")
    val w = new PrintWriter(path.toFile)
    try w.write("a" * size)
    finally w.close
    path
  }

  def deletedOnExit(p: Path) = {
    p.toFile.deleteOnExit()
    p
  }

} 
Example 55
Source File: LogicalPlanPrinter.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.airframe.sql.model
import java.io.{PrintWriter, StringWriter}

import wvlet.log.LogSupport
import wvlet.airframe.sql.model.LogicalPlan.EmptyRelation


object LogicalPlanPrinter extends LogSupport {
  def print(m: LogicalPlan): String = {
    val s = new StringWriter()
    val p = new PrintWriter(s)
    print(m, p, 0)
    p.close()
    s.toString
  }

  def print(m: LogicalPlan, out: PrintWriter, level: Int): Unit = {
    m match {
      case EmptyRelation =>
      // print nothing
      case _ =>
        val ws = " " * level

        val inputAttr  = m.inputAttributes.mkString(", ")
        val outputAttr = m.outputAttributes.mkString(", ")
        val attr       = m.expressions.map(_.toString)
        val prefix     = s"${ws}[${m.modelName}](${inputAttr}) => (${outputAttr})"
        attr.length match {
          case 0 =>
            out.println(prefix)
          case _ =>
            out.println(s"${prefix}")
            val attrWs  = " " * (level + 1)
            val attrStr = attr.map(x => s"${attrWs}- ${x}").mkString("\n")
            out.println(attrStr)
        }
        for (c <- m.children) {
          print(c, out, level + 1)
        }
    }
  }
} 
Example 56
Source File: Handler.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.log

import java.io.{PrintStream, PrintWriter}
import java.util.{logging => jl}


class BufferedLogHandler(formatter: LogFormatter) extends jl.Handler {
  private val buf = Seq.newBuilder[String]

  override def flush(): Unit = {}
  override def publish(record: jl.LogRecord): Unit =
    synchronized {
      buf += formatter.format(record)
    }
  override def close(): Unit = {
    // do nothing
  }

  def logs: Seq[String] = buf.result()

  def clear: Unit = {
    buf.clear()
  }
} 
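
A hedged usage sketch for the handler above; LogFormatter.SimpleLogFormatter is assumed to be one of wvlet-log's built-in formatters:

import java.util.{logging => jl}
import wvlet.log.{BufferedLogHandler, LogFormatter}

val handler = new BufferedLogHandler(LogFormatter.SimpleLogFormatter)
val logger = jl.Logger.getLogger("demo")
logger.addHandler(handler)
logger.info("hello")
handler.logs.foreach(println) // prints the formatted "hello" record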
Example 57
Source File: OrderedDocFreq.scala    From gemini   with GNU General Public License v3.0 5 votes vote down vote up
package tech.sourced.gemini

import java.io.{File, PrintWriter}

import scala.io.Source

import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.fasterxml.jackson.module.scala.experimental.ScalaObjectMapper



case class OrderedDocFreq(docs: Int, tokens: IndexedSeq[String], df: collection.Map[String, Int]) {
  def saveToJson(filename: String): Unit = {
    val mapper = new ObjectMapper() with ScalaObjectMapper
    mapper.registerModule(DefaultScalaModule)
    val out = new PrintWriter(filename)
    mapper.writeValue(out, Map(
      "docs" -> docs,
      "tokens" -> tokens,
      "df" -> df
    ))
    out.close()
  }
}

object OrderedDocFreq {
  def fromJson(file: File): OrderedDocFreq = {
    val docFreqMap = parseFile[Map[String, Any]](file)
    val docs = docFreqMap.get("docs") match {
      case Some(v) => v.asInstanceOf[Int]
      case None => throw new RuntimeException(s"Can not parse key 'docs' in docFreq:${file.getAbsolutePath}")
    }
    val df = docFreqMap.get("df") match {
      case Some(v) => v.asInstanceOf[Map[String, Int]]
      case None => throw new RuntimeException(s"Can not parse key 'df' in docFreq:${file.getAbsolutePath}")
    }
    val tokens = docFreqMap.get("tokens") match {
      case Some(v) => v.asInstanceOf[List[String]].toArray
      case None => throw new RuntimeException(s"Can not parse key 'tokens' in docFreq:${file.getAbsolutePath}")
    }
    OrderedDocFreq(docs, tokens, df)
  }

  def parseFile[T: Manifest](file: File): T = {
    val json = Source.fromFile(file)
    val mapper = new ObjectMapper with ScalaObjectMapper
    mapper.registerModule(DefaultScalaModule)
    mapper.readValue[T](json.reader)
  }
} 
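
A minimal round-trip sketch of the API above; the file name is illustrative:

import java.io.File
import tech.sourced.gemini.OrderedDocFreq

val freq = OrderedDocFreq(docs = 2, tokens = IndexedSeq("a", "b"), df = Map("a" -> 2, "b" -> 1))
freq.saveToJson("docfreq.json") // writes {"docs":2,"tokens":["a","b"],"df":{"a":2,"b":1}}
val restored = OrderedDocFreq.fromJson(new File("docfreq.json"))
assert(restored.docs == freq.docs && restored.df == freq.df)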
Example 58
Source File: package.scala    From temperature-machine   with Apache License 2.0 5 votes vote down vote up
package bad.robot

import java.io.{File, PrintWriter, StringWriter}

import cats.effect.IO
import cats.syntax.either._
import io.circe._
import org.http4s.circe.{jsonEncoderWithPrinterOf, jsonOf}
import io.circe.parser._
import org.http4s.{EntityDecoder, EntityEncoder}

import scalaz.\/
import scalaz.syntax.std.either._

package object temperature {

  type Degrees = Int

  // NB not implicit as this seems to clash with http4s implicits that are kicking around
  def jsonDecoder[A: Decoder]: EntityDecoder[IO, A] = jsonOf[IO, A]
  def jsonEncoder[A: Encoder]: EntityEncoder[IO, A] = jsonEncoderWithPrinterOf(spaces2PlatformSpecific)

  def encode[A: Encoder](a: A): Json = Encoder[A].apply(a)

  // deprecated
  def decodeAsDisjunction[A: Decoder](value: String): temperature.Error \/ A = {
    decode(value)
      .leftMap(error => ParseError(error.getMessage))
      .disjunction
  }

  def stackTraceOf(error: Throwable): String = {
    val writer = new StringWriter()
    error.printStackTrace(new PrintWriter(writer))
    writer.toString
  }

  private val eol = sys.props("line.separator")
  val spaces2PlatformSpecific = Printer(
    preserveOrder = true
    , dropNullValues = false
    , indent = "  "
    , lbraceLeft = ""
    , lbraceRight = eol
    , rbraceLeft = eol
    , rbraceRight = ""
    , lbracketLeft = ""
    , lbracketRight = eol
    , rbracketLeft = eol
    , rbracketRight = ""
    , lrbracketsEmpty = ""
    , arrayCommaLeft = ""
    , arrayCommaRight = eol
    , objectCommaLeft = ""
    , objectCommaRight = eol
    , colonLeft = " "
    , colonRight = " "
  )

  implicit class JsonOps(json: Json) {
    
    def spaces2ps: String = spaces2PlatformSpecific.pretty(json)
  }

  implicit class FileOps(file: File) {
    def /(child: String): File = new File(file, child)
  }

} 
Example 59
Source File: FileUtils.scala    From Scala-for-Machine-Learning-Second-Edition   with MIT License 5 votes vote down vote up
package org.scalaml.util

import org.apache.log4j.Logger

import scala.io.Source._
import scala.util.{Failure, Success, Try}


object FileUtils {
  private val logger = Logger.getLogger("FileUtils") // reconstructed: error(...) below expects a logger; the name is an assumption

  def write(content: String, pathName: String, className: String): Boolean = {
    import java.io.PrintWriter

    import DisplayUtils._

    var printWriter: Option[PrintWriter] = None
    var status = false
    Try {
      printWriter = Some(new PrintWriter(pathName))
      printWriter.foreach(_.write(content))
      printWriter.foreach(_.flush)
      printWriter.foreach(_.close)
      status = true
    } match {
      // Catch and display exception description and return false
      case Failure(e) =>
        error(s"$className.write failed for $pathName", logger, e)
        if (printWriter.isDefined) printWriter.foreach(_.close)
        status
      case Success(s) => status
    }
  }
}

// ---------------------------------  EOF ------------------------------------- 
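
Usage is a single call; a quick sketch with an illustrative path and caller label:

import org.scalaml.util.FileUtils

val ok: Boolean = FileUtils.write("hello world", "/tmp/out.txt", "Demo")
// true on success; on failure the exception is logged and false is returned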
Example 60
Source File: Reprocessor.scala    From nn_coref   with GNU General Public License v3.0 5 votes vote down vote up
package edu.berkeley.nlp.coref.preprocess

import edu.berkeley.nlp.PCFGLA.CoarseToFineMaxRuleParser
import edu.berkeley.nlp.coref.ConllDoc
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import java.io.PrintWriter
import edu.berkeley.nlp.coref.ConllDocReader
import edu.berkeley.nlp.syntax.Tree
import edu.berkeley.nlp.futile.util.Logger
import java.util.Arrays
import edu.berkeley.nlp.futile.fig.basic.IOUtils
import edu.berkeley.nlp.coref.Chunk
import edu.berkeley.nlp.coref.ConllDocWriter

object Reprocessor {

  def redoConllDocument(parser: CoarseToFineMaxRuleParser, backoffParser: CoarseToFineMaxRuleParser, nerSystem: NerSystem, docReader: ConllDocReader, inputPath: String, outputPath: String) {
    val writer = IOUtils.openOutHard(outputPath);
    val docs = docReader.readConllDocs(inputPath);
    for (doc <- docs) {
      Logger.logss("Reprocessing: " + doc.docID + " part " + doc.docPartNo);
      val newPos = new ArrayBuffer[Seq[String]]();
      val newParses = new ArrayBuffer[edu.berkeley.nlp.futile.syntax.Tree[String]]();
      val newNerChunks = new ArrayBuffer[Seq[Chunk[String]]]();
      for (sentIdx <- 0 until doc.words.size) {
        if (sentIdx % 10 == 0) {
          Logger.logss("Sentence " + sentIdx);
        }
        val sent = doc.words(sentIdx);
        var parse = PreprocessingDriver.parse(parser, backoffParser, sent.asJava);
        parse = if (parse.getYield().size() != sent.length) {
          Logger.logss("Couldn't parse sentence: " + sent.toSeq);
          Logger.logss("Using default parse");
          convertFromFutileTree(doc.trees(sentIdx).constTree);
        } else {
          parse;
        }
        val posTags = parse.getPreTerminalYield().asScala.toArray;
        newPos += posTags;
        newParses += convertToFutileTree(parse);
        val nerBioLabels = nerSystem.runNerSystem(sent.toArray, posTags);
        newNerChunks += convertBioToChunks(nerBioLabels);
      }
      ConllDocWriter.writeIncompleteConllDoc(writer, doc.docID, doc.docPartNo, doc.words, newPos, newParses, doc.speakers, newNerChunks, doc.corefChunks);
    }
    writer.close();
  }
  
  def convertBioToChunks(nerBioLabels: Seq[String]): Seq[Chunk[String]] = {
    var lastNerStart = -1;
    val chunks = new ArrayBuffer[Chunk[String]]();
    for (i <- 0 until nerBioLabels.size) {
      if (nerBioLabels(i).startsWith("B")) {
        if (lastNerStart != -1) {
          chunks += new Chunk[String](lastNerStart, i, "MISC");
        }
        lastNerStart = i;
      } else if (nerBioLabels(i).startsWith("O")) {
        if (lastNerStart != -1) {
          chunks += new Chunk[String](lastNerStart, i, "MISC");
          lastNerStart = -1;
        }
      }
    }
    chunks;
  }
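
  // Example: for labels Seq("B", "I", "O", "B") this returns a single
  // Chunk(0, 2, "MISC"); the trailing "B" opens an entity that is never
  // flushed at end-of-sequence, so it is dropped.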
  
  def convertToFutileTree(slavTree: edu.berkeley.nlp.syntax.Tree[String]): edu.berkeley.nlp.futile.syntax.Tree[String] = {
    new edu.berkeley.nlp.futile.syntax.Tree[String](slavTree.getLabel(), slavTree.getChildren().asScala.map(convertToFutileTree(_)).asJava);
  }
  
  def convertFromFutileTree(myTree: edu.berkeley.nlp.futile.syntax.Tree[String]): edu.berkeley.nlp.syntax.Tree[String] = {
    new edu.berkeley.nlp.syntax.Tree[String](myTree.getLabel(), myTree.getChildren().asScala.map(convertFromFutileTree(_)).asJava);
  }
} 
Example 61
Source File: MessageWriter.scala    From lsp4s   with Apache License 2.0 5 votes vote down vote up
package scala.meta.jsonrpc

import java.io.ByteArrayOutputStream
import java.io.OutputStream
import java.io.OutputStreamWriter
import java.io.PrintWriter
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import scala.concurrent.Future
import io.circe.syntax._
import monix.execution.Ack
import monix.reactive.Observer
import scribe.LoggerSupport


class MessageWriter(out: Observer[ByteBuffer], logger: LoggerSupport) {
  // Reconstructed declarations: the constructor and these fields were elided from
  // this excerpt; they are inferred from their uses in write() below.
  private val baos = new ByteArrayOutputStream()
  private val headerOut = MessageWriter.headerWriter(baos)
  private val lock = new Object

  def write(msg: Message): Future[Ack] = lock.synchronized {
    baos.reset()
    val json = msg.asJson
    val protocol = BaseProtocolMessage.fromJson(json)
    logger.trace(s" --> $json")
    val byteBuffer = MessageWriter.write(protocol, baos, headerOut)
    out.onNext(byteBuffer)
  }
}

object MessageWriter {

  def headerWriter(out: OutputStream): PrintWriter = {
    new PrintWriter(new OutputStreamWriter(out, StandardCharsets.US_ASCII))
  }

  def write(message: BaseProtocolMessage): ByteBuffer = {
    val out = new ByteArrayOutputStream()
    val header = headerWriter(out)
    write(message, out, header)
  }

  def write(
      message: BaseProtocolMessage,
      out: ByteArrayOutputStream,
      headerOut: PrintWriter
  ): ByteBuffer = {
    message.header.foreach {
      case (key, value) =>
        headerOut.write(key)
        headerOut.write(": ")
        headerOut.write(value)
        headerOut.write("\r\n")
    }
    headerOut.write("\r\n")
    out.write(message.content)
    out.flush()
    val buffer = ByteBuffer.wrap(out.toByteArray, 0, out.size())
    buffer
  }
} 
Example 62
Source File: LoadDataHarvesterSuite.scala    From spark-atlas-connector   with Apache License 2.0 5 votes vote down vote up
package com.hortonworks.spark.atlas.sql

import java.io.{FileOutputStream, PrintWriter}
import java.nio.file.Files

import scala.util.Random
import org.apache.spark.sql.execution.LeafExecNode
import org.apache.spark.sql.execution.command.{ExecutedCommandExec, LoadDataCommand}
import com.hortonworks.spark.atlas.types.external
import com.hortonworks.spark.atlas._
import com.hortonworks.spark.atlas.sql.testhelper.BaseHarvesterSuite
import org.apache.spark.sql.SparkSession

abstract class BaseLoadDataHarvesterSuite
  extends BaseHarvesterSuite {

  protected val sourceTblName = "source_" + Random.nextInt(100000)

  protected override def initializeTestEnvironment(): Unit = {
    prepareDatabase()

    _spark.sql(s"CREATE TABLE $sourceTblName (name string)")
  }

  override protected def cleanupTestEnvironment(): Unit = {
    cleanupDatabase()
  }

  test("LOAD DATA [LOCAL] INPATH path source") {
    val file = Files.createTempFile("input", ".txt").toFile
    val out = new PrintWriter(new FileOutputStream(file))
    out.write("a\nb\nc\nd\n")
    out.close()

    val qe = _spark.sql(s"LOAD DATA LOCAL INPATH '${file.getAbsolutePath}' " +
      s"OVERWRITE INTO  TABLE $sourceTblName").queryExecution
    val qd = QueryDetail(qe, 0L)
    val node = qe.sparkPlan.collect { case p: LeafExecNode => p }
    assert(node.size == 1)
    val execNode = node.head.asInstanceOf[ExecutedCommandExec]

    val entities = CommandsHarvester.LoadDataHarvester.harvest(
      execNode.cmd.asInstanceOf[LoadDataCommand], qd)
    validateProcessEntity(entities.head, _ => {}, inputs => {
      inputs.size should be (1)
      val inputEntity = inputs.head.asInstanceOf[SACAtlasEntityWithDependencies].entity
      inputEntity.getTypeName should be (external.FS_PATH_TYPE_STRING)
      inputEntity.getAttribute("name") should be (file.getAbsolutePath.toLowerCase)
    }, outputs => {
      outputs.size should be (1)
      assertTable(outputs.head, _dbName, sourceTblName, _clusterName, _useSparkTable)
    })
  }
}

class LoadDataHarvesterSuite
  extends BaseLoadDataHarvesterSuite
  with WithHiveSupport {

  override def beforeAll(): Unit = {
    super.beforeAll()
    initializeTestEnvironment()
  }

  override def afterAll(): Unit = {
    cleanupTestEnvironment()
    super.afterAll()
  }

  override def getSparkSession: SparkSession = sparkSession

  override def getDbName: String = "sac"

  override def expectSparkTableModels: Boolean = true
}

class LoadDataHarvesterWithRemoteHMSSuite
  extends BaseLoadDataHarvesterSuite
  with WithRemoteHiveMetastoreServiceSupport {

  override def beforeAll(): Unit = {
    super.beforeAll()
    initializeTestEnvironment()
  }

  override def afterAll(): Unit = {
    cleanupTestEnvironment()
    super.afterAll()
  }

  override def getSparkSession: SparkSession = sparkSession

  override def expectSparkTableModels: Boolean = false

  override def getDbName: String = dbName
} 
Example 63
Source File: GatherActor.scala    From typebus   with MIT License 5 votes vote down vote up
package io.surfkit.typebus.actors

import java.io.{PrintWriter, StringWriter}
import java.time.Instant
import java.util.UUID

import akka.actor._
import akka.cluster.Cluster
import akka.util.Timeout
import io.surfkit.typebus.bus.Publisher
import io.surfkit.typebus.{AvroByteStreams, ByteStreamReader, ByteStreamWriter}
import io.surfkit.typebus.event._

import scala.reflect.ClassTag
import scala.util.Try

object GatherActor{
  //def props[T, U](producer: Publisher[T], timeout: Timeout, writer: ByteStreamWriter[T], reader: ByteStreamReader[U]): Props = Props(classOf[GatherActor[T, U]], producer, timeout, writer)

  // Reconstructed from its use below (GatherActor.Request[T]); the original
  // definition was elided from this excerpt.
  case class Request[T](data: T)
}

class GatherActor[T : ClassTag, U : ClassTag](serviceIdentifier: ServiceIdentifier, producer: Publisher, timeout: Timeout, writer: ByteStreamWriter[T], reader: ByteStreamReader[U]) extends Actor with ActorLogging with AvroByteStreams{
  implicit val system = context.system
  import system.dispatcher
  
  val cluster = Cluster(context.system)
  val correlationId = UUID.randomUUID().toString

  log.debug(s"adding http actor ${self.path.toStringWithoutAddress}")
  def clusterPath = s"${cluster.selfAddress}${self.path.toStringWithoutAddress}"
  var replyTo:ActorRef = null

  val cancel = context.system.scheduler.scheduleOnce(timeout.duration){
    log.warning("GatherActor timeout ... shutting down!")
    context.stop(self)
  }

  def receive = {
    case msg: GatherActor.Request[T] =>
      replyTo = context.sender()
      val meta =  EventMeta(
        eventId = UUID.randomUUID().toString,
        eventType = EventType.parse(msg.data.getClass.getCanonicalName),
        directReply = Some(RpcClient(clusterPath, serviceIdentifier)),
        correlationId = Some(correlationId)
      )
      try {
        log.info(s"[GatherActor] publish ${msg.data}")
        val outEvent = PublishedEvent(
          meta = meta,
          payload = writer.write(msg.data)
        )
        producer.publish(outEvent)
        producer.traceEvent(OutEventTrace(producer.serviceIdentifier, outEvent), outEvent.meta)
      }catch{
        case t:Exception =>
          producer.produceErrorReport(t,meta)
          cancel.cancel()
          context.stop(self)
      }

    case x:PublishedEvent =>
      log.info(s"GatherActor got reply.... ${x.meta.eventType}")
      try{
        log.info(s"GatherActor try to deserialize reader: ${reader} for type: ${x.meta.eventType}")
        val responsePayload = Try(reader.read(x.payload)).toOption.getOrElse( ServiceExceptionReader.read(x.payload) )
        replyTo ! responsePayload
        producer.traceEvent(InEventTrace(producer.serviceIdentifier, x), x.meta)
      }catch{
        case t: Throwable =>
          t.printStackTrace()
          val tType = scala.reflect.classTag[T].runtimeClass.getCanonicalName
          log.error(s"Gather actor failed to reply for response: ${tType}", t)
          replyTo ! producer.produceErrorReport(t, x.meta)
      }finally {
        cancel.cancel()
        context.stop(self)
      }

    case x =>
      log.warning(s"GatherActor got Wrong message type ${x.getClass.getSimpleName}")
      cancel.cancel()
      context.stop(self)

  }

  override def postStop() {
    log.debug(s"GatherActor ACTOR STOP !!! ${self.path.toStringWithoutAddress}")
  }

} 
Example 64
Source File: Bench.scala    From scalapy   with MIT License 5 votes vote down vote up
package me.shadaj.scalapy.py

import java.io.PrintWriter

object Bench {
  def test(name: String, batches: Int, batchSize: Int)(v: () => Unit): Unit = {
    val times = new Array[Long](batches)

    var i = 0
    while (i < batches) {
      val start = System.nanoTime()
      var j = 0
      while (j < batchSize) {
        v.apply()
        j += 1
      }

      times(i) = System.nanoTime() - start
      i += 1
    }

    val individualAverageTimes = times.map(_.toDouble / batchSize)
    val averageTime = individualAverageTimes.sum / batches.toDouble
    val standardDeviation = math.sqrt(
      individualAverageTimes.map(t => math.pow(t - averageTime, 2)).sum / batches.toDouble
    )

    println(s"$name: mean $averageTime ns, std dev $standardDeviation ns")

    val writer = new PrintWriter(s"$name-${if (Platform.isNative) "native" else "jvm"}.bench.txt")
    individualAverageTimes.foreach(writer.println)
    writer.close()
  }
} 
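
A hedged usage sketch of Bench.test; the workload is an arbitrary placeholder:

import me.shadaj.scalapy.py.Bench

// Times 10 batches of 1000 iterations each, prints the mean and standard deviation,
// and writes the per-batch means to concat-jvm.bench.txt (or -native under Scala Native).
Bench.test("concat", batches = 10, batchSize = 1000) { () =>
  val s = "a" + System.nanoTime()
}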
Example 65
Source File: GeneralFunctions.scala    From spark-bench   with Apache License 2.0 5 votes vote down vote up
package com.ibm.sparktc.sparkbench.utils

import java.io.StringWriter

import scala.util.{Failure, Random, Success, Try}

object GeneralFunctions {

  val any2Long: (Any) => Long = {
    case x: Number => x.longValue
    case x => x.toString.toLong
  }

  private def defaultFn[A](any: Any): A = any.asInstanceOf[A]

  private[utils] def tryWithDefault[A](a: Any, default: A, func: Any => A): A = Try(func(a)) match {
    case Success(b) => b
    case Failure(_) => default
  }

  def getOrDefaultOpt[A](opt: Option[Any], default: A, func: Any => A = defaultFn[A](_: Any)): A = {
    opt.map(tryWithDefault(_, default, func)).getOrElse(default)
  }

  def getOrDefault[A](map: Map[String, Any], name: String, default: A, func: Any => A = defaultFn[A](_: Any)): A = {
    getOrDefaultOpt(map.get(name), default, func)
  }

  def time[R](block: => R): (Long, R) = {
    val t0 = System.nanoTime()
    val result = block    // call-by-name
    val t1 = System.nanoTime()
    (t1 - t0, result)
  }

  // https://gist.github.com/lauris/7dc94fb29804449b1836
  def ccToMap(cc: AnyRef): Map[String, Any] =
    (Map[String, Any]() /: cc.getClass.getDeclaredFields) {
      (a, f) =>
        f.setAccessible(true)
        a + (f.getName -> f.get(cc))
    }

  def verifyOrThrow[A](
                        m: Map[String, Any],
                        key: String,
                        whatItShouldBe: A,
                        errorMessage: String): A = m.get(key) match {
    case None => throw SparkBenchException(s"Required key not found: $key")
    case Some(`whatItShouldBe`) => whatItShouldBe.asInstanceOf[A]
    case _ => throw SparkBenchException(errorMessage)
  }

  def getOrThrow[A](opt: Option[A], msg: String = "Empty option"): A = opt match {
    case Some(x) => x
    case _ => throw SparkBenchException(s"Error: $msg")
  }

  def getOrThrow(m: Map[String, Any], key: String): Any = getOrThrow(m.get(key))
  def getOrThrowT[T](m: Map[String, Any], key: String): T = getOrThrow(m.get(key)).asInstanceOf[T]

  def optionallyGet[A](m: Map[String, Any], key: String): Option[A] = m.get(key).map { any =>
    any.asInstanceOf[A]
  }

  def stringifyStackTrace(e: Throwable): String = {
    import java.io.PrintWriter
    val sw = new StringWriter()
    val pw = new PrintWriter(sw)
    e.printStackTrace(pw)
    sw.toString
  }

  def randomLong(max: Long): Long = {
    val start = 0L
    (start + (Random.nextDouble() * (max - start) + start)).toLong
  }

} 
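
A short sketch of how a few of these helpers compose; the config map is illustrative:

import com.ibm.sparktc.sparkbench.utils.GeneralFunctions._

val conf = Map[String, Any]("workers" -> "8", "name" -> "bench")
val workers = getOrDefault[Long](conf, "workers", 1L, any2Long) // "8" is coerced to 8L via any2Long
val (elapsedNs, total) = time((1 to 1000).sum)                  // the block is passed by name and timed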
Example 66
Source File: PageViewGenerator.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
// scalastyle:off println
package org.apache.spark.examples.streaming.clickstream

import java.io.PrintWriter
import java.net.ServerSocket
import java.util.Random


// Reconstructed from its use below; the original class definition was elided from this excerpt.
class PageView(val url: String, val status: Int, val zipCode: Int, val userID: Int)
    extends Serializable {
  override def toString(): String = {
    "%s\t%s\t%s\t%s\n".format(url, status, zipCode, userID)
  }
}
// scalastyle:on
object PageViewGenerator {
  val pages = Map("http://foo.com/" -> .7,
                  "http://foo.com/news" -> 0.2,
                  "http://foo.com/contact" -> .1)
  val httpStatus = Map(200 -> .95,
                       404 -> .05)
  val userZipCode = Map(94709 -> .5,
                        94117 -> .5)
  val userID = Map((1 to 100).map(_ -> .01): _*)

  def pickFromDistribution[T](inputMap: Map[T, Double]): T = {
    val rand = new Random().nextDouble()
    var total = 0.0
    for ((item, prob) <- inputMap) {
      total = total + prob
      if (total > rand) {
        return item
      }
    }
    inputMap.take(1).head._1 // Shouldn't get here if probabilities add up to 1.0
  }

  def getNextClickEvent(): String = {
    val id = pickFromDistribution(userID)
    val page = pickFromDistribution(pages)
    val status = pickFromDistribution(httpStatus)
    val zipCode = pickFromDistribution(userZipCode)
    new PageView(page, status, zipCode, id).toString()
  }

  def main(args: Array[String]) {
    if (args.length != 2) {
      System.err.println("Usage: PageViewGenerator <port> <viewsPerSecond>")
      System.exit(1)
    }
    val port = args(0).toInt
    val viewsPerSecond = args(1).toFloat
    val sleepDelayMs = (1000.0 / viewsPerSecond).toInt
    val listener = new ServerSocket(port)
    println("Listening on port: " + port)

    while (true) {
      val socket = listener.accept()
      new Thread() {
        override def run(): Unit = {
          println("Got client connected from: " + socket.getInetAddress)
          val out = new PrintWriter(socket.getOutputStream(), true)

          while (true) {
            Thread.sleep(sleepDelayMs)
            out.write(getNextClickEvent())
            out.flush()
          }
          socket.close()
        }
      }.start()
    }
  }
}
// scalastyle:on println 
Example 67
Source File: PythonBroadcastSuite.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.api.python

import java.io.{File, PrintWriter}

import scala.io.Source

import org.scalatest.Matchers

import org.apache.spark.{SharedSparkContext, SparkConf, SparkFunSuite}
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.util.Utils

// This test suite uses SharedSparkContext because we need a SparkEnv in order to deserialize
// a PythonBroadcast:
class PythonBroadcastSuite extends SparkFunSuite with Matchers with SharedSparkContext {
  test("PythonBroadcast can be serialized with Kryo (SPARK-4882)") {
    val tempDir = Utils.createTempDir()
    val broadcastedString = "Hello, world!"
    def assertBroadcastIsValid(broadcast: PythonBroadcast): Unit = {
      val source = Source.fromFile(broadcast.path)
      val contents = source.mkString
      source.close()
      contents should be (broadcastedString)
    }
    try {
      val broadcastDataFile: File = {
        val file = new File(tempDir, "broadcastData")
        val printWriter = new PrintWriter(file)
        printWriter.write(broadcastedString)
        printWriter.close()
        file
      }
      val broadcast = new PythonBroadcast(broadcastDataFile.getAbsolutePath)
      assertBroadcastIsValid(broadcast)
      val conf = new SparkConf().set("spark.kryo.registrationRequired", "true")
      val deserializedBroadcast =
        Utils.clone[PythonBroadcast](broadcast, new KryoSerializer(conf).newInstance())
      assertBroadcastIsValid(deserializedBroadcast)
    } finally {
      Utils.deleteRecursively(tempDir)
    }
  }
} 
Example 68
Source File: TnCmd.scala    From TopNotch   with Apache License 2.0 5 votes vote down vote up
package com.bfm.topnotch.tnengine

import java.io.{PrintWriter, StringWriter}

import org.json4s._
import org.json4s.native.Serialization
import org.json4s.native.Serialization.writePretty

/**
 * A command for TnEngine to run
 */
abstract class TnCmd {
  
  val outputKey: String
  /** Whether to cache the resulting dataframe in memory. This should be a boolean defaulting to false,
    * but json4s has a problem with default values other than None for option. Change it to a default value if json4s
    * solves the bug. */
  val cache: Option[Boolean]
  
  val outputPath: Option[String]
  /** If writing the output in hdfs, the name of the table to mount, otherwise none. Note: this will be ignored if
    * outputPath is not specified. */
  val tableName: Option[String]

  implicit val formats = Serialization.formats(NoTypeHints)
  /**
    * Overriding toString to make the output of unit tests that have cmds in error logs easier to understand
    */
  override def toString = writePretty(this)
}

/**
 * The input to a command
 * @param ref The reference to the data set, either the path on hdfs or the name in the lookup table
 * @param onDisk Whether the input data set is stored on disk
 * @param delimiter The delimiter for plain text, delimited files. Leave to empty string for parquet.
 */
case class Input(ref: String, onDisk: Boolean, delimiter: Option[String] = None)

/**
 * The strings used for converting a config file into a TnCmd
 */
object TnCmdStrings {
  val ioNamespace = "io"
  val commandListStr = "commands"
  val writerStr = "writer"
  val commandStr = "command"
  val paramsStr = "params"
  val externalParamsStr = "externalParamsFile"
  val outputKeyStr = "outputKey"
  val writeToDiskStr = "writeToDisk"
  val outputPathStr = "outputPath"
}

/**
 * The class indicating that there was at least one error in the configuration for this command
 * @param cmdString The JSON string for the command.
 * @param errorStr The errors encountered in creating this command.
 * @param cmdIdx The index of the command in the plan that failed
 * @param outputKey This is meaningless in this class. This exists only so that TnErrorCmd can extend TnCmd.
 * @param writeToDisk This is meaningless in this class. This exists only so that TnErrorCmd can extend TnCmd.
 * @param outputPath This is meaningless in this class. This exists only so that TnErrorCmd can extend TnCmd.
 */
case class TnErrorCmd (
                            cmdString: String,
                            errorStr: String,
                            cmdIdx: Int,
                            outputKey: String = "",
                            cache: Option[Boolean] = None,
                            writeToDisk: Boolean = false,
                            outputPath: Option[String] = None,
                            tableName: Option[String] = None
                            ) extends TnCmd {
  override def toString: String = {
    s"There was an error with the command in position ${cmdIdx} in its plan. The command was: \n ${cmdString} \n " +
      s"The message was: \n ${errorStr} \n\n END OF ERROR MESSAGE FOR COMMAND IN POSITION ${cmdIdx} \n\n"
  }
}

object TnErrorCmd {
  /**
    * Helper method for easily getting the stack trace of an exception as a string
    * @param e The exception
    * @return The exception's stack trace
    */
  def getExceptionStackTrace(e: Exception): String = {
    val sw = new StringWriter
    e.printStackTrace(new PrintWriter(sw))
    sw.toString
  }
} 
Example 69
Source File: TemplateEngine.scala    From donut   with MIT License 5 votes vote down vote up
package report.donut.template

import java.io.{File, PrintWriter}

import com.gilt.handlebars.scala.Handlebars
import com.gilt.handlebars.scala.binding.dynamic._
import report.donut.gherkin.model.Report
import report.donut.log.Log
import report.donut._
import scala.util.Try

object TemplateEngine {
  def apply(report: Report, templatePath: String): Renderer = {
    val inputStream = getClass.getResourceAsStream(templatePath)
    val template = scala.io.Source.fromInputStream(inputStream).mkString
    val hbs: Handlebars[Any] = Handlebars(template)
    val rep = hbs(report)
    Renderer(rep)
  }
}

case class Renderer(boundTemplate: String) extends Log {
  def renderToHTML(outputPath: String, filePrefix: String): Either[String, Unit] = Try {
    val path =
      if (outputPath != "") {
        val outputDir = new File(outputPath)
        if (!outputDir.exists) outputDir.mkdirs
        outputDir.getAbsolutePath
      }

    val prefix = if (filePrefix != "") filePrefix + "-" else ""
    val out = new PrintWriter(outputPath + File.separator + prefix + "donut-report.html")
    out.write(boundTemplate.toString)
    out.close()
    log.info(s"Donuts created at: $path/${prefix}donut-report.html")
  }.toEither(_.getMessage)
}

object SpecialCharHandler {

  def escape(htmlReport: String) = {
    htmlReport
      .replace(">", "&gt;")
      .replace("<", "&lt;")
  }

} 
Example 70
Source File: FileUtils.scala    From eidos   with Apache License 2.0 5 votes vote down vote up
package org.clulab.wm.wmexchanger.utils

import java.io.BufferedInputStream
import java.io.BufferedOutputStream
import java.io.File
import java.io.FileInputStream
import java.io.FileOutputStream
import java.io.FilenameFilter
import java.io.ObjectInputStream
import java.io.ObjectOutputStream
import java.io.PrintWriter

import org.clulab.wm.wmexchanger.utils.Closer.AutoCloser

import scala.io.Source

object FileUtils {

  def appendingPrintWriterFromFile(file: File): PrintWriter = Sinker.printWriterFromFile(file, append = true)

  def appendingPrintWriterFromFile(path: String): PrintWriter = Sinker.printWriterFromFile(path, append = true)

  def printWriterFromFile(file: File): PrintWriter = Sinker.printWriterFromFile(file, append = false)

  def printWriterFromFile(path: String): PrintWriter = Sinker.printWriterFromFile(path, append = false)

  // Output
  def newBufferedOutputStream(file: File): BufferedOutputStream =
    new BufferedOutputStream(new FileOutputStream(file))

  def newBufferedOutputStream(filename: String): BufferedOutputStream =
    newBufferedOutputStream(new File(filename))

  def newAppendingBufferedOutputStream(file: File): BufferedOutputStream =
    new BufferedOutputStream(new FileOutputStream(file, true))

  def newAppendingBufferedOutputStream(filename: String): BufferedOutputStream =
    newAppendingBufferedOutputStream(new File(filename))

  def newObjectOutputStream(filename: String): ObjectOutputStream =
    new ObjectOutputStream(newBufferedOutputStream(filename))

  // Input
  def newBufferedInputStream(file: File): BufferedInputStream =
    new BufferedInputStream(new FileInputStream(file))

  def newBufferedInputStream(filename: String): BufferedInputStream =
    newBufferedInputStream(new File(filename))

  def newObjectInputStream(filename: String): ObjectInputStream =
    new ObjectInputStream(newBufferedInputStream(filename))

  def findFiles(collectionDir: String, extension: String): Seq[File] = {
    val dir = new File(collectionDir)
    val filter = new FilenameFilter {
      def accept(dir: File, name: String): Boolean = name.endsWith(extension)
    }

    val result = Option(dir.listFiles(filter))
        .getOrElse(throw Sourcer.newFileNotFoundException(collectionDir))
    result
  }

  protected def getTextFromSource(source: Source): String = source.mkString

  def getTextFromFile(file: File): String =
    Sourcer.sourceFromFile(file).autoClose { source =>
      getTextFromSource(source)
    }
} 
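
A hedged sketch of the write-and-scan pattern these helpers support; the paths are illustrative:

import org.clulab.wm.wmexchanger.utils.Closer.AutoCloser
import org.clulab.wm.wmexchanger.utils.FileUtils

FileUtils.printWriterFromFile("out.txt").autoClose { pw =>
  pw.println("hello") // autoClose closes the writer even if the body throws
}
val txtFiles = FileUtils.findFiles("docs", "txt") // every file under docs/ whose name ends in "txt"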
Example 71
Source File: ExtractFromFile.scala    From eidos   with Apache License 2.0 5 votes vote down vote up
package org.clulab.wm.eidos.apps

import java.io.PrintWriter

import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import org.clulab.odin.Mention
import org.clulab.wm.eidos.EidosSystem
import org.clulab.wm.eidos.utils.DisplayUtils.printMention
import org.clulab.wm.eidos.utils.Closer.AutoCloser
import org.clulab.wm.eidos.utils.FileUtils

object ExtractFromFile extends App {
  val inputDir = args(0)
  val outputFile = args(1)
  val files = FileUtils.findFiles(inputDir, "txt")
  println(s"There are ${files.length} files...")

  (FileUtils.printWriterFromFile(s"$outputFile")).autoClose { pw =>
    val ieSystem = new EidosSystem()

    for (filename <- files) {
      val text = FileUtils.getTextFromFile(filename)
      println(s"There are ${text.split('\n').length} lines in the file...")
      val annotatedDoc = ieSystem.extractFromText(text)
      val doc = annotatedDoc.document
      pw.println(s"Filename: ${filename.getName}")

      // keep the EidosMentions that are relevant to the CAG
      val cagEdgeMentions = annotatedDoc.odinMentions.filter(m => EidosSystem.CAG_EDGES.contains(m.label))
      val cagEdgeArguments = cagEdgeMentions.flatMap(mention => mention.arguments.values.flatten.toSeq)
      val eidosMentions = annotatedDoc.eidosMentions.filter(em => ieSystem.components.stopwordManager.isCAGRelevant(em.odinMention, cagEdgeMentions, cagEdgeArguments))

      val mentionsBySentence = eidosMentions.groupBy(_.odinMention.sentence).toSeq.sortBy(_._1)
      for ((sentence, sentenceMentions) <- mentionsBySentence) {
        pw.println(s"\nSENTENCE ${sentence}: ${doc.sentences(sentence).getSentenceText}")
        println(s"Number of Eidos mentions found: ${sentenceMentions.length}")
        sentenceMentions.foreach(
          m => {
            pw.println(s"CanonicalName: ${m.canonicalName}")
            pw.println(s"OntologyGrounding: \n\t${m.grounding.values.mkString("\n\t")}")
            printMention(m.odinMention, pw)

          }
        )
        pw.println(s"${"=" * 100}")
      }
    }
  }

  def prettyPrint(mentions:Seq[Mention], pw: PrintWriter): Unit = {
    val events = mentions.filter(_ matches "Event")
    val params = new mutable.HashMap[String, ListBuffer[(String, String, String)]]()
    for(e <- events) {
      val f = formal(e)
      if(f.isDefined) {
        val just = e.text
        val sent = e.sentenceObj.getSentenceText
        val quantifier = e.arguments.get("quantifier") match {
          case Some(quantifierMentions) => quantifierMentions.map(_.text).head
          case None => "None"
        }
        params.getOrElseUpdate(f.get, new ListBuffer[(String, String, String)]) += new Tuple3(just, sent, quantifier)
      }
    }

    if(params.nonEmpty) {
      println("Eidos Parameters:")
      for (k <- params.keySet) {
        val evidence = params.get(k).get
        pw.println(s"$k: ${evidence.size} instances:")
        for (e <- evidence) {
          pw.println(s"\tJustification: [${e._1}]")
          pw.println(s"""\tSentence: "${e._2}"""")
          pw.println(s"\tQuantifier: ${e._3}")
        }
        pw.println()
      }
    }
  }

  def formal(e: Mention): Option[String] = {
    val t =
        if (e matches "Decrease") Some("DECREASE")
        else if (e matches "Increase") Some("INCREASE")
        else None

    t.map(t => s"$t of ${e.arguments.get("theme").get.head.label}")
  }
} 
Example 72
Source File: FilterJsonCanonicalNames.scala    From eidos   with Apache License 2.0 5 votes vote down vote up
package org.clulab.wm.eidos.apps

import java.io.File
import java.io.PrintWriter

import org.clulab.wm.eidos.utils.FileUtils
import org.clulab.wm.eidos.utils.Sinker
import org.clulab.wm.eidos.utils.Closer.AutoCloser
import org.clulab.wm.eidos.utils.TsvWriter
import org.json4s.DefaultFormats
import org.json4s.JArray
import org.json4s.JObject
import org.json4s.JValue
import org.json4s.jackson.JsonMethods

object FilterJsonCanonicalNames extends App {

  class Filter(tsvWriter: TsvWriter) {
    implicit val formats: DefaultFormats.type = org.json4s.DefaultFormats

    tsvWriter.println("file", "id", "text", "canonicalName")

    def filter(jValue: JValue, inputFile: File): Unit = {
      val extractions: JValue = jValue \\ "extractions"

      extractions match {
        case JArray(extractions: List[_]) => // Type erasure removes the [JObject]
          extractions.foreach { extraction =>
            val id = (extraction \ "@id").extract[String]
            val text = (extraction \ "text").extract[String]
            val canonicalName = (extraction \ "canonicalName").extract[String]

            tsvWriter.println(inputFile.getName, id, text, canonicalName)
          }
        case JObject(_) =>
        case _ => throw new RuntimeException(s"Unexpected extractions value: $extractions")
      }
    }
  }

  val inputDir = args(0)
  val extension = args(1)
  val outputFile = args(2)

  new TsvWriter(Sinker.printWriterFromFile(outputFile)).autoClose { tsvWriter =>
    val filter = new Filter(tsvWriter)
    val inputFiles = FileUtils.findFiles(inputDir, extension)

    inputFiles.sortBy(_.getName).foreach { inputFile =>
      val text = FileUtils.getTextFromFile(inputFile)
      val json = JsonMethods.parse(text)

      filter.filter(json, inputFile)
    }
  }
} 
Example 73
Source File: FilterJsonLigatures.scala    From eidos   with Apache License 2.0 5 votes vote down vote up
package org.clulab.wm.eidos.apps

import java.io.File
import java.io.PrintWriter
import java.util.regex.Pattern

import org.clulab.wm.eidos.utils.Closer.AutoCloser
import org.clulab.wm.eidos.utils.FileUtils
import org.clulab.wm.eidos.utils.Sinker
import org.clulab.wm.eidos.utils.TsvWriter
import org.json4s.DefaultFormats
import org.json4s.JString
import org.json4s.JValue
import org.json4s.jackson.JsonMethods

object FilterJsonLigatures extends App {
  val pattern: Pattern = Pattern.compile("([A-Za-z]+(f([bhkl]|[ft]|[ij])|ij)) ([A-Za-z]+)")

  class Filter(tsvWriter: TsvWriter) {
    implicit val formats: DefaultFormats.type = org.json4s.DefaultFormats

    tsvWriter.println("file", "left", "right")

    def filter(jValue: JValue, inputFile: File): Unit = {
      val extractions: JValue = jValue \ "_source" \ "extracted_text"

      extractions match {
        case text: JString =>
          val matcher = pattern.matcher(text.extract[String])

          while (matcher.find)
            tsvWriter.println(inputFile.getName, matcher.group(1), matcher.group(4))
        case _ => throw new RuntimeException(s"Unexpected extractions value: $extractions")
      }
    }
  }

  val inputDir = args(0)
  val extension = args(1)
  val outputFile = args(2)

  new TsvWriter(Sinker.printWriterFromFile(outputFile)).autoClose { tsvWriter =>
    val filter = new Filter(tsvWriter)
    val inputFiles = FileUtils.findFiles(inputDir, extension)

    inputFiles.sortBy(_.getName).foreach { inputFile =>
      val text = FileUtils.getTextFromFile(inputFile)
      val json = JsonMethods.parse(text)

      filter.filter(json, inputFile)
    }
  }
} 
Example 74
Source File: TestDiskFull.scala    From eidos   with Apache License 2.0 5 votes vote down vote up
package org.clulab.wm.eidos.utils

import java.io.BufferedOutputStream
import java.io.FileOutputStream
import java.io.IOException
import java.io.OutputStreamWriter
import java.io.PrintWriter
import java.io.SyncFailedException
import java.nio.charset.StandardCharsets

import org.clulab.wm.eidos.test.TestUtils._
import org.clulab.wm.eidos.utils.Closer.AutoCloser

class TestDiskFull extends Test {

  def test1 = {
    val file = "/E:/full.dat"
    var i = 0

    try {
      val text1 = "The quick brown fox jumped over the lazy dog."
      val text = text1 + text1

      for (limit <- 1 until 400) {
        val fos = new FileOutputStream(file)
        val osw = new OutputStreamWriter(new BufferedOutputStream(fos), StandardCharsets.UTF_8.toString)
        i = 0

        new PrintWriter(osw).autoClose { pw =>
          while (i < limit) {
            pw.print(text)
            i += 1
            //          pw.flush()
            //          osw.flush()
            //          fos.flush()
            fos.getFD.sync()
          }
        }
      }
    }
    catch {
      case exception: SyncFailedException =>
        println(s"Synchronization failed for file $file at $i")
        println("Exiting with code -2 on assumption that the disk is full")
        System.exit(-2)
      case exception: IOException =>
        println(s"IO failed for file $file at $i")
        println("Exiting with code -2 on assumption that the disk is full")
        System.exit(-2)
      case exception: Exception =>
        println(s"Exception for file $file at $i")
        exception.printStackTrace()
      case throwable: Throwable =>
        println(s"Throwable for file $file at $i")
        throwable.printStackTrace()
    }
  }

//  test1
} 
Example 75
Source File: PythonBroadcastSuite.scala    From SparkCore   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.api.python

import scala.io.Source

import java.io.{PrintWriter, File}

import org.scalatest.{Matchers, FunSuite}

import org.apache.spark.{SharedSparkContext, SparkConf}
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.util.Utils

// This test suite uses SharedSparkContext because we need a SparkEnv in order to deserialize
// a PythonBroadcast:
class PythonBroadcastSuite extends FunSuite with Matchers with SharedSparkContext {
  test("PythonBroadcast can be serialized with Kryo (SPARK-4882)") {
    val tempDir = Utils.createTempDir()
    val broadcastedString = "Hello, world!"
    def assertBroadcastIsValid(broadcast: PythonBroadcast): Unit = {
      val source = Source.fromFile(broadcast.path)
      val contents = source.mkString
      source.close()
      contents should be (broadcastedString)
    }
    try {
      val broadcastDataFile: File = {
        val file = new File(tempDir, "broadcastData")
        val printWriter = new PrintWriter(file)
        printWriter.write(broadcastedString)
        printWriter.close()
        file
      }
      val broadcast = new PythonBroadcast(broadcastDataFile.getAbsolutePath)
      assertBroadcastIsValid(broadcast)
      val conf = new SparkConf().set("spark.kryo.registrationRequired", "true")
      val deserializedBroadcast =
        Utils.clone[PythonBroadcast](broadcast, new KryoSerializer(conf).newInstance())
      assertBroadcastIsValid(deserializedBroadcast)
    } finally {
      Utils.deleteRecursively(tempDir)
    }
  }
} 
Example 76
Source File: CodeToCpgFixture.scala    From codepropertygraph   with Apache License 2.0 5 votes vote down vote up
package io.shiftleft.semanticcpg.testfixtures

import java.io.{File, PrintWriter}
import java.nio.file.Files

import io.shiftleft.codepropertygraph.Cpg
import io.shiftleft.codepropertygraph.cpgloading.{CpgLoader, CpgLoaderConfig}
import io.shiftleft.semanticcpg.layers.{LayerCreatorContext, Scpg}
import io.shiftleft.semanticcpg.testfixtures.CodeToCpgFixture.createEnhancements

object CodeToCpgFixture {

  def apply[T](): CodeToCpgFixture =
    new CodeToCpgFixture(LanguageFrontend.Fuzzyc)

  def apply[T](sourceCode: String,
               passes: (Cpg => Unit) = createEnhancements,
               frontend: LanguageFrontend = LanguageFrontend.Fuzzyc)(fun: Cpg => T): T =
    new CodeToCpgFixture(frontend).buildCpg(sourceCode, passes)(fun)

  def createEnhancements(cpg: Cpg): Unit = {
    val context = new LayerCreatorContext(cpg)
    new Scpg().run(context)
  }

}

object CodeDirToCpgFixture {

  def apply[T](dir: File,
               passes: (Cpg => Unit) = createEnhancements,
               frontend: LanguageFrontend = LanguageFrontend.Fuzzyc)(fun: Cpg => T): T =
    new CodeToCpgFixture(frontend).buildCpgForDir(dir, passes)(fun)

}

class CodeToCpgFixture(frontend: LanguageFrontend) {

  
  def buildCpg[T](sourceCode: String, passes: (Cpg => Unit) = CodeToCpgFixture.createEnhancements)(fun: Cpg => T): T = {
    val tmpDir = writeCodeToFile(sourceCode)
    buildCpgForDir(tmpDir, passes)(fun)
  }

  def buildCpgForDir[T](dir: File, passes: (Cpg => Unit) = CodeToCpgFixture.createEnhancements)(fun: Cpg => T): T = {
    val cpgFile = frontend.execute(dir)
    val config = CpgLoaderConfig.withoutOverflow
    val cpg = CpgLoader.load(cpgFile.getAbsolutePath, config)

    passes(cpg)

    try fun(cpg)
    finally { cpg.close() }
  }

  private def writeCodeToFile(sourceCode: String): File = {
    val tmpDir = Files.createTempDirectory("semanticcpgtest").toFile
    tmpDir.deleteOnExit()
    val codeFile = File.createTempFile("Test", frontend.fileSuffix, tmpDir)
    codeFile.deleteOnExit()
    new PrintWriter(codeFile) { write(sourceCode); close() }
    tmpDir
  }

} 
Example 77
Source File: Tpcds_1_4_Suite.scala    From carbondata   with Apache License 2.0 5 votes vote down vote up
package org.apache.carbondata.view.rewrite

import org.apache.carbondata.view.MVCatalogInSpark
import org.apache.carbondata.view.testutil.ModularPlanTest
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.optimizer.MVRewrite
import org.scalatest.BeforeAndAfter
//import org.apache.spark.sql.catalyst.SQLBuilder
import java.io.{File, PrintWriter}

class Tpcds_1_4_Suite extends ModularPlanTest with BeforeAndAfter {
  import org.apache.carbondata.view.rewrite.matching.TestTPCDS_1_4_Batch._
  import org.apache.carbondata.view.testutil.Tpcds_1_4_Tables._

  val spark = sqlContext
  val testHive = sqlContext.sparkSession

  test("test using tpc-ds queries") {

    tpcds1_4Tables.foreach { create_table =>
      hiveClient.runSqlHive(create_table)
    }

    val writer = new PrintWriter(new File("batch.txt"))
//    val dest = "case_30"
//    val dest = "case_32"
//    val dest = "case_33"
// case_15 and case_16 need revisit

    val dest = "case_39"   
    
    }

    writer.close()
  }
} 
Example 78
Source File: TestBooleanCompressSuite.scala    From carbondata   with Apache License 2.0 5 votes vote down vote up
package org.apache.carbondata.spark.testsuite.booleantype.compress

import java.io.{File, PrintWriter}

import scala.util.Random

import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}

import org.apache.spark.sql.Row
import org.apache.spark.sql.test.util.QueryTest

class TestBooleanCompressSuite extends QueryTest with BeforeAndAfterEach with BeforeAndAfterAll {

  override def beforeEach(): Unit = {
    sql("drop table if exists boolean_table")
  }

  override def afterAll(): Unit = {
    sql("drop table if exists boolean_table")
    assert(deleteFile(randomBoolean))
  }

  val randomBoolean = s"$resourcesPath/bool/supportRandomBooleanBigFile.csv"
  val trueNum = 10000000

  override def beforeAll(): Unit = {
    assert(createBooleanFileRandom(randomBoolean, trueNum, 0.5))
  }

  test("test boolean compress rate: random file") {
    sql(
      s"""
         | CREATE TABLE boolean_table(
         | booleanField BOOLEAN
         | )
         | STORED AS carbondata
       """.stripMargin)

    sql(
      s"""
         | LOAD DATA LOCAL INPATH '${randomBoolean}'
         | INTO TABLE boolean_table
         | options('FILEHEADER'='booleanField')
           """.stripMargin)

    //    Test for compress rate
    //    sql("select * from boolean_table").show(100)
    //    sql("select count(*) from boolean_table").show()
    //    sql("select count(*) from boolean_table where booleanField= true").show()
    //    sql("select count(*) from boolean_table where booleanField= false").show()
    checkAnswer(
      sql("select count(*) from boolean_table"),
      Row(trueNum))
  }

  val randomNumber = 10000
  def createBooleanFileRandom(path: String, totalLines: Int, rate: Double): Boolean = {
    try {
      val write = new PrintWriter(path)
      var d: Double = 0.0
      val random = new Random()
      for (i <- 0 until totalLines) {
        val eachNum = random.nextInt(randomNumber)
        var flag: Boolean = true
        if (eachNum >= randomNumber * rate) {
          flag = false
        }
        write.println(flag)
        d = d + 1
      }

      write.close()
    } catch {
      case _: Exception => assert(false)
    }
    return true
  }

  def deleteFile(path: String): Boolean = {
    try {
      val file = new File(path)
      file.delete()
    } catch {
      case _: Exception => assert(false)
    }
    return true
  }
} 
Example 79
Source File: BloomCoarseGrainIndexTestUtil.scala    From carbondata   with Apache License 2.0 5 votes vote down vote up
package org.apache.carbondata.index.bloom

import java.io.{File, PrintWriter}
import java.util.UUID

import scala.util.Random

import org.apache.spark.sql.test.util.QueryTest
import org.apache.spark.sql.DataFrame

object BloomCoarseGrainIndexTestUtil extends QueryTest {

  def createFile(fileName: String, line: Int = 10000, start: Int = 0): Unit = {
    if (!new File(fileName).exists()) {
      val write = new PrintWriter(new File(fileName))
      for (i <- start until (start + line)) {
        write.println(
          s"$i,n$i,city_$i,${ Random.nextInt(80) }," +
          s"${ UUID.randomUUID().toString },${ UUID.randomUUID().toString }," +
          s"${ UUID.randomUUID().toString },${ UUID.randomUUID().toString }," +
          s"${ UUID.randomUUID().toString },${ UUID.randomUUID().toString }," +
          s"${ UUID.randomUUID().toString },${ UUID.randomUUID().toString }")
      }
      write.close()
    }
  }

  def deleteFile(fileName: String): Unit = {
    val file = new File(fileName)
    if (file.exists()) {
      file.delete()
    }
  }

  private def checkSqlHitIndex(sqlText: String, indexName: String, shouldHit: Boolean): DataFrame = {
    // we will not check whether the query will hit the index because index may be skipped
    // if the former index pruned all the blocklets
    sql(sqlText)
  }

  def checkBasicQuery(indexName: String, bloomDMSampleTable: String, normalTable: String, shouldHit: Boolean = true): Unit = {
    checkAnswer(
      checkSqlHitIndex(s"select * from $bloomDMSampleTable where id = 1", indexName, shouldHit),
      sql(s"select * from $normalTable where id = 1"))
    checkAnswer(
      checkSqlHitIndex(s"select * from $bloomDMSampleTable where id = 999", indexName, shouldHit),
      sql(s"select * from $normalTable where id = 999"))
    checkAnswer(
      checkSqlHitIndex(s"select * from $bloomDMSampleTable where city = 'city_1'", indexName, shouldHit),
      sql(s"select * from $normalTable where city = 'city_1'"))
    checkAnswer(
      checkSqlHitIndex(s"select * from $bloomDMSampleTable where city = 'city_999'", indexName, shouldHit),
      sql(s"select * from $normalTable where city = 'city_999'"))
    checkAnswer(
      sql(s"select min(id), max(id), min(name), max(name), min(city), max(city)" +
          s" from $bloomDMSampleTable"),
      sql(s"select min(id), max(id), min(name), max(name), min(city), max(city)" +
          s" from $normalTable"))
  }
} 
Example 80
Source File: BlockingIoExample.scala    From netty-in-action-scala   with Apache License 2.0 5 votes vote down vote up
package nia.chapter1.scaladsl

import java.io.{ BufferedReader, IOException, InputStreamReader, PrintWriter }
import java.net.ServerSocket


object BlockingIoExample {

  // #snip
  @throws[IOException]
  def serve(portNumber: Int): Unit = {
    // Create a new ServerSocket to listen for connection requests on the specified port
    val serverSocket = new ServerSocket(portNumber)
    // The call to accept() blocks until a connection is established
    val clientSocket = serverSocket.accept
    // These stream objects are derived from the socket's stream objects
    val in = new BufferedReader(new InputStreamReader(clientSocket.getInputStream))
    val out = new PrintWriter(clientSocket.getOutputStream, true)
    var request: String = in.readLine
    var response: String = null
    // The processing loop begins
    while (request ne null) {
      if ("Done" != request) {
        // The request is passed to the server's processing method
        response = processRequest(request)
        // The server's response is sent to the client
        out.println(response)
        // Continue with the processing loop
      }
      }
      request = in.readLine
    }
    // #snip
  }
  private def processRequest(request: String): String = "Processed"
} 
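
To exercise serve above, a throwaway client along these lines should work; the host and port are assumptions:

import java.io.{ BufferedReader, InputStreamReader, PrintWriter }
import java.net.Socket

// Minimal client sketch: send one request, print the reply, then end the session.
val socket = new Socket("localhost", 8080) // assumes serve(8080) is running
val out = new PrintWriter(socket.getOutputStream, true)
val in = new BufferedReader(new InputStreamReader(socket.getInputStream))
out.println("hello")  // the server answers "Processed"
println(in.readLine())
out.println("Done")   // "Done" gets no reply; the server keeps reading until the stream ends
socket.close()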
Example 81
Source File: QueryExecutorWithLogging.scala    From variantsdwh   with Apache License 2.0 5 votes vote down vote up
package pl.edu.pw.ii.zsibio.dwh.benchmark.utils

import java.io.{File, FileOutputStream, PrintWriter}
import java.util.Calendar

import pl.edu.pw.ii.zsibio.dwh.benchmark.dao.{ConnectDriver, EngineConnection, QueryResult}
import net.jcazevedo.moultingyaml._
import net.jcazevedo.moultingyaml.DefaultYamlProtocol
import net.jcazevedo.moultingyaml.DefaultYamlProtocol._
import org.apache.log4j.Logger
import pl.edu.pw.ii.zsibio.dwh.benchmark.dao.ConnectDriver.Value
import pl.edu.pw.ii.zsibio.dwh.benchmark.utils.QueryType.QueryType


case class Query(queryId:String, queryType:String, queryEngine:String, storageFormat:String,queryDesc:String,
                 statement:String)

object QueryType extends Enumeration {

  type QueryType = Value
  val SELECT, CREATE, UPDATE  = Value


}
object QueryExecutorWithLogging {
  val log = Logger.getLogger("pl.edu.pw.ii.zsibio.dwh.benchmark.utils.QueryExecutorWithLogging")
  object QueryYamlProtocol extends DefaultYamlProtocol {
    implicit val queryFormat = yamlFormat6(Query)
  }


  def runStatement(query: Query, conn:EngineConnection, logFile:String, dryRun: Boolean) = {
    log.info(s"Running ${query.queryId} ... using ${query.queryEngine} engine")
    log.debug(s"Executing query: ${query.statement}")
    query.queryType.toLowerCase() match {
      case "select" => logQuery(conn, query, logFile, dryRun)
      case _ => conn.executeUpdate(query.statement.toLowerCase)
    }


  }

  def parseQueryYAML(file:String,storageType:String,connString:String, kuduMaster:String, dbName:String, ifExplain:Boolean = false)  : Query ={
    log.info(s"Parsing ${file}")
    val lines = scala.io.Source.fromFile(file).mkString
    val yml = lines.stripMargin.parseYaml
    import QueryYamlProtocol._
    queryPreprocess(yml.convertTo[Query], storageType, connString, kuduMaster, dbName, ifExplain)

  }

  private def logQuery(conn:EngineConnection, query: Query, logFile:String, dryRun:Boolean) ={
    val rs = conn.executeQuery(query.statement.toLowerCase,true)
    //rs.rs.next()
    val result = s"${Calendar.getInstance().getTime().toString},${query.queryId}," +
      s"${query.queryEngine},${query.storageFormat},${rs.timing.get.getTiming()},${dryRun.toString}\n"
    log.info(s"Result: ${result}")
    val writer = new PrintWriter(new FileOutputStream(new File(logFile),true))
    writer.write(result)
    writer.flush()
    writer.close()
  }

  private def queryPreprocess(query: Query, storageType: String, connString: String, kuduMaster: String, dbName: String, ifExplain: Boolean) = {
    def replaceVars(property:String) ={
      property
        .replaceAll("\\{\\{DATA_FORMAT\\}\\}",storageType.toLowerCase)
        .replaceAll("\\{\\{DB_NAME\\}\\}",dbName.toLowerCase)
        .replaceAll("\\{\\{KUDU_MASTER\\}\\}",kuduMaster )
        .replaceAll("\\{\\{IF_EXPLAIN\\}\\}", if(ifExplain) "EXPLAIN " else "")
        .replaceAll("\\{\\{PERCENTILE_APPROX\\}\\}", if(query.queryEngine.toLowerCase=="presto") "approx_percentile" else "percentile_approx")

    }
    query.copy(
      queryId = replaceVars(query.queryId),
      queryDesc = replaceVars(query.queryDesc),
      storageFormat = replaceVars(query.storageFormat),
      statement = replaceVars(query.statement.replaceAll(",",",\n").replaceAll("\\(","\\(  "))
    )
  }

} 
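
For reference, the YAML consumed by parseQueryYAML must carry the six Query fields bound by yamlFormat6; a minimal sketch with illustrative values:

// Hypothetical query document: parseQueryYAML reads one of these from a file and
// queryPreprocess substitutes the {{...}} placeholders before execution.
val exampleQueryYaml =
  """queryId: q01_{{DATA_FORMAT}}
    |queryType: select
    |queryEngine: presto
    |storageFormat: '{{DATA_FORMAT}}'
    |queryDesc: row count for table t1
    |statement: SELECT count(*) FROM {{DB_NAME}}.t1
    |""".stripMargin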
Example 82
Source File: generalDLM.scala    From DynaML   with Apache License 2.0 5 votes vote down vote up
package io.github.mandar2812.dynaml.models.lm

import java.io.{File, PrintWriter}
import java.lang.Math.sqrt

import breeze.stats.distributions.Gaussian

import scalaz.Scalaz._


object generalDLM {
  type Loglikelihood = Double
  type Observation = Double
  type State = Double
  type Time = Double

  case class Data(time: Time, observation: Observation, state: Option[State]) {
    override def toString = state match {
      case Some(x) => s"$time, $observation, $x"
      case None => s"$time, $observation"
    }
  }
  case class Parameters(a: Double, b: Double, l1: Double, l2: Double) {
    override def toString = s"$a, $b, $l1, $l2" 
  }

  def simulate(p: Parameters): Stream[Data] = {
    val stateSpace = unfold(Gaussian(p.l1, sqrt(p.l2)).draw)(x =>
      Some(x, x + Gaussian(0, sqrt(p.b)).draw)
    )
    stateSpace.zipWithIndex map { case (x, t) => 
      Data(t, x + Gaussian(0, sqrt(p.a)).draw, Some(x)) }
  }

  val runSimulateFirstOrder = {
    val p = Parameters(3.0, 0.5, 0.0, 10.0)
    // simulate 16 different realisations of 100 observations, representing 16 stations
    val observations = (1 to 16) map (id => (id, simulate(p).take(100).toVector))

    val pw = new PrintWriter(new File("data/firstOrderdlmRes.csv"))
    pw.write(
      observations.
        flatMap{ case (id, data) =>
          data map (x => id + ", " + x.toString)}.
        mkString("\n"))
    pw.close()
  }
} 
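The latent state in simulate is produced by scalaz's unfold, which grows a Stream from a seed: each step emits the current state and seeds the next step with added Gaussian system noise, i.e. a Gaussian random walk observed through Gaussian observation noise. A deterministic toy run of the same combinator:

import scalaz.Scalaz._

// unfold(seed)(f): while f returns Some((emit, nextSeed)), emit the value and continue.
val counted: Stream[Int] = unfold(0)(x => if (x < 3) Some((x, x + 1)) else None)
// counted.toList == List(0, 1, 2)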
Example 83
Source File: mcmc.scala    From DynaML   with Apache License 2.0 5 votes vote down vote up
package io.github.mandar2812.dynaml.optimization

import java.io.{File, PrintWriter}
import breeze.numerics.log
import breeze.stats.distributions.{Gaussian, Uniform}
import io.github.mandar2812.dynaml.models.lm.KFilter
import io.github.mandar2812.dynaml.models.lm.generalDLM._
import KFilter._
import scalaz.Scalaz._

object mcmc {
  case class MetropolisState(params: Parameters, accepted: Int, ll: Loglikelihood)

    
  def metropolisIters(
    initParams: Parameters,
    likelihood: Parameters => Loglikelihood,
    perturb: Parameters => Parameters): Stream[MetropolisState] = {

    val initState = MetropolisState(initParams, 0, likelihood(initParams))
    unfold(initState)(metropolisStep(likelihood, perturb))
  }

  def main(args: Array[String]): Unit = {
    val n = 10000
    val p = Parameters(3.0, 0.5, 0.0, 10.0)
    val observations = simulate(p).take(100).toVector

    val iters = metropolisIters(p, filterll(observations), perturb(0.1)).take(n)
    println(s"Accepted: ${iters.last.accepted.toDouble/n}")

    // write the parameters to file
    val pw = new PrintWriter(new File("data/mcmcOutRes.csv"))
    pw.write(iters.map(_.params).mkString("\n"))
    pw.close()
  }
} 
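metropolisStep itself is not part of this excerpt. A plausible sketch consistent with how unfold consumes it above, accepting the perturbed parameters with probability min(1, exp(delta log-likelihood)); the names and details here are assumptions, not DynaML's exact code:

// Sketch only: the real implementation lives alongside this object in DynaML.
def metropolisStep(
  likelihood: Parameters => Loglikelihood,
  perturb: Parameters => Parameters)(
  state: MetropolisState): Option[(MetropolisState, MetropolisState)] = {

  val proposed = perturb(state.params)
  val proposedLl = likelihood(proposed)
  // Accept when log(u) < proposedLl - currentLl, i.e. with probability min(1, exp(delta)).
  val next =
    if (log(Uniform(0, 1).draw) < proposedLl - state.ll)
      MetropolisState(proposed, state.accepted + 1, proposedLl)
    else state
  Some((next, next))
}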
Example 84
Source File: PropertiesConfigTest.scala    From DynaML   with Apache License 2.0 5 votes vote down vote up
package io.github.mandar2812.dynaml.utils.sumac

import org.scalatest.FunSuite
import java.io.{PrintWriter, File}
import java.util.Properties
import org.scalatest.Matchers

class PropertiesConfigTest extends FunSuite with Matchers {

  val testOutDir = new File("test_output/" + getClass.getSimpleName)
  testOutDir.mkdirs()

  test("load properties") {
    val propFile = new File(testOutDir, "load_properties_test.properties")
    val p = new Properties()
    p.put("x", "98")
    p.put("blah", "ooga booga")
    val out = new PrintWriter(propFile)
    p.store(out,null)
    out.close()


    val args = new PropertyArgs()
    args.parse(Array("--propertyFile", propFile.getAbsolutePath))
    args.x should be (98)
    args.blah should be ("ooga booga")
  }

  test("roundtrip properties") {
    val propFile = new File(testOutDir, "roundtrip_properties_test.properties")
    val args = new PropertyArgs()
    args.x = 5
    args.wakka = 93.4f
    args.propertyFile = propFile
    args.saveConfig()

    val args2 = new PropertyArgs()
    args2.propertyFile = propFile
    args2.parse(Map[String,String]())
    args2.x should be (5)
    args2.wakka should be (93.4f)
    args2.blah should be (null)
  }


  class PropertyArgs extends FieldArgs with PropertiesConfig {
    var x: Int = _
    var blah: String = _
    var wakka: Float = _
  }

} 
Example 85
Source File: package.scala    From sbt-kubeyml   with MIT License 5 votes vote down vote up
package kubeyml

import java.io.{File, PrintWriter}

import io.circe.Encoder
import io.circe.syntax._
import io.circe.yaml.syntax._

package object plugin {

  private[kubeyml] def writePlan[A](a: A, buildTarget: File, kind: String)(implicit encoder: Encoder[A]) = {
    val genTarget = new File(buildTarget, "kubeyml")
    genTarget.mkdirs()
    val file = new File(genTarget, s"${kind}.yml")
    val printWriter = new PrintWriter(file)
    try {
      printWriter.println(a.asJson.asYaml.spaces4)
    } finally {
      printWriter.close()
    }
  }

  private[kubeyml] def writePlansInSingle[A, B](a: A, b: B, buildTarget: File, kind: String)(implicit
                                              encoderA: Encoder[A], encoder: Encoder[B]
  ) = {
    val genTarget = new File(buildTarget, "kubeyml")
    genTarget.mkdirs()
    val file = new File(genTarget, s"${kind}.yml")
    val printWriter = new PrintWriter(file)
    try {
      printWriter.println(a.asJson.asYaml.spaces4)
      printWriter.println("---")
      printWriter.println(b.asJson.asYaml.spaces4)
    } finally {
      printWriter.close()
    }
  }
} 
Example 86
Source File: SynonymRecordsUnifier.scala    From attic-nlp4l   with Apache License 2.0 5 votes vote down vote up
package org.nlp4l.syn

import java.io.PrintWriter

import scala.collection.mutable.ListBuffer

object SynonymRecordsUnifier {

  def main(args: Array[String]): Unit = {
    if(args.length < 1) usage
    
    val synRecsList = ListBuffer[SynonymRecords]()
    for(arg <- args){
      synRecsList += new SynonymRecords(arg, SynonymCommon.readAllRecords(arg))
    }
    
    outputUniqueSynonymRecords(synRecsList.head, synRecsList.tail)
  }
  
  def usage(): Unit = {
    println("Usage: org.nlp4l.syn.SynonymRecordsUnifier synonyms.txt [synonyms-2.txt synonyms-3.txt...]");
    println("\tsynonyms.txt\tsynonyms file to be checked")
    sys.exit
  }
  
  def outputUniqueSynonymRecords(src: SynonymRecords, destList: Seq[SynonymRecords]): Unit = {
    if(destList.isEmpty){
      outputCheckedFile(src.headerComments, src.uniqueRecords, src.outFile)
    }
    else{
      val result = checkAcross(src, destList, List())
      outputCheckedFile(src.headerComments, result._1.uniqueRecords, src.outFile)
      outputUniqueSynonymRecords(result._2.head, result._2.tail)
    }
  }
  
  def checkAcross(src: SynonymRecords, destList: Seq[SynonymRecords], checkedDest: Seq[SynonymRecords]):
    (SynonymRecords, Seq[SynonymRecords]) = {
    if(destList.isEmpty){
      (src, checkedDest)
    }
    else{
      val checkedLists = checkTwoRecordsList(src, destList.head)
      checkAcross(checkedLists._1, destList.tail, checkedDest :+ checkedLists._2)
    }
  }
  
  def checkTwoRecordsList(src: SynonymRecords, dest: SynonymRecords): (SynonymRecords, SynonymRecords) = {
    val result = checkTwoRecordsList(src.uniqueRecords, dest.uniqueRecords, List())
    (new SynonymRecords(src.synFile, src.outFile, src.headerComments, result._1),
      new SynonymRecords(dest.synFile, dest.outFile, dest.headerComments, result._2))
  }
  
  def checkTwoRecordsList(src: Seq[Seq[String]], dest: Seq[Seq[String]], outSrc: Seq[Seq[String]]):
    (Seq[Seq[String]], Seq[Seq[String]]) = {
    if(src.isEmpty){
      (outSrc, dest)
    }
    else{
      val result = checkRecord2List(src.head, dest, List())
      checkTwoRecordsList(src.tail, result._2, outSrc :+ result._1)
    }
  }
  
  def checkRecord2List(srcRecord: Seq[String], destList: Seq[Seq[String]], outDest: Seq[Seq[String]]):
    (Seq[String], Seq[Seq[String]]) = {
    if(destList.isEmpty){
      (srcRecord, outDest)
    }
    else{
      SynonymCommon.unifyRecordsIfNeeded(srcRecord, destList.head) match {
        case Some(unifiedSrcRecord) => checkRecord2List(unifiedSrcRecord, destList.tail, outDest)
        case None => checkRecord2List(srcRecord, destList.tail, outDest :+ destList.head)
      }
    }
  }
  
  def outputCheckedFile(headerComments: Seq[String], records: Seq[Seq[String]], outFile: String): Unit ={
    if(records.nonEmpty){
      val pw = new PrintWriter(outFile)
      try{
        // write header comment lines
        for(headerComment <- headerComments){
          pw.println(headerComment)
        }
        
        // write synonym lines
        for(record <- records){
          pw.println(record.mkString(","))
        }
      }
      finally{
        pw.close
      }
    }
  }
} 
Example 87
Source File: SynonymTest.scala    From attic-nlp4l   with Apache License 2.0 5 votes vote down vote up
package org.nlp4l.syn

import java.io.PrintWriter
import scala.sys.SystemProperties
import java.io.File

trait SynonymTest {
  
  val TEMP_DIR = new SystemProperties().apply("java.io.tmpdir")
  
  def l(a: String): Seq[String] = {
    a.split(",")
  }
  
  def ll(a: String): Seq[Seq[String]] = {
    val result = scala.collection.mutable.ArrayBuffer[Seq[String]]()
    a.split("/").foreach{ s =>
      result.append(l(s))
    }
    return result
  }
  
  def assertL(expected: Seq[String], actual: Seq[String]): Unit = {
    if(expected.isEmpty){
      assert(actual.isEmpty)
    }
    else{
      assert(actual.nonEmpty)
      assert(expected.head == actual.head, "expected \"%s\", but was \"%s\"".format(expected.head, actual.head))
      assertL(expected.tail, actual.tail)
    }
  }
  
  def assertLL(expected: Seq[Seq[String]], actual: Seq[Seq[String]]): Unit = {
    if(expected.isEmpty){
      assert(actual.isEmpty)
    }
    else{
      assert(actual.nonEmpty)
      assertL(expected.head, actual.head)
      assertLL(expected.tail, actual.tail)
    }
  }
  
  def tempFile(file: String): String ={
    "%s/%s".format(TEMP_DIR, file)
  }
  
  def createSynonymFile(file: String, content: String): Unit = {
    val lines = ll(content)
    val out = new PrintWriter(file)
    
    try{
      for(line <- lines){
        out.println(line.mkString(","))
      }
    }
    finally{
      out.close
    }
  }
  
  def assertSynonymFile(llExpected: String, fileActual: String): Unit = {
    val cont = SynonymCommon.readAllRecords(fileActual)
    assertLL(ll(llExpected), cont._2)
  }
  
  def rmFile(file: String): Unit ={
    val f = new File(file)
    f.delete()
  }
} 
Example 88
Source File: CodeGenWrite.scala    From bosatsu   with Apache License 2.0 5 votes vote down vote up
package org.bykn.bosatsu

import cats.data.NonEmptyList
import cats.effect.IO
import java.nio.file.Path
import java.io.PrintWriter
import org.typelevel.paiges.Doc

object CodeGenWrite {
  @annotation.tailrec
  final def toPath(root: Path, pn: PackageName): Path =
    pn.parts match {
      case NonEmptyList(h, Nil) => root.resolve(h).resolve("Values.java")
      case NonEmptyList(h0, h1 :: tail) =>
        toPath(root.resolve(h0), PackageName(NonEmptyList(h1, tail)))
    }

  def writeDoc(p: Path, d: Doc): IO[Unit] =
    IO {
      Option(p.getParent).foreach(_.toFile.mkdirs)
      val pw = new PrintWriter(p.toFile, "UTF-8")
      try d.renderStream(80).foreach(pw.print(_))
      finally {
        pw.close
      }
    }
} 
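A usage sketch; the path and document are illustrative, and unsafeRunSync assumes the pre-CE3 cats-effect API this code was written against. Note that writeDoc only describes the write: nothing touches the file system until the IO is run.

import java.nio.file.Paths
import org.typelevel.paiges.Doc

val io = CodeGenWrite.writeDoc(Paths.get("target/gen/Values.java"), Doc.text("// generated"))
io.unsafeRunSync() // directories are created and the file is written here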
Example 89
Source File: package.scala    From Scala-Design-Patterns-Second-Edition   with MIT License 5 votes vote down vote up
package com.ivan.nikolov

import java.io.{PrintWriter, File}

import scala.io.Source

package object monads {
  def readFile(path: String) = {
    System.out.println(s"Reading file ${path}")
    Source.fromFile(path).getLines()
  }

  def writeFile(path: String, lines: Iterator[String]) = {
      System.out.println(s"Writing file ${path}")
      val file = new File(path)
      printToFile(file) { p => lines.foreach(p.println) }
    }

  private def printToFile(file: File)(writeOp: PrintWriter => Unit): Unit = {
    val writer = new PrintWriter(file)
    try {
      writeOp(writer)
    } finally {
      writer.close()
    }
  }
} 
Example 91
Source File: package.scala    From Scala-Design-Patterns-Second-Edition   with MIT License 5 votes vote down vote up
package com.ivan.nikolov.monads

import java.io.{PrintWriter, File}

import scala.io.Source

package object io {
  def readFile(path: String) =
    IOAction(Source.fromFile(path).getLines())

  def writeFile(path: String, lines: Iterator[String]) =
    IOAction({
      val file = new File(path)
      printToFile(file) { p => lines.foreach(p.println) }
    })

  private def printToFile(file: File)(writeOp: PrintWriter => Unit): Unit = {
    val writer = new PrintWriter(file)
    try {
      writeOp(writer)
    } finally {
      writer.close()
    }
  }
} 
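IOAction is defined elsewhere in the book's code. A minimal sketch consistent with the usage above (an assumption, not the book's exact definition) is a lazily evaluated computation that runs only on demand:

// Sketch: defer a side-effecting computation until unsafePerformIO() is called.
sealed abstract class IOAction[A] { self =>
  def unsafePerformIO(): A
  def map[B](f: A => B): IOAction[B] = IOAction(f(self.unsafePerformIO()))
  def flatMap[B](f: A => IOAction[B]): IOAction[B] =
    IOAction(f(self.unsafePerformIO()).unsafePerformIO())
}

object IOAction {
  def apply[A](a: => A): IOAction[A] =
    new IOAction[A] { def unsafePerformIO(): A = a }
}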
Example 93
Source File: BasicApp.scala    From sbt-docker-compose   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
import java.io.PrintWriter
import java.net.ServerSocket

object BasicApp extends App {

  val text =
    """HTTP/1.0 200 OK
        Content-Type: text/html
        Content-Length: 200

        <HTML> <HEAD> <TITLE>Hello, World!</TITLE> </HEAD> <BODY LANG="en-US" BGCOLOR="#e6e6ff" DIR="LTR"> <P ALIGN="CENTER"> <FONT FACE="Arial, sans-serif" SIZE="6">Hello, World!</FONT> </P> </BODY> </HTML>"""
  val port = 8080
  val listener = new ServerSocket(port)

  while (true) {
    val sock = listener.accept()
    new PrintWriter(sock.getOutputStream, true).println(text)
    sock.shutdownOutput()
  }
} 
Example 94
Source File: BasicAppSpec.scala    From sbt-docker-compose   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
import org.scalatest._
import scala.Console._
import scala.sys.process._
import scalaj.http.Http
import org.scalatest.Tag
import org.scalatest.concurrent._
import org.scalatest.exceptions._
import java.io.{ByteArrayOutputStream, PrintWriter}

//You can define a specific tag to indicate which test should be run against the Docker Compose instance
object DockerComposeTag extends Tag("DockerComposeTag")

class BasicAppSpec extends fixture.FunSuite with fixture.ConfigMapFixture with Eventually with IntegrationPatience with Matchers {

  // The configMap passed to each test case will contain the connection information for the running Docker Compose
  // services. The key into the map is "serviceName:containerPort" and it will return "host:hostPort" which is the
  // Docker Compose generated endpoint that can be connected to at runtime. You can use this to endpoint connect to
  // for testing. Each service will also inject a "serviceName:containerId" key with the value equal to the container id.
  // You can use this to emulate service failures by killing and restarting the container.
  val basicServiceName = "basic"
  val basicServiceHostKey = s"$basicServiceName:8080"
  val basicServiceContainerIdKey = s"$basicServiceName:containerId"

  test("Validate that the Docker Compose endpoint returns a success code and the string 'Hello, World!'", DockerComposeTag) {
    configMap =>{
      println(configMap)
      val hostInfo = getHostInfo(configMap)
      val containerId = getContainerId(configMap)

      println(s"Attempting to connect to: $hostInfo, container id is $containerId")

      eventually {
        val output = Http(s"http://$hostInfo").asString
        output.isSuccess shouldBe true
        output.body should include ("Hello, World!")
      }
    }
  }

  test("Example Untagged Test. Will not be run.") {
    configMap =>
  }

  test("Validate presence of docker config information in system properties", DockerComposeTag) {
    configMap =>
      Option(System.getProperty(basicServiceHostKey)) shouldBe defined
  }

  def getHostInfo(configMap: ConfigMap): String = getContainerSetting(configMap, basicServiceHostKey)
  def getContainerId(configMap: ConfigMap): String = getContainerSetting(configMap, basicServiceContainerIdKey)

  def getContainerSetting(configMap: ConfigMap, key: String): String = {
    if (configMap.keySet.contains(key)) {
      configMap(key).toString
    }
    else {
      throw new TestFailedException(s"Cannot find the expected Docker Compose service key '$key' in the configMap", 10)
    }
  }
} 
Example 96
Source File: BasicAppSpec.scala    From sbt-docker-compose   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
import org.scalatest._
import scala.Console._
import scala.sys.process._
import scalaj.http.Http
import org.scalatest.Tag
import org.scalatest.concurrent._
import org.scalatest.exceptions._
import java.io.{ByteArrayOutputStream, PrintWriter}

class BasicAppSpec extends fixture.FunSuite with fixture.ConfigMapFixture with Eventually with IntegrationPatience with Matchers {

  // The configMap passed to each test case will contain the connection information for the running Docker Compose
  // services. The key into the map is "serviceName:containerPort" and it will return "host:hostPort" which is the
  // Docker Compose generated endpoint that can be connected to at runtime. You can use this to endpoint connect to
  // for testing. Each service will also inject a "serviceName:containerId" key with the value equal to the container id.
  // You can use this to emulate service failures by killing and restarting the container.
  val basicServiceName = "basic"
  val basicServiceHostKey = s"$basicServiceName:8080"
  val basicServiceContainerIdKey = s"$basicServiceName:containerId"

  test("Validate that the Docker Compose endpoint returns a success code and the string 'Hello, World!'") {
    configMap =>{
      println(configMap)
      val hostInfo = getHostInfo(configMap)
      val containerId = getContainerId(configMap)

      println(s"Attempting to connect to: $hostInfo, container id is $containerId")

      eventually {
        val output = Http(s"http://$hostInfo").asString
        output.isSuccess shouldBe true
        output.body should include ("Hello, World!")
      }
    }
  }

  test("Validate presence of docker config information in system properties") {
    configMap =>
      Option(System.getProperty(basicServiceHostKey)) shouldBe defined
  }

  def getHostInfo(configMap: ConfigMap): String = getContainerSetting(configMap, basicServiceHostKey)
  def getContainerId(configMap: ConfigMap): String = getContainerSetting(configMap, basicServiceContainerIdKey)

  def getContainerSetting(configMap: ConfigMap, key: String): String = {
    if (configMap.keySet.contains(key)) {
      configMap(key).toString
    }
    else {
      throw new TestFailedException(s"Cannot find the expected Docker Compose service key '$key' in the configMap", 10)
    }
  }
} 
Example 98
Source File: BasicAppSpec.scala    From sbt-docker-compose   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
import scala.Console._
import scala.sys.process._
import org.specs2._
import org.specs2.execute._
import scalaj.http.Http
import java.io.{ByteArrayOutputStream, PrintWriter}

class BasicAppSpec extends mutable.Specification  {

  // The System Properties will contain the connection information for the running Docker Compose
  // services. The key into the map is "serviceName:containerPort" and it will return "host:hostPort" which is the
  // Docker Compose generated endpoint that can be connected to at runtime. You can use this to endpoint connect to
  // for testing. Each service will also inject a "serviceName:containerId" key with the value equal to the container id.
  // You can use this to emulate service failures by killing and restarting the container.
  val basicServiceName = "basic"
  val basicServiceHostKey = s"$basicServiceName:8080"
  val basicServiceContainerIdKey = s"$basicServiceName:containerId"
  val hostInfo = getHostInfo()
  val containerId = getContainerId()

  "Validate that the Docker Compose endpoint returns a success code and the string 'Hello, World!'" >> {
      println(s"Attempting to connect to: $hostInfo, container id is $containerId")

      eventually {
        val output = Http(s"http://$hostInfo").asString
        output.isSuccess mustEqual true
        output.body must contain ("Hello, World!")
      }
  }

  def getHostInfo(): String = getContainerSetting(basicServiceHostKey)
  def getContainerId(): String = getContainerSetting(basicServiceContainerIdKey)

  def getContainerSetting(key: String): String = {
    if (System.getProperty(key) !=  null) {
      System.getProperty(key)
    }
    else {
      throw new FailureException(Failure(s"Cannot find the expected Docker Compose service key '$key' in the System Properties"))
    }
  }
} 
Example 99
Source File: EventsController.scala    From chatoverflow   with Eclipse Public License 2.0 5 votes vote down vote up
package org.codeoverflow.chatoverflow.ui.web.rest.events

import java.io.PrintWriter
import java.util.concurrent.ConcurrentHashMap

import javax.servlet.AsyncContext
import javax.servlet.http.HttpServletRequest
import org.codeoverflow.chatoverflow.ui.web.JsonServlet
import org.scalatra.servlet.ScalatraAsyncSupport
import org.scalatra.{BadRequest, Unauthorized}
import org.scalatra.swagger.Swagger

class EventsController(implicit val swagger: Swagger) extends JsonServlet with ScalatraAsyncSupport with EventsControllerDefinition {
  private val connectionWriters = new ConcurrentHashMap[AsyncContext, PrintWriter]()

  def broadcast(messageType: String, message: String = null): Unit = {
    connectionWriters.forEach((ctx, writer) => {
      try {
        sendMessage(writer, messageType, message)
      } catch {
        // probably lost or closed connection, remove from the list of connected clients
        // (the map is keyed by AsyncContext, so removal must use the context, not the writer)
        case _: Throwable => connectionWriters.remove(ctx)
      }
    })
  }

  def closeConnections(): Unit = {
    connectionWriters.forEach((ctx, writer) => {
      try {
        sendMessage(writer, "close", null)
        writer.close()
      } finally {
        connectionWriters.remove(ctx)
      }
    })
  }

  private def sendMessage(writer: PrintWriter, messageType: String, message: String): Unit = {
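    // Server-sent-events framing: one "event:" line carrying the message type,
    // then a "data:" line per payload line, terminated by a blank line.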
    

    var msg = "event: " + messageType.replace("\n", "") + "\n"
    if (message != null)
      msg += "data: " + message.replace("\n", "\ndata: ") + "\n\n"
    writer.write(msg)
    writer.flush()
  }

  get("/", operation(getEvents)) {
    val accept = request.getHeader("Accept")
    if (accept == null || !accept.replace(" ", "").split(",").contains("text/event-stream")) {
      status = 406
    } else {
      authParamRequired {
        contentType = "text/event-stream"

        val asyncContext = request.startAsync()
        asyncContext.setTimeout(0)

        val writer = asyncContext.getResponse.getWriter
        connectionWriters.put(asyncContext, writer)
      }
    }
  }

  private def authParamRequired(func: => Any)(implicit request: HttpServletRequest): Any = {
    val authKeyKey = "authKey"

    if (!request.parameters.contains(authKeyKey) || request.getParameter(authKeyKey).isEmpty) {
      BadRequest()
    } else if (request.getParameter(authKeyKey) != chatOverflow.credentialsService.generateAuthKey()) {
      Unauthorized()
    } else {
      func
    }
  }
} 
Example 100
Source File: framian.scala    From scala-course   with GNU General Public License v3.0 5 votes vote down vote up
import java.io.{File,PrintWriter}
import framian.{Index,Cols}
import framian.csv.{Csv,CsvFormat}

object FramianTest {

  def main(args: Array[String]) = {
    println("Hello")
    val df=Csv.parseFile(new File("../r/cars93.csv")).labeled.toFrame
    println(""+df.rows+" "+df.cols)
    val df2=df.filter(Cols("EngineSize").as[Double])( _ <= 4.0 )
    println(""+df2.rows+" "+df2.cols)
    val df3=df2.map(Cols("Weight").as[Int],"WeightKG")(r=>r.toDouble*0.453592)
    println(""+df3.rows+" "+df3.cols)
    println(df3.colIndex)
    val csv = Csv.fromFrame(new CsvFormat(",", header = true))(df3)
    new PrintWriter("out.csv") { write(csv.toString); close }
    println("Done")
  }

} 
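The last write uses the anonymous-subclass idiom: the initializer block of new PrintWriter("out.csv") { ... } calls write and close on the instance being constructed. Spelled out, with the customary try/finally added for safety, it is equivalent to:

val pw = new PrintWriter("out.csv")
try pw.write(csv.toString)
finally pw.close()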
Example 101
Source File: WriterEffectPage.scala    From eff   with MIT License 5 votes vote down vote up
package org.atnos.site
package lib

object WriterEffectPage extends UserGuidePage { def is = "Writer".title ^ s2"""

You can then define your own custom `Fold` to log the values to a file:${snippet{
import org.atnos.eff._, all._, syntax.all._
import cats.data.Writer
import java.io.PrintWriter
import scala.io

type S = Fx.fx1[Writer[String, *]]

val action: Eff[S, Int] = for {
  a <- pure[S, Int](1)
  _ <- tell("first value "+a)
  b <- pure[S, Int](2)
  _ <- tell("second value "+b)

} yield a + b

// define a fold to output values
def fileFold(path: String) = new RightFold[String, Unit] {
  type S = PrintWriter
  val init: S = new PrintWriter(path)

  def fold(a: String, s: S): S =
  { s.println(a); s }

  def finalize(s: S): Unit =
    s.close
}

action.runWriterFold(fileFold("target/log")).run
io.Source.fromFile("target/log").getLines.toList
}.eval}


"""
} 
Example 102
Source File: AnalyzerRule.scala    From scala-commons   with MIT License 5 votes vote down vote up
package com.avsystem.commons
package analyzer

import java.io.{PrintWriter, StringWriter}

import scala.tools.nsc.Global
import scala.util.control.NonFatal

abstract class AnalyzerRule(val global: Global, val name: String, defaultLevel: Level = Level.Warn) {

  import global._

  var level: Level = defaultLevel
  var argument: String = _

  protected def classType(fullName: String): Type =
    try rootMirror.staticClass(fullName).asType.toType.erasure catch {
      case _: ScalaReflectionException => NoType
    }

  protected def analyzeTree(fun: PartialFunction[Tree, Unit])(tree: Tree): Unit =
    try fun.applyOrElse(tree, (_: Tree) => ()) catch {
      case NonFatal(t) =>
        val sw = new StringWriter
        t.printStackTrace(new PrintWriter(sw))
        reporter.error(tree.pos, s"Analyzer rule $this failed: " + sw.toString)
    }

  private def adjustMsg(msg: String): String = s"[AVS] $msg"

  protected def report(pos: Position, message: String): Unit =
    level match {
      case Level.Off =>
      case Level.Info => reporter.echo(pos, adjustMsg(message))
      case Level.Warn => reporter.warning(pos, adjustMsg(message))
      case Level.Error => reporter.error(pos, adjustMsg(message))
    }

  def analyze(unit: CompilationUnit): Unit

  override def toString: String =
    getClass.getSimpleName
}

sealed trait Level
object Level {
  case object Off extends Level
  case object Info extends Level
  case object Warn extends Level
  case object Error extends Level
} 
Example 103
Source File: MergeDeps.scala    From bazel-deps   with MIT License 5 votes vote down vote up
package com.github.johnynek.bazel_deps

import cats.data.{ NonEmptyList, Validated, ValidatedNel }
import cats.Foldable
import cats.implicits._
import io.circe.jawn.JawnParser
import java.io.{ File, PrintWriter }
import scala.util.{ Failure, Success }
import java.nio.file.Path

object MergeDeps {
  private def load(f: Path): ValidatedNel[String, Model] =
    FormatDeps.readModel(f.toFile) match {
      case Right(m) => Validated.valid(m)
      case Left(err) => Validated.invalidNel(err)
    }

  def fail(errs: NonEmptyList[String]): Nothing = {
    errs.toList.foreach(System.err.println)
    System.exit(1)
    sys.error("unreachable")
  }

  def apply(models: NonEmptyList[Path], out: Option[Path]): Unit = {

    type A[T] = ValidatedNel[String, T]
    val mod = models.traverse[A, Model](load).toEither.right.flatMap {
      Model.combine(_)
    }

    mod match {
      case Left(errs) => fail(errs)
      case Right(m) =>
        val stream = m.toDoc.renderStream(100)
        out match {
          case None => stream.foreach(System.out.print)
          case Some(path) =>
            val pw = new PrintWriter(path.toFile)
            stream.foreach(pw.print(_))
            pw.flush
            pw.close
        }
      }
  }

  def addDep(model: Path, lang: Language, coords: NonEmptyList[MavenCoordinate]): Unit =
    load(model) match {
      case Validated.Invalid(errs) => fail(errs)
      case Validated.Valid(m) =>
        val realLang = m.getOptions.replaceLang(lang)
        val deps = coords.map(realLang.unmangle(_).toDependencies(realLang))

        def combine(d1: Dependencies, d2: Dependencies): Either[NonEmptyList[String], Dependencies] =
          Dependencies.combine(m.getOptions.getVersionConflictPolicy, d1, d2).toEither

        type E[T] = Either[NonEmptyList[String], T]
        Foldable[NonEmptyList].foldM[E, Dependencies, Dependencies](deps, m.dependencies)(combine) match {
          case Left(errs) => fail(errs)
          case Right(resDep) =>
            val stream = m.copy(dependencies = resDep).toDoc.renderStream(100)
            val pw = new PrintWriter(model.toFile)
            stream.foreach(pw.print(_))
            pw.flush
            pw.close
        }
  }
} 
Example 104
Source File: FormatDeps.scala    From bazel-deps   with MIT License 5 votes vote down vote up
package com.github.johnynek.bazel_deps

import java.io.{ File, PrintWriter }
import io.circe.jawn.JawnParser
import scala.util.{ Failure, Success }

object FormatDeps {
  def readModel(path: File): Either[String, Model] = {
    val content: Either[String, String] =
      Model.readFile(path) match {
        case Success(str) => Right(str)
        case Failure(err) =>
          Left(s"[ERROR]: Failed to read ${path}.\n$err")
      }

    val parser = if (path.toString.endsWith(".json")) new JawnParser else Yaml
    content.right.flatMap { c =>
      Decoders.decodeModel(parser, c) match {
        case Right(m) => Right(m)
        case Left(err) => Left(s"[ERROR]: Failed to parse ${path}.\n$err")
      }
    }
  }

  def apply(path: File, overwrite: Boolean): Unit = {
    val model = readModel(path) match {
      case Left(msg) =>
        System.err.println(msg)
        System.exit(1)
        sys.error("unreachable")
      case Right(m) => m
    }

    val stream = model.toDoc.renderStreamTrim(100)
    if (overwrite) {
      val pw = new PrintWriter(path)
      stream.foreach(pw.print(_))
      pw.flush
      pw.close
    }
    else {
      stream.foreach(System.out.print(_))
    }
  }
} 
Example 105
Source File: FormatActor.scala    From scastie   with Apache License 2.0 5 votes vote down vote up
package com.olegych.scastie
package sbt

import api.{FormatRequest, FormatResponse, ScalaTarget}

import akka.actor.Actor

import org.scalafmt.{Scalafmt, Formatted}
import org.scalafmt.config.{ScalafmtConfig, ScalafmtRunner}

import org.slf4j.LoggerFactory

import java.io.{PrintWriter, StringWriter}

class FormatActor() extends Actor {
  private val log = LoggerFactory.getLogger(getClass)

  private def format(code: String, isWorksheetMode: Boolean, scalaTarget: ScalaTarget): Either[String, String] = {
    log.info(s"format (isWorksheetMode: $isWorksheetMode)")
    log.info(code)

    val config =
      if (isWorksheetMode && scalaTarget.hasWorksheetMode)
        ScalafmtConfig.default.copy(runner = ScalafmtRunner.sbt)
      else
        ScalafmtConfig.default

    Scalafmt.format(code, style = config) match {
      case Formatted.Success(formattedCode) => Right(formattedCode)
      case Formatted.Failure(failure) =>
        Left(failure.toString)
    }
  }

  override def receive: Receive = {
    case FormatRequest(code, isWorksheetMode, scalaTarget) =>
      sender ! FormatResponse(format(code, isWorksheetMode, scalaTarget))
  }
} 
Example 106
Source File: RuntimeError.scala    From scastie   with Apache License 2.0 5 votes vote down vote up
package com.olegych.scastie.api

import java.io.{PrintWriter, StringWriter}
import play.api.libs.json._

case class RuntimeError(
    message: String,
    line: Option[Int],
    fullStack: String
)

object RuntimeError {
  implicit val formatRuntimeError: OFormat[RuntimeError] =
    Json.format[RuntimeError]

  def wrap[T](in: => T): Either[Option[RuntimeError], T] = {
    try {
      Right(in)
    } catch {
      case ex: Exception =>
        Left(RuntimeError.fromThrowable(ex, fromScala = false))
    }
  }

  def fromThrowable(t: Throwable, fromScala: Boolean = true): Option[RuntimeError] = {
    def search(e: Throwable) = {
      e.getStackTrace
        .find(
          trace =>
            if (fromScala)
              trace.getFileName == "main.scala" && trace.getLineNumber != -1
            else true
        )
        .map(v => (e, Some(v.getLineNumber)))
    }
    def loop(e: Throwable): Option[(Throwable, Option[Int])] = {
      val s = search(e)
      if (s.isEmpty)
        if (e.getCause != null) loop(e.getCause)
        else Some((e, None))
      else s
    }

    loop(t).map {
      case (err, line) =>
        val errors = new StringWriter()
        t.printStackTrace(new PrintWriter(errors))
        val fullStack = errors.toString

        RuntimeError(err.toString, line, fullStack)
    }
  }
}

object RuntimeErrorWrap {
  implicit val formatRuntimeErrorWrap: OFormat[RuntimeErrorWrap] =
    Json.format[RuntimeErrorWrap]
}

case class RuntimeErrorWrap(error: Option[RuntimeError]) 
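A quick sketch of RuntimeError.wrap in use (the expressions are illustrative):

import com.olegych.scastie.api.RuntimeError

val ok = RuntimeError.wrap(21 * 2)              // Right(42)
val boom = RuntimeError.wrap(sys.error("boom")) // Left(Some(RuntimeError("java.lang.RuntimeException: boom", ...)))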
Example 107
Source File: RuntimeErrorLogger.scala    From scastie   with Apache License 2.0 5 votes vote down vote up
package com.olegych.scastie.sbtscastie

import java.io.{OutputStream, PrintWriter}

import com.olegych.scastie.api._
import org.apache.logging.log4j.core.LogEvent
import org.apache.logging.log4j.core.appender.AbstractAppender
import org.apache.logging.log4j.core.layout.PatternLayout
import org.apache.logging.log4j.message.ObjectMessage
import play.api.libs.json.Json
import sbt.Keys._
import sbt._
import sbt.internal.LogManager.suppressedMessage
import sbt.internal.util.MainAppender.defaultScreen
import sbt.internal.util.{ObjectEvent, TraceEvent}

object RuntimeErrorLogger {
  private object NoOp {
    def apply(): NoOp = {
      def out(in: String): Unit = {
        println(
          Json.stringify(
            Json.toJson[ConsoleOutput](
              ConsoleOutput.SbtOutput(ProcessOutput(in.trim, ProcessOutputType.StdOut, None))
            )
          )
        )
      }

      new NoOp(new OutputStream {
        override def close(): Unit = ()
        override def flush(): Unit = ()
        override def write(b: Array[Byte]): Unit =
          out(new String(b))
        override def write(b: Array[Byte], off: Int, len: Int): Unit =
          out(new String(b, off, len))
        override def write(b: Int): Unit = ()
      })
    }
  }
  private class NoOp(os: OutputStream) extends PrintWriter(os)

  private val clientLogger = new AbstractAppender("sbt-scastie-appender", null, PatternLayout.createDefaultLayout(), true, Array()) {
    def append(event: LogEvent): Unit = {
      //daaamn
      val throwable = Option(event.getThrown).orElse {
        for {
          e <- Option(event.getMessage).collect {
            case e: ObjectMessage => e
          }
          e <- Option(e.getParameter).collect {
            case e: ObjectEvent[_] => e
          }
          e <- Option(e.message).collect {
            case e: TraceEvent => e
          }
          //since worksheet wraps the code in object we unwrap it to display clearer message
          e <- Option(e.message).collect {
            case e: ExceptionInInitializerError if e.getCause != null && e.getCause.getStackTrace.headOption.exists { e =>
                  e.getClassName == Instrumentation.instrumentedObject + "$" && e.getMethodName == "<clinit>"
                } =>
              e.getCause
            case e => e
          }
        } yield e
      }
      throwable.foreach { throwable =>
        val error = RuntimeErrorWrap(RuntimeError.fromThrowable(throwable))
        println(Json.stringify(Json.toJson(error)))
      }
    }
    start()
  }

  val settings: Seq[sbt.Def.Setting[_]] = Seq(
    extraLoggers := { (key: ScopedKey[_]) =>
      Seq(clientLogger)
    },
    showSuccess := false,
    logManager := sbt.internal.LogManager.withLoggers(
      (task, state) => defaultScreen(ConsoleOut.printWriterOut(NoOp()), suppressedMessage(task, state)),
      relay = _ => clientLogger
    )
  )
} 
Example 108
Source File: InstrumentedInputs.scala    From scastie   with Apache License 2.0 5 votes vote down vote up
package com.olegych.scastie.instrumentation

import java.io.{PrintWriter, StringWriter}
import java.time.Instant

import com.olegych.scastie.api._

import scala.meta.parsers.Parsed

case class InstrumentationFailureReport(message: String, line: Option[Int]) {
  def toProgress(snippetId: SnippetId): SnippetProgress = {
    SnippetProgress.default.copy(
      ts = Some(Instant.now.toEpochMilli),
      snippetId = Some(snippetId),
      compilationInfos = List(Problem(Error, line, message))
    )
  }
}

object InstrumentedInputs {
  def apply(inputs0: Inputs): Either[InstrumentationFailureReport, InstrumentedInputs] = {
    if (inputs0.isWorksheetMode) {
      val instrumented = Instrument(inputs0.code, inputs0.target).map { instrumentedCode =>
        inputs0.copy(code = instrumentedCode)
      }

      instrumented match {
        case Right(inputs) =>
          success(inputs)

        case Left(error) =>
          import InstrumentationFailure._

          error match {
            case HasMainMethod =>
              Right(InstrumentedInputs(inputs0.copy(_isWorksheetMode = false), isForcedProgramMode = true))

            case UnsupportedDialect =>
              Left(InstrumentationFailureReport("This Scala target does not have a worksheet mode", None))

            case ParsingError(Parsed.Error(pos, message, _)) =>
              val lineOffset = Instrument.getParsingLineOffset(inputs0)
              Left(InstrumentationFailureReport(message, Some(pos.startLine + lineOffset)))

            case InternalError(exception) =>
              val errors = new StringWriter()
              exception.printStackTrace(new PrintWriter(errors))
              val fullStack = errors.toString

              Left(InstrumentationFailureReport(fullStack, None))
          }

      }
    } else {
      success(inputs0)
    }
  }

  private def success(inputs: Inputs): Either[InstrumentationFailureReport, InstrumentedInputs] = {
    Right(InstrumentedInputs(inputs, isForcedProgramMode = false))
  }
}

case class InstrumentedInputs(
    inputs: Inputs,
    isForcedProgramMode: Boolean
) 
Example 109
Source File: BfsStrategyStopWatchDecorator.scala    From apalache   with Apache License 2.0 5 votes vote down vote up
package at.forsyte.apalache.tla.bmcmt.search

import java.io.{FileWriter, PrintWriter, Writer}
import java.time.{Duration, LocalDateTime}

import at.forsyte.apalache.tla.bmcmt.search.SearchStrategy.{Finish, FinishOnDeadlock, NextStep}


class BfsStrategyStopWatchDecorator(strategy: SearchStrategy, filename: String) extends SearchStrategy {
  private var currentStep: Int = 0
  private var printWriter: Option[PrintWriter] = None
  private var startTime: LocalDateTime = LocalDateTime.now()

  override def getCommand: SearchStrategy.Command = {
    val command = strategy.getCommand
    command match {
      case NextStep(stepNo, _, _) =>
        if (stepNo == 0) {
          currentStep = 0
          // create a log file and add a header
          printWriter = Some(new PrintWriter(new FileWriter(filename, false)))
          printWriter.get.println("step,total_sec,nanosec_adjustment")
          // start the timer
          startTime = LocalDateTime.now()
        } else {
          appendCsvEntry()
          currentStep = stepNo
        }

      case Finish() | FinishOnDeadlock() =>
        appendCsvEntry()
        printWriter.get.close()
    }
    command
  }

  private def appendCsvEntry(): Unit = {
    val currentTime = LocalDateTime.now()
    val duration = Duration.between(startTime, currentTime)
    printWriter.get.println("%d,%d,%d".format(currentStep, duration.getSeconds, duration.getNano))
    printWriter.get.flush() // get the results as soon as possible
  }

  override def registerResponse(response: SearchStrategy.Response): Unit = {
    strategy.registerResponse(response)
  }
} 
Example 110
Source File: RuleStatLocator.scala    From apalache   with Apache License 2.0 5 votes vote down vote up
package at.forsyte.apalache.tla.bmcmt.profiler

import java.io.{BufferedWriter, FileWriter, PrintWriter}

import scala.collection.immutable.SortedMap


class RuleStatLocator {
  private var ruleStats: Map[String, RuleStat] = Map()

  def getRuleStat(ruleName: String): RuleStat = {
    ruleStats.get(ruleName) match {
      case Some(r) => r
      case None =>
        val newRule = new RuleStat(ruleName)
        ruleStats += ruleName -> newRule
        newRule
    }
  }

  def getStats = SortedMap(ruleStats.toSeq :_*)

  def writeStats(filename: String): Unit = {
    val writer = new PrintWriter(new FileWriter(filename, false))
    writer.println("Rule profiling statistics")
    val hrule = List.fill(80)('-').mkString
    writer.println(hrule)
    writer.println("%20s %9s %9s %9s %9s %9s"
      .format("name", "calls", "cells", "smt-consts", "smt-asserts", "smt-avg-size"))
    writer.println(hrule)
    val stats = ruleStats.values.toSeq.sortWith(_.nCalls > _.nCalls)
    for (rs <- stats) {
      writer.println("%-20s %9d %9d %9d %9d %9d"
        .format(rs.ruleName, rs.nCalls, rs.nCellsSelf, rs.nSmtConstsSelf, rs.nSmtAssertsSelf, rs.smtAssertsSizeAvg))
    }
    writer.close()
  }
} 
Example 111
Source File: RewriterBase.scala    From apalache   with Apache License 2.0 5 votes vote down vote up
package at.forsyte.apalache.tla.bmcmt

import java.io.{PrintWriter, StringWriter}

import at.forsyte.apalache.tla.bmcmt.types.eager.TrivialTypeFinder
import at.forsyte.apalache.tla.lir.convenience.tla
import org.scalatest.{BeforeAndAfterEach, FunSuite}

class RewriterBase extends FunSuite with BeforeAndAfterEach {
  protected var solverContext: SolverContext = new PreproSolverContext(new Z3SolverContext())
  protected var arena: Arena = Arena.create(solverContext)

  override def beforeEach() {
    solverContext = new PreproSolverContext(new Z3SolverContext(debug = true))
    arena = Arena.create(solverContext)
  }

  override def afterEach() {
    solverContext.dispose()
  }

  protected def create(): SymbStateRewriterAuto = {
    new SymbStateRewriterAuto(solverContext)
  }

  protected def createWithoutCache(): SymbStateRewriter = {
    new SymbStateRewriterImpl(solverContext, new TrivialTypeFinder())
  }

  protected def assertUnsatOrExplain(rewriter: SymbStateRewriter, state: SymbState): Unit = {
    assertOrExplain("UNSAT", rewriter, state, !solverContext.sat())
  }

  protected def assumeTlaEx(rewriter: SymbStateRewriter, state: SymbState): SymbState = {
    val nextState = rewriter.rewriteUntilDone(state.setTheory(BoolTheory()))
    solverContext.assertGroundExpr(nextState.ex)
    assert(solverContext.sat())
    nextState
  }

  protected def assertTlaExAndRestore(rewriter: SymbStateRewriter, state: SymbState): Unit = {
    rewriter.push()
    val nextState = rewriter.rewriteUntilDone(state.setTheory(BoolTheory()))
    assert(solverContext.sat())
    rewriter.push()
    solverContext.assertGroundExpr(nextState.ex)
    assert(solverContext.sat())
    rewriter.pop()
    rewriter.push()
    solverContext.assertGroundExpr(tla.not(nextState.ex))
    assertUnsatOrExplain(rewriter, nextState)
    rewriter.pop()
    rewriter.pop()
  }

  private def assertOrExplain(msg: String, rewriter: SymbStateRewriter,
                              state: SymbState, outcome: Boolean): Unit = {
    if (!outcome) {
      val writer = new StringWriter()
      new SymbStateDecoder(solverContext, rewriter).dumpArena(state, new PrintWriter(writer))
      solverContext.log(writer.getBuffer.toString)
      solverContext.push() // push and pop flush the log output
      solverContext.pop()
      fail("Expected %s, check log.smt for explanation".format(msg))
    }

  }
} 
Example 112
Source File: ThrowableImplicits.scala    From ForestFlow   with Apache License 2.0 5 votes vote down vote up
package ai.forestflow.utils

import java.io.{PrintWriter, StringWriter}

import scala.language.implicitConversions

object ThrowableImplicits {
  implicit def Throwable2StackTraceString(ex: Throwable): String = {
    val sw = new StringWriter()
    val pw = new PrintWriter(sw)
    ex.printStackTrace(pw)
    sw.toString
  }

  implicit class PrintableThrowable(ex: Throwable) {
    def printableStackTrace: String = Throwable2StackTraceString(ex)
  }

} 
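Usage sketch (the failing expression is illustrative):

import ai.forestflow.utils.ThrowableImplicits._

try "42x".toInt catch {
  case e: NumberFormatException =>
    // the implicit class renders the full stack trace as a String
    println(e.printableStackTrace)
}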
Example 113
Source File: DagrScriptManagerTest.scala    From dagr   with MIT License 5 votes vote down vote up
package dagr.core.cmdline

import java.io.PrintWriter
import java.nio.file.{Files, Path}

import com.fulcrumgenomics.commons.CommonsDef._
import com.fulcrumgenomics.commons.util.ClassFinder
import dagr.core.UnitSpec
import dagr.core.tasksystem.Pipeline
import org.reflections.util.ClasspathHelper

object DagrScriptManagerTest {
  def packageName: String = "dagr.example"

  def helloWorldScript(packageName: String = packageName) =
    s"""
      |package ${packageName}
      |
      |import com.fulcrumgenomics.sopt._
      |import dagr.core.tasksystem.Pipeline
      |
      |// NB: output directories must exist
      |@clp(description = "Hello World Pipeline")
      |class HelloWorldPipeline
      |(
      |  @arg(doc = "First set of text")
      |  var blockOne: List[String] = Nil,
      |  @arg(doc = "Second set of text")
      |  var blockTwo: List[String] = Nil
      |) extends Pipeline {
      |
      |  override def build(): Unit = {
      |      println("blockOne: " + blockOne.mkString("\\n"))
      |      println("blockTwo: " + blockTwo.mkString("\\n"))
      |  }
      |}
    """.stripMargin

  val buggyScript =
    """
      |package dagr.example
      |
      | Bug Bug Bug
      | Bug Bug Bug
      | BUg Bug Bug
      |
      | class flarfle extends noodle ()
    """.stripMargin

  def writeScript(content: String): (Path, Path) = {
    val tmpDir: Path = Files.createTempDirectory("dagrScriptDirectory")
    val tmpFile: Path = Files.createTempFile(tmpDir, "dagrScript", ".dagr")
    tmpDir.toFile.deleteOnExit()
    tmpFile.toFile.deleteOnExit()

    // write the script
    val writer = new PrintWriter(tmpFile.toFile)
    writer.println(content)
    writer.close()

    (tmpDir, tmpFile)
  }
}

class DagrScriptManagerTest extends UnitSpec {
  import DagrScriptManagerTest._

  "DagrScriptManager" should "compile and load a Dagr script" in {
    val (tmpDir: Path, tmpFile: Path) = writeScript(helloWorldScript())

    val manager = new DagrScriptManager
    manager.loadScripts(Seq(tmpFile), tmpDir)

    // make sure tmpDir is now on the classpath
    ClasspathHelper.forManifest.iterator.toSet.exists(url => url.toString.contains(tmpDir.toString)) shouldBe true

    // make sure we find the class in the classpath
    val classFinder: ClassFinder = new ClassFinder
    classFinder.find("dagr.example", classOf[Pipeline])
    classFinder
      .getClasses
      .iterator
      .map(_.getCanonicalName)
      .exists(name => 0 == name.compareTo("dagr.example.HelloWorldPipeline")) shouldBe true

  }

  it should "fail to compile a buggy Dagr script" in {
    val (tmpDir: Path, tmpFile: Path) = writeScript(buggyScript)

    val manager = new DagrScriptManager
    an[RuntimeException] should be thrownBy manager.loadScripts(Seq(tmpFile), tmpDir)

    // make sure tmpDir is not on the classpath
    ClasspathHelper.forManifest.iterator.exists(url => url.toString.contains(tmpDir.toString)) shouldBe false
  }
} 
Example 114
Source File: ControlledFacesSettings.scala    From parametric-face-image-generator   with Apache License 2.0 5 votes vote down vote up
package faces.settings

import java.io.{File, PrintWriter}

import scala.io.Source

case class ControlledFacesSettings(
                                    override val general: General,
                                    override val outputLocation: OutputLocation,
                                    override val backgrounds: Backgrounds,
                                    override val renderingMethods: RenderingMethods,
                                    override val morphableModelParameters: MorphableModelParameters,
                                    override val imageDimensions: ImageDimensions,
                                    override val defaultParameters: DefaultParameters,
                                    override val landmarkTags: IndexedSeq[String],
                                    override val regionMaps: IndexedSeq[TextureMappedPropertyDescription],
                                    illuminationVariation: ControlledIlluminationVariation,
                                    poseVariation: ControlledPoseVariation,
                                    backgroundVariation: ControlledBackgroundVariation
                                  ) extends FacesSettings {
}


object ControlledFacesSettings {

  import ControlledFacesSettingsJsonFormatV1._
  import spray.json._

  def write(setting:ControlledFacesSettings, file: File): Unit = {

    val writer = new PrintWriter(file)
    writer.println(setting.toJson prettyPrint)
    writer.flush()
    writer.close()
  }

  def read(file: File): ControlledFacesSettings = {
    val source = Source.fromFile(file)
    read(source)
  }

  def read(source: Source): ControlledFacesSettings = {

    val content = try source.getLines mkString "\n" finally source.close()
    content.parseJson.convertTo[ControlledFacesSettings]
  }

} 
Example 115
Source File: RandomFacesSettings.scala    From parametric-face-image-generator   with Apache License 2.0 5 votes vote down vote up
package faces.settings

import java.io.{File, PrintWriter}

import scala.io.Source

case class RandomFacesSettings(  override val general: General,
                                 override val outputLocation: OutputLocation,
                                 override val backgrounds: Backgrounds,
                                 override val renderingMethods: RenderingMethods,
                                 override val morphableModelParameters: MorphableModelParameters,
                                 override val imageDimensions: ImageDimensions,
                                 override val defaultParameters: DefaultParameters,
                                 override val landmarkTags: IndexedSeq[String],
                                 override val regionMaps: IndexedSeq[TextureMappedPropertyDescription],
                                 illuminationParameters: IlluminationParameters,
                                 poseVariation: RandomPoseVariation
                              ) extends FacesSettings {
}

object RandomFacesSettings  {
  import RandomFacesSettingsJsonFormatV1._
  import spray.json._

  def write(setting: RandomFacesSettings, file: File): Unit = {

    val writer = new PrintWriter(file)
    writer.println(setting.toJson prettyPrint)
    writer.flush()
    writer.close()
  }

  def read(file: File): RandomFacesSettings = {
    val source = Source.fromFile(file)
    read(source)
  }

  def read(source: Source): RandomFacesSettings = {

    val content = try source.getLines mkString "\n" finally source.close()
    content.parseJson.convertTo[RandomFacesSettings]
  }

} 
Example 116
Source File: VPCWriter.scala    From cloudformation-template-generator   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.monsanto.arch.cloudformation.model

import java.io.{File, PrintWriter}

import spray.json._



trait VPCWriter {
  def jsonToFile[T : JsonWriter](fileName: String, subDir: String, jsObj: T) {
    val json = jsObj.toJson
    val filePath = new File("target/" + subDir + "/" + fileName)
    filePath.getParentFile.mkdirs()
    val printWriter = new PrintWriter(filePath)
    printWriter.print(json.prettyPrint)
    printWriter.close()
  }

  def writeStaxModule(fileName: String, template: Template) {
    jsonToFile(fileName, "template", template)
    jsonToFile(fileName, "config", InputParameter.templateParameterToInputParameter(template.Parameters))
  }
} 
Example 117
Source File: ExceptionStacktraceToString.scala    From graphcool-framework   with Apache License 2.0 5 votes vote down vote up
package cool.graph.util.exceptions

import java.io.{PrintWriter, StringWriter}

object ExceptionStacktraceToString {

  implicit class ThrowableStacktraceExtension(t: Throwable) {
    def stackTraceAsString: String = ExceptionStacktraceToString(t)
  }

  def apply(t: Throwable): String = {
    val sw = new StringWriter()
    val pw = new PrintWriter(sw)
    t.printStackTrace(pw)
    sw.toString()
  }
} 
Example 118
Source File: BitcoinSpout.scala    From Raphtory   with Apache License 2.0 5 votes vote down vote up
package com.raphtory.examples.blockchain.spouts

import java.io.File
import java.io.PrintWriter

import com.raphtory.core.components.Spout.SpoutTrait
import com.raphtory.examples.blockchain.BitcoinTransaction
import scalaj.http.Http
import scalaj.http.HttpRequest
import spray.json._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.sys.process._

class BitcoinSpout extends SpoutTrait {

  var blockcount    = 1
  val rpcuser       = System.getenv().getOrDefault("BITCOIN_USERNAME", "").trim
  val rpcpassword   = System.getenv().getOrDefault("BITCOIN_PASSWORD", "").trim
  val serverAddress = System.getenv().getOrDefault("BITCOIN_NODE", "").trim
  val id            = "scala-jsonrpc"
  val baseRequest   = Http(serverAddress).auth(rpcuser, rpcpassword).header("content-type", "text/plain")

  //************* MESSAGE HANDLING BLOCK
  override def ProcessSpoutTask(message: Any): Unit = message match {
    case StartSpout => AllocateSpoutTask(Duration(1, MILLISECONDS), "parseBlock")
    case "parseBlock" =>
      try {
        getTransactions()
        blockcount += 1
        AllocateSpoutTask(Duration(1, MILLISECONDS), "parseBlock")
      } catch {
        case e: java.net.SocketTimeoutException => AllocateSpoutTask(Duration(1, MILLISECONDS), "parseBlock")
      }
    case _ => println("message not recognized!")
  }

  def outputScript() = {
    val pw = new PrintWriter(new File("bitcoin.sh"))
    pw.write("""curl --user $1:$2 --data-binary $3 -H 'content-type: text/plain;' $4""")
    pw.close
    "chmod 777 bitcoin.sh" !
  }

  def curlRequest(command: String, params: String): String = {
    //val data = """{"jsonrpc":"1.0","id":"scala-jsonrpc","method":"getblockhash","params":[2]}"""
    val data = s"""{"jsonrpc":"1.0","id":"$id","method":"$command","params":[$params]}"""
    s"bash bitcoin.sh $rpcuser $rpcpassword $data $serverAddress" !!
  }

  def request(command: String, params: String = ""): HttpRequest =
    baseRequest.postData(s"""{"jsonrpc": "1.0", "id":"$id", "method": "$command", "params": [$params] }""")

  def getTransactions(): Unit = {
    val re        = request("getblockhash", blockcount.toString).execute().body.toString.parseJson.asJsObject
    val blockID   = re.fields("result")
    val blockData = request("getblock", s"$blockID,2").execute().body.toString.parseJson.asJsObject
    val result    = blockData.fields("result")
    val time      = result.asJsObject.fields("time")
    for (transaction <- result.asJsObject().fields("tx").asInstanceOf[JsArray].elements)
      sendTuple(BitcoinTransaction(time, blockcount, blockID, transaction))
    //val time = transaction.asJsObject.fields("time")

  }

}
//def request(command: String, params: String = ""): HttpRequest = baseRequest.postData(s"""{"jsonrpc": "1.0", "id":"$id", "method": "$command", "params": [$params] }""") 
Example 119
Source File: InvokeMigrationHandler.scala    From flyway-awslambda   with MIT License 5 votes vote down vote up
package crossroad0201.aws.flywaylambda

import java.io.{BufferedOutputStream, InputStream, OutputStream, PrintWriter}

import com.amazonaws.regions.{Region, Regions}
import com.amazonaws.services.lambda.runtime.{Context, RequestStreamHandler}
import com.amazonaws.services.s3.{AmazonS3, AmazonS3Client}

import scala.io.{BufferedSource, Codec}
import scala.util.{Failure, Success, Try}

class InvokeMigrationHandler extends RequestStreamHandler with S3MigrationHandlerBase {
  type BucketName = String
  type Prefix = String
  type ConfFileName = String

  override def handleRequest(input: InputStream, output: OutputStream, context: Context): Unit = {
    def parseInput: Try[(BucketName, Prefix, ConfFileName)] = Try {
      import spray.json._
      import DefaultJsonProtocol._

      val json = new BufferedSource(input)(Codec("UTF-8")).mkString
      val jsObj = JsonParser(json).asJsObject
      jsObj.getFields(
        "bucket_name",
        "prefix"
      ) match {
        case Seq(JsString(b), JsString(p)) => {
          jsObj.getFields(
            "flyway_conf"
          ) match {
            case Seq(JsString(c)) => (b, p, c)
            case _ => (b, p, "flyway.conf")
          }
        }
        case _ => throw new IllegalArgumentException(s"Missing required keys [bucket_name, prefix]. - $json")
      }
    }

    val logger = context.getLogger

    implicit val s3Client: AmazonS3 = new AmazonS3Client().withRegion(Region.getRegion(Regions.fromName(sys.env("AWS_REGION"))))

    (for {
      i <- parseInput
      _ = { logger.log(s"Flyway migration start. by invoke lambda function(${i._1}, ${i._2}, ${i._3}).") }
      r <- migrate(i._1, i._2, i._3)(context, s3Client)
    } yield r) match {
      case Success(r) =>
        logger.log(r)
        val b = r.getBytes("UTF-8")
        val bout = new BufferedOutputStream(output)
        bout.write(b) // write the whole result once; flushed below
        bout.flush()
      case Failure(e) =>
        e.printStackTrace()
        val w = new PrintWriter(output)
        w.write(e.toString)
        w.flush()
    }
  }

} 
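One detail worth calling out in the handler above: a PrintWriter built directly over an OutputStream buffers internally, so the flush calls are load-bearing. A small standalone sketch of that behaviour:

import java.io.{ByteArrayOutputStream, PrintWriter}

object FlushDemo extends App {
  val out = new ByteArrayOutputStream()
  val w = new PrintWriter(out)
  w.write("migration failed: timeout")
  println(out.size()) // 0 - nothing has reached the stream yet
  w.flush()           // PrintWriter(OutputStream) buffers internally
  println(out.toString("UTF-8"))
}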
Example 120
Source File: PrintCreateStatements.scala    From akka-persistence-cassandra   with Apache License 2.0 5 votes vote down vote up
package akka.persistence.cassandra

import java.io.File
import java.io.PrintWriter
import akka.actor.ActorSystem


object PrintCreateStatements {

  def main(args: Array[String]): Unit = {
    val system = ActorSystem("PrintCreateStatements")
    val statements = new KeyspaceAndTableStatements(system, "akka.persistence.cassandra", PluginSettings(system))

    def withWriter(name: String)(f: PrintWriter => Unit): Unit = {
      val writer: PrintWriter = new PrintWriter(new File(name))
      try {
        f(writer)
      } finally {
        writer.flush()
        writer.close()
      }

    }

    withWriter("./target/journal-keyspace.txt") { pw =>
      pw.println("//#journal-keyspace")
      pw.println(statements.createJournalKeyspaceStatement + ";")
      pw.println("//#journal-keyspace")
    }

    withWriter("./target/journal-tables.txt") { pw =>
      pw.println("//#journal-tables")
      pw.println(statements.createJournalTablesStatements.mkString(";\n\n") + ";")
      pw.println("//#journal-tables")
    }

    withWriter("./target/snapshot-keyspace.txt") { pw =>
      pw.println("//#snapshot-keyspace")
      pw.println(statements.createSnapshotKeyspaceStatement + ";")
      pw.println("//#snapshot-keyspace")
    }
    withWriter("./target/snapshot-tables.txt") { pw =>
      pw.println("//#snapshot-tables")
      pw.println(statements.createSnapshotTablesStatements.mkString(";\n\n") + ";")
      pw.println("//#snapshot-tables")
    }

    system.terminate()
  }

} 
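withWriter above is an instance of the loan pattern: the helper owns the PrintWriter's lifecycle and the caller only supplies the body. A generic, self-contained version; the helper name withPrintWriter is illustrative.

import java.io.{File, PrintWriter}

object Loan {
  def withPrintWriter[A](file: File)(f: PrintWriter => A): A = {
    val writer = new PrintWriter(file)
    try f(writer)
    finally {
      writer.flush()
      writer.close()
    }
  }
}

object LoanDemo extends App {
  Loan.withPrintWriter(new File("notes.txt")) { pw =>
    pw.println("flushed and closed by the helper")
  }
}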
Example 121
Source File: CliLogger.scala    From Argus-SAF   with Apache License 2.0 5 votes vote down vote up
package org.argus.jnsaf.native_statistics

import java.io.{File, FileWriter, PrintWriter}
import java.text.SimpleDateFormat
import java.util.Date

 
object CliLogger {
  
  def timeStamp: String = new SimpleDateFormat("yyyyMMdd-HHmmss").format(new Date)
  
  def outPrint(s : String) {
    scala.Console.out.print(s)
    scala.Console.out.flush()
  }

  def outPrintln(s : String) {
    scala.Console.out.println(s)
    scala.Console.out.flush()
  }

  def outPrintln() {
    scala.Console.out.println()
    scala.Console.out.flush()
  }

  def errPrintln(s : String) {
    scala.Console.err.println(s)
    scala.Console.err.flush()
  }

  def errPrintln() {
    scala.Console.err.println()
    scala.Console.err.flush()
  }
  
  def logError(dir: File, text: String, e: Throwable) {
    outPrintln()
    errPrintln(text + e.getMessage)
    val f = new File(dir, ".errorlog")
    f.getParentFile.mkdirs
    val fw = new FileWriter(f)
    try {
      val pw = new PrintWriter(fw)
      pw.println("An error occurred on " + timeStamp)
      e.printStackTrace(pw)
      fw.close()
      outPrintln("Written: " + f.getAbsolutePath)
    } catch {
      case inner: Throwable => // renamed so it no longer shadows the outer e
        errPrintln("Error: " + inner.getMessage)
    }
  }
} 
Example 122
Source File: TaintAnalysisTask.scala    From Argus-SAF   with Apache License 2.0 5 votes vote down vote up
package org.argus.amandroid.plugin

import java.io.PrintWriter

import org.argus.amandroid.alir.componentSummary.{ApkYard, ComponentBasedAnalysis}
import org.argus.amandroid.alir.dataRecorder.DataCollector
import org.argus.amandroid.alir.pta.model.AndroidModelCallHandler
import org.argus.amandroid.alir.pta.summaryBasedAnalysis.AndroidSummaryProvider
import org.argus.amandroid.alir.taintAnalysis.DataLeakageAndroidSourceAndSinkManager
import org.argus.amandroid.core.{AndroidGlobalConfig, ApkGlobal}
import org.argus.amandroid.core.decompile.{DecompileLayout, DecompileStrategy, DecompilerSettings}
import org.argus.amandroid.plugin.communication.CommunicationSourceAndSinkManager
import org.argus.amandroid.plugin.dataInjection.IntentInjectionSourceAndSinkManager
import org.argus.amandroid.plugin.oauth.OAuthSourceAndSinkManager
import org.argus.amandroid.plugin.password.PasswordSourceAndSinkManager
import org.argus.jawa.flow.taintAnalysis.TaintAnalysisResult
import org.argus.jawa.core.io.Reporter
import org.argus.jawa.core.util.FileUtil
import org.argus.jawa.core.util._
import org.argus.jawa.flow.summary.store.TaintStore
import org.argus.jawa.flow.summary.taint.BottomUpTaintAnalysis

import scala.concurrent.duration._
import scala.language.postfixOps

object TaintAnalysisApproach extends Enumeration {
  val COMPONENT_BASED, BOTTOM_UP = Value
}

case class TaintAnalysisTask(module: TaintAnalysisModules.Value, fileUris: ISet[(FileResourceUri, FileResourceUri)], forceDelete: Boolean, reporter: Reporter, guessPackage: Boolean, approach: TaintAnalysisApproach.Value) {
  import TaintAnalysisModules._
//  private final val TITLE = "TaintAnalysisTask"
  def run: Option[TaintAnalysisResult] = {
    val yard = new ApkYard(reporter)
    val apks = fileUris.map{ case (apkUri, outputUri) =>
      val layout = DecompileLayout(outputUri)
      val strategy = DecompileStrategy(layout)
      val settings = DecompilerSettings(debugMode = false, forceDelete = forceDelete, strategy, reporter)
      yard.loadApk(apkUri, settings, collectInfo = true, resolveCallBack = true, guessPackage)
    }
    val ssm = module match {
      case INTENT_INJECTION =>
        new IntentInjectionSourceAndSinkManager(AndroidGlobalConfig.settings.injection_sas_file)
      case PASSWORD_TRACKING =>
        new PasswordSourceAndSinkManager(AndroidGlobalConfig.settings.sas_file)
      case OAUTH_TOKEN_TRACKING =>
        new OAuthSourceAndSinkManager(AndroidGlobalConfig.settings.sas_file)
      case DATA_LEAKAGE =>
        new DataLeakageAndroidSourceAndSinkManager(AndroidGlobalConfig.settings.sas_file)
      case COMMUNICATION_LEAKAGE =>
        new CommunicationSourceAndSinkManager(AndroidGlobalConfig.settings.sas_file)
    }
    approach match {
      case TaintAnalysisApproach.BOTTOM_UP =>
        var tar: Option[TaintStore] = None
        apks.foreach { apk =>
          val ta = new BottomUpTaintAnalysis[ApkGlobal](apk, new AndroidSummaryProvider(apk), new AndroidModelCallHandler, ssm, reporter)
          val eps = apk.model.getEnvMap.map(_._2._1).toSet
          val taintMap = ta.process(eps)
          taintMap.foreach { case (_, t) =>
            tar match {
              case Some(ts) =>
                ts.merge(t)
              case None =>
                tar = Some(t)
                apk.addTaintAnalysisResult(t)
            }
          }
        }
        writeResult(apks)
        tar
      case TaintAnalysisApproach.COMPONENT_BASED =>
        ComponentBasedAnalysis.prepare(apks)(AndroidGlobalConfig.settings.timeout minutes)
        val cba = new ComponentBasedAnalysis(yard)
        cba.phase1(apks)
        val iddResult = cba.phase2(apks)
        val tar = cba.phase3(iddResult, ssm)
        writeResult(apks)
        tar
    }
  }

  private def writeResult(apks: ISet[ApkGlobal]): Unit = {
    apks.foreach { apk =>
      val appData = DataCollector.collect(apk)
      val outputDirUri = FileUtil.appendFileName(apk.model.layout.outputSrcUri, "result")
      val outputDir = FileUtil.toFile(outputDirUri)
      if (!outputDir.exists()) outputDir.mkdirs()
      val out = new PrintWriter(FileUtil.toFile(FileUtil.appendFileName(outputDirUri, "AppData.txt")))
      out.print(appData.toString)
      out.close()
    }
  }
} 
Example 123
Source File: FixResources.scala    From Argus-SAF   with Apache License 2.0 5 votes vote down vote up
package org.argus.amandroid.core.util

import org.argus.jawa.core.util._
import java.io.PrintWriter

import org.argus.amandroid.core.dedex.JawaDeDex
import org.argus.amandroid.core.parser.ManifestParser
import org.argus.jawa.core.util.FileUtil


object FixResources {
  def fix(decFolder: FileResourceUri, dedex: JawaDeDex): Unit = {
    val xml = FileUtil.appendFileName(decFolder, "AndroidManifest.xml")
    if(dedex.haveRenamedElements) {
      var filestr = FileUtil.readFileContent(xml)
      val (pkg, recs) = ManifestParser.loadPackageAndComponentNames(xml)
      val newpkg = dedex.mapPackage(pkg)
      filestr = filestr.replaceAll("\"" + pkg + "\"", "\"" + newpkg + "\"")
      recs.foreach {
        case (origstr, comclass) =>
          val newclass = dedex.mapRecord(comclass)
          filestr = filestr.replaceAll("\"" + origstr + "\"", "\"" + newclass + "\"")
      }
      val pw = new PrintWriter(FileUtil.toFile(xml))
      pw.write(filestr)
      pw.flush()
      pw.close()
    }
  }
} 
Example 124
Source File: CliLogger.scala    From Argus-SAF   with Apache License 2.0 5 votes vote down vote up
package org.argus.saf.cli.util

import java.io.{File, FileWriter, PrintWriter}
import java.text.SimpleDateFormat
import java.util.Date

 
object CliLogger {
  
  def timeStamp: String = new SimpleDateFormat("yyyyMMdd-HHmmss").format(new Date)
  
  def outPrint(s : String) {
    scala.Console.out.print(s)
    scala.Console.out.flush()
  }

  def outPrintln(s : String) {
    scala.Console.out.println(s)
    scala.Console.out.flush()
  }

  def outPrintln() {
    scala.Console.out.println()
    scala.Console.out.flush()
  }

  def errPrintln(s : String) {
    scala.Console.err.println(s)
    scala.Console.err.flush()
  }

  def errPrintln() {
    scala.Console.err.println()
    scala.Console.err.flush()
  }
  
  def logError(dir: File, text: String, e: Throwable) {
    outPrintln()
    errPrintln(text + e.getMessage)
    val f = new File(dir, ".errorlog")
    f.getParentFile.mkdirs
    val fw = new FileWriter(f)
    try {
      val pw = new PrintWriter(fw)
      pw.println("An error occurred on " + timeStamp)
      e.printStackTrace(pw)
      fw.close()
      outPrintln("Written: " + f.getAbsolutePath)
    } catch {
      case inner: Throwable => // renamed so it no longer shadows the outer e
        errPrintln("Error: " + inner.getMessage)
    }
  }
} 
Example 125
Source File: Runner.scala    From doobie-codegen   with MIT License 5 votes vote down vote up
package mdmoss.doobiegen

import java.io.{File, PrintWriter}
import java.nio.file.Paths

import mdmoss.doobiegen.GenOptions.{GenOption, Ignore}
import mdmoss.doobiegen.StatementTypes.Statement
import mdmoss.doobiegen.output.SourceWriter
import org.parboiled2.ParseError

import scala.collection.mutable.ListBuffer
import scala.util.{Failure, Success, Try}

object Runner {

  sealed trait TestDBSource
  case class TestDatabase(driver: String, url: String, username: String, password: String) extends TestDBSource
  case class InsertString(source: String) extends TestDBSource

  sealed trait TargetVersion

  object TargetVersion {
    case object DoobieV023 extends TargetVersion
    case object DoobieV024 extends TargetVersion
    case object DoobieV030 extends TargetVersion
    case object DoobieV04 extends TargetVersion
  }

  case class Target(
    schemaDir: String,
    testDb: TestDBSource,
    src: String,
    `package`: String,
    statements: Option[Map[String, List[Statement]]],
    columnOptions: Map[String, Map[String, List[GenOption]]],
    quiet: Boolean = false,
    targetVersion: TargetVersion = TargetVersion.DoobieV023,
    // This is mainly an override for testing
    tableSpecificStatements: Map[String, List[Statement]]
  ) {

    def enclosingPackage = `package`.split('.').reverse.headOption
  }

  object Target {
    def apply(
      schemaDir: String,
      testDb: TestDatabase,
      src: String,
      `package`: String
    ): Target = Target(
      schemaDir = schemaDir,
      testDb = testDb,
      src = src,
      `package` = `package`,
      statements = None,
      columnOptions = Map(),
      quiet = false,
      targetVersion = TargetVersion.DoobieV023,
      tableSpecificStatements = Map.empty[String, List[Statement]]
    )
  }

  val Default = Target(
    schemaDir = "sql/",
    TestDatabase(
      "org.postgresql.Driver",
      "jdbc:postgresql:gen",
      "test",
      "test"
    ),
    src = "out/src",
    `package` = "mdmoss.doobiegen.db"
  )

  def run(target: Target) = {
    
        destDir.toFile.getParentFile.listFiles()
          .filter(_.isDirectory)
          .filter(_.toPath.endsWith("gen"))
          .foreach(delete)
      }
    }
  }

  def delete(file: File): Unit = {
    if (file.isDirectory) {
      file.listFiles().foreach(delete)
      file.delete()
    } else {
      file.delete()
    }
  }

  val Seperator = "*" * 80
} 
Example 126
Source File: Compile.scala    From hail   with MIT License 5 votes vote down vote up
package is.hail.expr.ir

import java.io.PrintWriter

import is.hail.annotations._
import is.hail.asm4s._
import is.hail.expr.ir.agg.AggStateSig
import is.hail.expr.ir.lowering.LoweringPipeline
import is.hail.types.physical.PType
import is.hail.types.virtual.Type
import is.hail.utils._

case class CodeCacheKey(aggSigs: IndexedSeq[AggStateSig], args: Seq[(String, PType)], body: IR)

case class CodeCacheValue(typ: PType, f: (Int, Region) => Any)

object Compile {
  private[this] val codeCache: Cache[CodeCacheKey, CodeCacheValue] = new Cache(50)

  def apply[F: TypeInfo](
    ctx: ExecuteContext,
    params: IndexedSeq[(String, PType)],
    expectedCodeParamTypes: IndexedSeq[TypeInfo[_]], expectedCodeReturnType: TypeInfo[_],
    body: IR,
    optimize: Boolean = true,
    print: Option[PrintWriter] = None
  ): (PType, (Int, Region) => F) = {

    val normalizeNames = new NormalizeNames(_.toString)
    val normalizedBody = normalizeNames(body,
      Env(params.map { case (n, _) => n -> n }: _*))
    val k = CodeCacheKey(FastIndexedSeq[AggStateSig](), params.map { case (n, pt) => (n, pt) }, normalizedBody)
    codeCache.get(k) match {
      case Some(v) =>
        return (v.typ, v.f.asInstanceOf[(Int, Region) => F])
      case None =>
    }

    var ir = body
    ir = Subst(ir, BindingEnv(params
      .zipWithIndex
      .foldLeft(Env.empty[IR]) { case (e, ((n, t), i)) => e.bind(n, In(i, t)) }))
    ir = LoweringPipeline.compileLowerer(optimize).apply(ctx, ir).asInstanceOf[IR].noSharing

    TypeCheck(ir, BindingEnv.empty)
    InferPType(ir)
    val returnType = ir.pType

    val fb = EmitFunctionBuilder[F](ctx, "Compiled",
      CodeParamType(typeInfo[Region]) +: params.map { case (_, pt) =>
        EmitParamType(pt)
      }, returnType)

    

    Emit(ctx, ir, fb, Some(aggSigs))

    val f = fb.resultWithIndex()
    codeCache += k -> CodeCacheValue(ir.pType, f)
    (ir.pType, f.asInstanceOf[(Int, Region) => (F with FunctionWithAggRegion)])
  }
} 
Example 127
Source File: LogTestListener.scala    From hail   with MIT License 5 votes vote down vote up
package is.hail

import java.io.{PrintWriter, StringWriter}

import is.hail.utils._
import org.testng.{ITestContext, ITestListener, ITestResult}

class LogTestListener extends ITestListener {
  def testString(result: ITestResult): String = {
    s"${ result.getTestClass.getName }.${ result.getMethod.getMethodName }"
  }

  def onTestStart(result: ITestResult) {
    info(s"starting test ${ testString(result) }...")
  }

  def onTestSuccess(result: ITestResult) {
    info(s"test ${ testString(result) } SUCCESS")
  }

  def onTestFailure(result: ITestResult) {
    val cause = result.getThrowable
    if (cause != null) {
      val sw = new StringWriter()
      val pw = new PrintWriter(sw)
      cause.printStackTrace(pw)
      info(s"Exception:\n$sw")
    }
    info(s"test ${ testString(result) } FAILURE\n")
  }

  def onTestSkipped(result: ITestResult) {
    info(s"test ${ testString(result) } SKIPPED")
  }

  def onTestFailedButWithinSuccessPercentage(result: ITestResult) {

  }

  def onStart(context: ITestContext) {

  }

  def onFinish(context: ITestContext) {

  }
} 
Example 128
Source File: DockerRunAction.scala    From berilia   with Apache License 2.0 5 votes vote down vote up
package com.criteo.dev.cluster.docker

import java.io.{File, PrintWriter}

import com.criteo.dev.cluster.{DevClusterProcess, GeneralConstants, GeneralUtilities}
import org.slf4j.LoggerFactory

import scala.collection.mutable.ListBuffer
import scala.sys.process.{Process, ProcessLogger}


object DockerRunAction {

  private val logger = LoggerFactory.getLogger(DockerRunAction.getClass)

  private val processLogger = ProcessLogger(
    (e: String) => logger.info("err " + e))

  private val ports = new ListBuffer[String]

  def apply(hosts: Map[String, String],
            image: String,
            mountDir: Option[String] = None,
            command: Option[String] = None,
            ports: Array[PortMeta],
            conf: Map[String, String],
            background: Boolean = false) : Option[String] = {
    val sb = new StringBuilder("docker run -P")
    if (background) {
      sb.append(" -d")
    } else {
      sb.append(" -it")
    }
    hosts.foreach {
      case (ip, name) => sb.append(s" --add-host=$name:$ip")
    }

    ports.foreach(p => {
      if (p.exposedPort.isDefined) {
        sb.append(s" -p ${p.exposedPort.get}:${p.port}")
      } else {
        sb.append(s" -p ${p.port}")
      }
    })

    if (mountDir.isDefined) {
      sb.append(s" -v ${mountDir.get}")
      sb.append(":/mount")
    }

    sb.append(s" $image")

    if (command.isDefined) {
      sb.append(s" ${command.get}")
    }

    val commandString = sb.toString
    println(commandString)

    if (background) {
      val output = DevClusterProcess.process(sb.toString).!!.stripLineEnd
      Some(output)
    } else {
      //write command to execute later (in dev-cluster script)
      DockerUtilities.writeDockerCommand(commandString)
      None
    }
  }
} 
Example 129
Source File: ThreadPoolSchedulerProvider.scala    From scala-game-library   with MIT License 5 votes vote down vote up
package sgl.util

import java.util.concurrent.Executors
import java.io.{StringWriter, PrintWriter}
import scala.collection.mutable.Queue

trait ThreadPoolSchedulerProvider extends SchedulerProvider {
  this: LoggingProvider =>

  private implicit val Tag = Logger.Tag("threadpool-scheduler")

  class ThreadPoolScheduler extends Scheduler {
    private val pool = Executors.newFixedThreadPool(4)

    private val tasks: Queue[ChunkedTask] = new Queue
    private val taskQueueLock = new Object

    private var r1: ChunksRunner = null
    private var r2: ChunksRunner = null
    private var r3: ChunksRunner = null
    private var r4: ChunksRunner = null

    override def schedule(task: ChunkedTask): Unit = {
      taskQueueLock.synchronized {
        tasks.enqueue(task)
      }
    }

    
    def shutdown(): Unit = {
      pool.shutdown()

      // Need to check for null because we could have skipped resume.
      if(r1 != null)
        r1.shouldStop = true
      if(r2 != null)
        r2.shouldStop = true
      if(r3 != null)
        r3.shouldStop = true
      if(r4 != null)
        r4.shouldStop = true
    }

    // Simple Runnable class that picks up the first available ChunkedTask and
    // run one chunk of it.
    // Note that if there is only one ChunkedTask in the queue, there will only
    // be one busy Thread at a time as ChunkedTask are assumed to be sequentials.
    // In order to optimize the use of the thread pool, one should try to split
    // parallel work into several independent ChunkedTask.
    class ChunksRunner extends Runnable {
      var shouldStop = false
      override def run(): Unit = {
        while(!shouldStop) {
          val task = taskQueueLock.synchronized {
            if(tasks.isEmpty) {
              None
            } else {
              Some(tasks.dequeue())
            }
          }
          task match {
            case None => Thread.sleep(50)
            case Some(task) => {
              logger.debug("Executing some ChunkedTask from the task queue.")
              try {
                task.doRun(5L)
                if(task.status != ChunkedTask.Completed)
                  taskQueueLock.synchronized { tasks.enqueue(task) }
              } catch {
                case (e: Throwable) => {
                  logger.error(s"Unexpected error while executing task ${task.name}: ${e.getMessage}")
                  val sw = new StringWriter()
                  val pw = new PrintWriter(sw, true)
                  e.printStackTrace(pw)
                  logger.error(sw.toString)
                }
              }
            }
          }
        }
      }
    }
  }
  override val Scheduler = new ThreadPoolScheduler

} 
Example 130
Source File: UIITest.scala    From cuttle   with Apache License 2.0 5 votes vote down vote up
package com.criteo.cuttle.cron

import java.io.PrintWriter
import java.time.Instant

import scala.concurrent.Future

import org.scalatest.{FlatSpec, Matchers}

import com.criteo.cuttle.Auth.User
import com.criteo.cuttle.{Job, _}
import com.criteo.cuttle.ThreadPools.Implicits.sideEffectThreadPool
import com.criteo.cuttle.cron.Utils.logger
import com.criteo.cuttle.cron.CronPipeline._


// TODO: Implement a snapshot testing of the UI ala https://jestjs.io/docs/en/snapshot-testing
class UIITest extends FlatSpec with Matchers {
  val tickerJob = Job(
    id = "ticker_job",
    scheduling = CronScheduling(1),
    name = "Ticker Job",
    description = "Get ticker for Bitcoin price from CoinMarketCap"
  ) { implicit e =>
    Future.successful(Completed)
  }
  val tickerJobDag = tickerJob.toCronDag("0-59/10 * * ? * *", "ticker_job_dag")
  val avgJob = Job(id = "avg_job",
                   name = "Average Job",
                   scheduling = CronScheduling(10),
                   description = "Average Bitcoin price for last 3 value") { implicit e =>
    Future.successful(Completed)
  }
  val avgJobDag = avgJob.toCronDag("0-59/10 * * ? * *", "avg_job_dag")
  val workload = CronWorkload(Set(tickerJobDag, avgJobDag))
  implicit val scheduler = CronScheduler(logger)
  val project = CronProject(
    name = "Hello Cron Scheduling Example",
    version = "0.0.1",
    description = "My first Cron with Cuttle project"
  )(workload)
  implicit val xa = com.criteo.cuttle.Database.newHikariTransactor(DatabaseConfig.fromEnv).allocated.unsafeRunSync()._1
  val executor = new Executor[CronScheduling](Seq.empty, xa, logger, project.name, project.version)

  val ui = UI(project, executor)

  def saveToFile(s: String, fileName: String) = {
    // running project inside of a Cron module default directory is set as module root
    val writer = new PrintWriter(fileName)
    writer.print(s)
    writer.close()
  }

  val avgExecution = Execution[CronScheduling](
    "786d1b69-a603-4eb8-9178-fed2a195a1ed",
    avgJob,
    CronContext(Instant.now(), 0, "avg_job_dag"),
    new ExecutionStreams {
      override private[cuttle] def writeln(str: CharSequence): Unit = ???
    },
    Seq.empty,
    "",
    "",
    previousFailures = List.empty
  )

  "home page" should "render active jobs" in {
    val activeJobs = Map(
      tickerJobDag -> Left(Instant.now()),
      avgJobDag -> Right(Set(avgExecution))
    )
    val pausedJobs = Map(
      tickerJobDag -> PausedJob(tickerJobDag.id, User("Bobby"), Instant.now())
    )
    val activeAndPausedJobs = (activeJobs, pausedJobs)
    saveToFile(ui.home(activeAndPausedJobs).content, "index.html")
  }

  "executions page" should "render execution list" in {
    val executionLogs = Map(
      Instant.now() -> Seq(avgExecution.toExecutionLog(ExecutionStatus.ExecutionSuccessful))
    )
    saveToFile(ui.runs(avgJobDag, executionLogs).content, "executions.html")
  }
} 
Example 131
Source File: JsonFileStorage.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.utils

import java.io.{File, PrintWriter}

import javax.crypto.Cipher
import javax.crypto.spec.SecretKeySpec
import play.api.libs.json.{Json, Reads, Writes}
import java.util.Base64

import scala.io.Source
import scala.util.control.NonFatal

object JsonFileStorage {
  private[this] val KeySalt           = "0495c728-1614-41f6-8ac3-966c22b4a62d"
  private[this] val AES               = "AES"
  private[this] val Algorithm         = AES + "/ECB/PKCS5Padding"
  private[this] val HashingAlgorithm  = "PBKDF2WithHmacSHA512"
  private[this] val HashingIterations = 999999
  private[this] val KeySizeBits       = 128

  def prepareKey(key: String): SecretKeySpec = {
    import java.security.NoSuchAlgorithmException
    import java.security.spec.InvalidKeySpecException

    import javax.crypto.SecretKeyFactory
    import javax.crypto.spec.PBEKeySpec

    def hashPassword(password: Array[Char], salt: Array[Byte], iterations: Int, keyLength: Int): Array[Byte] =
      try {
        val keyFactory = SecretKeyFactory.getInstance(HashingAlgorithm)
        val keySpec    = new PBEKeySpec(password, salt, iterations, keyLength)
        val key        = keyFactory.generateSecret(keySpec)
        key.getEncoded
      } catch {
        case e @ (_: NoSuchAlgorithmException | _: InvalidKeySpecException) =>
          throw new RuntimeException("Password hashing error", e)
      }

    new SecretKeySpec(hashPassword(key.toCharArray, KeySalt.utf8Bytes, HashingIterations, KeySizeBits), AES)
  }

  def save[T](value: T, path: String, key: Option[SecretKeySpec])(implicit w: Writes[T]): Unit = {
    val folder = new File(path).getParentFile
    if (!folder.exists()) folder.mkdirs()

    val file = new PrintWriter(path)
    try {
      val json = Json.toJson(value).toString()
      val data = key.fold(json)(k => encrypt(k, json))
      file.write(data)
    } finally file.close()
  }

  def save[T](value: T, path: String)(implicit w: Writes[T]): Unit =
    save(value, path, None)

  def load[T](path: String, key: Option[SecretKeySpec] = None)(implicit r: Reads[T]): T = {
    val file = Source.fromFile(path)
    try {
      val dataStr = file.mkString
      Json.parse(key.fold(dataStr)(k => decrypt(k, dataStr))).as[T]
    } finally file.close()
  }

  def load[T](path: String)(implicit r: Reads[T]): T =
    load(path, Option.empty[SecretKeySpec])(r)

  private[this] def encrypt(key: SecretKeySpec, value: String): String = {
    try {
      val cipher: Cipher = Cipher.getInstance(Algorithm)
      cipher.init(Cipher.ENCRYPT_MODE, key)
      new String(Base64.getEncoder.encode(cipher.doFinal(value.utf8Bytes)))
    } catch {
      case NonFatal(e) =>
        throw new RuntimeException("File storage encrypt error", e)
    }
  }

  private[this] def decrypt(key: SecretKeySpec, encryptedValue: String): String = {
    try {
      val cipher: Cipher = Cipher.getInstance(Algorithm)
      cipher.init(Cipher.DECRYPT_MODE, key)
      new String(cipher.doFinal(Base64.getDecoder.decode(encryptedValue)))
    } catch {
      case NonFatal(e) =>
        throw new RuntimeException("File storage decrypt error", e)
    }
  }
} 
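A round-trip sketch for the storage object above, assuming a Play-JSON format for the stored type; the Settings case class, passphrase, and file path are illustrative.

import com.wavesplatform.utils.JsonFileStorage
import play.api.libs.json.{Format, Json}

case class Settings(host: String, port: Int)
object Settings { implicit val format: Format[Settings] = Json.format[Settings] }

object StorageDemo extends App {
  val key  = JsonFileStorage.prepareKey("correct horse battery staple")
  val path = "target/settings.json"
  JsonFileStorage.save(Settings("localhost", 8080), path, Some(key)) // encrypted on disk
  println(JsonFileStorage.load[Settings](path, Some(key)))           // Settings(localhost,8080)
}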
Example 132
Source File: SshHiveAction.scala    From berilia   with Apache License 2.0 5 votes vote down vote up
package com.criteo.dev.cluster.command

import java.io.{File, PrintWriter}

import com.criteo.dev.cluster._
import org.slf4j.LoggerFactory

import scala.collection.mutable.ListBuffer
import scala.sys.process._


@Public
class SshHiveAction(node: Node, ignoreError: Boolean = false) extends HiveAction {

  private final val localFilepath = s"${GeneralUtilities.getHomeDir}/${GeneralUtilities.getTempPrefix}-hivequery"
  private final val remoteFilepath = s"/tmp/${GeneralUtilities.getTempPrefix}-hivequery"  //concurrent hive actions on same node not supported for now

  private val commands = new ListBuffer[String]
  private val logger = LoggerFactory.getLogger(classOf[SshHiveAction])

  def add(action: String): Unit = {
    commands.+=(action)
  }

  def run(): String = {
    val localQueryFile = new File(localFilepath)
    val writer = new PrintWriter(localQueryFile)
    commands.foreach(s => {
      writer.write(s"$s;\n")
      logger.info(s)
    })
    writer.close

    localQueryFile.setExecutable(true)
    localQueryFile.setReadable(true)
    localQueryFile.deleteOnExit()

    ScpAction(None, localFilepath, Some(node), remoteFilepath)
    val ignoreErrorFlag = if (ignoreError) "-hiveconf hive.cli.errors.ignore=true" else ""

    val res = SshAction(node, s"hive $ignoreErrorFlag -f $remoteFilepath", returnResult = true, ignoreError)
    SshAction(node, s"rm $remoteFilepath")
    localQueryFile.delete()
    res
  }

  override def toString = {
    commands.mkString("\n")
  }
}

object SshHiveAction {
  def apply(node: Node, statements: List[String], ignoreError: Boolean = false) = {
    val action = new SshHiveAction(node, ignoreError)
    statements.foreach(action.add)
    action.run
  }
} 
Example 133
Source File: SshMultiAction.scala    From berilia   with Apache License 2.0 5 votes vote down vote up
package com.criteo.dev.cluster.command

import java.io.{File, PrintWriter}

import com.criteo.dev.cluster._
import org.slf4j.LoggerFactory

import scala.collection.mutable.ListBuffer
import scala.sys.process.ProcessLogger


@Public
case class SshMultiAction(node: Node) extends MultiAction {

  private val logger = LoggerFactory.getLogger(this.getClass)
  private val commands = new ListBuffer[String]

  //to allow concurrency
  val localFilepath = s"${GeneralUtilities.getHomeDir}/${GeneralUtilities.getTempPrefix}.sh"
  val remoteFilePath = s"/tmp/${GeneralUtilities.getTempPrefix}.sh"

  def add(command : String): Unit = {
    commands.+=(command)
  }

  def run(returnResult: Boolean = false, ignoreError: Boolean = false) : String = {
    val localTmpShellFile = new File(localFilepath)
    SshAction(node, " rm " + remoteFilePath, returnResult = false, true)

    //Write a temp shell script
    val writer = new PrintWriter(localTmpShellFile)
    commands.foreach(s => writer.write(s"$s\n"))
    writer.close

    localTmpShellFile.setExecutable(true)
    localTmpShellFile.setReadable(true)
    localTmpShellFile.deleteOnExit()

    commands.foreach(s => logger.info(s))

    ScpAction(None, localFilepath, Some(node), remoteFilePath)
    val res = SshAction(node, s"source $remoteFilePath", returnResult, ignoreError)
    SshAction(node, s"rm $remoteFilePath", returnResult = false, true)
    localTmpShellFile.delete()
    res
  }
}

object SshMultiAction {
  def apply(node: Node,
            commands: List[String],
            returnResult: Boolean = false,
            ignoreError: Boolean = false) : String = {
    val action = new SshMultiAction(node)
    commands.foreach(action.add)
    action.run(returnResult = returnResult, ignoreError = ignoreError)
  }
} 
Example 134
Source File: ShellHiveAction.scala    From berilia   with Apache License 2.0 5 votes vote down vote up
package com.criteo.dev.cluster.command

import java.io.{File, PrintWriter}

import com.criteo.dev.cluster.{GeneralUtilities, Public}
import org.slf4j.LoggerFactory

import scala.collection.mutable.ListBuffer


@Public
class ShellHiveAction(ignoreError: Boolean = false) extends HiveAction {
  private val commands = new ListBuffer[String]
  private val logger = LoggerFactory.getLogger(this.getClass)

  private final val filepath = s"${GeneralUtilities.getHomeDir}/${GeneralUtilities.getTempPrefix}-hivequery"

  def add(action: String): Unit = {
    commands += action
  }

  def run(): String = {
    val localQueryFile = new File(filepath)
    val writer = new PrintWriter(localQueryFile)
    commands.foreach(s => {
      writer.write(s"$s;\n")
      logger.info(s)
    })
    writer.close

    localQueryFile.setExecutable(true)
    localQueryFile.setReadable(true)
    localQueryFile.deleteOnExit()

    val ignoreErrorFlag = if (ignoreError) "-hiveconf hive.cli.errors.ignore=true" else ""
    val res = ShellAction(s"hive $ignoreErrorFlag -f $filepath", returnResult = true, ignoreError)
    localQueryFile.delete()
    res
  }
} 
Example 135
Source File: ShellMultiAction.scala    From berilia   with Apache License 2.0 5 votes vote down vote up
package com.criteo.dev.cluster.command

import java.io.{File, PrintWriter}

import com.criteo.dev.cluster.{GeneralUtilities, Public}
import org.slf4j.LoggerFactory

import scala.collection.mutable.ListBuffer
import scala.sys.process.ProcessLogger


@Public
case class ShellMultiAction() extends MultiAction {
  private val logger = LoggerFactory.getLogger(this.getClass)
  private val commands = new ListBuffer[String]

  //to allow concurrency
  val filepath = s"${GeneralUtilities.getHomeDir}/${GeneralUtilities.getTempPrefix}.sh"

  def add(command: String): Unit = {
    commands.+=(command)
  }

  def run(returnResult: Boolean = false, ignoreError: Boolean = false): String = {
    val localTmpShellFile = new File(filepath)
    ShellAction(s"rm $filepath", returnResult = false, true)

    //Write a temp shell script
    val writer = new PrintWriter(localTmpShellFile)
    commands.foreach(s => writer.write(s"$s\n"))
    writer.close

    localTmpShellFile.setExecutable(true)
    localTmpShellFile.setReadable(true)
    localTmpShellFile.deleteOnExit()

    commands.foreach(s => logger.info(s))
    val res = ShellAction(filepath, returnResult, ignoreError)
    localTmpShellFile.delete()
    res
  }
}

object ShellMultiAction {
  def apply(
             commands: List[String],
             returnResult: Boolean = false,
             ignoreError: Boolean = false
           ): String = {
    val action = new ShellMultiAction()
    commands.foreach(action.add)
    action.run(returnResult, ignoreError)
  }
} 
Example 136
Source File: GetSourceSummaryCliAction.scala    From berilia   with Apache License 2.0 5 votes vote down vote up
package com.criteo.dev.cluster.source

import java.io.{File, PrintWriter}

import com.criteo.dev.cluster.config.GlobalConfig
import com.criteo.dev.cluster.{CliAction, GeneralUtilities, NodeFactory}
import org.slf4j.LoggerFactory

object GetSourceSummaryCliAction extends CliAction[List[Either[InvalidTable, FullTableInfo]]] {
  private val logger = LoggerFactory.getLogger(this.getClass)

  override def command: String = "get-source-summary"

  override def usageArgs: List[Any] = List.empty

  override def help: String = "Get summary of source tables"

  override def applyInternal(args: List[String], config: GlobalConfig): List[Either[InvalidTable, FullTableInfo]] = {
    logger.info(s"Getting the summary of source tables, parallelism ${config.source.parallelism}")
    val source = NodeFactory.getSourceFromConf(config.source)
    val getSourceSummary = GetSourceMetadataAction(config, source)
    val summary = getSourceSummary(config.source.tables)
    printSummary(summary)
    exportToCSV(summary, s"${GeneralUtilities.getHomeDir}/source_summary_${GeneralUtilities.getSimpleDate}.csv")
    summary
  }

  def printSummary(summary: List[Either[InvalidTable, FullTableInfo]]): Unit = {
    println("Source tables summary")
    val (invalid, valid) = summary.partition(_.isLeft)
    invalid.map(_.left.get).foreach { case InvalidTable(name, input, message) =>
      println(s"$name is invalid, input: $input, reason: $message")
    }
    valid.map(_.right.get) foreach { case FullTableInfo(_, TableHDFSInfo(db, table, size, files, partitions)) =>
      println(s"$db.$table is available, size: $size Bytes, files: ${files.size}, partitions: $partitions")
    }
    val totalSize = valid.map(_.right.get.hdfsInfo.size).sum
    println(s"Invalid tables: ${invalid.size}, valid tables: ${valid.size}")
    println(s"Total size: $totalSize Bytes")
  }

  def exportToCSV(summary: List[Either[InvalidTable, FullTableInfo]], filepath: String): Unit = {
    logger.info(s"writing source summary to $filepath")
    val file = new File(filepath)
    val printWriter = new PrintWriter(file)
    val headers = List("name", "bytes", "files", "partitions", "error")
    printWriter.println(headers.mkString(","))
    summary foreach {
      case Right(FullTableInfo(table, TableHDFSInfo(_, _, size, files, partitions))) =>
        printWriter.println(List(table.fullName, size, files.size, partitions, "").mkString(","))
      case Left(InvalidTable(name, _, message)) =>
        printWriter.println(List(name, "", "", "", message).mkString(","))
    }
    printWriter.flush()
    printWriter.close()
    logger.info(s"source summary has been written to $filepath")
  }
} 
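The CSV export above is just println of comma-joined fields. A stripped-down sketch of the same approach follows; the row data is made up. Note that a naive mkString(",") breaks if a field itself contains a comma, which is fine for the controlled values here, but a real CSV library is safer for arbitrary strings.

import java.io.{File, PrintWriter}

object CsvDemo extends App {
  val rows = List(("db.users", 1024L, 3), ("db.events", 4096L, 12))
  val pw = new PrintWriter(new File("summary.csv"))
  try {
    pw.println(List("name", "bytes", "files").mkString(","))
    rows.foreach { case (name, bytes, files) =>
      pw.println(List(name, bytes, files).mkString(","))
    }
  } finally pw.close()
}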
Example 137
Source File: S3ConfigManager.scala    From teamcity-s3-plugin   with Apache License 2.0 5 votes vote down vote up
package com.gu.teamcity

import java.io.{File, PrintWriter}

import com.amazonaws.auth.{BasicAWSCredentials, AWSCredentialsProvider, AWSCredentials}
import jetbrains.buildServer.serverSide.ServerPaths
import org.json4s._
import org.json4s.native.JsonMethods._
import org.json4s.native.Serialization
import org.json4s.native.Serialization._

case class S3Config(
  artifactBucket: Option[String], buildManifestBucket: Option[String], tagManifestBucket: Option[String],
  awsAccessKey: Option[String], awsSecretKey: Option[String]
)

class S3ConfigManager(paths: ServerPaths) extends AWSCredentialsProvider {
  implicit val formats = Serialization.formats(NoTypeHints)

  val configFile = new File(s"${paths.getConfigDir}/s3.json")

  private[teamcity] var config: Option[S3Config] = {
    if (configFile.exists()) {
      parse(configFile).extractOpt[S3Config]
    } else None
  }

  def artifactBucket: Option[String] = config.flatMap(_.artifactBucket)
  def buildManifestBucket: Option[String] = config.flatMap(_.buildManifestBucket)
  def tagManifestBucket: Option[String] = config.flatMap(_.tagManifestBucket)

  private[teamcity] def update(config: S3Config): Unit = {
    this.config = Some(if (config.awsSecretKey.isEmpty && config.awsAccessKey == this.config.flatMap(_.awsAccessKey)) {
      config.copy(awsSecretKey = this.config.flatMap(_.awsSecretKey))
    } else config)
  }

  def updateAndPersist(newConfig: S3Config): Unit = {
    synchronized {
      update(newConfig)
      val out = new PrintWriter(configFile, "UTF-8")
      try { writePretty(config, out) }
      finally { out.close }
    }
  }

  def details: Map[String, Option[String]] = Map(
    "artifactBucket" -> artifactBucket,
    "buildManifestBucket" -> buildManifestBucket,
    "tagManifestBucket" -> tagManifestBucket,
    "accessKey" -> config.flatMap(_.awsAccessKey)
  )

  override def getCredentials: AWSCredentials = (for {
    c <- config
    accessKey <- c.awsAccessKey
    secretKey <- c.awsSecretKey
  } yield new BasicAWSCredentials(accessKey, secretKey)).getOrElse(null) // Yes, this is sad

  override def refresh(): Unit = ()
}

object S3ConfigManager {
  val bucketElement = "bucket"
  val s3Element = "S3"
} 
Example 138
Source File: StatsController.scala    From recogito2   with Apache License 2.0 5 votes vote down vote up
package controllers.document.stats

import com.mohiva.play.silhouette.api.Silhouette
import controllers.{BaseOptAuthController, Security, HasVisitLogging, HasPrettyPrintJSON}
import java.io.{ByteArrayOutputStream, PrintWriter}
import javax.inject.{Inject, Singleton}
import kantan.csv._
import kantan.csv.ops._
import kantan.csv.CsvConfiguration.{Header, QuotePolicy}
import kantan.csv.engine.commons._
import services.annotation.AnnotationService
import services.document.DocumentService
import services.user.UserService
import services.user.Roles._
import services.visit.VisitService
import org.webjars.play.WebJarsUtil
import play.api.Configuration
import play.api.mvc.{AnyContent, Request, Result, ControllerComponents}
import play.api.libs.json._
import play.api.libs.functional.syntax._
import play.api.i18n.I18nSupport
import plugins.PluginRegistry
import scala.concurrent.{ExecutionContext, Future}

@Singleton
class StatsController @Inject() (
  val components: ControllerComponents,
  val config: Configuration,
  val documents: DocumentService,
  val annotations: AnnotationService,
  val users: UserService,
  val silhouette: Silhouette[Security.Env],
  implicit val visitService: VisitService,
  implicit val webjars: WebJarsUtil,
  implicit val ctx: ExecutionContext
) extends BaseOptAuthController(components, config, documents, users) 
    with HasVisitLogging 
    with HasPrettyPrintJSON 
    with I18nSupport {
  
  private val CSV_CONFIG = CsvConfiguration(',', '"', QuotePolicy.WhenNeeded, Header.None)
    
  implicit val tuple2Writes: Writes[Tuple2[String, Long]] = (
    (JsPath \ "value").write[String] and
    (JsPath \ "count").write[Long]
  )(t => (t._1, t._2))
  
  private def toCSV(stats: Seq[(String, Long)]): String = {
    val out = new ByteArrayOutputStream()
    val writer = out.asCsvWriter[(String, Long)](CSV_CONFIG)
    stats.foreach(writer.write(_))
    writer.close()
    new String(out.toByteArray, "UTF-8")
  }
  
  def showDocumentStats(documentId: String, tab: Option[String]) = silhouette.UserAwareAction.async { implicit request =>
    documentReadResponse(documentId, request.identity,  { case (doc, accesslevel) =>
      logDocumentView(doc.document, None, accesslevel)      
      tab.map(_.toLowerCase) match {
        case Some(t) if t == "activity" =>  
          val plugins = PluginRegistry.listConfigs("document.stats.activity")
          Future.successful(Ok(views.html.document.stats.activity(doc, request.identity, accesslevel, plugins)))
          
        case Some(t) if t == "entities" =>
          val plugins = PluginRegistry.listConfigs("document.stats.entities")
          Future.successful(Ok(views.html.document.stats.entities(doc, request.identity, accesslevel, plugins)))
          
        case Some(t) if t == "tags" =>
          val plugins = PluginRegistry.listConfigs("document.stats.tags")
          Future.successful(Ok(views.html.document.stats.tags(doc, request.identity, accesslevel, plugins)))
          
        case _ =>
          val plugins = PluginRegistry.listConfigs("document.stats.activity")
          Future.successful(Ok(views.html.document.stats.activity(doc, request.identity, accesslevel, plugins)))
      }
    })
  }
  
  private def getTags(documentId: String)(action: (Seq[(String, Long)], Request[AnyContent]) => Result) =
    silhouette.UserAwareAction.async { implicit request =>
      documentReadResponse(documentId, request.identity,  { case (doc, accesslevel) =>
          annotations.getTagStats(documentId).map { buckets =>
            action(buckets, request.request)
          }
        }
      )
    }
  
  def getTagsAsJSON(documentId: String) = getTags(documentId) { case (buckets, request) =>
    jsonOk(Json.toJson(buckets))(request)
  }
  
  def getTagsAsCSV(documentId: String) = getTags(documentId) { case(buckets, request) =>
    Ok(toCSV(buckets)).withHeaders(CONTENT_DISPOSITION -> { s"attachment; filename=${documentId}_tags.csv" })
  }

} 
Example 139
Source File: BackupWriter.scala    From recogito2   with Apache License 2.0 5 votes vote down vote up
package controllers.document

import controllers.HasConfig
import java.io.{File, FileInputStream, FileOutputStream, BufferedInputStream, ByteArrayInputStream, InputStream, PrintWriter}
import java.nio.file.Paths
import java.math.BigInteger
import java.security.{MessageDigest, DigestInputStream}
import java.util.UUID
import java.util.zip.{ZipEntry, ZipOutputStream}
import services.HasDate
import services.annotation.{Annotation, AnnotationService}
import services.document.{ExtendedDocumentMetadata, DocumentToJSON}
import services.generated.tables.records.{DocumentRecord, DocumentFilepartRecord}
import play.api.libs.json.Json
import play.api.libs.Files.TemporaryFileCreator
import scala.concurrent.{ExecutionContext, Future}
import storage.TempDir
import storage.uploads.Uploads

trait BackupWriter extends HasBackupValidation { self: HasConfig =>
  
  // Frontend annotation format
  import services.annotation.FrontendAnnotation._
  
  private val BUFFER_SIZE = 2048
  
  private def writeToZip(inputStream: InputStream, filename: String, zip: ZipOutputStream) = {
    zip.putNextEntry(new ZipEntry(filename))
     
    val md = MessageDigest.getInstance(ALGORITHM)    
    val in = new DigestInputStream(new BufferedInputStream(inputStream), md)

    val data = new Array[Byte](BUFFER_SIZE)
    var count: Int = 0

    while ({ count = in.read(data, 0, BUFFER_SIZE); count } > -1) {
      zip.write(data, 0, count)
    }

    in.close()
    zip.closeEntry()
    
    new BigInteger(1, md.digest()).toString(16)
  }
  
  def createBackup(doc: ExtendedDocumentMetadata)(implicit ctx: ExecutionContext, uploads: Uploads, 
      annotations: AnnotationService, tmpFile: TemporaryFileCreator): Future[File] = {
    
    def getFileAsStream(owner: String, documentId: String, filename: String) = {
      val dir = uploads.getDocumentDir(owner, documentId).get // Fail hard if the dir doesn't exist
      new FileInputStream(new File(dir, filename))
    }
    
    def getManifestAsStream() = {
      val manifest = "Recogito-Version: 2.0.1-alpha"
      new ByteArrayInputStream(manifest.getBytes)
    }
    
    def getMetadataAsStream(doc: ExtendedDocumentMetadata) = {
      
      // DocumentRecord JSON serialization
      import services.document.DocumentToJSON._
      
      val json = Json.prettyPrint(Json.toJson((doc.document, doc.fileparts)))
      new ByteArrayInputStream(json.getBytes)
    }
    
    def getAnnotationsAsStream(docId: String, annotations: Seq[Annotation], parts: Seq[DocumentFilepartRecord]): InputStream = {
      val path = Paths.get(TempDir.get()(self.config), s"${docId}_annotations.json")
      val tmp = tmpFile.create(path)
      val writer = new PrintWriter(path.toFile)
      annotations.foreach(a => writer.println(Json.stringify(Json.toJson(a))))
      writer.close()
      new FileInputStream(path.toFile)
    }
    
    Future {
      tmpFile.create(Paths.get(TempDir.get()(self.config), s"${doc.id}.zip"))
    } flatMap { zipFile =>
      val zipStream = new ZipOutputStream(new FileOutputStream(zipFile.path.toFile))

      writeToZip(getManifestAsStream(), "manifest", zipStream)
      val metadataHash = writeToZip(getMetadataAsStream(doc), "metadata.json", zipStream)

      val fileHashes = doc.fileparts.map { part =>
        writeToZip(getFileAsStream(doc.ownerName, doc.id, part.getFile), "parts" + File.separator + part.getFile, zipStream)
      }

      annotations.findByDocId(doc.id).map { annotations =>
        val annotationsHash = writeToZip(getAnnotationsAsStream(doc.id, annotations.map(_._1), doc.fileparts), "annotations.jsonl", zipStream)
        
        val signature = computeSignature(metadataHash, fileHashes, annotationsHash)
        writeToZip(new ByteArrayInputStream(signature.getBytes), "signature", zipStream)
        
        zipStream.close()
        zipFile.path.toFile
      }
    }
  }
  
} 
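getAnnotationsAsStream above writes JSON Lines: one Json.stringify'd object per println, which streams well and keeps each record on its own line. A minimal standalone sketch with play-json; the Note type is illustrative.

import java.io.PrintWriter
import play.api.libs.json.{Json, OFormat}

case class Note(id: Int, text: String)
object Note { implicit val format: OFormat[Note] = Json.format[Note] }

object JsonlDemo extends App {
  val notes = Seq(Note(1, "first"), Note(2, "second"))
  val writer = new PrintWriter("notes.jsonl")
  try notes.foreach(n => writer.println(Json.stringify(Json.toJson(n))))
  finally writer.close()
}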
Example 140
Source File: TEIParserService.scala    From recogito2   with Apache License 2.0 5 votes vote down vote up
package transform.tei

import akka.actor.ActorSystem
import akka.routing.RoundRobinPool
import java.io.{File, PrintWriter}
import javax.inject.{Inject, Singleton}
import org.joox.JOOX._
import org.w3c.dom.ranges.DocumentRange
import scala.collection.JavaConversions._
import services.annotation.{Annotation, AnnotationService}
import services.generated.tables.records.{DocumentRecord, DocumentFilepartRecord}
import services.task.{TaskService, TaskType}
import storage.uploads.Uploads
import transform.{WorkerActor, WorkerService}

@Singleton
class TEIParserService @Inject() (
  uploads: Uploads,
  annotationService: AnnotationService,
  taskService: TaskService, 
  system: ActorSystem
) extends WorkerService(
  system, uploads,
  TEIParserActor.props(taskService, annotationService), 10
)

object TEIParserService {

  val TASK_TYPE = TaskType("TEI_PARSING")
  
  private[tei] def extractEntities(
    part: DocumentFilepartRecord,
    file: File,
    replaceOriginalFile: Boolean = true
  ): Seq[Annotation] = {    
    val teiXML = $(file).document()
    val ranges = teiXML.asInstanceOf[DocumentRange]

    val places = $(teiXML).find("placeName").get
    val people = $(teiXML).find("persName").get
    val spans  = $(teiXML).find("span").get

    val annotations = (places ++ people ++ spans).map(TEITag.convert(part, _, ranges))

    if (replaceOriginalFile)
      new PrintWriter(file.getAbsolutePath) {
        write($(teiXML).toString)
        close()
      }

    annotations
  }
} 
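The write-and-close above uses the anonymous-subclass idiom: the initializer of the subclass runs immediately, so the file is written and closed in a single expression. A standalone sketch follows (file name illustrative); note that if write throws, close never runs, which is why the try/finally variants elsewhere in these examples are more robust.

import java.io.PrintWriter

object OneShotWrite extends App {
  // The constructor body of the anonymous subclass executes right away.
  new PrintWriter("hello.txt") {
    write("hello, world")
    close()
  }
}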
Example 141
Source File: LossFuncGrapher.scala    From neuroflow   with Apache License 2.0 5 votes vote down vote up
package neuroflow.core

import java.io.{File, FileOutputStream, PrintWriter}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.Try


  def maybeGraph(loss: Double): Unit =
    self.settings.lossFuncOutput.foreach {
      lfo =>
        Future {
          Try {
            val handleOpt = lfo.file
              .map(f => new PrintWriter(new FileOutputStream(new File(f), true)))
            handleOpt.foreach(_.println(loss))
            handleOpt.foreach(_.close())
            lfo.action.foreach(_ (loss))
          }
        }
    }

}

case class LossFuncOutput(file: Option[String] = None, action: Option[Double => Unit] = None) 
Example 142
Source File: JoinOrderTestSuite.scala    From bdg-sequila   with Apache License 2.0 5 votes vote down vote up
package org.biodatageeks.sequila.tests.rangejoins

import java.io.{OutputStreamWriter, PrintWriter}

import com.holdenkarau.spark.testing.{DataFrameSuiteBase, SharedSparkContext}
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{
  IntegerType,
  StringType,
  StructField,
  StructType
}
import org.bdgenomics.utils.instrumentation.{
  Metrics,
  MetricsListener,
  RecordedMetrics
}
import org.biodatageeks.sequila.rangejoins.IntervalTree.IntervalTreeJoinStrategyOptim
import org.scalatest.{BeforeAndAfter, FunSuite}

class JoinOrderTestSuite
    extends FunSuite
    with DataFrameSuiteBase
    with BeforeAndAfter
    with SharedSparkContext {

  val schema = StructType(
    Seq(StructField("chr", StringType),
        StructField("start", IntegerType),
        StructField("end", IntegerType)))
  val metricsListener = new MetricsListener(new RecordedMetrics())
  val writer = new PrintWriter(new OutputStreamWriter(System.out))
  before {
    System.setSecurityManager(null)
    spark.experimental.extraStrategies = new IntervalTreeJoinStrategyOptim(
      spark) :: Nil
    Metrics.initialize(sc)
    val rdd1 = sc
      .textFile(getClass.getResource("/refFlat.txt.bz2").getPath)
      .map(r => r.split('\t'))
      .map(
        r =>
          Row(
            r(2).toString,
            r(4).toInt,
            r(5).toInt
        ))
    val ref = spark.createDataFrame(rdd1, schema)
    ref.createOrReplaceTempView("ref")

    val rdd2 = sc
      .textFile(getClass.getResource("/snp150Flagged.txt.bz2").getPath)
      .map(r => r.split('\t'))
      .map(
        r =>
          Row(
            r(1).toString,
            r(2).toInt,
            r(3).toInt
        ))
    val snp = spark
      .createDataFrame(rdd2, schema)
    snp.createOrReplaceTempView("snp")
  }

  test("Join order - broadcasting snp table") {
    spark.sqlContext.setConf("spark.biodatageeks.rangejoin.useJoinOrder",
                             "true")
    val query =
      s"""
         |SELECT snp.*,ref.* FROM ref JOIN snp
         |ON (ref.chr=snp.chr AND snp.end>=ref.start AND snp.start<=ref.end)
       """.stripMargin

    assert(spark.sql(query).count === 616404L)

  }

  test("Join order - broadcasting ref table") {
    spark.sqlContext.setConf("spark.biodatageeks.rangejoin.useJoinOrder",
                             "true")
    val query =
      s"""
         |SELECT snp.*,ref.* FROM snp JOIN ref
         |ON (ref.chr=snp.chr AND snp.end>=ref.start AND snp.start<=ref.end)
       """.stripMargin
    assert(spark.sql(query).count === 616404L)

  }
  after {
    Metrics.print(writer, Some(metricsListener.metrics.sparkMetrics.stageTimes))
    writer.flush()
    Metrics.stopRecording()
  }
} 
Example 143
Source File: PileupApp.scala    From bdg-sequila   with Apache License 2.0 5 votes vote down vote up
package org.biodatageeks.sequila.apps

import java.io.{OutputStreamWriter, PrintWriter}

import org.apache.spark.sql.{SequilaSession, SparkSession}
import org.bdgenomics.utils.instrumentation.{Metrics, MetricsListener, RecordedMetrics}
import org.biodatageeks.sequila.utils.{InternalParams, SequilaRegister}

object PileupApp extends App{
  override def main(args: Array[String]): Unit = {

    System.setProperty("spark.kryo.registrator", "org.biodatageeks.sequila.pileup.serializers.CustomKryoRegistrator")
    val spark = SparkSession
      .builder()
      .master("local[1]")
      .config("spark.driver.memory","4g")
      .config( "spark.serializer", "org.apache.spark.serializer.KryoSerializer" )
      .enableHiveSupport()
      .getOrCreate()

    val ss = SequilaSession(spark)
    SequilaRegister.register(ss)
    spark.sparkContext.setLogLevel("INFO")

    val bamPath = "/Users/aga/NA12878.chr20.md.bam"
    val referencePath = "/Users/aga/Homo_sapiens_assembly18_chr20.fasta"

    //    val bamPath = "/Users/marek/data/NA12878.chrom20.ILLUMINA.bwa.CEU.low_coverage.20121211.md.bam"
    //    val referencePath = "/Users/marek/data/hs37d5.fa"

    val tableNameBAM = "reads"

    ss.sql(s"""DROP  TABLE IF  EXISTS $tableNameBAM""")
    ss.sql(s"""
              |CREATE TABLE $tableNameBAM
              |USING org.biodatageeks.sequila.datasources.BAM.BAMDataSource
              |OPTIONS(path "$bamPath")
              |
      """.stripMargin)

    val query =
      s"""
         |SELECT count(*)
         |FROM  pileup('$tableNameBAM', 'NA12878', '${referencePath}')
       """.stripMargin
    ss
      .sqlContext
      .setConf(InternalParams.EnableInstrumentation, "true")
    Metrics.initialize(ss.sparkContext)
    val metricsListener = new MetricsListener(new RecordedMetrics())
    ss
      .sparkContext
      .addSparkListener(metricsListener)
    val results = ss.sql(query)
    ss.time{
      results.show()
    }
    val writer = new PrintWriter(new OutputStreamWriter(System.out, "UTF-8"))
    Metrics.print(writer, Some(metricsListener.metrics.sparkMetrics.stageTimes))
    writer.close()
    ss.stop()
  }
} 
Example 144
Source File: PackagePlatformExtensions.scala    From qamr   with MIT License 5 votes vote down vote up
package qamr.util

import java.io.StringWriter
import java.io.PrintWriter

import scala.util.{Try, Success, Failure}

import com.typesafe.scalalogging.Logger

trait PackagePlatformExtensions {
  implicit class RichTry[A](val t: Try[A]) {
    def toOptionLogging(logger: Logger): Option[A] = t match {
      case Success(a) =>
        Some(a)
      case Failure(e) =>
        val sw = new StringWriter()
        val pw = new PrintWriter(sw, true)
        e.printStackTrace(pw)
        logger.error(e.getLocalizedMessage + "\n" + sw.getBuffer.toString)
        None
    }
  }

} 
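
A hypothetical use of RichTry, assuming the enclosing package object mixes in PackagePlatformExtensions so the implicit class is in scope:

import com.typesafe.scalalogging.Logger
import scala.util.Try

val logger = Logger("example")
// A failed Try logs its message plus stack trace, then collapses to None.
val parsed: Option[Int] = Try("not a number".toInt).toOptionLogging(logger)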
Example 145
Source File: PythonBroadcastSuite.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.api.python

import scala.io.Source

import java.io.{PrintWriter, File}

import org.scalatest.Matchers

import org.apache.spark.{SharedSparkContext, SparkConf, SparkFunSuite}
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.util.Utils

// This test suite uses SharedSparkContext because we need a SparkEnv in order to deserialize
// a PythonBroadcast:
class PythonBroadcastSuite extends SparkFunSuite with Matchers with SharedSparkContext {
  test("PythonBroadcast can be serialized with Kryo (SPARK-4882)") {
    val tempDir = Utils.createTempDir()
    val broadcastedString = "Hello, world!"
    def assertBroadcastIsValid(broadcast: PythonBroadcast): Unit = {
      val source = Source.fromFile(broadcast.path)
      val contents = source.mkString
      source.close()
      contents should be (broadcastedString)
    }
    try {
      val broadcastDataFile: File = {
        val file = new File(tempDir, "broadcastData")
        val printWriter = new PrintWriter(file)
        printWriter.write(broadcastedString)
        printWriter.close()
        file
      }
      val broadcast = new PythonBroadcast(broadcastDataFile.getAbsolutePath)
      assertBroadcastIsValid(broadcast)
      val conf = new SparkConf().set("spark.kryo.registrationRequired", "true")
      val deserializedBroadcast =
        Utils.clone[PythonBroadcast](broadcast, new KryoSerializer(conf).newInstance())
      assertBroadcastIsValid(deserializedBroadcast)
    } finally {
      Utils.deleteRecursively(tempDir)
    }
  }
} 
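
The write-then-verify idiom above (PrintWriter out, scala.io.Source in, both closed eagerly) can be captured in two small loan-pattern helpers; a sketch with illustrative names, not part of the Spark test suite:

import java.io.{File, PrintWriter}
import scala.io.Source

def writeTextFile(file: File, contents: String): Unit = {
  val writer = new PrintWriter(file)
  try writer.write(contents) finally writer.close()
}

def readTextFile(file: File): String = {
  val source = Source.fromFile(file)
  try source.mkString finally source.close()
}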
Example 146
Source File: TableGraphBaseExporter.scala    From flamy   with Apache License 2.0 5 votes vote down vote up
package com.flaminem.flamy.graph.export

import java.io.{File, PrintWriter}

import com.flaminem.flamy.exec.utils.io.FlamyOutput
import com.flaminem.flamy.graph.TableGraph
import com.flaminem.flamy.model.core.Model
import com.flaminem.flamy.model.names.ItemName

import scala.io.Source
import scalax.collection.Graph
import scalax.collection.GraphEdge.DiEdge
import scalax.collection.io.dot.{graph2DotExport, _}


abstract class TableGraphBaseExporter[V <: ItemName](tableGraph: TableGraph) {

  val model: Model = tableGraph.model

  val root = new DotRootGraph(
    directed = true,
    id = None,
    strict = true
  )

  val graph: Graph[V, DiEdge]

  protected def edgeTransformer(innerEdge: Graph[V,DiEdge]#EdgeT): Option[(DotGraph,DotEdgeStmt)]
  protected def nodeTransformer(innerNode: Graph[V,DiEdge]#NodeT): Option[(DotGraph,DotNodeStmt)]

  final def toDot: String = {
    graph2DotExport(graph).toDot(
      root,
      edgeTransformer=edgeTransformer,
      cNodeTransformer=Some(nodeTransformer),
      iNodeTransformer=Some(nodeTransformer)
    )
  }

  final def exportToPng(path: String): Unit = {
    val file = new File(path + ".dot")
    val writer = new PrintWriter(file)
    writer.write(this.toDot)
    writer.close()

    scala.sys.process.stringSeqToProcess(Seq("dot", "-T", "png", "-o", path + ".png", path + ".dot")).!
  }

  final def exportToSvg(path: String): Unit = {
    val file = new File(path + ".dot")
    val writer = new PrintWriter(file)
    writer.write(this.toDot)
    writer.close()

    scala.sys.process.stringSeqToProcess(Seq("dot", "-T", "svg", "-o", path + ".svg", path + ".dot")).!

    Source.fromFile(path + ".svg").getLines().dropWhile(!_.startsWith("<svg")).foreach{line => FlamyOutput.out.println(line.toString)}
  }

} 
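
exportToPng and exportToSvg differ only in the format argument passed to dot; a hypothetical shared helper both could delegate to (illustrative refactoring, not part of the flamy source):

import java.io.{File, PrintWriter}

private def exportTo(path: String, format: String): Unit = {
  val writer = new PrintWriter(new File(path + ".dot"))
  try writer.write(this.toDot) finally writer.close()
  scala.sys.process.stringSeqToProcess(
    Seq("dot", "-T", format, "-o", s"$path.$format", path + ".dot")).!
}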
Example 147
Source File: Neo4jPersistence.scala    From csb   with GNU General Public License v3.0 5 votes vote down vote up
package edu.msstate.dasi.csb.persistence

import java.io.{File, PrintWriter}

import edu.msstate.dasi.csb.model.{EdgeData, VertexData}
import edu.msstate.dasi.csb.util.Util
import org.apache.hadoop.fs.FileUtil
import org.apache.spark.graphx.Graph

object Neo4jPersistence extends GraphPersistence {
  private val vertices_suffix = "_nodes"
  private val edges_suffix = "_relationships"

  
  def saveAsText(graph: Graph[VertexData, EdgeData], graphName: String, overwrite :Boolean = false): Unit = {
    val verticesPath = graphName + vertices_suffix
    val verticesTmpPath = "__" + verticesPath
    val edgesPath = graphName + edges_suffix
    val edgesTmpPath = "__" + edgesPath

    if (overwrite) {
      FileUtil.fullyDelete(new File(verticesPath + "-header"))
      FileUtil.fullyDelete(new File(verticesPath))
      FileUtil.fullyDelete(new File(edgesPath + "-header"))
      FileUtil.fullyDelete(new File(edgesPath))
    }

    val nodeHeader = s"name:ID($graphName),:LABEL\n"

    val nodeHeaderWriter = new PrintWriter(new File(verticesPath + "-header"))
    nodeHeaderWriter.write(nodeHeader)
    nodeHeaderWriter.close()

    graph.vertices.map {
      case (id, _) => s"$id,$graphName"
    }.saveAsTextFile(verticesTmpPath)

    Util.merge(verticesTmpPath, verticesPath)
    FileUtil.fullyDelete(new File(verticesTmpPath))

    val relationshipHeader = s":START_ID($graphName),:END_ID($graphName),:TYPE,${EdgeData.neo4jCsvHeader}\n"

    val relHeaderWriter = new PrintWriter(new File(edgesPath + "-header"))
    relHeaderWriter.write(relationshipHeader)
    relHeaderWriter.close()

    graph.edges.map(edge =>
      edge.attr match {
        case edgeData: EdgeData => s"${edge.srcId},${edge.dstId},EDGE,${edgeData.toCsv}"
        case _ => s"${edge.srcId},${edge.dstId},EDGE"
      }
    ).saveAsTextFile(edgesTmpPath)

    Util.merge(edgesTmpPath, edgesPath)
    FileUtil.fullyDelete(new File(edgesTmpPath))
  }
} 
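
The header/data CSV pairs written by saveAsText match the shape Neo4j's offline importer expects. A hypothetical follow-up invocation via scala.sys.process (graph name assumed to be mygraph; flag syntax varies across Neo4j versions):

import scala.sys.process._

Seq("neo4j-admin", "import",
  "--nodes=mygraph_nodes-header,mygraph_nodes",
  "--relationships=mygraph_relationships-header,mygraph_relationships").!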
Example 148
Source File: AbstractOperationExample.scala    From seahorse-workflow-executor   with Apache License 2.0 5 votes vote down vote up
package io.deepsense.deeplang.doperations.examples

import java.io.{File, PrintWriter}

import io.deepsense.commons.utils.Logging
import io.deepsense.deeplang.doperables.dataframe.DataFrame
import io.deepsense.deeplang.doperations.ReadDataFrame
import io.deepsense.deeplang.doperations.inout.CsvParameters.ColumnSeparatorChoice.Comma
import io.deepsense.deeplang.doperations.readwritedataframe.FileScheme
import io.deepsense.deeplang.{DOperable, DOperation, DeeplangIntegTestSupport}

abstract class AbstractOperationExample[T <: DOperation]
    extends DeeplangIntegTestSupport
    with Logging {

  def dOperation: T

  final def className: String = dOperation.getClass.getSimpleName

  def fileNames: Seq[String] = Seq.empty

  def loadCsv(fileName: String): DataFrame = {
    ReadDataFrame(
      FileScheme.File.pathPrefix + this.getClass.getResource(s"/test_files/$fileName.csv").getPath,
      Comma(),
      csvNamesIncluded = true,
      csvConvertToBoolean = false
    ).executeUntyped(Vector.empty[DOperable])(executionContext)
      .head
      .asInstanceOf[DataFrame]
  }

  def inputDataFrames: Seq[DataFrame] = fileNames.map(loadCsv)

  className should {
    "successfully run execute() and generate example" in {
      val op = dOperation
      val outputDfs = op
        .executeUntyped(inputDataFrames.toVector)(executionContext)
        .collect { case df: DataFrame => df }
      val html =
        ExampleHtmlFormatter.exampleHtml(op, inputDataFrames, outputDfs)

      // TODO Make it not rely on relative path it's run from
      val examplePageFile = new File(
        "docs/operations/examples/" + className + ".md")

      examplePageFile.getParentFile.mkdirs()
      examplePageFile.createNewFile()

      val writer = new PrintWriter(examplePageFile)
      // scalastyle:off println
      writer.println(html)
      // scalastyle:on println
      writer.flush()
      writer.close()
      logger.info(
        "Created doc page for " + className)
    }
  }
} 
Example 149
Source File: DriverFiles.scala    From seahorse-workflow-executor   with Apache License 2.0 5 votes vote down vote up
package io.deepsense.deeplang.doperations.readwritedataframe.filestorage

import java.io.{File, IOException, PrintWriter}

import scala.io.Source
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.execution.datasources.csv.{DataframeToDriverCsvFileWriter, RawCsvRDDToDataframe}
import org.apache.spark.sql.{SaveMode, DataFrame => SparkDataFrame}
import io.deepsense.commons.resources.ManagedResource
import io.deepsense.deeplang.ExecutionContext
import io.deepsense.deeplang.doperables.dataframe.DataFrame
import io.deepsense.deeplang.doperations.inout.{InputFileFormatChoice, OutputFileFormatChoice}
import io.deepsense.deeplang.doperations.readwritedataframe.filestorage.csv.CsvOptions
import io.deepsense.deeplang.doperations.readwritedataframe.{FilePath, FileScheme}
import io.deepsense.sparkutils.SQL

object DriverFiles {

  def read(driverPath: String, fileFormat: InputFileFormatChoice)
          (implicit context: ExecutionContext): SparkDataFrame = fileFormat match {
    case csv: InputFileFormatChoice.Csv => readCsv(driverPath, csv)
    case json: InputFileFormatChoice.Json => readJson(driverPath)
    case parquet: InputFileFormatChoice.Parquet => throw ParquetNotSupported
  }

  def write(dataFrame: DataFrame, path: FilePath, fileFormat: OutputFileFormatChoice, saveMode: SaveMode)
           (implicit context: ExecutionContext): Unit = {
    path.verifyScheme(FileScheme.File)
    if (saveMode == SaveMode.ErrorIfExists && new File(path.pathWithoutScheme).exists()){
      throw new IOException(s"Output file ${path.fullPath} already exists")
    }
    fileFormat match {
      case csv: OutputFileFormatChoice.Csv => writeCsv(path, csv, dataFrame)
      case json: OutputFileFormatChoice.Json => writeJson(path, dataFrame)
      case parquet: OutputFileFormatChoice.Parquet => throw ParquetNotSupported
    }
  }

  private def readCsv
      (driverPath: String, csvChoice: InputFileFormatChoice.Csv)
      (implicit context: ExecutionContext): SparkDataFrame = {
    val params = CsvOptions.map(csvChoice.getNamesIncluded, csvChoice.getCsvColumnSeparator())
    val lines = Source.fromFile(driverPath).getLines().toStream
    val fileLinesRdd = context.sparkContext.parallelize(lines)

    RawCsvRDDToDataframe.parse(fileLinesRdd, context.sparkSQLSession, params)
  }

  private def readJson(driverPath: String)(implicit context: ExecutionContext) = {
    val lines = Source.fromFile(driverPath).getLines().toStream
    val fileLinesRdd = context.sparkContext.parallelize(lines)
    context.sparkSQLSession.read.json(fileLinesRdd)
  }

  private def writeCsv
      (path: FilePath, csvChoice: OutputFileFormatChoice.Csv, dataFrame: DataFrame)
      (implicit context: ExecutionContext): Unit = {
    val params = CsvOptions.map(csvChoice.getNamesIncluded, csvChoice.getCsvColumnSeparator())

    DataframeToDriverCsvFileWriter.write(
      dataFrame.sparkDataFrame,
      params,
      dataFrame.schema.get,
      path.pathWithoutScheme
    )
  }

  private def writeJson(path: FilePath, dataFrame: DataFrame)
                       (implicit context: ExecutionContext): Unit = {
    val rawJsonLines: RDD[String] = SQL.dataFrameToJsonRDD(dataFrame.sparkDataFrame)
    writeRddToDriverFile(path.pathWithoutScheme, rawJsonLines)
  }

  private def writeRddToDriverFile(driverPath: String, lines: RDD[String]): Unit = {
    val recordSeparator = System.getProperty("line.separator", "\n")
    ManagedResource(new PrintWriter(driverPath)) { writer =>
      lines.collect().foreach(line => writer.write(line + recordSeparator))
    }
  }

} 
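
ManagedResource here is deepsense-commons' loan-pattern helper. A minimal sketch of what such a helper does, assuming the resource only needs close() (the real io.deepsense.commons.resources.ManagedResource may differ):

object ManagedResourceSketch {
  def apply[T <: AutoCloseable, R](resource: T)(block: T => R): R =
    try block(resource) finally resource.close()
}

// Usage mirrors writeRddToDriverFile above:
// ManagedResourceSketch(new java.io.PrintWriter("/tmp/out.txt")) { w => w.println("line") }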
Example 150
Source File: RedirectCreator.scala    From seahorse-workflow-executor   with Apache License 2.0 5 votes vote down vote up
package io.deepsense.docgen

import java.io.{File, PrintWriter}

import io.deepsense.deeplang.DOperation

trait RedirectCreator {

  // scalastyle:off println

  
  def createRedirects(
      sparkOperations: Seq[OperationWithSparkClassName],
      forceUpdate: Boolean): Int = {

    sparkOperations.map { case OperationWithSparkClassName(operation, sparkClassName) =>
      val redirectFile = new File("docs/uuid/" + operation.id + ".md")
      if (!redirectFile.exists() || forceUpdate) {
        createRedirect(redirectFile, operation, sparkClassName)
        1
      } else {
        0
      }
    }.sum
  }

  private def createRedirect(redirectFile: File, operation: DOperation, sparkClassName: String) = {
    val writer = new PrintWriter(redirectFile)
    writer.println("---")
    writer.println("layout: redirect")
    writer.println("redirect: ../operations/" + DocUtils.underscorize(operation.name) + ".html")
    writer.println("---")
    writer.flush()
    writer.close()
    println("Created redirect for " + operation.name)
  }
  // scalastyle:on println
} 
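
For reference, a redirect page generated by createRedirect is plain Jekyll front matter (the operation name below is illustrative):

---
layout: redirect
redirect: ../operations/read_data_frame.html
---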
Example 151
Source File: DataframeToDriverCsvFileWriter.scala    From seahorse-workflow-executor   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.datasources.csv

import java.io.PrintWriter

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.types._

import io.deepsense.sparkutils.readwritedataframe.{DataframeToRawCsvRDD, ManagedResource}

object DataframeToDriverCsvFileWriter {

  def write(
      dataFrame: DataFrame,
      options: Map[String, String],
      dataSchema: StructType,
      pathWithoutScheme: String): Unit = {
    val rawCsvLines = DataframeToRawCsvRDD(dataFrame, options)(dataFrame.sqlContext.sparkContext)
    writeRddToDriverFile(pathWithoutScheme, rawCsvLines)
  }

  // TODO extract to commons from DriverFiles
  private def writeRddToDriverFile(driverPath: String, lines: RDD[String]): Unit = {
    val recordSeparator = System.getProperty("line.separator", "\n")
    ManagedResource(new PrintWriter(driverPath)) { writer =>
      lines.collect().foreach(line => writer.write(line + recordSeparator))
    }
  }

} 
Example 152
Source File: DataframeToDriverCsvFileWriter.scala    From seahorse-workflow-executor   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.datasources.csv

import java.io.PrintWriter

import org.apache.spark.sql.{DataFrame, Row}
import org.apache.spark.sql.types._

import io.deepsense.sparkutils.readwritedataframe.ManagedResource

object DataframeToDriverCsvFileWriter {

  def write(
       dataFrame: DataFrame,
       options: Map[String, String],
       dataSchema: StructType,
       pathWithoutScheme: String): Unit = {
    val data = dataFrame.rdd.collect()
    val params = new CSVOptions(options)
    ManagedResource(
      new LocalCsvOutputWriter(dataSchema, params, pathWithoutScheme)
    ) { writer =>
      data.foreach(row => {
        writer.write(row.toSeq.map(_.asInstanceOf[String]))
      })
    }
  }

}


class LocalCsvOutputWriter(
      dataSchema: StructType,
      params: CSVOptions,
      driverPath: String) {

  private val driverFileWriter = new PrintWriter(driverPath)

  private val FLUSH_BATCH_SIZE = 1024L
  private var records: Long = 0L
  private val csvWriter = new LineCsvWriter(params, dataSchema.fieldNames.toSeq)

  def write(row: Seq[String]): Unit = {
    csvWriter.writeRow(row, records == 0L && params.headerFlag)
    records += 1
    if (records % FLUSH_BATCH_SIZE == 0) {
      flush()
    }
  }

  private def flush(): Unit = {
    val lines = csvWriter.flush()
    if (lines.nonEmpty) {
      driverFileWriter.write(lines)
    }
  }

  def close(): Unit = {
    flush()
    driverFileWriter.close()
  }
} 
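
A hypothetical use of the writer above: rows are flushed in batches of FLUSH_BATCH_SIZE and close() performs the final flush. CSVOptions is assumed to take a plain options map, as in the write method above:

import org.apache.spark.sql.types.{StringType, StructField, StructType}

val schema = StructType(Seq(StructField("name", StringType), StructField("age", StringType)))
val out = new LocalCsvOutputWriter(schema, new CSVOptions(Map("header" -> "true")), "/tmp/people.csv")
try {
  out.write(Seq("alice", "30"))
  out.write(Seq("bob", "25"))
} finally {
  out.close()
}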
Example 153
package spark.ml.cookbook.chapter13

import java.io.{BufferedOutputStream, PrintWriter}
import java.net.Socket
import java.net.ServerSocket

import scala.util.Random

class CountStreamThread(socket: Socket) extends Thread {

  val villains = Array("Bane", "Thanos", "Loki", "Apocalypse", "Red Skull", "The Governor", "Sinestro", "Galactus",
    "Doctor Doom", "Lex Luthor", "Joker", "Magneto", "Darth Vader")

  override def run(): Unit = {

        println("Connection accepted")
        val out = new PrintWriter(new BufferedOutputStream(socket.getOutputStream()))

        println("Producing Data")
        while (true) {
            out.println(villains(Random.nextInt(villains.size)))
            out.flush() // PrintWriter without autoFlush buffers println; flush so consumers see data promptly
            Thread.sleep(10)
        }

        println("Done Producing")
  }
}

object CountStreamProducer {

  def main(args: Array[String]): Unit = {

      val ss = new ServerSocket(9999)
      while (true) {
        println("Accepting Connection...")
        new CountStreamThread(ss.accept()).start()
      }
  }
} 
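
A sketch of the consuming side, which is not part of the original example: a classic Spark Streaming job can read this socket feed and count names per batch.

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

object CountStreamConsumerSketch {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[2]").setAppName("CountStreamConsumer")
    val ssc = new StreamingContext(conf, Seconds(1))
    ssc.socketTextStream("localhost", 9999)
      .map(name => (name, 1))
      .reduceByKey(_ + _)
      .print()
    ssc.start()
    ssc.awaitTermination()
  }
}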
Example 154
Source File: ParadoxLogger.scala    From paradox   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.paradox

import java.io.{ PrintWriter, StringWriter }

import scala.collection.immutable.StringOps

trait ParadoxLogger {
  def debug(t: Throwable): Unit = {
    // we provide our own implementation because sbt doesn't offer any exception logging at debug
    val writer = new StringWriter()
    t.printStackTrace(new PrintWriter(writer))
    new StringOps(writer.toString).lines.foreach(debug(_))
  }
  def debug(msg: => String): Unit
  def info(msg: => String): Unit
  def warn(msg: => String): Unit
  def error(msg: => String): Unit
}

object NullLogger extends ParadoxLogger {
  override def debug(msg: => String): Unit = ()
  override def info(msg: => String): Unit = ()
  override def warn(msg: => String): Unit = ()
  override def error(msg: => String): Unit = ()
}

object PrintlnLogger extends ParadoxLogger {
  override def debug(msg: => String): Unit = println(s"[debug] $msg")
  override def info(msg: => String): Unit = println(s"[info] $msg")
  override def warn(msg: => String): Unit = println(s"[warn] $msg")
  override def error(msg: => String): Unit = println(s"[error] $msg")
} 
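
A quick usage check: the Throwable overload renders the stack trace through StringWriter/PrintWriter and logs it one line at a time.

PrintlnLogger.debug(new RuntimeException("boom")) // prints "[debug] java.lang.RuntimeException: boom", then one frame per line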
Example 155
Source File: PageViewGenerator.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
// scalastyle:off println
package org.apache.spark.examples.streaming.clickstream

import java.net.ServerSocket
import java.io.PrintWriter
import util.Random

// The PageView class used below was dropped from this snippet; a minimal
// reconstruction from its usage (four fields, tab-separated toString) is
// assumed here.
class PageView(val url: String, val status: Int, val zipCode: Int, val userID: Int)
    extends Serializable {
  override def toString(): String = "%s\t%s\t%s\t%s\n".format(url, status, zipCode, userID)
}
// scalastyle:on
object PageViewGenerator {
  val pages = Map("http://foo.com/"        -> .7,
                  "http://foo.com/news"    -> 0.2,
                  "http://foo.com/contact" -> .1)
  val httpStatus = Map(200 -> .95,
                       404 -> .05)
  val userZipCode = Map(94709 -> .5,
                        94117 -> .5)
  val userID = Map((1 to 100).map(_ -> .01) : _*)

  def pickFromDistribution[T](inputMap : Map[T, Double]) : T = {
    val rand = new Random().nextDouble()
    var total = 0.0
    for ((item, prob) <- inputMap) {
      total = total + prob
      if (total > rand) {
        return item
      }
    }
    inputMap.take(1).head._1 // Shouldn't get here if probabilities add up to 1.0
  }

  def getNextClickEvent() : String = {
    val id = pickFromDistribution(userID)
    val page = pickFromDistribution(pages)
    val status = pickFromDistribution(httpStatus)
    val zipCode = pickFromDistribution(userZipCode)
    new PageView(page, status, zipCode, id).toString()
  }

  def main(args : Array[String]) {
    if (args.length != 2) {
      System.err.println("Usage: PageViewGenerator <port> <viewsPerSecond>")
      System.exit(1)
    }
    val port = args(0).toInt
    val viewsPerSecond = args(1).toFloat
    val sleepDelayMs = (1000.0 / viewsPerSecond).toInt
    val listener = new ServerSocket(port)
    println("Listening on port: " + port)

    while (true) {
      val socket = listener.accept()
      new Thread() {
        override def run(): Unit = {
          println("Got client connected from: " + socket.getInetAddress)
          val out = new PrintWriter(socket.getOutputStream(), true)

          while (true) {
            Thread.sleep(sleepDelayMs)
            out.write(getNextClickEvent())
            out.flush()
          }
          socket.close()
        }
      }.start()
    }
  }
}
// scalastyle:on println 
Example 156
Source File: SchrodingerExceptionTest.scala    From aloha   with MIT License 5 votes vote down vote up
package com.eharmony.aloha.ex

import org.junit.{Before, Test}
import org.junit.Assert._
import java.io.{PrintWriter, OutputStreamWriter, ByteArrayOutputStream, PrintStream}

class SchrodingerExceptionTest {

    
    private[this] var ex: SchrodingerException = _

    @Before def before() {
        ex = new SchrodingerException
    }

    @Test def testFillInStackTrace() {
        assertTrue(new SchrodingerException().fillInStackTrace().isInstanceOf[SchrodingerException])
    }

    @Test(expected = classOf[SchrodingerException]) def testGetMessage() {
        ex.getMessage()
    }

    @Test(expected = classOf[SchrodingerException]) def testGetStackTrace() {
        ex.getStackTrace()
    }

    @Test(expected = classOf[SchrodingerException]) def testGetCause() {
        ex.getCause()
    }

    @Test(expected = classOf[SchrodingerException]) def testSetStackTrace() {
        ex.setStackTrace(Array.empty)
    }

    @Test(expected = classOf[SchrodingerException]) def testGetLocalizedMessage() {
        ex.getLocalizedMessage()
    }

    @Test(expected = classOf[SchrodingerException]) def testPrintStackTraceEmpty() {
        ex.printStackTrace()
    }

    @Test(expected = classOf[SchrodingerException]) def testPrintStackTraceStream() {
        val baos = new ByteArrayOutputStream()
        val ps = new PrintStream(baos)
        ex.printStackTrace(ps)
    }

    @Test(expected = classOf[SchrodingerException]) def testPrintStackTraceWriter() {
        val baos = new ByteArrayOutputStream()
        val osw = new OutputStreamWriter(baos)
        val ps = new PrintWriter(osw)
        ex.printStackTrace(ps)
    }

    @Test(expected = classOf[SchrodingerException]) def testInitCause() {
        ex.initCause(new Throwable)
    }

    @Test(expected = classOf[SchrodingerException]) def testToString() {
        ex.toString()
    }

    @Test def testNoThrowForSchrodingerExceptionWithSchrodingerExceptionCause() {
        new SchrodingerException(new SchrodingerException)
    }

    @Test def testNoThrowForSchrodingerExceptionWithExceptionCause() {
        new SchrodingerException(new Exception)
    }

    @Test(expected = classOf[SchrodingerException]) def testThrowForThrowableWithSchrodingerExceptionCause() {
        new Throwable(ex)
    }

    @Test(expected = classOf[SchrodingerException]) def testThrowForExceptionWithSchrodingerExceptionCause() {
        new Exception(ex)
    }

    @Test(expected = classOf[SchrodingerException]) def testThrowForRuntimeExceptionWithSchrodingerExceptionCause() {
        new RuntimeException(ex)
    }
} 
Example 157
Source File: SchrodingerException.scala    From aloha   with MIT License 5 votes vote down vote up
package com.eharmony.aloha.ex

import java.io.{PrintWriter, PrintStream}
import com.eharmony.aloha.AlohaException

// The class declaration was dropped from this snippet; it is reconstructed
// below from its usage in the test above (no-arg, message-only and
// cause-only constructors, a `message` field). Treat the exact signature as
// an assumption rather than the aloha original.
class SchrodingerException(val message: String, cause: Throwable) extends AlohaException(message, cause) {
    def this(message: String) = this(message, null)
    def this(cause: Throwable) = this(null, cause)
    def this() = this(null, null)

    override def fillInStackTrace() = this
    override def getCause() = throw this
    override def getLocalizedMessage() = throw this
    override def getMessage() = throw this
    override def getStackTrace() = throw this
    override def initCause(cause: Throwable) = throw this
    override def printStackTrace() = throw this
    override def printStackTrace(s: PrintStream) = throw this
    override def printStackTrace(s: PrintWriter) = throw this
    override def setStackTrace(stackTrace: Array[StackTraceElement]) = throw this
    override def toString() = throw this

    def safeToString() = {
        val m = Option(message) getOrElse ""
        s"SchrodingerException($m)"
    }
}

object SchrodingerException {
    val Instance = new SchrodingerException
} 
Example 158
Source File: ScalafmtSbtReporter.scala    From sbt-scalafmt   with Apache License 2.0 5 votes vote down vote up
package org.scalafmt.sbt

import java.io.PrintWriter
import java.io.OutputStreamWriter
import java.nio.file.Path

import org.scalafmt.interfaces.ScalafmtReporter
import sbt.internal.util.MessageOnlyException
import sbt.util.Logger

import scala.util.control.NoStackTrace

class ScalafmtSbtReporter(log: Logger, out: OutputStreamWriter)
    extends ScalafmtReporter {
  override def error(file: Path, message: String): Unit = {
    throw new MessageOnlyException(s"$message: $file")
  }

  override def error(file: Path, e: Throwable): Unit = {
    if (e.getMessage != null) {
      error(file, e.getMessage)
    } else {
      throw new FailedToFormat(file.toString, e)
    }
  }

  override def error(file: Path, message: String, e: Throwable): Unit = {
    if (e.getMessage != null) {
      error(file, s"$message: ${e.getMessage()}")
    } else {
      throw new FailedToFormat(file.toString, e)
    }
  }

  override def excluded(file: Path): Unit =
    log.debug(s"file excluded: $file")

  override def parsedConfig(config: Path, scalafmtVersion: String): Unit =
    log.debug(s"parsed config (v$scalafmtVersion): $config")

  override def downloadWriter(): PrintWriter = new PrintWriter(out)
  override def downloadOutputStreamWriter(): OutputStreamWriter = out

  private class FailedToFormat(filename: String, cause: Throwable)
      extends Exception(filename, cause)
      with NoStackTrace
} 
Example 159
Source File: CodeGeneratorTest.scala    From MoVE   with Mozilla Public License 2.0 5 votes vote down vote up
package de.thm.move.models

import java.io.PrintWriter
import java.net.URI
import java.nio.charset.Charset
import java.nio.file.{Paths, Files}
import java.util.Base64
import javafx.scene.Node
import javafx.scene.paint.{Paint, Color}
import javafx.scene.shape.{LineTo, MoveTo}
import javafx.scene.text.TextAlignment

import de.thm.move.MoveSpec

import de.thm.move.models.ModelicaCodeGenerator.FormatSrc
import de.thm.move.models.ModelicaCodeGenerator.FormatSrc.FormatSrc
import de.thm.move.types._
import de.thm.move.util.ResourceUtils
import de.thm.move.util.GeometryUtils
import de.thm.move.views.shapes._

class CodeGeneratorTest extends MoveSpec {
  val dummyURL = Paths.get(System.getProperty("user.home")).toUri

  private def eqTest(toTest:String,expected:String): Unit = {
    if(!toTest.contains(expected)) {
      println(toTest)
      println("Expected: "+expected)
    }

    assert(toTest.contains(expected), s"Expected [$toTest] containing [$expected]")
  }

  "ModelicaCodeGenerator" should "generate Rectangles" in {
    val generator = new ModelicaCodeGenerator(FormatSrc.Pretty, 1, 500,500)
    val rect = new ResizableRectangle((0,0), 100,100)
    rect.colorizeShape(Color.BLACK, Color.BLACK)
    rect.setRotate(90.0)
    val str = generator.generateShape(rect, "test",  dummyURL)(1)
    eqTest(str, "origin = {50,450}")
    eqTest(str, "extent = {{-50,50}, {50,-50}}")

    val generator2 = new ModelicaCodeGenerator(FormatSrc.Pretty, 4, 500,500)
    val str2 = generator2.generateShape(rect, "test",  dummyURL)(1)
    eqTest(str2, "origin = {12,112}")
    eqTest(str2, "extent = {{-12,12}, {12,-12}}")
  }

  it should "generate Circles" in {
    val generator = new ModelicaCodeGenerator(FormatSrc.Pretty, 1, 500,500)
    val circle = new ResizableCircle((100,100), 50,50)
    circle.colorizeShape(Color.BLACK, Color.BLACK)
    circle.setRotate(90.0)
    val str = generator.generateShape(circle, "test",  dummyURL)(1)
    eqTest(str, "origin = {100,400}")
    eqTest(str, "extent = {{-50,50}, {50,-50}}")
  }
} 
Example 160
Source File: UTF8TextInputFormatter.scala    From glow   with Apache License 2.0 5 votes vote down vote up
package io.projectglow.transformers.pipe

import java.io.{OutputStream, PrintWriter}

import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.SQLUtils.dataTypesEqualExceptNullability
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.types.StringType


class UTF8TextInputFormatter() extends InputFormatter {

  private var writer: PrintWriter = _

  override def init(stream: OutputStream): Unit = {
    writer = new PrintWriter(stream)
  }

  override def write(record: InternalRow): Unit = {
    if (!record.isNullAt(0)) {
      writer.println(record.getUTF8String(0)) // scalastyle:ignore
    }
  }

  override def close(): Unit = {
    writer.close()
  }
}

class UTF8TextInputFormatterFactory extends InputFormatterFactory {
  override def name: String = "text"

  override def makeInputFormatter(df: DataFrame, options: Map[String, String]): InputFormatter = {
    require(df.schema.length == 1, "Input dataframe must have one column.")
    require(
      dataTypesEqualExceptNullability(df.schema.head.dataType, StringType),
      "Input dataframe must have one string column.")
    new UTF8TextInputFormatter
  }
} 
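
A sketch of the lifecycle the pipe transformer drives, with a stand-in stream; InternalRow construction is shown for a single string column:

import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.unsafe.types.UTF8String

val formatter = new UTF8TextInputFormatter()
formatter.init(System.out) // stand-in for the child process's stdin; close() will also close it
formatter.write(InternalRow(UTF8String.fromString("hello")))
formatter.close()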
Example 161
Source File: CSVInputFormatter.scala    From glow   with Apache License 2.0 5 votes vote down vote up
package io.projectglow.transformers.pipe

import java.io.{OutputStream, PrintWriter}

import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.datasources.csv.SGUnivocityGenerator
import org.apache.spark.sql.types.StructType

import io.projectglow.SparkShim.CSVOptions

class CSVInputFormatter(schema: StructType, parsedOptions: CSVOptions) extends InputFormatter {

  private var writer: PrintWriter = _
  private var univocityGenerator: SGUnivocityGenerator = _

  override def init(stream: OutputStream): Unit = {
    writer = new PrintWriter(stream)
    univocityGenerator = new SGUnivocityGenerator(schema, writer, parsedOptions)
    if (parsedOptions.headerFlag) {
      univocityGenerator.writeHeaders()
    }
  }

  override def write(record: InternalRow): Unit = {
    univocityGenerator.write(record)
  }

  override def close(): Unit = {
    writer.close()
    univocityGenerator.close()
  }
}

class CSVInputFormatterFactory extends InputFormatterFactory {
  override def name: String = "csv"

  override def makeInputFormatter(
      df: DataFrame,
      options: Map[String, String]
  ): InputFormatter = {
    val sqlConf = df.sparkSession.sessionState.conf
    val parsedOptions =
      new CSVOptions(
        options,
        sqlConf.csvColumnPruning,
        sqlConf.sessionLocalTimeZone
      )
    new CSVInputFormatter(df.schema, parsedOptions)
  }
} 
Example 162
Source File: CodeGenerator.scala    From jvm-toxcore-c   with GNU General Public License v3.0 5 votes vote down vote up
package im.tox.tox4j.impl.jni.codegen

import java.io.{ File, PrintWriter }

import com.google.common.base.CaseFormat
import gnieh.pp.PrettyRenderer
import im.tox.tox4j.impl.jni.codegen.cxx.Ast._
import im.tox.tox4j.impl.jni.codegen.cxx.{ Ast, Print }

object NameConversions {

  def cxxVarName(name: String): String = CaseFormat.LOWER_CAMEL.to(CaseFormat.LOWER_UNDERSCORE, name)
  def cxxTypeName(name: String): String = CaseFormat.UPPER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, name)
  def javaVarName(name: String): String = CaseFormat.LOWER_UNDERSCORE.to(CaseFormat.LOWER_CAMEL, name)
  def javaTypeName(name: String): String = CaseFormat.UPPER_UNDERSCORE.to(CaseFormat.UPPER_CAMEL, name)

}

abstract class CodeGenerator extends App {

  def writeCode(path: String, sep: String = "\n\n")(code: Ast.TranslationUnit): Unit = {
    val renderer = new PrettyRenderer(130)

    val writer = new PrintWriter(new File("cpp/src", path))
    try {
      writer.println(code.map(Print.printDecl).map(renderer).mkString(sep))
    } finally {
      writer.close()
    }
  }

  def ifdef(header: String, guard: String, code: TranslationUnit*): TranslationUnit = {
    Include(header) +:
      Ifdef(guard) +:
      code.flatten :+
      Endif
  }

} 
Example 163
Source File: Codegen.scala    From caliban   with Apache License 2.0 5 votes vote down vote up
package caliban.tools

import java.io.{ File, PrintWriter }

import caliban.parsing.adt.Document
import zio.{ Task, UIO }

object Codegen {
  def generate(
    arguments: Options,
    writer: (Document, String, Option[String], String) => String
  ): Task[Unit] = {
    val s           = ".*/scala/(.*)/(.*).scala".r.findFirstMatchIn(arguments.toPath)
    val packageName = arguments.packageName.orElse(s.map(_.group(1).split("/").mkString(".")))
    val objectName  = s.map(_.group(2)).getOrElse("Client")
    val effect      = arguments.effect.getOrElse("zio.UIO")
    val loader      = getSchemaLoader(arguments.schemaPath, arguments.headers)
    for {
      schema    <- loader.load
      code      = writer(schema, objectName, packageName, effect)
      formatted <- Formatter.format(code, arguments.fmtPath)
      _ <- Task(new PrintWriter(new File(arguments.toPath)))
            .bracket(q => UIO(q.close()), pw => Task(pw.println(formatted)))
    } yield ()
  }

  private def getSchemaLoader(path: String, schemaPathHeaders: Option[List[Options.Header]]): SchemaLoader =
    if (path.startsWith("http")) SchemaLoader.fromIntrospection(path, schemaPathHeaders)
    else SchemaLoader.fromFile(path)

} 
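
The Task(...).bracket(release, use) call is ZIO's resource-safety combinator: the PrintWriter is closed whether or not printing the formatted code succeeds. The same shape in isolation (ZIO 1.x API assumed):

import java.io.{File, PrintWriter}
import zio.{Task, UIO}

def writeFile(path: String, contents: String): Task[Unit] =
  Task(new PrintWriter(new File(path)))
    .bracket(w => UIO(w.close()), w => Task(w.println(contents)))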
Example 164
Source File: GraphiteEndpointSink.scala    From kafka-lag-exporter   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.kafkalagexporter

import com.lightbend.kafkalagexporter.MetricsSink._
import com.lightbend.kafkalagexporter.EndpointSink.ClusterGlobalLabels
import java.net.Socket
import java.io.PrintWriter
import scala.util.{Try, Success, Failure}

object GraphiteEndpointSink {

  def apply(metricWhitelist: List[String], clusterGlobalLabels: ClusterGlobalLabels,
            graphiteConfig: Option[GraphiteConfig]): MetricsSink = {
    Try(new GraphiteEndpointSink(metricWhitelist, clusterGlobalLabels, graphiteConfig))
      .fold(t => throw new Exception("Could not create Graphite Endpoint", t), sink => sink)
  }
}

class GraphiteEndpointSink private(metricWhitelist: List[String], clusterGlobalLabels: ClusterGlobalLabels,
                                      graphiteConfig: Option[GraphiteConfig]) extends EndpointSink(clusterGlobalLabels) {
  def graphitePush(graphiteConfig: GraphiteConfig, metricName: String, metricValue: Double): Unit = {
    Try(new Socket(graphiteConfig.host, graphiteConfig.port)) match {
      case Success(socket) =>
        Try(new PrintWriter(socket.getOutputStream)) match {
          case Success(writer) =>
            writer.print(s"${graphiteConfig.prefix.getOrElse("")}${metricName} ${metricValue} ${System.currentTimeMillis / 1000}\n")
            writer.close
            socket.close
          case Failure(_) =>
            socket.close
        }
      case Failure(_) => {
      }
    }
  }

   
  def metricNameToGraphiteMetricName(metricValue: MetricValue): String = {
    (getGlobalLabelValuesOrDefault(metricValue.clusterName) ++ metricValue.labels
      ).map( x => x.replaceAll("\\.", "_")).mkString(".") + "." + metricValue.definition.name;
  }

  override def report(m: MetricValue): Unit = {
    if (metricWhitelist.exists(m.definition.name.matches)) {
      graphiteConfig.foreach { conf =>
        graphitePush(conf, metricNameToGraphiteMetricName(m), m.value);
      }
    }
  }

  override def remove(m: RemoveMetric): Unit = {
  }


} 
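
graphitePush speaks Graphite's plaintext protocol, one sample per line in the form "<metric path> <value> <unix timestamp>". With prefix "kafka_lag." a written line would look like this (values illustrative):

kafka_lag.cluster_a.topic_x.my_metric 42.0 1700000000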
Example 165
Source File: LocalCsvOutputWriter.scala    From seahorse   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.datasources.csv

import java.io.PrintWriter
import com.univocity.parsers.csv.{CsvWriter, CsvWriterSettings}
import org.apache.spark.sql.types._


class LocalCsvOutputWriter(
      schema: StructType,
      options: CSVOptions,
      driverPath: String) {

  private val driverFileWriter = new PrintWriter(driverPath)

  private val FLUSH_BATCH_SIZE = 1024L
  private var records: Long = 0L
  private val writerSettings = createWriterSettings(schema, options)
  private val gen = new CsvWriter(driverFileWriter, writerSettings)

  def write(row: Seq[String]): Unit = {
    gen.writeRow(row.toArray)
    records += 1
    if (records % FLUSH_BATCH_SIZE == 0) {
      flush()
    }
  }

  def close(): Unit = {
    flush()
    driverFileWriter.close()
  }

  private def flush(): Unit = {
    gen.flush()
  }

  private def createWriterSettings(schema: StructType, options: CSVOptions): CsvWriterSettings = {
    val writerSettings = options.asWriterSettings
    writerSettings.setHeaderWritingEnabled(options.headerFlag)
    writerSettings.setHeaders(schema.fieldNames: _*)
    writerSettings
  }
} 
Example 166
Source File: LocalCsvOutputWriter.scala    From seahorse   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.datasources.csv

import java.io.PrintWriter

import org.apache.spark.sql.{DataFrame, Row}
import org.apache.spark.sql.types._



class LocalCsvOutputWriter(
      dataSchema: StructType,
      params: CSVOptions,
      driverPath: String) {

  private val driverFileWriter = new PrintWriter(driverPath)

  private val FLUSH_BATCH_SIZE = 1024L
  private var records: Long = 0L
  private val csvWriter = new LineCsvWriter(params, dataSchema.fieldNames.toSeq)

  def write(row: Seq[String]): Unit = {
    csvWriter.writeRow(row, records == 0L && params.headerFlag)
    records += 1
    if (records % FLUSH_BATCH_SIZE == 0) {
      flush()
    }
  }

  private def flush(): Unit = {
    val lines = csvWriter.flush()
    if (lines.nonEmpty) {
      driverFileWriter.write(lines)
    }
  }

  def close(): Unit = {
    flush()
    driverFileWriter.close()
  }
} 
Example 167
Source File: AbstractOperationExample.scala    From seahorse   with Apache License 2.0 5 votes vote down vote up
package ai.deepsense.deeplang.doperations.examples

import java.io.{File, PrintWriter}

import ai.deepsense.commons.utils.Logging
import ai.deepsense.deeplang.doperables.dataframe.DataFrame
import ai.deepsense.deeplang.doperations.ReadDataFrame
import ai.deepsense.deeplang.doperations.inout.CsvParameters.ColumnSeparatorChoice.Comma
import ai.deepsense.deeplang.doperations.readwritedataframe.FileScheme
import ai.deepsense.deeplang.{DOperable, DOperation, DeeplangIntegTestSupport}

abstract class AbstractOperationExample[T <: DOperation]
    extends DeeplangIntegTestSupport
    with Logging {

  def dOperation: T

  final def className: String = dOperation.getClass.getSimpleName

  def fileNames: Seq[String] = Seq.empty

  def loadCsv(fileName: String): DataFrame = {
    ReadDataFrame(
      FileScheme.File.pathPrefix + this.getClass.getResource(s"/test_files/$fileName.csv").getPath,
      Comma(),
      csvNamesIncluded = true,
      csvConvertToBoolean = false
    ).executeUntyped(Vector.empty[DOperable])(executionContext)
      .head
      .asInstanceOf[DataFrame]
  }

  def inputDataFrames: Seq[DataFrame] = fileNames.map(loadCsv)

  className should {
    "successfully run execute() and generate example" in {
      val op = dOperation
      val outputDfs = op
        .executeUntyped(inputDataFrames.toVector)(executionContext)
        .collect { case df: DataFrame => df }
      val html =
        ExampleHtmlFormatter.exampleHtml(op, inputDataFrames, outputDfs)

      // TODO Make it not rely on relative path it's run from
      val examplePageFile = new File(
        "docs/operations/examples/" + className + ".md")

      examplePageFile.getParentFile.mkdirs()
      examplePageFile.createNewFile()

      val writer = new PrintWriter(examplePageFile)
      // scalastyle:off println
      writer.println(html)
      // scalastyle:on println
      writer.flush()
      writer.close()
      logger.info(
        "Created doc page for " + className)
    }
  }
} 
Example 168
Source File: DriverFiles.scala    From seahorse   with Apache License 2.0 5 votes vote down vote up
package ai.deepsense.deeplang.doperations.readwritedataframe.filestorage

import java.io.{File, IOException, PrintWriter}

import scala.io.Source

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.execution.datasources.csv.{DataframeToDriverCsvFileWriter, RawCsvRDDToDataframe}
import org.apache.spark.sql.{Dataset, Encoders, Row, SaveMode, DataFrame => SparkDataFrame}
import ai.deepsense.commons.resources.ManagedResource
import ai.deepsense.deeplang.ExecutionContext
import ai.deepsense.deeplang.doperables.dataframe.DataFrame
import ai.deepsense.deeplang.doperations.inout.{InputFileFormatChoice, OutputFileFormatChoice}
import ai.deepsense.deeplang.doperations.readwritedataframe.filestorage.csv.CsvOptions
import ai.deepsense.deeplang.doperations.readwritedataframe.{FilePath, FileScheme}
import ai.deepsense.deeplang.readjsondataset.JsonReader
import ai.deepsense.sparkutils.SQL

object DriverFiles extends JsonReader {

  def read(driverPath: String, fileFormat: InputFileFormatChoice)
          (implicit context: ExecutionContext): SparkDataFrame = fileFormat match {
    case csv: InputFileFormatChoice.Csv => readCsv(driverPath, csv)
    case json: InputFileFormatChoice.Json => readJson(driverPath)
    case parquet: InputFileFormatChoice.Parquet => throw ParquetNotSupported
  }

  def write(dataFrame: DataFrame, path: FilePath, fileFormat: OutputFileFormatChoice, saveMode: SaveMode)
           (implicit context: ExecutionContext): Unit = {
    path.verifyScheme(FileScheme.File)
    if (saveMode == SaveMode.ErrorIfExists && new File(path.pathWithoutScheme).exists()){
      throw new IOException(s"Output file ${path.fullPath} already exists")
    }
    fileFormat match {
      case csv: OutputFileFormatChoice.Csv => writeCsv(path, csv, dataFrame)
      case json: OutputFileFormatChoice.Json => writeJson(path, dataFrame)
      case parquet: OutputFileFormatChoice.Parquet => throw ParquetNotSupported
    }
  }

  private def readCsv
      (driverPath: String, csvChoice: InputFileFormatChoice.Csv)
      (implicit context: ExecutionContext): SparkDataFrame = {
    val params = CsvOptions.map(csvChoice.getNamesIncluded, csvChoice.getCsvColumnSeparator())
    val lines = Source.fromFile(driverPath).getLines().toStream
    val fileLinesRdd = context.sparkContext.parallelize(lines)

    RawCsvRDDToDataframe.parse(fileLinesRdd, context.sparkSQLSession.sparkSession, params)
  }

  private def readJson(driverPath: String)(implicit context: ExecutionContext) = {
    val lines = Source.fromFile(driverPath).getLines().toStream
    val fileLinesRdd = context.sparkContext.parallelize(lines)
    val sparkSession = context.sparkSQLSession.sparkSession
    readJsonFromRdd(fileLinesRdd, sparkSession)
  }

  private def writeCsv
      (path: FilePath, csvChoice: OutputFileFormatChoice.Csv, dataFrame: DataFrame)
      (implicit context: ExecutionContext): Unit = {
    val params = CsvOptions.map(csvChoice.getNamesIncluded, csvChoice.getCsvColumnSeparator())

    DataframeToDriverCsvFileWriter.write(
      dataFrame.sparkDataFrame,
      params,
      dataFrame.schema.get,
      path.pathWithoutScheme,
      context.sparkSQLSession.sparkSession
    )
  }

  private def writeJson(path: FilePath, dataFrame: DataFrame)
                       (implicit context: ExecutionContext): Unit = {
    val rawJsonLines: RDD[String] = SQL.dataFrameToJsonRDD(dataFrame.sparkDataFrame)
    writeRddToDriverFile(path.pathWithoutScheme, rawJsonLines)
  }

  private def writeRddToDriverFile(driverPath: String, lines: RDD[String]): Unit = {
    val recordSeparator = System.getProperty("line.separator", "\n")
    ManagedResource(new PrintWriter(driverPath)) { writer =>
      lines.collect().foreach(line => writer.write(line + recordSeparator))
    }
  }

} 
Example 169
Source File: RedirectCreator.scala    From seahorse   with Apache License 2.0 5 votes vote down vote up
package ai.deepsense.docgen

import java.io.{File, PrintWriter}

import ai.deepsense.deeplang.DOperation

trait RedirectCreator {

  // scalastyle:off println

  
  def createRedirects(
      sparkOperations: Seq[OperationWithSparkClassName],
      forceUpdate: Boolean): Int = {

    sparkOperations.map { case OperationWithSparkClassName(operation, sparkClassName) =>
      val redirectFile = new File("docs/uuid/" + operation.id + ".md")
      if (!redirectFile.exists() || forceUpdate) {
        createRedirect(redirectFile, operation, sparkClassName)
        1
      } else {
        0
      }
    }.sum
  }

  private def createRedirect(redirectFile: File, operation: DOperation, sparkClassName: String) = {
    val writer = new PrintWriter(redirectFile)
    writer.println("---")
    writer.println("layout: redirect")
    writer.println("redirect: ../operations/" + DocUtils.underscorize(operation.name) + ".html")
    writer.println("---")
    writer.flush()
    writer.close()
    println("Created redirect for " + operation.name)
  }
  // scalastyle:on println
} 
Example 170
Source File: Main.scala    From scalajs-highcharts   with MIT License 5 votes vote down vote up
package com.karasiq.highcharts.generator

import java.io.{BufferedWriter, FileOutputStream, OutputStreamWriter, PrintWriter}
import java.nio.file._
import java.nio.file.attribute.BasicFileAttributes

import scala.util.control.Exception
import scalaj.http.{Http, HttpOptions}

import com.karasiq.highcharts.generator.writers.{ScalaClassWriter, ScalaJsClassBuilder}

case class HighchartsApiDoc(library: String) {
  private val defaultPackage = System.getProperty(s"highcharts-generator.$library.package", s"com.$library")

  private def httpGet(url: String): List[ConfigurationObject] = {
    val page = Http.get(url)
      .header("User-Agent", "Mozilla/5.0 (X11; OpenBSD amd64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.81 Safari/537.36")
      .header("Accept", "application/json")
      .options(HttpOptions.connTimeout(10000), HttpOptions.readTimeout(10000))

    val json = page.asString
    ConfigurationObject.fromJson(json)
  }

  private def writeFiles(pkg: String, configs: List[ConfigurationObject], rootObject: Option[String] = None): Unit = {
    val header =
      s"""
          |package $pkg
          |
          |import scalajs.js, js.`|`
          |import com.highcharts.CleanJsObject
          |import com.highcharts.HighchartsUtils._
          |
          |""".stripMargin

    val outputDir = Paths.get(System.getProperty("highcharts-generator.output", "src/main/scala"), pkg.split("\\."):_*)
    Files.createDirectories(outputDir)

    // Remove all files
    Files.walkFileTree(outputDir, new SimpleFileVisitor[Path] {
      override def visitFile(file: Path, attrs: BasicFileAttributes): FileVisitResult = {
        Files.delete(file)
        FileVisitResult.CONTINUE
      }
    })

    val classes = new ScalaJsClassBuilder().parse(configs, rootObject)
    val classWriter = new ScalaClassWriter
    classes.foreach { scalaJsClass ⇒
      val file = outputDir.resolve(scalaJsClass.scalaName + ".scala")
      println(s"Writing $file...")
      val writer = new PrintWriter(new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file.toFile, true), "UTF-8")))
      Exception.allCatch.andFinally(writer.close()) {
        if (Files.size(file) == 0) {
          writer.print(header)
        }
        classWriter.writeClass(scalaJsClass) { line ⇒
          writer.println(line)
        }
        writer.flush()
      }
    }
  }

  def writeConfigs(): Unit = {
    val configs = httpGet(s"https://api.highcharts.com/$library/dump.json")
    writeFiles(s"$defaultPackage.config", configs, Some(s"${library.capitalize}Config"))
  }

  def writeApis(): Unit = {
    val configs = httpGet(s"https://api.highcharts.com/$library/object/dump.json")
    writeFiles(s"$defaultPackage.api", configs)
  }

  def writeAll(): Unit = {
    // TODO: https://github.com/highcharts/highcharts/issues/7227
    writeConfigs()
    // writeApis() // TODO: 404
  }
}

object Main extends App {
  HighchartsApiDoc("highcharts").writeAll()
  HighchartsApiDoc("highstock").writeAll()
  HighchartsApiDoc("highmaps").writeAll()
} 
Example 171
Source File: PythonBroadcastSuite.scala    From iolap   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.api.python

import scala.io.Source

import java.io.{PrintWriter, File}

import org.scalatest.Matchers

import org.apache.spark.{SharedSparkContext, SparkConf, SparkFunSuite}
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.util.Utils

// This test suite uses SharedSparkContext because we need a SparkEnv in order to deserialize
// a PythonBroadcast:
class PythonBroadcastSuite extends SparkFunSuite with Matchers with SharedSparkContext {
  test("PythonBroadcast can be serialized with Kryo (SPARK-4882)") {
    val tempDir = Utils.createTempDir()
    val broadcastedString = "Hello, world!"
    def assertBroadcastIsValid(broadcast: PythonBroadcast): Unit = {
      val source = Source.fromFile(broadcast.path)
      val contents = source.mkString
      source.close()
      contents should be (broadcastedString)
    }
    try {
      val broadcastDataFile: File = {
        val file = new File(tempDir, "broadcastData")
        val printWriter = new PrintWriter(file)
        printWriter.write(broadcastedString)
        printWriter.close()
        file
      }
      val broadcast = new PythonBroadcast(broadcastDataFile.getAbsolutePath)
      assertBroadcastIsValid(broadcast)
      val conf = new SparkConf().set("spark.kryo.registrationRequired", "true")
      val deserializedBroadcast =
        Utils.clone[PythonBroadcast](broadcast, new KryoSerializer(conf).newInstance())
      assertBroadcastIsValid(deserializedBroadcast)
    } finally {
      Utils.deleteRecursively(tempDir)
    }
  }
} 
Example 172
Source File: PageViewGenerator.scala    From iolap   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.examples.streaming.clickstream

import java.net.ServerSocket
import java.io.PrintWriter
import util.Random


// scalastyle:on
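
// PageView (url, status, zipCode, userID) is defined alongside this
// generator in the original source; see the reconstruction in Example 155.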
object PageViewGenerator {
  val pages = Map("http://foo.com/"        -> .7,
                  "http://foo.com/news"    -> 0.2,
                  "http://foo.com/contact" -> .1)
  val httpStatus = Map(200 -> .95,
                       404 -> .05)
  val userZipCode = Map(94709 -> .5,
                        94117 -> .5)
  val userID = Map((1 to 100).map(_ -> .01) : _*)

  def pickFromDistribution[T](inputMap : Map[T, Double]) : T = {
    val rand = new Random().nextDouble()
    var total = 0.0
    for ((item, prob) <- inputMap) {
      total = total + prob
      if (total > rand) {
        return item
      }
    }
    inputMap.take(1).head._1 // Shouldn't get here if probabilities add up to 1.0
  }

  def getNextClickEvent() : String = {
    val id = pickFromDistribution(userID)
    val page = pickFromDistribution(pages)
    val status = pickFromDistribution(httpStatus)
    val zipCode = pickFromDistribution(userZipCode)
    new PageView(page, status, zipCode, id).toString()
  }

  def main(args : Array[String]) {
    if (args.length != 2) {
      System.err.println("Usage: PageViewGenerator <port> <viewsPerSecond>")
      System.exit(1)
    }
    val port = args(0).toInt
    val viewsPerSecond = args(1).toFloat
    val sleepDelayMs = (1000.0 / viewsPerSecond).toInt
    val listener = new ServerSocket(port)
    println("Listening on port: " + port)

    while (true) {
      val socket = listener.accept()
      new Thread() {
        override def run(): Unit = {
          println("Got client connected from: " + socket.getInetAddress)
          val out = new PrintWriter(socket.getOutputStream(), true)

          while (true) {
            Thread.sleep(sleepDelayMs)
            out.write(getNextClickEvent())
            out.flush()
          }
          socket.close()
        }
      }.start()
    }
  }
} 
Example 173
Source File: TestHedge.scala    From banditsbook-scala   with MIT License 5 votes vote down vote up
package com.github.everpeace.banditsbook.algorithm.hedge

import java.io.{File, PrintWriter}

import breeze.linalg._
import breeze.stats.MeanAndVariance
import com.github.everpeace.banditsbook.arm._
import com.github.everpeace.banditsbook.testing_framework.TestRunner
import com.github.everpeace.banditsbook.testing_framework.TestRunner._
import com.typesafe.config.ConfigFactory

import scala.collection.immutable.Seq

object TestHedge extends _TestHedge with App{
  run()
}

trait _TestHedge {
  def run() = {
//    implicit val randBasis = RandBasis.mt0

    val conf = ConfigFactory.load()
    val baseKey = "banditsbook.algorithm.hedge.test-hedge"
    val (_means, Some(ηs), horizon, nSims, outDir) = readConfig(conf, baseKey, Some("ηs"))
    val means = shuffle(_means)
    val arms = Seq(means:_*).map(μ => BernoulliArm(μ))

    val outputPath = new File(outDir, "test-hedge-results.csv")
    val file = new PrintWriter(outputPath.toString)
    file.write("eta, sim_num, step, chosen_arm, reward, cumulative_reward\n")
    try {
      println("-------------------------------")
      println("Hedge Algorithm")
      println("-------------------------------")
      println(s"   arms = ${means.map("(μ="+_+")").mkString(", ")} (Best Arm = ${argmax(means)})")
      println(s"horizon = $horizon")
      println(s"  nSims = $nSims")
      println(s"      η = (${ηs.mkString(",")})")
      println("")

      val meanOfFinalRewards = scala.collection.mutable.Map.empty[Double, MeanAndVariance]
      val res = for {
        η <- ηs
      } yield {
        println(s"starts simulation on η=$η.")

        val algo = Hedge.Algorithm(η)
        val res = TestRunner.run(algo, arms, nSims, horizon)

        for {
          sim <- 0 until nSims
        } {
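          // Note: st and end below are computed but never used in this
          // snippet; the original presumably wrote per-simulation slices here.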
          val st = sim * horizon
          val end = ((sim + 1) * horizon) - 1
        }
        val finalRewards = res.cumRewards((horizon-1) until (nSims * horizon, horizon))
        import breeze.stats._
        val meanAndVar = meanAndVariance(finalRewards)
        meanOfFinalRewards += η -> meanAndVar
        println(s"reward stats: ${TestRunner.toString(meanAndVar)}")

        res.rawResults.valuesIterator.foreach{ v =>
          file.write(s"${Seq(η, v._1, v._2, v._3, v._4, v._5).mkString(",")}\n")
        }
        println(s"finished simulation on η=$η.")
      }
      println("")
      println(s"reward stats summary")
      println(s"${meanOfFinalRewards.iterator.toSeq.sortBy(_._1).map(p => (s"η=${p._1}", TestRunner.toString(p._2))).mkString("\n")}")
    } finally {
      file.close()
      println("")
      println(s"results are written to ${outputPath}")
    }
  }
} 
Example 174
Source File: PythonBroadcastSuite.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.api.python

import java.io.{File, PrintWriter}

import scala.io.Source

import org.scalatest.Matchers

import org.apache.spark.{SharedSparkContext, SparkConf, SparkFunSuite}
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.util.Utils

// This test suite uses SharedSparkContext because we need a SparkEnv in order to deserialize
// a PythonBroadcast:
class PythonBroadcastSuite extends SparkFunSuite with Matchers with SharedSparkContext {
  test("PythonBroadcast can be serialized with Kryo (SPARK-4882)") {
    val tempDir = Utils.createTempDir()
    val broadcastedString = "Hello, world!"
    def assertBroadcastIsValid(broadcast: PythonBroadcast): Unit = {
      val source = Source.fromFile(broadcast.path)
      val contents = source.mkString
      source.close()
      contents should be (broadcastedString)
    }
    try {
      val broadcastDataFile: File = {
        val file = new File(tempDir, "broadcastData")
        val printWriter = new PrintWriter(file)
        printWriter.write(broadcastedString)
        printWriter.close()
        file
      }
      val broadcast = new PythonBroadcast(broadcastDataFile.getAbsolutePath)
      assertBroadcastIsValid(broadcast)
      val conf = new SparkConf().set("spark.kryo.registrationRequired", "true")
      val deserializedBroadcast =
        Utils.clone[PythonBroadcast](broadcast, new KryoSerializer(conf).newInstance())
      assertBroadcastIsValid(deserializedBroadcast)
    } finally {
      Utils.deleteRecursively(tempDir)
    }
  }
} 
Example 175
Source File: PageViewGenerator.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
// scalastyle:off println
package org.apache.spark.examples.streaming.clickstream

import java.io.PrintWriter
import java.net.ServerSocket
import java.util.Random


// scalastyle:on
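
// PageView (url, status, zipCode, userID) is defined alongside this
// generator in the original source; see the reconstruction in Example 155.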
object PageViewGenerator {
  val pages = Map("http://foo.com/" -> .7,
                  "http://foo.com/news" -> 0.2,
                  "http://foo.com/contact" -> .1)
  val httpStatus = Map(200 -> .95,
                       404 -> .05)
  val userZipCode = Map(94709 -> .5,
                        94117 -> .5)
  val userID = Map((1 to 100).map(_ -> .01): _*)

  def pickFromDistribution[T](inputMap: Map[T, Double]): T = {
    val rand = new Random().nextDouble()
    var total = 0.0
    for ((item, prob) <- inputMap) {
      total = total + prob
      if (total > rand) {
        return item
      }
    }
    inputMap.take(1).head._1 // Shouldn't get here if probabilities add up to 1.0
  }

  def getNextClickEvent(): String = {
    val id = pickFromDistribution(userID)
    val page = pickFromDistribution(pages)
    val status = pickFromDistribution(httpStatus)
    val zipCode = pickFromDistribution(userZipCode)
    new PageView(page, status, zipCode, id).toString()
  }

  def main(args: Array[String]) {
    if (args.length != 2) {
      System.err.println("Usage: PageViewGenerator <port> <viewsPerSecond>")
      System.exit(1)
    }
    val port = args(0).toInt
    val viewsPerSecond = args(1).toFloat
    val sleepDelayMs = (1000.0 / viewsPerSecond).toInt
    val listener = new ServerSocket(port)
    println("Listening on port: " + port)

    while (true) {
      val socket = listener.accept()
      new Thread() {
        override def run(): Unit = {
          println("Got client connected from: " + socket.getInetAddress)
          val out = new PrintWriter(socket.getOutputStream(), true)

          while (true) {
            Thread.sleep(sleepDelayMs)
            out.write(getNextClickEvent())
            out.flush()
          }
          socket.close()
        }
      }.start()
    }
  }
}
// scalastyle:on println 
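The generator constructs its writer as new PrintWriter(socket.getOutputStream(), true). The second argument enables autoFlush, but by the JDK contract autoFlush fires only on println, printf, and format; plain write buffers, which is why the loop calls out.flush() explicitly. A minimal sketch of the distinction, using an in-memory stream in place of the socket:

import java.io.{ByteArrayOutputStream, PrintWriter}

object AutoFlushSketch extends App {
  val sink = new ByteArrayOutputStream()
  val out  = new PrintWriter(sink, true)  // autoFlush = true

  out.write("buffered")                   // write() does not trigger autoFlush
  println(sink.size())                    // 0: characters still sit in the writer

  out.println("delivered")                // println() flushes because autoFlush is on
  println(sink.size() > 0)                // true: bytes reached the stream
}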
Example 176
Source File: EdgeListDataSourceTest.scala    From morpheus   with Apache License 2.0 5 votes vote down vote up
package org.opencypher.morpheus.api.io.edgelist

import java.io.{File, PrintWriter}

import org.opencypher.morpheus.testing.MorpheusTestSuite
import org.opencypher.okapi.api.graph.{GraphName, PropertyGraph}
import org.opencypher.okapi.impl.exception
import org.scalatest.BeforeAndAfterAll
import org.scalatestplus.mockito.MockitoSugar

class EdgeListDataSourceTest extends MorpheusTestSuite with BeforeAndAfterAll with MockitoSugar {

  private val edgeList: String =
    s"""
       |0 1
       |0 2
       |1 2
       |1 3
     """.stripMargin

  private val tempFile = File.createTempFile(s"morpheus_edgelist_${System.currentTimeMillis()}", "edgelist")

  private val dataSource = EdgeListDataSource(
    tempFile.getAbsolutePath,
    Map("delimiter" -> " "))

  it("should return a static schema") {
    dataSource.schema(EdgeListDataSource.GRAPH_NAME) should equal(Some(EdgeListDataSource.SCHEMA))
  }

  it("should contain only one graph named 'graph'") {
    dataSource.hasGraph(EdgeListDataSource.GRAPH_NAME) should equal(true)
    dataSource.hasGraph(GraphName("foo")) should equal(false)
  }

  it("should throw when trying to delete") {
    a[exception.UnsupportedOperationException] shouldBe thrownBy {
      dataSource.delete(EdgeListDataSource.GRAPH_NAME)
    }
  }

  it("should have only one graph name") {
    dataSource.graphNames should equal(Set(EdgeListDataSource.GRAPH_NAME))
  }

  it("should throw when trying to store a graph") {
    a[exception.UnsupportedOperationException] shouldBe thrownBy {
      dataSource.store(GraphName("foo"), mock[PropertyGraph])
    }
  }

  it("should return the test graph") {
    val graph = dataSource.graph(EdgeListDataSource.GRAPH_NAME)
    graph.nodes("n").size should equal(4)
    graph.relationships("r").size should equal(4)
  }

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    new PrintWriter(tempFile.getAbsolutePath) { write(edgeList); close() }
  }

  override protected def afterAll(): Unit = {
    tempFile.delete()
    super.afterAll()
  }
} 
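The beforeAll above uses a compact idiom that appears in several examples on this page: the block after new PrintWriter(...) is the body of an anonymous subclass, so write and close run as its constructor. A short sketch, with the caveat spelled out (file name and content are illustrative):

import java.io.PrintWriter

object OneLinerWriteSketch extends App {
  // The braces form an anonymous-subclass constructor body, so `write` and
  // `close` are invoked on the PrintWriter being constructed.
  new PrintWriter("edges.txt") { write("0 1\n0 2\n"); close() }
  // Caveat: if `write` threw, `close()` would never run. Prefer try/finally
  // when the content is anything more than a small constant.
}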
Example 177
Source File: StringReaderSuite.scala    From functadelic   with MIT License 5 votes vote down vote up
package stagedparsec

import lms._
import lms.testutil.FileDiffSpec

import scala.lms.common._
import scala.lms.internal.Effects

import java.io.PrintWriter
import java.io.StringWriter
import java.io.FileOutputStream

trait StringReaderProg extends StringReaderOps with MiscOps {

  //print reader.first
  def testFirst(in: Rep[Array[Char]]): Rep[Char] = {
    val rdr: Rep[StringReader] = StringReader(in)
    rdr.first
  }

  def testAtEnd(in: Rep[Array[Char]], offset: Rep[Int]): Rep[Boolean] = {
    val rdr: Rep[StringReader] = StringReader(in, offset)
    rdr.atEnd
  }

  //compute rdr.rest and print first
  def testRest(in: Rep[Array[Char]]): Rep[Char] = {
    val rdr: Rep[StringReader] = StringReader(in)
    val rst = rdr.rest

    rst.first
  }

  def testIteration(in: Rep[Array[Char]]): Rep[Unit] = {
    val rdr = StringReader(in)
    rdr.foreach { c => println(c) }
  }
}

class StringReaderSuite extends FileDiffSpec {

  val prefix = "test-out/"

  def `StringReader generate code with no diff` = {
    withOutFile(prefix + "stringreader") {
      new StringReaderProg
        with StringReaderOpsExpOpt
        with MiscOpsExp
         with SeqOpsExp
        with MyScalaCompile { self =>

        val codegen = new ScalaGenStringReaderOps with ScalaGenMiscOps { val IR: self.type = self }

        codegen.emitSource(testFirst _, "testFirst", new java.io.PrintWriter(System.out))
        codegen.reset

        val testcFirst = compile(testFirst)
        scala.Console.println(testcFirst("hello".toArray))
        codegen.reset

        codegen.emitSource2(testAtEnd _, "testAtEnd", new java.io.PrintWriter(System.out))
        codegen.reset

        val testcAtEnd = compile2(testAtEnd)
        scala.Console.println(testcAtEnd("hello".toArray, 0))
        scala.Console.println(testcAtEnd("hello".toArray, 6))
        codegen.reset

        codegen.emitSource(testRest _, "testRest", new java.io.PrintWriter(System.out))
        codegen.reset

        val testcRest = compile(testRest)
        scala.Console.println(testcRest("hello".toArray))
        codegen.reset

        codegen.emitSource(testIteration _, "testIteration", new java.io.PrintWriter(System.out))
        codegen.reset

        val testcIteration = compile(testIteration)
        testcIteration("hello".toArray)
        codegen.reset

      }
    }

    assertFileEqualsCheck(prefix + "stringreader")
  }
} 
Example 178
Source File: RecParsersSuite.scala    From functadelic   with MIT License 5 votes vote down vote up
package stagedparsec

import lms._
import lms.testutil.FileDiffSpec

import scala.lms.common._
import scala.lms.internal.Effects

import java.io.PrintWriter
import java.io.StringWriter
import java.io.FileOutputStream

trait RecParsersProg
    extends CharParsers
    with Functions {

  import Parser._

  // the recursive parser under test (recNumber) is elided in this listing
}

class RecParsersSuite extends FileDiffSpec {

  val prefix = "test-out/"

  def `recursive parsers generate code with no diff` = {
    withOutFile(prefix + "rec-parser") {
      new RecParsersProg
        with SeqOpsExp
        with MyScalaCompile { self =>

        val codegen = new ScalaGenCharParsers
            with ScalaGenStruct
            with ScalaGenIfThenElse {
          val IR: self.type = self
        }

        codegen.emitSource(recNumber _, "recNumber", new java.io.PrintWriter(System.out))
        codegen.reset

        val testcRecNumber = compile(recNumber)
        scala.Console.println(testcRecNumber("12345".toArray))
        codegen.reset
      }
      assertFileEqualsCheck(prefix + "rec-parser")
    }
  }
} 
Example 179
Source File: GoldenGateStage.scala    From midas   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
// See LICENSE for license details.

package midas.stage

import firrtl.AnnotationSeq
import firrtl.options.{Phase, PhaseManager, PreservesAll, Shell, Stage, StageMain}
import firrtl.options.phases.DeletedWrapper
import firrtl.options.Viewer.view

import java.io.{StringWriter, PrintWriter}

class GoldenGateStage extends Stage with PreservesAll[Phase] {
  val shell: Shell = new Shell("goldengate") with GoldenGateCli

  private val phases: Seq[Phase] =
    Seq(
        new GoldenGateGetIncludes,
        new firrtl.stage.phases.AddDefaults,
        new firrtl.stage.phases.AddImplicitEmitter,
        new firrtl.stage.phases.Checks,
        new firrtl.stage.phases.AddCircuit,
        new firrtl.stage.phases.AddImplicitOutputFile,
        new midas.stage.GoldenGateCompilerPhase,
        new firrtl.stage.phases.WriteEmitted )
      .map(DeletedWrapper(_))


  def run(annotations: AnnotationSeq): AnnotationSeq = phases.foldLeft(annotations)((a, f) => f.transform(a))
}

object GoldenGateMain extends StageMain(new GoldenGateStage) 
Example 180
Source File: ChannelExcision.scala    From midas   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
// See LICENSE for license details.

package midas.passes.fame

import java.io.{PrintWriter, File}

import firrtl._
import ir._
import Mappers._
import Utils._
import firrtl.passes.MemPortUtils
import annotations.{ModuleTarget, ReferenceTarget, Annotation, SingleTargetAnnotation}

import scala.collection.mutable

class ChannelExcision extends Transform {
  def inputForm = LowForm
  def outputForm = LowForm

  val addedChannelAnnos = new mutable.ArrayBuffer[FAMEModelAnnotation]()
  val pipeChannels = new mutable.HashMap[(ReferenceTarget, ReferenceTarget), String]()


  override def execute(state: CircuitState): CircuitState = {
    val renames = RenameMap()
    val topModule = state.circuit.modules.find(_.name == state.circuit.main).get.asInstanceOf[Module]
    val topTarget = ModuleTarget(state.circuit.main, topModule.name)
    def subfieldTarget(instance: String, field: String) = topTarget.ref(instance).field(field)
    def portTarget(p: Port) = topTarget.ref(p.name)

    def onStmt(addedPorts: mutable.ArrayBuffer[Port])(s: Statement): Statement = s.map(onStmt(addedPorts)) match {
      case c @ Connect(_, lhs @ WSubField(WRef(lhsiname, _, InstanceKind, _), lhspname, _, _),
                          rhs @ WSubField(WRef(rhsiname, _, InstanceKind, _), rhspname, _, _)) =>
        val lhsTarget = subfieldTarget(lhsiname, lhspname)
        val rhsTarget = subfieldTarget(rhsiname, rhspname)
        pipeChannels.get((lhsTarget, rhsTarget)) match {
          case Some(chName) =>
            val srcP = Port(NoInfo, s"${rhsiname}_${rhspname}_source", Output, lhs.tpe)
            val sinkP = Port(NoInfo, s"${lhsiname}_${lhspname}_sink", Input, rhs.tpe)
            addedPorts ++= Seq(srcP, sinkP)
            renames.record(lhsTarget, portTarget(sinkP))
            renames.record(rhsTarget, portTarget(srcP))
            Block(Seq(Connect(NoInfo, lhs, WRef(sinkP)), Connect(NoInfo, WRef(srcP), rhs)))
          case None => c
        }
      case s => s
    }

    def onModule(m: DefModule): DefModule = m match {
      case mod @ Module(_,name,_,_) if name == state.circuit.main =>
        val addedPorts = new mutable.ArrayBuffer[Port]()
        mod.copy(body = mod.body.map(onStmt(addedPorts)))
           .copy(ports = mod.ports ++ addedPorts)
      case _ => m
    }

    // Step 1: Analysis -> build a map from reference targets to channel name
    state.annotations.collect({
      case FAMEChannelConnectionAnnotation(name, PipeChannel(_), Some(srcs), Some(sinks)) =>
        sinks.zip(srcs).foreach({ pipeChannels(_) = name })
    })

    // Step 2: Generate new ports, find and replace connections
    val circuit = state.circuit.map(onModule)

    state.copy(circuit = circuit, renames = Some(renames))
  }
} 
Example 181
Source File: ExtractModel.scala    From midas   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
// See LICENSE for license details.

package midas.passes.fame

import java.io.{PrintWriter, File}

import firrtl._
import ir._
import Mappers._
import Utils._
import firrtl.passes.MemPortUtils
import annotations.{InstanceTarget, Annotation, SingleTargetAnnotation}

import scala.collection.mutable
import mutable.{LinkedHashSet, LinkedHashMap}

class ExtractModel extends Transform {
  def inputForm = HighForm
  def outputForm = HighForm

  def promoteModels(state: CircuitState): CircuitState = {
    val anns = state.annotations.flatMap {
      case a @ FAMEModelAnnotation(it) if (it.module != it.circuit) => Seq(a, PromoteSubmoduleAnnotation(it))
      case a => Seq(a)
    }
    if (anns.toSeq == state.annotations.toSeq) {
      state
    } else {
      promoteModels((new PromoteSubmodule).runTransform(state.copy(annotations = anns)))
    }
  }

  override def execute(state: CircuitState): CircuitState = {
    promoteModels(state)
  }
} 
Example 182
Source File: MyForest.scala    From Scallina   with GNU General Public License v3.0 5 votes vote down vote up
package scala.of.coq.parsercombinators.compiler.customtreehugger

import treehugger.forest._
import definitions._
import treehuggerDSL._
import java.io.PrintWriter
import treehugger.Forest
import treehugger.TreePrinters

trait MyTreePrinters extends TreePrinters { self: Forest =>

  class MyTreePrinter(out: PrintWriter) extends TreePrinter(out) {

    private def symFn[T](tree: Tree, f: Symbol => T, orElse: => T): T = tree.symbol match {
      case null | NoSymbol => orElse
      case sym             => f(sym)
    }
    private def ifSym(tree: Tree, p: Symbol => Boolean) = symFn(tree, p, false)

    import treehugger.api.Modifier
    import Flags._
    override def printTree(tree: Tree) {
      tree match {
        case EmptyTree =>
          print("")

        case classdef: ClassDef if classdef.name == tpnme.ANON_CLASS_NAME =>
          print(classdef.impl)

        case ClassDef(mods, ctormods, name, tparams, vparams, impl) =>
          printlnAnnotations(tree)
          printModifiers(tree, mods)
          val word =
            if (mods.hasTraitFlag) "trait"
            else if (ifSym(tree, _.isModuleClass)) "object"
            else "class"

          print(word, " ", symName(tree, name))
          printTypeParams(tparams)
          if (ctormods != NoMods) {
            print(" ")
            printModifiers(tree, ctormods)
          }
          if (vparams != Nil || modifiersOfFlags(mods.flags).contains(Modifier.`case`))
            printValueParams(vparams, true)

          print(if (mods.isDeferred) " <: "
          else if (impl.parents.isEmpty) ""
          else " extends ", impl)

        case anythingElse => super.printTree(anythingElse)
      }
    }
  }

  override def newTreePrinter(writer: PrintWriter): TreePrinter = new MyTreePrinter(writer)
}

object MyForest extends Forest with MyTreePrinters { self: Forest =>

} 
Example 183
Source File: LdbcParameterizedQueriesTest.scala    From ingraph   with Eclipse Public License 1.0 5 votes vote down vote up
package ingraph.compiler.sql.driver

import java.io.{File, PrintWriter}

import ingraph.compiler.sql.SqlQueries.getQueriesFromFolder
import ingraph.compiler.sql.Util._
import ingraph.compiler.sql.driver.LdbcParameterizedQueriesTest.expectedToSucceed
import ingraph.compiler.sql.{CompilerOptions, GTopExtension, SqlCompiler}
import org.cytosm.common.gtop.GTop
import org.scalatest.FunSuite

class LdbcParameterizedQueriesTest extends FunSuite {
  val gTop: GTop = GTopExtension.loadFromResource("/gtop/ldbc.gtop")

  val ldbcParameterizedQueries: Seq[(String, String)] =
    getQueriesFromFolder("ldbc_snb_implementations/cypher/queries", "cypher")

  val outputPath = "ldbc_snb_implementations/postgres/queries-generated"

  ldbcParameterizedQueries.foreach { case (name, cypherQueryString) =>
    test(name) {
      try {
        val sqlCompiler = SqlCompiler(cypherQueryString, CompilerOptions(gTop = Some(gTop), inlineParameters = false, trimSql = true))

        val sqlQuery = sqlCompiler.sql

        if (LdbcTest.expectedToSucceed.contains(name)) {
          withResources(new PrintWriter(new File(outputPath, name + ".sql").getPath)) {
            _.write(sqlQuery)
          }
        }
      } catch {
        case throwable: Throwable if !expectedToSucceed.contains(name) => cancel(name + " has failed. Only warning!", throwable)
      }
    }
  }
}

object LdbcParameterizedQueriesTest {
  val expectedToSucceed: Set[String] = Set(
    "bi-2",
    "bi-3",
    "bi-4",
    "bi-6",
    "bi-7",
    "bi-8",
    "bi-9",
    "bi-12",
    "bi-14",
    "bi-15",
    "bi-17",
    "bi-18",
    "bi-19",
    "bi-23",
    "bi-24",
    "interactive-complex-1",
    "interactive-complex-2",
    "interactive-complex-3",
    "interactive-complex-4",
    "interactive-complex-5",
    "interactive-complex-6",
    "interactive-complex-8",
    "interactive-complex-9",
    "interactive-complex-11",
    "interactive-complex-12",
    "interactive-short-1",
    "interactive-short-2",
    "interactive-short-3",
    "interactive-short-4",
    "interactive-short-5",
    "interactive-short-6",
    "interactive-short-7",
    "interactive-update-2",
    "interactive-update-3",
    "interactive-update-5",
    "interactive-update-8",
    ""
  )
} 
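withResources comes from the project's ingraph.compiler.sql.Util and its body is not shown here. A plausible minimal loan helper in the same spirit, written against any AutoCloseable (our assumption, not the project's actual code), would be:

import java.io.{File, PrintWriter}

object WithResourcesSketch {
  // Hypothetical stand-in for Util.withResources: run `body` with the
  // resource and close it whether or not the body throws.
  def withResources[R <: AutoCloseable, A](resource: R)(body: R => A): A =
    try body(resource) finally resource.close()

  def main(args: Array[String]): Unit =
    withResources(new PrintWriter(new File("query.sql"))) {
      _.write("SELECT 1;")  // PrintWriter is Closeable, hence AutoCloseable
    }
}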
Example 184
Source File: TestResults.scala    From mimir   with Apache License 2.0 5 votes vote down vote up
package mimir.util

import java.nio.file.Files
import java.nio.file.Paths
import java.nio.charset.Charset
import java.nio.charset.StandardCharsets
import java.io.ByteArrayOutputStream
import java.io.PrintWriter
import org.rogach.scallop.ScallopConf

object TestResults {
  def main(args: Array[String]) {
    val config = new TestResultConfig(args)
    println("running tests....")
    parseTestResults(config.sbtPath(),config.sbtCmd())
  }
  
  def parseTestResults(sbtPath:String = "/opt/local/bin/sbt", sbtCmd:String = "test") = {
    val procOutput = runCommand(Seq(sbtPath,sbtCmd))._2.replaceAll("""\x1b\[[0-9;]*[a-zA-Z]""", "")
    
    val pattern = """(?m)^.*\[info\] Total.*$|^.*\[info\] Finished.*$|^.*\[info\] [\d]+ examp.*$""".r
    
    val header = "test_name,seconds,examples,expectations,failures,errors,skipped\n"
    
    val pattern2 = """\[info\] Total for specification (\w+)\s+\[info\] Finished in (.+)\R\[info\] (.+)\R""".r
    val pattern3 = """([a-zA-Z]+): (?:(\d+) minutes? )?(?:(\d+) seconds?[,:] )?(?:(\d+) ms[,:] )?(\d+) examples?, (?:(\d+) expectations?, )?(\d+) failures?, (\d+) errors?(?:, (\d+) skipped)?""".r
    val string = pattern2.findAllMatchIn(procOutput).map(mat => s"${mat.group(1)}: ${mat.group(2)}: ${mat.group(3)}")
      .map(nline => nline match {
        case pattern3(test_name,minutes,seconds,ms,examples,expectations,failures,errors,skipped) => {
          val allseconds = (minutes match {
            case "" => 0
            case null => 0
            case x => x.toInt*60
          }) + (seconds match {
            case "" => 0
            case null => 0
            case x => x.toInt
          }) +  (ms match {
            case "" => 0.0
            case null => 0.0
            case x => x.toDouble/1000.0
          })
          s"$test_name,$allseconds,$examples,$expectations,$failures,$errors,$skipped"
        }
      }).mkString("\n")
    
    val outStr = header + string
      
    println(outStr)
    Files.write(Paths.get("test_output.csv"), outStr.getBytes(StandardCharsets.UTF_8))
  }
  
  import sys.process._
  def runCommand(cmd: Seq[String]): (Int, String, String) = {
    val stdoutStream = new ByteArrayOutputStream
    val stderrStream = new ByteArrayOutputStream
    val stdoutWriter = new PrintWriter(stdoutStream)
    val stderrWriter = new PrintWriter(stderrStream)
    val exitValue = cmd.!(ProcessLogger(stdoutWriter.println, stderrWriter.println))
    stdoutWriter.close()
    stderrWriter.close()
    (exitValue, stdoutStream.toString, stderrStream.toString)
  }
  
  
}

class TestResultConfig(arguments: Seq[String]) extends ScallopConf(arguments)
{
  val experimental = opt[List[String]]("X", default = Some(List[String]()))
  val sparkHost = opt[String]("sparkHost", descr = "The IP or hostname of the spark master",
    default = Some("spark-master.local"))
  val sparkPort = opt[String]("sparkPort", descr = "The port of the spark master",
    default = Some("7077"))
  val sbtPath = opt[String]("sbtPath", descr = "The path to sbt binary",
    default = Some("/opt/local/bin/sbt"))
  val sbtCmd = opt[String]("sbtCmd", descr = "The sbt command to run",
    default = Some("test"))
} 
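runCommand above funnels the child process's stdout and stderr through PrintWriters into in-memory byte buffers. A short usage sketch (the command is illustrative and assumes a Unix-like echo on the PATH):

import mimir.util.TestResults

object RunCommandDemo extends App {
  val (exit, stdout, stderr) = TestResults.runCommand(Seq("echo", "hello"))
  assert(exit == 0)
  print(stdout)  // "hello\n", captured line by line via the PrintWriter-backed buffer
}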
Example 185
Source File: framian.scala    From blog   with Apache License 2.0 5 votes vote down vote up
import java.io.{File,PrintWriter}
import framian.{Index,Cols}
import framian.csv.{Csv,CsvFormat}

object FramianTest {

  def main(args: Array[String]) = {
    println("Hello")
    val df=Csv.parseFile(new File("../r/cars93.csv")).labeled.toFrame
    println(""+df.rows+" "+df.cols)
    val df2=df.filter(Cols("EngineSize").as[Double])( _ <= 4.0 )
    println(""+df2.rows+" "+df2.cols)
    val df3=df2.map(Cols("Weight").as[Int],"WeightKG")(r=>r.toDouble*0.453592)
    println(""+df3.rows+" "+df3.cols)
    println(df3.colIndex)
    val csv = Csv.fromFrame(new CsvFormat(",", header = true))(df3)
    new PrintWriter("out.csv") { write(csv.toString); close }
    println("Done")
  }

} 
Example 186
Source File: PythonBroadcastSuite.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.api.python

import java.io.{File, PrintWriter}

import scala.io.Source

import org.scalatest.Matchers

import org.apache.spark.{SharedSparkContext, SparkConf, SparkFunSuite}
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.util.Utils

// This test suite uses SharedSparkContext because we need a SparkEnv in order to deserialize
// a PythonBroadcast:
class PythonBroadcastSuite extends SparkFunSuite with Matchers with SharedSparkContext {
  test("PythonBroadcast can be serialized with Kryo (SPARK-4882)") {
    val tempDir = Utils.createTempDir()
    val broadcastedString = "Hello, world!"
    def assertBroadcastIsValid(broadcast: PythonBroadcast): Unit = {
      val source = Source.fromFile(broadcast.path)
      val contents = source.mkString
      source.close()
      contents should be (broadcastedString)
    }
    try {
      val broadcastDataFile: File = {
        val file = new File(tempDir, "broadcastData")
        val printWriter = new PrintWriter(file)
        printWriter.write(broadcastedString)
        printWriter.close()
        file
      }
      val broadcast = new PythonBroadcast(broadcastDataFile.getAbsolutePath)
      assertBroadcastIsValid(broadcast)
      val conf = new SparkConf().set("spark.kryo.registrationRequired", "true")
      val deserializedBroadcast =
        Utils.clone[PythonBroadcast](broadcast, new KryoSerializer(conf).newInstance())
      assertBroadcastIsValid(deserializedBroadcast)
    } finally {
      Utils.deleteRecursively(tempDir)
    }
  }
} 
Example 187
Source File: PageViewGenerator.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
// scalastyle:off println
package org.apache.spark.examples.streaming.clickstream

import java.io.PrintWriter
import java.net.ServerSocket
import java.util.Random


// PageView is defined alongside the generator in the original Spark source;
// its toString renders one click event as a tab-separated line.
class PageView(val url: String, val status: Int, val zipCode: Int, val userID: Int)
    extends Serializable {
  override def toString(): String = {
    "%s\t%s\t%s\t%s\n".format(url, status, zipCode, userID)
  }
}

object PageViewGenerator {
  val pages = Map("http://foo.com/" -> .7,
                  "http://foo.com/news" -> 0.2,
                  "http://foo.com/contact" -> .1)
  val httpStatus = Map(200 -> .95,
                       404 -> .05)
  val userZipCode = Map(94709 -> .5,
                        94117 -> .5)
  val userID = Map((1 to 100).map(_ -> .01): _*)

  def pickFromDistribution[T](inputMap: Map[T, Double]): T = {
    val rand = new Random().nextDouble()
    var total = 0.0
    for ((item, prob) <- inputMap) {
      total = total + prob
      if (total > rand) {
        return item
      }
    }
    inputMap.take(1).head._1 // Shouldn't get here if probabilities add up to 1.0
  }

  def getNextClickEvent(): String = {
    val id = pickFromDistribution(userID)
    val page = pickFromDistribution(pages)
    val status = pickFromDistribution(httpStatus)
    val zipCode = pickFromDistribution(userZipCode)
    new PageView(page, status, zipCode, id).toString()
  }

  def main(args: Array[String]) {
    if (args.length != 2) {
      System.err.println("Usage: PageViewGenerator <port> <viewsPerSecond>")
      System.exit(1)
    }
    val port = args(0).toInt
    val viewsPerSecond = args(1).toFloat
    val sleepDelayMs = (1000.0 / viewsPerSecond).toInt
    val listener = new ServerSocket(port)
    println(s"Listening on port: $port")

    while (true) {
      val socket = listener.accept()
      new Thread() {
        override def run(): Unit = {
          println(s"Got client connected from: ${socket.getInetAddress}")
          val out = new PrintWriter(socket.getOutputStream(), true)

          while (true) {
            Thread.sleep(sleepDelayMs)
            out.write(getNextClickEvent())
            out.flush()
          }
          socket.close()
        }
      }.start()
    }
  }
}
// scalastyle:on println 
Example 188
Source File: CornichonError.scala    From cornichon   with Apache License 2.0 5 votes vote down vote up
package com.github.agourlay.cornichon.core

import java.io.{ PrintWriter, StringWriter }

import cats.data.EitherT
import cats.syntax.either._
import cats.instances.future._

import scala.concurrent.{ ExecutionContext, Future }
import scala.util.control.NoStackTrace

trait CornichonError {
  def baseErrorMessage: String
  val causedBy: List[CornichonError] = Nil

  lazy val renderedMessage: String = {
    if (causedBy.isEmpty)
      baseErrorMessage
    else
      s"""$baseErrorMessage
      |caused by:
      |${causedBy.iterator.map(c => c.renderedMessage).mkString("\nand\n")}""".stripMargin
  }

  def toException = CornichonException(renderedMessage)
}

object CornichonError {
  def genStacktrace(exception: Throwable): String = {
    val sw = new StringWriter()
    val pw = new PrintWriter(sw)
    exception.printStackTrace(pw)
    sw.toString
  }

  def fromString(error: String): CornichonError =
    BasicError(error)

  def fromThrowable(exception: Throwable): CornichonError =
    StepExecutionError(exception)

  def catchThrowable[A](f: => A): Either[CornichonError, A] =
    Either.catchNonFatal(f).leftMap(fromThrowable)

  implicit class fromEither[A](e: Either[CornichonError, A]) {
    def valueUnsafe: A = e.fold(e => throw e.toException, identity)
    def futureEitherT(implicit ec: ExecutionContext): EitherT[Future, CornichonError, A] = EitherT.fromEither[Future](e)
  }
}

case class StepExecutionError(e: Throwable) extends CornichonError {
  lazy val baseErrorMessage = s"exception thrown ${CornichonError.genStacktrace(e)}"
}

case class BasicError(error: String) extends CornichonError {
  lazy val baseErrorMessage = error
}

case class CornichonException(m: String) extends Exception with NoStackTrace {
  override def getMessage = m
} 
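The StringWriter + PrintWriter pair in genStacktrace is the standard JDK technique for rendering a Throwable's stack trace as a String: printStackTrace writes into the PrintWriter, which targets the in-memory StringWriter. A standalone sketch of the same technique:

import java.io.{PrintWriter, StringWriter}

object StacktraceSketch extends App {
  def render(t: Throwable): String = {
    val sw = new StringWriter()
    t.printStackTrace(new PrintWriter(sw))  // no buffering layer over a Writer, so no flush needed
    sw.toString
  }

  println(render(new RuntimeException("boom")))
}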
Example 189
Source File: Logging.scala    From akka-grpc   with Apache License 2.0 5 votes vote down vote up
package akka.grpc.gen

import java.io.PrintWriter

// specific to gen so that the build tools can implement their own
trait Logger {
  def debug(text: String): Unit
  def info(text: String): Unit
  def warn(text: String): Unit
  def error(text: String): Unit
}


class ReflectiveLogger(logger: Object) extends Logger {
  private val debugMethod = logger.getClass.getMethod("debug", classOf[String])
  private val infoMethod = logger.getClass.getMethod("info", classOf[String])
  private val warnMethod = logger.getClass.getMethod("warn", classOf[String])
  private val errorMethod = logger.getClass.getMethod("error", classOf[String])

  def debug(text: String): Unit = debugMethod.invoke(logger, text)
  def info(text: String): Unit = infoMethod.invoke(logger, text)
  def warn(text: String): Unit = warnMethod.invoke(logger, text)
  def error(text: String): Unit = errorMethod.invoke(logger, text)
} 
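The file imports PrintWriter, although the snippet only shows the reflective adapter. A PrintWriter-backed implementation of the same Logger trait (our sketch, not part of akka-grpc) could look like this:

import java.io.PrintWriter

// Hypothetical Logger writing levelled lines through a PrintWriter; assumes
// the `Logger` trait above is in scope. println triggers autoFlush when the
// writer is constructed with autoFlush = true.
class PrintWriterLogger(out: PrintWriter) extends Logger {
  private def log(level: String, text: String): Unit =
    out.println(s"[$level] $text")

  def debug(text: String): Unit = log("debug", text)
  def info(text: String): Unit  = log("info", text)
  def warn(text: String): Unit  = log("warn", text)
  def error(text: String): Unit = log("error", text)
}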
Example 190
Source File: LogEvent.scala    From mist   with Apache License 2.0 5 votes vote down vote up
package io.hydrosphere.mist.core.logging

import java.io.{PrintWriter, StringWriter}
import java.time.format.DateTimeFormatter
import java.time._

case class LogEvent(
  from: String,
  message: String,
  timeStamp: Long,
  level: Int,
  errTrace: Option[String]) {

  def mkString: String = {
    val date = formatDate
    val error = errTrace.map(s => "\n" + s).getOrElse("")
    s"${typedLevel.name} $date [$from] $message$error"
  }

  private def formatDate: String = {
    val inst = Instant.ofEpochMilli(timeStamp)
    val date = LocalDateTime.ofInstant(inst, ZoneOffset.UTC)
    DateTimeFormatter.ISO_LOCAL_DATE_TIME.format(date)
  }

  def typedLevel: Level = Level.fromInt(level)
}

object LogEvent {

  def mkDebug(from: String, message: String, ts: Long = mkTimestamp): LogEvent =
    nonFatal(Level.Debug, from, message, ts)

  def mkInfo(from: String, message: String, ts: Long = mkTimestamp): LogEvent =
    nonFatal(Level.Info, from, message, ts)

  def mkWarn(from: String, message: String, ts: Long = mkTimestamp): LogEvent =
    nonFatal(Level.Warn, from, message, ts)

  def mkError(from: String, message: String, t: Throwable): LogEvent =
    mkError(from, message, Some(t))

  def mkError(from: String, message: String, optT: Option[Throwable] = None, ts: Long = mkTimestamp): LogEvent = {
    val errTrace = optT.map(ex => {
      val writer = new StringWriter()
      ex.printStackTrace(new PrintWriter(writer))
      writer.toString
    })

    LogEvent(from, message, ts, Level.Error.value, errTrace)
  }

  def nonFatal(level: Level, from: String, message: String, ts: Long): LogEvent =
    LogEvent(from, message, ts, level.value, None)

  private def mkTimestamp: Long =
    LocalDateTime.now(ZoneOffset.UTC).toInstant(ZoneOffset.UTC).toEpochMilli
} 
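mkError above captures the throwable's stack trace with the same StringWriter + PrintWriter technique and stores it in the event. A short usage sketch (names are illustrative; assumes the definitions above are on the classpath):

import io.hydrosphere.mist.core.logging.LogEvent

object LogEventDemo extends App {
  val event = LogEvent.mkError("worker-1", "job failed", new RuntimeException("boom"))
  // mkString renders: level name, ISO timestamp, [source], message, then the captured trace
  println(event.mkString)
}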
Example 191
Source File: PersistentDMap.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package k.grid.dmap.impl.persistent

import java.io.{File, FileNotFoundException, PrintWriter}

import com.typesafe.scalalogging.LazyLogging
import k.grid.{Config, Grid}
import k.grid.dmap.api._
import play.api.libs.json.Json

import scala.util.{Failure, Success, Try}
import scala.concurrent.duration._
import json.MapDataJsonProtocol._
import scala.concurrent.ExecutionContext.Implicits.global

object PersistentDMap extends DMapFacade {
  override def masterType: DMapActorInit = DMapActorInit(classOf[PersistentMaster], "PersistentMaster")

  override def slaveType: DMapActorInit = DMapActorInit(classOf[PersistentSlave], "PersistentSlave")
}

class PersistentMaster extends DMapMaster {
  override val facade: DMapFacade = PersistentDMap
  override def onStart: Unit = {}
}

class PersistentSlave extends DMapSlave with LazyLogging {

  Grid.system.scheduler.schedule(5.seconds, 1.second, self, WriteData)

  case class MapHolder(m: Map[String, SettingsValue], timestamp: Long)

  case object NewData extends DMapMessage {
    override def act: Unit = {
      hasNewData = true
    }
  }

  case object WriteData extends DMapMessage {
    override def act: Unit = {
      val m = facade.sm
      if (hasNewData) {
        writeMap(MapData(m, lastTimestamp))
        hasNewData = false
      }

    }
  }

  var hasNewData: Boolean = false

  private val dataFile = new File(s"${Grid.persistentDmapDir}/${Config.clusterName}")

  def readMap: Option[MapData] = {
    val content = Try {
      val src = scala.io.Source.fromFile(dataFile)
      val mData = Json.parse(src.getLines().mkString("\n")).as[MapData]
      src.close()
      mData
    } match {
      case Success(c)                                          => Some(c)
      case Failure(e) if e.isInstanceOf[FileNotFoundException] => None
      case Failure(e) => {
        logger.error(e.getMessage, e)
        None
      }
    }
    content
  }

  def writeMap(md: MapData) = {
    val content = Json.stringify(Json.toJson(md))
    new PrintWriter(dataFile) { write(content); close }
  }

  override val facade: DMapFacade = PersistentDMap
  override def onStart: Unit = {

    if (Grid.isController) {
      import java.io.File
      logger.info(s" *** Will use data dir: ${Grid.persistentDmapDir}")
      Try(new File(Grid.persistentDmapDir).mkdir())

      val mdOpt = readMap

      mdOpt.foreach { md =>
        lastTimestamp = md.timestamp
        facade.sm = md.m
      }
    }
  }

  override protected def onUpdate(oldMap: Map[String, SettingsValue],
                                  newMap: Map[String, SettingsValue],
                                  timestamp: Long): Unit = {
    if (Grid.isController)
      self ! NewData
  }
} 
Example 192
Source File: JsonEncoderSpec.scala    From logback-json-logger   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.play.logging
import java.io.{PrintWriter, StringWriter}
import java.net.InetAddress

import ch.qos.logback.classic.Level
import ch.qos.logback.classic.spi.{ILoggingEvent, ThrowableProxy}
import ch.qos.logback.core.ContextBase
import org.apache.commons.lang3.time.FastDateFormat
import org.mockito.Mockito.when
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import org.scalatestplus.mockito.MockitoSugar
import play.api.libs.json.{JsLookupResult, Json}

import scala.collection.JavaConverters._

class JsonEncoderSpec extends AnyWordSpec with Matchers with MockitoSugar {

  "Json-encoded message" should {
    "contain all required fields" in {

      val jsonEncoder = new JsonEncoder()
      val event       = mock[ILoggingEvent]

      when(event.getTimeStamp).thenReturn(1)
      when(event.getLevel).thenReturn(Level.INFO)
      when(event.getThreadName).thenReturn("my-thread")
      when(event.getFormattedMessage).thenReturn("my-message")
      when(event.getLoggerName).thenReturn("logger-name")
      when(event.getMDCPropertyMap).thenReturn(Map("myMdcProperty" -> "myMdcValue").asJava)

      val testException = new Exception("test-exception")
      val stringWriter  = new StringWriter()
      testException.printStackTrace(new PrintWriter(stringWriter))
      when(event.getThrowableProxy).thenReturn(new ThrowableProxy(testException))

      jsonEncoder.setContext {
        val ctx = new ContextBase()
        ctx.putProperty("myKey", "myValue")
        ctx
      }

      val result       = new String(jsonEncoder.encode(event), "UTF-8")
      val resultAsJson = Json.parse(result)

      (resultAsJson \ "app").asString           shouldBe "my-app-name"
      (resultAsJson \ "hostname").asString      shouldBe InetAddress.getLocalHost.getHostName
      (resultAsJson \ "timestamp").asString     shouldBe FastDateFormat.getInstance("yyyy-MM-dd HH:mm:ss.SSSZZ").format(1)
      (resultAsJson \ "message").asString       shouldBe "my-message"
      (resultAsJson \ "exception").asString     should include("test-exception")
      (resultAsJson \ "exception").asString     should include("java.lang.Exception")
      (resultAsJson \ "exception").asString     should include(stringWriter.toString)
      (resultAsJson \ "logger").asString        shouldBe "logger-name"
      (resultAsJson \ "thread").asString        shouldBe "my-thread"
      (resultAsJson \ "level").asString         shouldBe "INFO"
      (resultAsJson \ "mykey").asString         shouldBe "myValue"
      (resultAsJson \ "mymdcproperty").asString shouldBe "myMdcValue"

    }
  }

  implicit class JsLookupResultOps(jsLookupResult: JsLookupResult) {
    def asString: String = jsLookupResult.get.as[String]
  }

} 
Example 193
Source File: PythonBroadcastSuite.scala    From spark1.52   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.api.python

import scala.io.Source

import java.io.{PrintWriter, File}

import org.scalatest.Matchers

import org.apache.spark.{SharedSparkContext, SparkConf, SparkFunSuite}
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.util.Utils

// This test suite uses SharedSparkContext because we need a SparkEnv in order to deserialize
// a PythonBroadcast:
class PythonBroadcastSuite extends SparkFunSuite with Matchers with SharedSparkContext {
  test("PythonBroadcast can be serialized with Kryo (SPARK-4882)") {
    val tempDir = Utils.createTempDir()
    val broadcastedString = "Hello, world!"
    def assertBroadcastIsValid(broadcast: PythonBroadcast): Unit = {
      val source = Source.fromFile(broadcast.path)
      val contents = source.mkString
      source.close()
      contents should be (broadcastedString)
    }
    try {
      val broadcastDataFile: File = {
        val file = new File(tempDir, "broadcastData")
        val printWriter = new PrintWriter(file)
        printWriter.write(broadcastedString)
        printWriter.close()
        file
      }
      val broadcast = new PythonBroadcast(broadcastDataFile.getAbsolutePath)
      assertBroadcastIsValid(broadcast)
      val conf = new SparkConf().set("spark.kryo.registrationRequired", "true")
      val deserializedBroadcast =
        Utils.clone[PythonBroadcast](broadcast, new KryoSerializer(conf).newInstance())
      assertBroadcastIsValid(deserializedBroadcast)
    } finally {
      Utils.deleteRecursively(tempDir)
    }
  }
} 
Example 194
Source File: PageViewGenerator.scala    From spark1.52   with Apache License 2.0 5 votes vote down vote up
// scalastyle:off println
package org.apache.spark.examples.streaming.clickstream

import java.net.ServerSocket
import java.io.PrintWriter
import util.Random


// PageView is defined alongside the generator in the original Spark source;
// its toString renders one click event as a tab-separated line.
class PageView(val url: String, val status: Int, val zipCode: Int, val userID: Int)
    extends Serializable {
  override def toString(): String = {
    "%s\t%s\t%s\t%s\n".format(url, status, zipCode, userID)
  }
}

object PageViewGenerator {
  val pages = Map("http://foo.com/"        -> .7,
                  "http://foo.com/news"    -> 0.2,
                  "http://foo.com/contact" -> .1)
  val httpStatus = Map(200 -> .95,
                       404 -> .05)
  val userZipCode = Map(94709 -> .5,
                        94117 -> .5)
  val userID = Map((1 to 100).map(_ -> .01) : _*)

  def pickFromDistribution[T](inputMap : Map[T, Double]) : T = {
    val rand = new Random().nextDouble()
    var total = 0.0
    for ((item, prob) <- inputMap) {
      total = total + prob
      if (total > rand) {
        return item
      }
    }
    inputMap.take(1).head._1 // Shouldn't get here if probabilities add up to 1.0
  }

  def getNextClickEvent() : String = {
    val id = pickFromDistribution(userID)
    val page = pickFromDistribution(pages)
    val status = pickFromDistribution(httpStatus)
    val zipCode = pickFromDistribution(userZipCode)
    new PageView(page, status, zipCode, id).toString()
  }

  def main(args : Array[String]) {
    if (args.length != 2) {
      System.err.println("Usage: PageViewGenerator <port> <viewsPerSecond>")
      System.exit(1)
    }
    val port = args(0).toInt
    val viewsPerSecond = args(1).toFloat
    val sleepDelayMs = (1000.0 / viewsPerSecond).toInt
    val listener = new ServerSocket(port)
    println("Listening on port: " + port)

    while (true) {
      val socket = listener.accept()
      new Thread() {
        override def run(): Unit = {
          println("Got client connected from: " + socket.getInetAddress)
          val out = new PrintWriter(socket.getOutputStream(), true)

          while (true) {
            Thread.sleep(sleepDelayMs)
            out.write(getNextClickEvent())
            out.flush()
          }
          socket.close()
        }
      }.start()
    }
  }
}
// scalastyle:on println 
Example 195
Source File: TestStandard.scala    From banditsbook-scala   with MIT License 5 votes vote down vote up
package com.github.everpeace.banditsbook.algorithm.softmax

import java.io.{File, PrintWriter}

import breeze.linalg._
import breeze.stats.MeanAndVariance
import com.github.everpeace.banditsbook.arm._
import com.github.everpeace.banditsbook.testing_framework.TestRunner
import com.github.everpeace.banditsbook.testing_framework.TestRunner._
import com.typesafe.config.ConfigFactory

import scala.collection.immutable.Seq

object TestStandard extends _TestStandard with App {
  run()
}

trait _TestStandard {
  def run() = {
//    implicit val randBasis = RandBasis.mt0

    val conf = ConfigFactory.load()
    val baseKey = "banditsbook.algorithm.softmax.test-standard"
    val (_means, Some(τs), horizon, nSims, outDir) = readConfig(conf, baseKey, Some("τs"))
    val means = shuffle(_means)
    val arms = Seq(means:_*).map(μ => BernoulliArm(μ))

    val outputPath = new File(outDir, "test-standard-softmax-results.csv")
    val file = new PrintWriter(outputPath.toString)
    file.write("tau, sim_num, step, chosen_arm, reward, cumulative_reward\n")
    try {
      println("-------------------------------")
      println("Standard Softmax Algorithm")
      println("-------------------------------")
      println(s"   arms = ${means.map("(μ="+_+")").mkString(", ")} (Best Arm = ${argmax(means)})")
      println(s"horizon = $horizon")
      println(s"  nSims = $nSims")
      println(s"      τ = (${τs.mkString(",")})")
      println("")

      val meanOfFinalRewards = scala.collection.mutable.Map.empty[Double, MeanAndVariance]
      val res = for {
        τ <- τs
      } yield {
        println(s"starts simulation on τ=$τ.")

        val algo = Standard.Algorithm(τ)
        val res = TestRunner.run(algo, arms, nSims, horizon)

        for {
          sim <- 0 until nSims
        } {
          val st = sim * horizon
          val end = ((sim + 1) * horizon) - 1
        }
        val finalRewards = res.cumRewards((horizon-1) until (nSims * horizon, horizon))
        import breeze.stats._
        val meanAndVar = meanAndVariance(finalRewards)
        meanOfFinalRewards += τ -> meanAndVar
        println(s"reward stats: ${TestRunner.toString(meanAndVar)}")

        res.rawResults.valuesIterator.foreach{ v =>
          file.write(s"${Seq(τ, v._1, v._2, v._3, v._4, v._5).mkString(",")}\n")
        }
        println(s"finished simulation on τ=$τ.")
      }
      println("")
      println(s"reward stats summary")
      println(s"${meanOfFinalRewards.iterator.toSeq.sortBy(_._1).map(p => (s"τ=${p._1}", TestRunner.toString(p._2))).mkString("\n")}")
    } finally {
      file.close()
      println("")
      println(s"results are written to ${outputPath}")
    }
  }
} 
Example 196
Source File: TestExp3.scala    From banditsbook-scala   with MIT License 5 votes vote down vote up
package com.github.everpeace.banditsbook.algorithm.exp3

import java.io.{File, PrintWriter}

import breeze.linalg._
import breeze.stats.MeanAndVariance
import com.github.everpeace.banditsbook.arm._
import com.github.everpeace.banditsbook.testing_framework.TestRunner
import com.github.everpeace.banditsbook.testing_framework.TestRunner._
import com.typesafe.config.ConfigFactory

import scala.collection.immutable.Seq

object TestExp3 extends _TestExp3 with App {
  run()
}

trait _TestExp3{
  def run() = {
//    implicit val randBasis = RandBasis.mt0

    val conf = ConfigFactory.load()
    val baseKey = "banditsbook.algorithm.exp3.test-exp3"
    val (_means, Some(γs), horizon, nSims, outDir) = readConfig(conf, baseKey, Some("γs"))
    val means = shuffle(_means)
    val arms = Seq(means:_*).map(μ => BernoulliArm(μ))

    val outputPath = new File(outDir, "test-exp3-results.csv")
    val file = new PrintWriter(outputPath.toString)
    file.write("gamma, sim_num, step, chosen_arm, reward, cumulative_reward\n")
    try {
      println("-------------------------------")
      println("EXP3 Algorithm")
      println("-------------------------------")
      println(s"   arms = ${means.map("(μ="+_+")").mkString(", ")} (Best Arm = ${argmax(means)})")
      println(s"horizon = $horizon")
      println(s"  nSims = $nSims")
      println(s"      γ = (${γs.mkString(",")})")
      println("")

      val meanOfFinalRewards = scala.collection.mutable.Map.empty[Double, MeanAndVariance]
      val res = for {
        γ <- γs
      } yield {
        println(s"starts simulation on γ=$γ.")

        val algo = Exp3.Algorithm(γ)
        val res = TestRunner.run(algo, arms, nSims, horizon)

        for {
          sim <- 0 until nSims
        } {
          val st = sim * horizon
          val end = ((sim + 1) * horizon) - 1
        }
        val finalRewards = res.cumRewards((horizon-1) until (nSims * horizon, horizon))
        import breeze.stats._
        val meanAndVar = meanAndVariance(finalRewards)
        meanOfFinalRewards += γ -> meanAndVar
        println(s"reward stats: ${TestRunner.toString(meanAndVar)}")

        res.rawResults.valuesIterator.foreach{ v =>
          file.write(s"${Seq(γ, v._1, v._2, v._3, v._4, v._5).mkString(",")}\n")
        }
        println(s"finished simulation on γ=$γ.")
      }
      println("")
      println(s"reward stats summary")
      println(s"${meanOfFinalRewards.iterator.toSeq.sortBy(_._1).toSeq.sortBy(_._1).map(p => (s"γ=${p._1}", TestRunner.toString(p._2))).mkString("\n")}")
    } finally {
      file.close()
      println("")
      println(s"results are written to ${outputPath}")
    }
  }
} 
Example 197
Source File: TestUCB1.scala    From banditsbook-scala   with MIT License 5 votes vote down vote up
package com.github.everpeace.banditsbook.algorithm.ucb

import java.io.{File, PrintWriter}

import breeze.linalg._
import com.github.everpeace.banditsbook.arm._
import com.github.everpeace.banditsbook.testing_framework.TestRunner
import com.github.everpeace.banditsbook.testing_framework.TestRunner._
import com.typesafe.config.ConfigFactory

import scala.collection.immutable.Seq

object TestUCB1 extends _TestUCB1 with App{
  run()
}

trait _TestUCB1 {
  def run() = {
//    implicit val randBasis = RandBasis.mt0

    val conf = ConfigFactory.load()
    val baseKey = "banditsbook.algorithm.ucb.test-ucb1"
    val (_means, _, horizon, nSims, outDir) = readConfig(conf, baseKey)
    val means = shuffle(_means)
    val arms = Seq(means:_*).map(μ => BernoulliArm(μ))


    val outputPath = new File(outDir, "test-ucb1-results.csv")
    val file = new PrintWriter(outputPath.toString)
    file.write("sim_num, step, chosen_arm, reward, cumulative_reward\n")
    try {
      println("-------------------------------")
      println("UCB1 Algorithm")
      println("-------------------------------")
      println(s"   arms = ${means.map("(μ="+_+")").mkString(", ")} (Best Arm = ${argmax(means)})")
      println(s"horizon = $horizon")
      println(s"  nSims = $nSims")
      println( "The algorithm has no hyper parameters.")
      println("")

      println(s"starts simulation.")

      val algo = UCB1.Algorithm
      val res = TestRunner.run(algo, arms, nSims, horizon)

      for {sim <- 0 until nSims} {
        val st = sim * horizon
        val end = ((sim + 1) * horizon) - 1
      }
      val finalRewards = res.cumRewards((horizon-1) until (nSims * horizon, horizon))
      import breeze.stats._
      val meanAndVar = meanAndVariance(finalRewards)
      println(s"reward stats: ${TestRunner.toString(meanAndVar)}")

      res.rawResults.valuesIterator.foreach{ v =>
        file.write(s"${Seq(v._1, v._2, v._3, v._4, v._5).mkString(",")}\n")
      }
      println(s"finished simulation.")
    } finally {
      file.close()
      println("")
      println(s"results are written to ${outputPath}")
    }
  }
} 
Example 198
Source File: TestStandard.scala    From banditsbook-scala   with MIT License 5 votes vote down vote up
package com.github.everpeace.banditsbook.algorithm.epsilon_greedy

import java.io.{File, PrintWriter}

import breeze.linalg._
import breeze.stats.MeanAndVariance
import com.github.everpeace.banditsbook.arm._
import com.github.everpeace.banditsbook.testing_framework.TestRunner
import com.github.everpeace.banditsbook.testing_framework.TestRunner._
import com.typesafe.config.ConfigFactory

import scala.collection.immutable.Seq

object TestStandard extends _TestStandard with App {
  run()
}

trait _TestStandard {
  def run() = {
//    implicit val randBasis = RandBasis.mt0

    val conf = ConfigFactory.load()
    val baseKey = "banditsbook.algorithm.epsilon_greedy.test-standard"
    val (_means, Some(εs), horizon, nSims, outDir) = readConfig(conf, baseKey, Some("εs"))
    val means = shuffle(_means)
    val arms = Seq(means:_*).map(μ => BernoulliArm(μ))

    val outputPath = new File(outDir, "test-standard-epsilon-greedy-results.csv")
    val file = new PrintWriter(outputPath.toString)
    file.write("epsilon, sim_num, step, chosen_arm, reward, cumulative_reward\n")
    try {
      println("---------------------------------")
      println("Standard Epsilon Greedy Algorithm")
      println("---------------------------------")
      println(s"   arms = ${means.map("(μ="+_+")").mkString(", ")} (Best Arm = ${argmax(means)})")
      println(s"horizon = $horizon")
      println(s"  nSims = $nSims")
      println(s"      ε = (${εs.mkString(",")})")
      println("")

      val meanOfFinalRewards = scala.collection.mutable.Map.empty[Double, MeanAndVariance]
      val res = for {
        ε <- εs
      } yield {
        println(s"starts simulation on ε=$ε.")

        val algo = Standard.Algorithm(ε)
        val res = TestRunner.run(algo, arms, nSims, horizon)

        for {
          sim <- 0 until nSims
        } {
          val st = sim * horizon
          val end = ((sim + 1) * horizon) - 1
        }
        val finalRewards = res.cumRewards((horizon-1) until (nSims * horizon, horizon))
        import breeze.stats._
        val meanAndVar = meanAndVariance(finalRewards)
        meanOfFinalRewards += ε -> meanAndVar
        println(s"reward stats: ${TestRunner.toString(meanAndVar)}")

        res.rawResults.valuesIterator.foreach{ v =>
          file.write(s"${Seq(ε.toString, v._1.toString, v._2.toString, v._3.toString, v._4.toString, v._5.toString).mkString(",")}\n")
        }
        println(s"finished simulation on ε=$ε.")
      }
      println("")
      println(s"reward stats summary")
      println(s"${meanOfFinalRewards.iterator.toSeq.sortBy(_._1).map(p => (s"ε = ${p._1}", TestRunner.toString(p._2))).mkString("\n")}")
    } finally {
      file.close()
      println("")
      println(s"results are written to ${outputPath}")
    }
  }
}