java.io.FileWriter Scala Examples

The following examples show how to use java.io.FileWriter in Scala. Each example is taken from an open-source project; the project name and license are listed above each example so you can trace it back to the original source file.
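
Before the project examples, here is a minimal sketch of the two FileWriter constructors that recur throughout this page: the single-argument form, which overwrites the target file, and the two-argument form, whose boolean flag enables append mode. The file name demo.txt is only a placeholder.

import java.io.{File, FileWriter}

object FileWriterDemo {
  def main(args: Array[String]): Unit = {
    val file = new File("demo.txt")

    // Overwrite mode: creates the file, or truncates it if it already exists
    val writer = new FileWriter(file)
    try writer.write("first line" + System.lineSeparator())
    finally writer.close()

    // Append mode: the second constructor argument preserves existing content
    val appender = new FileWriter(file, true)
    try appender.write("second line" + System.lineSeparator())
    finally appender.close()
  }
}

Closing the writer in a finally block, as several of the examples below do, ensures the underlying file handle is released even if the write fails.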
Example 1
Source File: RuntimeConfigGenerationPhase.scala    From midas   with BSD 3-Clause "New" or "Revised" License
// See LICENSE for license details.

package midas.stage

import midas.{OutputDir}
import midas.widgets.{SerializableBridgeAnnotation}

import freechips.rocketchip.util.{ParsedInputNames}

import firrtl.{Transform, CircuitState, AnnotationSeq}
import firrtl.options.{Phase, TargetDirAnnotation}

import java.io.{File, FileWriter, Writer}
import logger._

class RuntimeConfigGenerationPhase extends Phase with ConfigLookup {

  def transform(annotations: AnnotationSeq): AnnotationSeq = {
    val targetDir = annotations.collectFirst({ case TargetDirAnnotation(targetDir) => new File(targetDir) }).get
    val configPackage = annotations.collectFirst({ case ConfigPackageAnnotation(p) => p }).get
    val configString  = annotations.collectFirst({ case ConfigStringAnnotation(s) => s }).get
    val runtimeConfigName  = annotations.collectFirst({ case RuntimeConfigNameAnnotation(s) => s }).get

    val pNames = ParsedInputNames("UNUSED", "UNUSED", "UNUSED", configPackage, configString)

    implicit val p = getParameters(pNames).alterPartial({
      case OutputDir => targetDir
    })

    val fasedBridgeAnnos = annotations.collect({
      case anno @ SerializableBridgeAnnotation(_,_,className,_)
        if className == classOf[midas.models.FASEDMemoryTimingModel].getName => anno
    })
    // Since presently all memory models share the same runtime configuration, grab only the first
    // FASED BridgeAnnotation and use it to elaborate a memory model
    fasedBridgeAnnos.headOption.map({ anno =>
      // Here we're spoofing elaboration that occurs in FPGATop, which assumes ExtractBridges has been run
      lazy val memModel = anno.toIOAnnotation("").elaborateWidget.asInstanceOf[midas.models.FASEDMemoryTimingModel]
      chisel3.Driver.elaborate(() => memModel)
      memModel.getSettings(runtimeConfigName)
    })
    annotations
  }
} 
Example 2
Source File: DiskBlockManagerSuite.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.storage

import java.io.{File, FileWriter}

import scala.language.reflectiveCalls

import org.mockito.Mockito.{mock, when}
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.util.Utils
// DiskBlockManager manages and maintains the mapping between logical blocks and the physical blocks stored on disk.
// In general, a logical block is mapped to a physical file whose name is derived from its BlockId.
class DiskBlockManagerSuite extends SparkFunSuite with BeforeAndAfterEach with BeforeAndAfterAll {
  private val testConf = new SparkConf(false)
  private var rootDir0: File = _
  private var rootDir1: File = _
  private var rootDirs: String = _

  val blockManager = mock(classOf[BlockManager])
  when(blockManager.conf).thenReturn(testConf)
  // DiskBlockManager creates and maintains the logical mapping between logical blocks and physical disk locations;
  // by default, a block maps to a single file whose name is given by its BlockId.
  var diskBlockManager: DiskBlockManager = _

  override def beforeAll() {
    super.beforeAll()
    rootDir0 = Utils.createTempDir()
    rootDir1 = Utils.createTempDir()
    rootDirs = rootDir0.getAbsolutePath + "," + rootDir1.getAbsolutePath
  }

  override def afterAll() {
    super.afterAll()
    Utils.deleteRecursively(rootDir0)
    Utils.deleteRecursively(rootDir1)
  }

  override def beforeEach() {
    val conf = testConf.clone
    conf.set("spark.local.dir", rootDirs)
    diskBlockManager = new DiskBlockManager(blockManager, conf)
  }

  override def afterEach() {
    diskBlockManager.stop()
  }

  test("basic block creation") {//基本块的创建
    val blockId = new TestBlockId("test")
    // DiskBlockManager creates and maintains the logical mapping between logical blocks and physical disk locations;
    // by default, a block maps to a single file whose name is given by its BlockId.
    val newFile = diskBlockManager.getFile(blockId)
    writeToFile(newFile, 10)
    assert(diskBlockManager.containsBlock(blockId))
    newFile.delete()
    assert(!diskBlockManager.containsBlock(blockId))
  }

  test("enumerating blocks") {//枚举块
    val ids = (1 to 100).map(i => TestBlockId("test_" + i))
    val files = ids.map(id => diskBlockManager.getFile(id))
    files.foreach(file => writeToFile(file, 10))
    assert(diskBlockManager.getAllBlocks.toSet === ids.toSet)
  }

  def writeToFile(file: File, numBytes: Int) {
    val writer = new FileWriter(file, true)
    for (i <- 0 until numBytes) writer.write(i)
    writer.close()
  }
} 
Example 3
Source File: FileWrite.scala    From spark1.52   with Apache License 2.0
package scalaDemo

import java.io.{File, FileWriter}
import java.util.Random

import com.google.common.base.Charsets.UTF_8
import com.google.common.io.Files
import org.apache.spark.util.Utils

object FileWrite {
  def main(args: Array[String]) {


    val outFile = File.createTempFile("test-load-spark-properties", "test")
    Files.write("spark.test.fileNameLoadA true\n" +
      "spark.test.fileNameLoadB 1\n", outFile, UTF_8)


    val writer = new FileWriter(new File("D:\\eclipse44_64\\workspace\\spark1.5\\examples\\sample_age_data.txt"), false)
    val rand = new Random()
    for (i <- 1 to 10000) {
      writer.write(i + " " + rand.nextInt(100))
      writer.write(System.getProperty("line.separator"))
    }
    writer.flush()
    writer.close()
  }
} 
Example 4
Source File: PeopleInfoFileGenerator.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.examples.demoIBM

import java.io.File
import java.util.Random
import java.io.FileWriter

object PeopleInfoFileGenerator {
  def main(args: Array[String]) {
    val writer = new FileWriter(new File("D:\\eclipse44_64\\workspace\\spark1.5\\examples\\sample_people_info.txt"), false)
    val rand = new Random()
    for (i <- 1 to 10000) {
      var height = rand.nextInt(220)
      if (height < 50) {
        height = height + 50
      }
      var gender = getRandomGender
      if (height < 100 && gender == "M")
        height = height + 100
      if (height < 100 && gender == "F")
        height = height + 50
      writer.write(i + " " + getRandomGender + " " + height)
      writer.write(System.getProperty("line.separator"))
    }
    writer.flush()
    writer.close()
    println("People Information File generated successfully.")
  }

  def getRandomGender(): String = {
    val rand = new Random()
    val randNum = rand.nextInt(2) + 1
    if (randNum % 2 == 0) {
      "M"
    } else {
      "F"
    }
  }
} 
Example 5
Source File: CliFullCycleTest.scala    From TransmogrifAI   with BSD 3-Clause "New" or "Revised" License
package com.salesforce.op.cli

import java.io.{File, FileWriter}

import com.salesforce.op.OpWorkflowRunType
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner

import scala.language.postfixOps
import scala.sys.process._


@RunWith(classOf[JUnitRunner])
class CliFullCycleTest extends CliTestBase {

  Spec[CliExec] should "do full cycle with avcs present" in {
    val sut = new Sut
    val result = sut.run(
      "gen",
      "--input", TestCsvHeadless,
      "--id", "passengerId",
      "--response", "survived",
      "--schema", TestAvsc,
      "--answers", AnswersFile,
      ProjectName,
      "--overwrite"
    )
    assertResult(result, Succeeded)
    checkScalaFiles(shouldNotContain = "_code")
    checkAvroFile(new File(TestAvsc))
    // runTraining() // TODO: requires proper SPARK_HOME setup on TC

    // TODO: score & evaluate
  }

  it should "do full cycle with autoreader" in {
    val sut = new Sut
    val result = sut.run(
      "gen",
      "--input", TestBigCsvWithHeaders,
      "--id", "passengerId",
      "--response", "survived",
      "--auto", "Pasajeros",
      "--answers", AnswersFile,
      ProjectName,
      "--overwrite"
    )
    assertResult(result, Succeeded)
    checkScalaFiles(shouldNotContain = "_code")
    // TODO: unfortunately, it fails, due to bad data.
    // runTraining() // TODO: requires proper SPARK_HOME setup on TC

    // TODO: score & evaluate
  }

  // TODO: add tests for multiclass & regression models


  private def runBuild() = runCommand(List("./gradlew", "--no-daemon", "installDist"))

  private def runTraining() = {
    val trainMe = appRuntimeArgs(OpWorkflowRunType.Train)
    val cmd = List(
      "./gradlew",
      "--no-daemon",
      s"sparkSubmit",
      s"-Dmain=com.salesforce.app.$ProjectName",
      s"""-Dargs=\"$trainMe\""""
    )
    runCommand(cmd)
  }

  private def runCommand(cmd: List[String]) = {
    val cmdStr = cmd.mkString(" ")
    val cmdSh = new FileWriter(new File(projectDir, "cmd"))
    cmdSh.write(cmdStr)
    cmdSh.close()

    val proc = Process("sh" :: "cmd" :: Nil, new File(projectDir).getAbsoluteFile)
    val logger = ProcessLogger(s => log.info(s), s => log.error(s))
    val code = proc !< logger

    if (code == 0) succeed else fail(s"Command returned a non zero code: $code")
  }
} 
Example 6
Source File: AvroInOutTest.scala    From TransmogrifAI   with BSD 3-Clause "New" or "Revised" License
package com.salesforce.op.utils.io.avro

import java.io.{File, FileNotFoundException, FileWriter}
import java.nio.file.Paths

import com.salesforce.op.test.TestSparkContext
import com.salesforce.op.utils.io.avro.AvroInOut._
import org.apache.avro.generic.GenericRecord
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.rdd.RDD
import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.junit.JUnitRunner

@RunWith(classOf[JUnitRunner])
class AvroInOutTest extends FlatSpec with TestSparkContext {
  val avroSchemaPath = s"$testDataDir/PassengerDataAll.avsc"
  val avroFilePath = s"$testDataDir/PassengerDataAll.avro"
  val avroFileRecordCount = 891
  val hdfs: FileSystem = FileSystem.get(sc.hadoopConfiguration)
  lazy val avroTemp: String = tempDir + "/avro-inout-test"

  Spec(AvroInOut.getClass) should "creates RDD from an avro file" in {
    val res = readPathSeq(avroFilePath, withCount = true, deepCopy = true, persist = false)
    res shouldBe a[RDD[_]]
    res.count shouldBe avroFileRecordCount
  }

  it should "creates RDD from a sequence of avro files" in {
    val res = readPathSeq(s"$avroFilePath,$avroFilePath")
    res.count shouldBe avroFileRecordCount*2
  }

  it should "create RDD from a mixed sequence of valid and invalid avro files" in {
    val res = readPathSeq(s"badfile/path1,$avroFilePath,badfile/path2,$avroFilePath,badfile/path3")
    res.count shouldBe avroFileRecordCount*2
  }

  it should "throw an error if passed in avro files are invalid" in {
    val error = intercept[IllegalArgumentException](readPathSeq("badfile/path1,badfile/path2"))
    error.getMessage shouldBe "No valid directory found in path 'badfile/path1,badfile/path2'"
  }

  it should "creates Some(RDD) from an avro file" in {
    val res = read(avroFilePath)
    res.size shouldBe 1
    res.get shouldBe an[RDD[_]]
    res.get.count shouldBe avroFileRecordCount
  }

  it should "create None from an invalid avro file" in {
    val res = read("badfile/path")
    res shouldBe None
  }

  Spec[AvroWriter[_]] should "writeAvro to filesystem" in {
    val avroData = readPathSeq(avroFilePath).asInstanceOf[RDD[GenericRecord]]
    val avroSchema = loadFile(avroSchemaPath)

    val error = intercept[FileNotFoundException](hdfs.listStatus(new Path(avroTemp)))
    error.getMessage shouldBe s"File $avroTemp does not exist"

    AvroWriter(avroData).writeAvro(avroTemp, avroSchema)
    val hdfsFiles = hdfs.listStatus(new Path(avroTemp)) filter (x => x.getPath.getName.contains("part"))
    val res = readPathSeq((for { x <- hdfsFiles } yield avroTemp + "/" + x.getPath.getName).mkString(","))
    res.count shouldBe avroFileRecordCount
  }

  it should "checkPathsExist" in {
    val tmpDir = Paths.get(File.separator, "tmp").toFile
    val f1 = new File(tmpDir, "avroinouttest")
    f1.delete()
    val w = new FileWriter(f1)
    w.write("just checking")
    w.close()
    val f2 = new File(tmpDir, "thisfilecannotexist")
    f2.delete()
    val f3 = new File(tmpDir, "this file cannot exist")
    f3.delete()
    assume(f1.exists && !f2.exists && !f3.exists)

    // check for one dir being invalid in the path amongst two
    selectExistingPaths(s"$f1,$f2") shouldBe f1.toString

    // check if all dirs in the path are invalid then we get an exception
    intercept[IllegalArgumentException] { selectExistingPaths(f2.toString) }

    // also, check if all dirs in the path are invalid ( in a different way ) then we get an exception
    intercept[IllegalArgumentException] { selectExistingPaths(f3.toString) }

    // check for one dir being invalid ( in a different way ) in the path amongst the two dirs in it
    selectExistingPaths(s"$f1,$f3") shouldBe f1.toString

    // check for paths order insensitivity
    selectExistingPaths(s"$f3,$f1") shouldBe f1.toString

    // check for an exception if the path is an empty string
    intercept[IllegalArgumentException] { selectExistingPaths("") }
  }

} 
Example 7
Source File: DiskBlockManagerSuite.scala    From iolap   with Apache License 2.0
package org.apache.spark.storage

import java.io.{File, FileWriter}

import scala.language.reflectiveCalls

import org.mockito.Mockito.{mock, when}
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.util.Utils

class DiskBlockManagerSuite extends SparkFunSuite with BeforeAndAfterEach with BeforeAndAfterAll {
  private val testConf = new SparkConf(false)
  private var rootDir0: File = _
  private var rootDir1: File = _
  private var rootDirs: String = _

  val blockManager = mock(classOf[BlockManager])
  when(blockManager.conf).thenReturn(testConf)
  var diskBlockManager: DiskBlockManager = _

  override def beforeAll() {
    super.beforeAll()
    rootDir0 = Utils.createTempDir()
    rootDir1 = Utils.createTempDir()
    rootDirs = rootDir0.getAbsolutePath + "," + rootDir1.getAbsolutePath
  }

  override def afterAll() {
    super.afterAll()
    Utils.deleteRecursively(rootDir0)
    Utils.deleteRecursively(rootDir1)
  }

  override def beforeEach() {
    val conf = testConf.clone
    conf.set("spark.local.dir", rootDirs)
    diskBlockManager = new DiskBlockManager(blockManager, conf)
  }

  override def afterEach() {
    diskBlockManager.stop()
  }

  test("basic block creation") {
    val blockId = new TestBlockId("test")
    val newFile = diskBlockManager.getFile(blockId)
    writeToFile(newFile, 10)
    assert(diskBlockManager.containsBlock(blockId))
    newFile.delete()
    assert(!diskBlockManager.containsBlock(blockId))
  }

  test("enumerating blocks") {
    val ids = (1 to 100).map(i => TestBlockId("test_" + i))
    val files = ids.map(id => diskBlockManager.getFile(id))
    files.foreach(file => writeToFile(file, 10))
    assert(diskBlockManager.getAllBlocks.toSet === ids.toSet)
  }

  def writeToFile(file: File, numBytes: Int) {
    val writer = new FileWriter(file, true)
    for (i <- 0 until numBytes) writer.write(i)
    writer.close()
  }
} 
Example 8
Source File: LogPageSuite.scala    From iolap   with Apache License 2.0
package org.apache.spark.deploy.worker.ui

import java.io.{File, FileWriter}

import org.mockito.Mockito.{mock, when}
import org.scalatest.PrivateMethodTester

import org.apache.spark.SparkFunSuite

class LogPageSuite extends SparkFunSuite with PrivateMethodTester {

  test("get logs simple") {
    val webui = mock(classOf[WorkerWebUI])
    val tmpDir = new File(sys.props("java.io.tmpdir"))
    val workDir = new File(tmpDir, "work-dir")
    workDir.mkdir()
    when(webui.workDir).thenReturn(workDir)
    val logPage = new LogPage(webui)

    // Prepare some fake log files to read later
    val out = "some stdout here"
    val err = "some stderr here"
    val tmpOut = new File(workDir, "stdout")
    val tmpErr = new File(workDir, "stderr")
    val tmpErrBad = new File(tmpDir, "stderr") // outside the working directory
    val tmpOutBad = new File(tmpDir, "stdout")
    val tmpRand = new File(workDir, "random")
    write(tmpOut, out)
    write(tmpErr, err)
    write(tmpOutBad, out)
    write(tmpErrBad, err)
    write(tmpRand, "1 6 4 5 2 7 8")

    // Get the logs. All log types other than "stderr" or "stdout" will be rejected
    val getLog = PrivateMethod[(String, Long, Long, Long)]('getLog)
    val (stdout, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "stdout", None, 100)
    val (stderr, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "stderr", None, 100)
    val (error1, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "random", None, 100)
    val (error2, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "does-not-exist.txt", None, 100)
    // These files exist, but live outside the working directory
    val (error3, _, _, _) =
      logPage invokePrivate getLog(tmpDir.getAbsolutePath, "stderr", None, 100)
    val (error4, _, _, _) =
      logPage invokePrivate getLog(tmpDir.getAbsolutePath, "stdout", None, 100)
    assert(stdout === out)
    assert(stderr === err)
    assert(error1.startsWith("Error: Log type must be one of "))
    assert(error2.startsWith("Error: Log type must be one of "))
    assert(error3.startsWith("Error: invalid log directory"))
    assert(error4.startsWith("Error: invalid log directory"))
  }

  
  private def write(f: File, s: String): Unit = {
    val writer = new FileWriter(f)
    try {
      writer.write(s)
    } finally {
      writer.close()
    }
  }

} 
Example 9
Source File: HashShuffleManagerSuite.scala    From iolap   with Apache License 2.0
package org.apache.spark.shuffle.hash

import java.io.{File, FileWriter}

import scala.language.reflectiveCalls

import org.apache.spark.{LocalSparkContext, SparkConf, SparkContext, SparkEnv, SparkFunSuite}
import org.apache.spark.executor.ShuffleWriteMetrics
import org.apache.spark.network.buffer.{FileSegmentManagedBuffer, ManagedBuffer}
import org.apache.spark.serializer.JavaSerializer
import org.apache.spark.shuffle.FileShuffleBlockResolver
import org.apache.spark.storage.{ShuffleBlockId, FileSegment}

class HashShuffleManagerSuite extends SparkFunSuite with LocalSparkContext {
  private val testConf = new SparkConf(false)

  private def checkSegments(expected: FileSegment, buffer: ManagedBuffer) {
    assert(buffer.isInstanceOf[FileSegmentManagedBuffer])
    val segment = buffer.asInstanceOf[FileSegmentManagedBuffer]
    assert(expected.file.getCanonicalPath === segment.getFile.getCanonicalPath)
    assert(expected.offset === segment.getOffset)
    assert(expected.length === segment.getLength)
  }

  test("consolidated shuffle can write to shuffle group without messing existing offsets/lengths") {

    val conf = new SparkConf(false)
    // reset after EACH object write. This is to ensure that there are bytes appended after
    // an object is written. So if the codepaths assume writeObject is end of data, this should
    // flush those bugs out. This was a common bug in ExternalAppendOnlyMap, etc.
    conf.set("spark.serializer.objectStreamReset", "1")
    conf.set("spark.serializer", "org.apache.spark.serializer.JavaSerializer")
    conf.set("spark.shuffle.manager", "org.apache.spark.shuffle.hash.HashShuffleManager")

    sc = new SparkContext("local", "test", conf)

    val shuffleBlockResolver =
      SparkEnv.get.shuffleManager.shuffleBlockResolver.asInstanceOf[FileShuffleBlockResolver]

    val shuffle1 = shuffleBlockResolver.forMapTask(1, 1, 1, new JavaSerializer(conf),
      new ShuffleWriteMetrics)
    for (writer <- shuffle1.writers) {
      writer.write("test1", "value")
      writer.write("test2", "value")
    }
    for (writer <- shuffle1.writers) {
      writer.commitAndClose()
    }

    val shuffle1Segment = shuffle1.writers(0).fileSegment()
    shuffle1.releaseWriters(success = true)

    val shuffle2 = shuffleBlockResolver.forMapTask(1, 2, 1, new JavaSerializer(conf),
      new ShuffleWriteMetrics)

    for (writer <- shuffle2.writers) {
      writer.write("test3", "value")
      writer.write("test4", "vlue")
    }
    for (writer <- shuffle2.writers) {
      writer.commitAndClose()
    }
    val shuffle2Segment = shuffle2.writers(0).fileSegment()
    shuffle2.releaseWriters(success = true)

    // Now comes the test:
    // Write to shuffle 3 and close it, but before registering it, check that the file lengths for
    // the previous task (for shuffle1) are the same as 'segments'. Earlier, we were inferring the length
    // of a block based on the remaining data in the file, which could mess things up when there are
    // concurrent reads and writes happening to the same shuffle group.

    val shuffle3 = shuffleBlockResolver.forMapTask(1, 3, 1, new JavaSerializer(testConf),
      new ShuffleWriteMetrics)
    for (writer <- shuffle3.writers) {
      writer.write("test3", "value")
      writer.write("test4", "value")
    }
    for (writer <- shuffle3.writers) {
      writer.commitAndClose()
    }
    // check before we register.
    checkSegments(shuffle2Segment, shuffleBlockResolver.getBlockData(ShuffleBlockId(1, 2, 0)))
    shuffle3.releaseWriters(success = true)
    checkSegments(shuffle2Segment, shuffleBlockResolver.getBlockData(ShuffleBlockId(1, 2, 0)))
    shuffleBlockResolver.removeShuffle(1)
  }

  def writeToFile(file: File, numBytes: Int) {
    val writer = new FileWriter(file, true)
    for (i <- 0 until numBytes) writer.write(i)
    writer.close()
  }
} 
Example 10
Source File: EmailReportHelper.scala    From sparklens   with Apache License 2.0
package com.qubole.sparklens.helper

import java.io.FileWriter
import java.nio.file.{Files, Paths}

import org.apache.spark.SparkConf

object EmailReportHelper {

  def getTempFileLocation(): String = {
    val random = new scala.util.Random(31)
    s"/tmp/${random.nextInt.toString}.json"
  }

  def isValid(email: String): Boolean =
    """(\w+)((\.)(\w+))?@([\w\.]+)""".r.unapplySeq(email).isDefined

  def generateReport(appContextString: String, conf: SparkConf): Unit = {
    Option(conf.get("spark.sparklens.report.email", null)) match {
      case Some(email) =>
        if (!isValid(email)) {
          println(s"Email $email is not valid. Please provide a valid email.")
          return
        }
        val tempFileLocation = getTempFileLocation()
        try {
          val fileWriter = new FileWriter(tempFileLocation)

          fileWriter.write(appContextString)
          fileWriter.close()
          val response = HttpRequestHandler.requestReport(tempFileLocation, email)
          println(response.getEntity)
        } catch {
          case e: Exception =>
            println(s"Error while trying to generate email report: ${e.getMessage} \n " +
              s"Try to use sparklens.qubole.com to generate the report manually" )
        } finally {
          Files.deleteIfExists(Paths.get(tempFileLocation))
        }
      case _ =>
    }
  }
} 
Example 11
Source File: DiskBlockManagerSuite.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.storage

import java.io.{File, FileWriter}

import scala.language.reflectiveCalls

import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.util.Utils

class DiskBlockManagerSuite extends SparkFunSuite with BeforeAndAfterEach with BeforeAndAfterAll {
  private val testConf = new SparkConf(false)
  private var rootDir0: File = _
  private var rootDir1: File = _
  private var rootDirs: String = _

  var diskBlockManager: DiskBlockManager = _

  override def beforeAll() {
    super.beforeAll()
    rootDir0 = Utils.createTempDir()
    rootDir1 = Utils.createTempDir()
    rootDirs = rootDir0.getAbsolutePath + "," + rootDir1.getAbsolutePath
  }

  override def afterAll() {
    try {
      Utils.deleteRecursively(rootDir0)
      Utils.deleteRecursively(rootDir1)
    } finally {
      super.afterAll()
    }
  }

  override def beforeEach() {
    super.beforeEach()
    val conf = testConf.clone
    conf.set("spark.local.dir", rootDirs)
    diskBlockManager = new DiskBlockManager(conf, deleteFilesOnStop = true)
  }

  override def afterEach() {
    try {
      diskBlockManager.stop()
    } finally {
      super.afterEach()
    }
  }

  test("basic block creation") {
    val blockId = new TestBlockId("test")
    val newFile = diskBlockManager.getFile(blockId)
    writeToFile(newFile, 10)
    assert(diskBlockManager.containsBlock(blockId))
    newFile.delete()
    assert(!diskBlockManager.containsBlock(blockId))
  }

  test("enumerating blocks") {
    val ids = (1 to 100).map(i => TestBlockId("test_" + i))
    val files = ids.map(id => diskBlockManager.getFile(id))
    files.foreach(file => writeToFile(file, 10))
    assert(diskBlockManager.getAllBlocks.toSet === ids.toSet)
  }

  def writeToFile(file: File, numBytes: Int) {
    val writer = new FileWriter(file, true)
    for (i <- 0 until numBytes) writer.write(i)
    writer.close()
  }
} 
Example 12
Source File: LogPageSuite.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.deploy.worker.ui

import java.io.{File, FileWriter}

import org.mockito.Mockito.{mock, when}
import org.scalatest.PrivateMethodTester

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.deploy.worker.Worker

class LogPageSuite extends SparkFunSuite with PrivateMethodTester {

  test("get logs simple") {
    val webui = mock(classOf[WorkerWebUI])
    val worker = mock(classOf[Worker])
    val tmpDir = new File(sys.props("java.io.tmpdir"))
    val workDir = new File(tmpDir, "work-dir")
    workDir.mkdir()
    when(webui.workDir).thenReturn(workDir)
    when(webui.worker).thenReturn(worker)
    when(worker.conf).thenReturn(new SparkConf())
    val logPage = new LogPage(webui)

    // Prepare some fake log files to read later
    val out = "some stdout here"
    val err = "some stderr here"
    val tmpOut = new File(workDir, "stdout")
    val tmpErr = new File(workDir, "stderr")
    val tmpErrBad = new File(tmpDir, "stderr") // outside the working directory
    val tmpOutBad = new File(tmpDir, "stdout")
    val tmpRand = new File(workDir, "random")
    write(tmpOut, out)
    write(tmpErr, err)
    write(tmpOutBad, out)
    write(tmpErrBad, err)
    write(tmpRand, "1 6 4 5 2 7 8")

    // Get the logs. All log types other than "stderr" or "stdout" will be rejected
    val getLog = PrivateMethod[(String, Long, Long, Long)]('getLog)
    val (stdout, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "stdout", None, 100)
    val (stderr, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "stderr", None, 100)
    val (error1, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "random", None, 100)
    val (error2, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "does-not-exist.txt", None, 100)
    // These files exist, but live outside the working directory
    val (error3, _, _, _) =
      logPage invokePrivate getLog(tmpDir.getAbsolutePath, "stderr", None, 100)
    val (error4, _, _, _) =
      logPage invokePrivate getLog(tmpDir.getAbsolutePath, "stdout", None, 100)
    assert(stdout === out)
    assert(stderr === err)
    assert(error1.startsWith("Error: Log type must be one of "))
    assert(error2.startsWith("Error: Log type must be one of "))
    assert(error3.startsWith("Error: invalid log directory"))
    assert(error4.startsWith("Error: invalid log directory"))
  }

  
  private def write(f: File, s: String): Unit = {
    val writer = new FileWriter(f)
    try {
      writer.write(s)
    } finally {
      writer.close()
    }
  }

} 
Example 13
Source File: LogPageSuite.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.deploy.worker.ui

import java.io.{File, FileWriter}

import org.mockito.Mockito.{mock, when}
import org.scalatest.PrivateMethodTester

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.deploy.worker.Worker

class LogPageSuite extends SparkFunSuite with PrivateMethodTester {

  test("get logs simple") {
    val webui = mock(classOf[WorkerWebUI])
    val worker = mock(classOf[Worker])
    val tmpDir = new File(sys.props("java.io.tmpdir"))
    val workDir = new File(tmpDir, "work-dir")
    workDir.mkdir()
    when(webui.workDir).thenReturn(workDir)
    when(webui.worker).thenReturn(worker)
    when(worker.conf).thenReturn(new SparkConf())
    val logPage = new LogPage(webui)

    // Prepare some fake log files to read later
    val out = "some stdout here"
    val err = "some stderr here"
    val tmpOut = new File(workDir, "stdout")
    val tmpErr = new File(workDir, "stderr")
    val tmpErrBad = new File(tmpDir, "stderr") // outside the working directory
    val tmpOutBad = new File(tmpDir, "stdout")
    val tmpRand = new File(workDir, "random")
    write(tmpOut, out)
    write(tmpErr, err)
    write(tmpOutBad, out)
    write(tmpErrBad, err)
    write(tmpRand, "1 6 4 5 2 7 8")

    // Get the logs. All log types other than "stderr" or "stdout" will be rejected
    val getLog = PrivateMethod[(String, Long, Long, Long)]('getLog)
    val (stdout, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "stdout", None, 100)
    val (stderr, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "stderr", None, 100)
    val (error1, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "random", None, 100)
    val (error2, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "does-not-exist.txt", None, 100)
    // These files exist, but live outside the working directory
    val (error3, _, _, _) =
      logPage invokePrivate getLog(tmpDir.getAbsolutePath, "stderr", None, 100)
    val (error4, _, _, _) =
      logPage invokePrivate getLog(tmpDir.getAbsolutePath, "stdout", None, 100)
    assert(stdout === out)
    assert(stderr === err)
    assert(error1.startsWith("Error: Log type must be one of "))
    assert(error2.startsWith("Error: Log type must be one of "))
    assert(error3.startsWith("Error: invalid log directory"))
    assert(error4.startsWith("Error: invalid log directory"))
  }

  
  private def write(f: File, s: String): Unit = {
    val writer = new FileWriter(f)
    try {
      writer.write(s)
    } finally {
      writer.close()
    }
  }

} 
Example 14
Source File: GoldenGateCompilerPhase.scala    From midas   with BSD 3-Clause "New" or "Revised" License
// See LICENSE for license details.

package midas.stage

import midas._

import firrtl.ir.Circuit
import firrtl.{Transform, CircuitState, AnnotationSeq}
import firrtl.annotations.{Annotation}
import firrtl.options.{Phase, TargetDirAnnotation}
import firrtl.stage.{FirrtlCircuitAnnotation}
import firrtl.CompilerUtils.getLoweringTransforms
import firrtl.passes.memlib._

import freechips.rocketchip.config.{Parameters, Config, Field}
import freechips.rocketchip.util.{ParsedInputNames}
import java.io.{File, FileWriter, Writer}
import logger._

class GoldenGateCompilerPhase extends Phase with ConfigLookup {

  def transform(annotations: AnnotationSeq): AnnotationSeq = {
    val allCircuits = annotations.collect({ case FirrtlCircuitAnnotation(circuit) => circuit })
    require(allCircuits.size == 1, "Golden Gate can only process a single Firrtl Circuit at a time.")
    val circuit = allCircuits.head

    val targetDir = annotations.collectFirst({ case TargetDirAnnotation(targetDir) => new File(targetDir) }).get
    val configPackage = annotations.collectFirst({ case ConfigPackageAnnotation(p) => p }).get
    val configString  = annotations.collectFirst({ case ConfigStringAnnotation(s) => s }).get
    val pNames = ParsedInputNames("UNUSED", "UNUSED", "UNUSED", configPackage, configString)

    // MIDAS Legacy requirement -- GGRELEASE: Remove
    val io: Seq[(String, chisel3.Data)] = Seq.empty

    val midasAnnos = Seq(InferReadWriteAnnotation)

    implicit val p = getParameters(pNames).alterPartial({
      case OutputDir => targetDir
    })
    // Run prior to Golden Gate transforms (target-time)
    val targetTransforms = p(TargetTransforms).flatMap(transformCtor => transformCtor(p))
    // Run after Golden Gate transformations (host-time)
    val hostTransforms = p(HostTransforms).flatMap(transformCtor => transformCtor(p))
    val midasTransforms = new passes.MidasTransforms(io)
    val compiler = new MidasCompiler
    val midas = compiler.compile(firrtl.CircuitState(
      circuit, firrtl.HighForm, annotations ++ midasAnnos),
      targetTransforms :+ midasTransforms)

    val postHostTransforms = new HostTransformCompiler().compile(midas, hostTransforms)
    val result = new LastStageVerilogCompiler().compileAndEmit(postHostTransforms, Seq())
    result.annotations
  }

} 
Example 15
Source File: Compiler.scala    From midas   with BSD 3-Clause "New" or "Revised" License
// See LICENSE for license details.

package midas

import passes.Utils.writeEmittedCircuit

import chisel3.{Data, Bundle, Record, Clock, Bool}
import chisel3.internal.firrtl.Port
import firrtl.ir.Circuit
import firrtl.{Transform, CircuitState}
import firrtl.annotations.Annotation
import firrtl.CompilerUtils.getLoweringTransforms
import firrtl.passes.memlib._
import freechips.rocketchip.config.{Parameters, Field}
import java.io.{File, FileWriter, Writer}
import logger._

// Directory into which output files are dumped. Set by dir argument
case object OutputDir extends Field[File]

// Compiler for Midas Transforms
private class MidasCompiler extends firrtl.Compiler {
  def emitter = new firrtl.LowFirrtlEmitter
  def transforms =
    getLoweringTransforms(firrtl.ChirrtlForm, firrtl.MidForm) ++
    Seq(new InferReadWrite) ++
    getLoweringTransforms(firrtl.MidForm, firrtl.LowForm)
}

// These next two compilers split LFO from the rest of the lowering
// compilers to schedule around the presence of internal & non-standard WIR
// nodes (Dshlw) present after LFO, which custom transforms can't handle
private class HostTransformCompiler extends firrtl.Compiler {
  def emitter = new firrtl.LowFirrtlEmitter
  def transforms =
    Seq(new firrtl.IRToWorkingIR,
        new firrtl.ResolveAndCheck,
        new firrtl.HighFirrtlToMiddleFirrtl) ++
    getLoweringTransforms(firrtl.MidForm, firrtl.LowForm)
}

// Custom transforms have been scheduled -> do the final lowering
private class LastStageVerilogCompiler extends firrtl.Compiler {
  def emitter = new firrtl.VerilogEmitter
  def transforms = Seq(new firrtl.LowFirrtlOptimization,
                       new firrtl.transforms.RemoveReset)
}

object MidasCompiler {
  def apply(
      chirrtl: Circuit,
      targetAnnos: Seq[Annotation],
      io: Seq[(String, Data)],
      dir: File,
      targetTransforms: Seq[Transform], // Run pre-MIDAS transforms, on the target RTL
      hostTransforms: Seq[Transform]    // Run post-MIDAS transformations
    )
     (implicit p: Parameters): CircuitState = {
    val midasAnnos = Seq(
      firrtl.TargetDirAnnotation(dir.getPath()),
      InferReadWriteAnnotation)
    val midasTransforms = new passes.MidasTransforms(io)(p alterPartial { case OutputDir => dir })
    val compiler = new MidasCompiler
    val midas = compiler.compile(firrtl.CircuitState(
      chirrtl, firrtl.ChirrtlForm, targetAnnos ++ midasAnnos),
      targetTransforms :+ midasTransforms)

    val postHostTransforms = new HostTransformCompiler().compile(midas, hostTransforms)
    val result = new LastStageVerilogCompiler().compileAndEmit(postHostTransforms)

    writeEmittedCircuit(result, new File(dir, s"FPGATop.v"))
    result
  }

  // Unlike above, elaborates the target locally, before constructing the target IO Record.
  def apply[T <: chisel3.core.UserModule](
      w: => T,
      dir: File,
      targetTransforms: Seq[Transform] = Seq.empty,
      hostTransforms: Seq[Transform] = Seq.empty
    )
     (implicit p: Parameters): CircuitState = {
    dir.mkdirs
    lazy val target = w
    val circuit = chisel3.Driver.elaborate(() => target)
    val chirrtl = firrtl.Parser.parse(chisel3.Driver.emit(circuit))
    val io = target.getPorts map (p => p.id.instanceName -> p.id)
    apply(chirrtl, circuit.annotations.map(_.toFirrtl), io, dir, targetTransforms, hostTransforms)
  }
} 
Example 16
Source File: MidasTransforms.scala    From midas   with BSD 3-Clause "New" or "Revised" License
// See LICENSE for license details.

package midas
package passes

import midas.core._
import chisel3.core.DataMirror.directionOf

import firrtl._
import firrtl.annotations._
import firrtl.ir._
import logger._
import firrtl.Mappers._
import firrtl.transforms.{DedupModules, DeadCodeElimination}
import Utils._
import java.io.{File, FileWriter}

private[passes] class WCircuit(
  info: Info,
  modules: Seq[DefModule],
  main: String,
  val sim: SimWrapperChannels) extends Circuit(info, modules, main)

private[midas] class MidasTransforms(
    io: Seq[(String, chisel3.Data)])
    (implicit p: freechips.rocketchip.config.Parameters) extends Transform {
  def inputForm = LowForm
  def outputForm = LowForm
  val dir = p(OutputDir)

  // Optionally run if the GenerateMultiCycleRamModels parameter is set
  val optionalTargetTransforms = if (p(GenerateMultiCycleRamModels)) Seq(
    new fame.LabelSRAMModels,
    new ResolveAndCheck,
    new EmitFirrtl("post-wrap-sram-models.fir"))
  else Seq()

  //Logger.setLevel(LogLevel.Debug)
  def execute(state: CircuitState) = {
    val xforms = Seq(
      firrtl.passes.RemoveValidIf,
      new firrtl.transforms.ConstantPropagation,
      firrtl.passes.SplitExpressions,
      firrtl.passes.CommonSubexpressionElimination,
      new firrtl.transforms.DeadCodeElimination,
      new EnsureNoTargetIO,
      // NB: Carelessly removing this pass will break the FireSim manager as we always
      // need to generate the *.asserts file. Fix by baking into driver.
      new AssertPass(dir),
      new PrintSynthesis(dir),
      new ResolveAndCheck,
      new HighFirrtlToMiddleFirrtl,
      new MiddleFirrtlToLowFirrtl,
      new BridgeExtraction,
      new ResolveAndCheck,
      new MiddleFirrtlToLowFirrtl,
      new fame.WrapTop,
      new ResolveAndCheck,
      new EmitFirrtl("post-wrap-top.fir")) ++
    optionalTargetTransforms ++
    Seq(
      new fame.ExtractModel,
      new ResolveAndCheck,
      new EmitFirrtl("post-extract-model.fir"),
      new HighFirrtlToMiddleFirrtl,
      new MiddleFirrtlToLowFirrtl,
      new fame.FAMEDefaults,
      new fame.ChannelExcision,
      new fame.InferModelPorts,
      new EmitFirrtl("post-channel-excision.fir"),
      new fame.FAMETransform,
      new EmitFirrtl("post-fame-transform.fir"),
      new ResolveAndCheck,
      new fame.EmitAndWrapRAMModels,
      new EmitFirrtl("post-gen-sram-models.fir"),
      new ResolveAndCheck) ++
    Seq(
      new SimulationMapping(io),
      new PlatformMapping(state.circuit.main, dir))
    (xforms foldLeft state)((in, xform) =>
      xform runTransform in).copy(form=outputForm)
  }
}


trait AddedTargetIoAnnotation[T <: chisel3.Data] extends Annotation {
  def generateChiselIO(): Tuple2[String, T]
} 
Example 17
Source File: PlatformMapping.scala    From midas   with BSD 3-Clause "New" or "Revised" License
// See LICENSE for license details.

package midas
package passes

import firrtl._
import firrtl.annotations.{CircuitName}
import firrtl.ir._
import firrtl.Mappers._
import Utils._
import java.io.{File, FileWriter, StringWriter}

private[passes] class PlatformMapping(
    target: String,
    dir: File)
  (implicit param: freechips.rocketchip.config.Parameters) extends firrtl.Transform {

  def inputForm = LowForm
  def outputForm = HighForm
  override def name = "[MIDAS] Platform Mapping"

  private def dumpHeader(c: platform.PlatformShim) {
    def vMacro(arg: (String, Long)): String = s"`define ${arg._1} ${arg._2}\n"

    val csb = new StringBuilder
    csb append "#ifndef __%s_H\n".format(target.toUpperCase)
    csb append "#define __%s_H\n".format(target.toUpperCase)
    c.genHeader(csb, target)
    csb append "#endif  // __%s_H\n".format(target.toUpperCase)

    val vsb = new StringBuilder
    vsb append "`ifndef __%s_H\n".format(target.toUpperCase)
    vsb append "`define __%s_H\n".format(target.toUpperCase)
    c.headerConsts map vMacro addString vsb
    vsb append "`endif  // __%s_H\n".format(target.toUpperCase)

    val ch = new FileWriter(new File(dir, s"${target}-const.h"))
    val vh = new FileWriter(new File(dir, s"${target}-const.vh"))

    try {
      ch write csb.result
      vh write vsb.result
    } finally {
      ch.close
      vh.close
      csb.clear
      vsb.clear
    }
  }

  def initStmt(sim: String)(s: Statement): Statement =
    s match {
      case s: WDefInstance if s.name == "sim" && s.module == "SimBox" =>
        s.copy(module = sim) // replace TargetBox with the actual sim module
      case s => s map initStmt(sim)
    }

  def init(info: Info, sim: String)(m: DefModule) = m match {
    case m: Module if m.name == "FPGATop" =>
      val body = initStmt(sim)(m.body)
      Some(m.copy(info = info, body = body))
    case m: Module => Some(m)
    case m: ExtModule => None
  }

  def linkCircuits(parent: Circuit, child: Circuit): Circuit = {
    parent.copy(modules = child.modules ++ (parent.modules flatMap init(child.info, child.main)))
  }

  def execute(c: CircuitState) = {
    val sim = c.circuit match { case w: WCircuit => w.sim }
    lazy val shim = param(Platform) match {
      case Zynq     => new platform.ZynqShim(sim)
      case F1       => new platform.F1Shim(sim)
    }
    val shimCircuit = chisel3.Driver.elaborate(() => shim)
    val chirrtl = Parser.parse(chisel3.Driver.emit(shimCircuit))
    val shimAnnos = shimCircuit.annotations.map(_.toFirrtl)
    val transforms = Seq(new Fame1Instances,
                         new PreLinkRenaming(Namespace(c.circuit)))
    val shimCircuitState = new LowFirrtlCompiler().compile(CircuitState(chirrtl, ChirrtlForm, shimAnnos), transforms)

    // Rename the annotations from the inner module, which are using an obsolete CircuitName
    val renameMap = RenameMap(
      Map(CircuitName(c.circuit.main) -> Seq(CircuitName(shimCircuitState.circuit.main))))

    dumpHeader(shim)
    c.copy(circuit = linkCircuits(shimCircuitState.circuit, c.circuit),
           annotations = shimCircuitState.annotations ++ c.annotations,
           renames = Some(renameMap))
  }
} 
Example 18
Source File: EnsureNoTargetIO.scala    From midas   with BSD 3-Clause "New" or "Revised" License
// See LICENSE for license details.

package midas.passes

import midas.widgets.BridgeAnnotation
import midas.passes.fame.{PromoteSubmodule, PromoteSubmoduleAnnotation, FAMEChannelConnectionAnnotation}

import firrtl._
import firrtl.annotations._
import firrtl.ir._
import firrtl.Mappers._
import firrtl.transforms.TopWiring.{TopWiringAnnotation, TopWiringTransform, TopWiringOutputFilesAnnotation}
import firrtl.passes.wiring.{Wiring, WiringInfo}
import Utils._

import scala.collection.mutable
import java.io.{File, FileWriter, StringWriter}

// Ensures that there is no dangling IO on the target. All I/O coming off the DUT must be bound
// to a Bridge BlackBox
private[passes] class EnsureNoTargetIO extends firrtl.Transform {
  def inputForm = HighForm
  def outputForm = HighForm
  override def name = "[MIDAS] Ensure No Target IO"

  def execute(state: CircuitState): CircuitState = {
    val topName = state.circuit.main
    val topModule = state.circuit.modules.find(_.name == topName).get

    val nonClockPorts = topModule.ports.filter(_.tpe !=  ClockType)

    if (!nonClockPorts.isEmpty) {
      val exceptionMessage = """
Your target design has dangling IO.
You must bind the following top-level ports to a Bridge BlackBox:
""" + nonClockPorts.map(_.name).mkString("\n")
      throw new Exception(exceptionMessage)
    }
    state
  }
} 
Example 19
Source File: DumpChains.scala    From midas   with BSD 3-Clause "New" or "Revised" License
// See LICENSE for license details.

package strober
package passes

import firrtl._
import firrtl.ir._
import firrtl.Utils.create_exps
import firrtl.passes.LowerTypes.loweredName
import firrtl.passes.VerilogRename.verilogRenameN
import strober.core.ChainType
import mdf.macrolib.SRAMMacro
import java.io.{File, FileWriter}

class DumpChains(
    dir: File,
    meta: StroberMetaData,
    srams: Map[String, SRAMMacro])
   (implicit param: freechips.rocketchip.config.Parameters) extends firrtl.passes.Pass {
  
  override def name = "[strober] Dump Daisy Chains"

  private def addPad(chainFile: FileWriter, cw: Int, dw: Int)(chainType: ChainType.Value) {
    (cw - dw) match {
      case 0 =>
      case pad => chainFile write s"${chainType.id} null ${pad} -1\n"
    }
  }

  private def loop(chainFile: FileWriter,
                   mod: String,
                   path: String)
                  (chainType: ChainType.Value)
                  (implicit daisyWidth: Int) {
    meta.chains(chainType) get mod match {
      case Some(chain) if !chain.isEmpty =>
        val id = chainType.id
        val (cw, dw) = (chain foldLeft (0, 0)){case ((chainWidth, dataWidth), s) =>
          val dw = dataWidth + (s match {
            case s: WDefInstance =>
              val sram = srams(s.module)
              (chainType: @unchecked) match {
                case ChainType.SRAM =>
                  chainFile write s"$id ${path}.${s.name}.ram ${sram.width} ${sram.depth}\n"
                  sram.width
                case ChainType.Trace =>
                  val ports = sram.ports filter (_.output.nonEmpty)
                  (ports foldLeft 0){ (sum, p) =>
                    chainFile write s"$id ${path}.${s.name}.${p.output.get.name} ${p.width} -1\n"
                    sum + p.width.get
                  }
              }
            case s: DefMemory if s.readLatency > 0 =>
              val width = bitWidth(s.dataType)
              (chainType: @unchecked) match {
                case ChainType.SRAM =>
                  chainFile write s"$id ${path}.${s.name} ${width} ${s.depth}\n"
                  width.toInt
                case ChainType.Trace =>
                  s.readers foreach (r =>
                    chainFile write s"$id ${path}.${s.name}_${r}_data ${width} -1\n")
                  s.readwriters foreach (rw =>
                    chainFile write s"$id ${path}.${s.name}_${rw}_rdata ${width} -1\n")
                  (s.readers.size + s.readwriters.size) * width.toInt
              }
            case s: DefMemory =>
              val name = verilogRenameN(s.name)
              val width = bitWidth(s.dataType).toInt
              chainType match {
                case ChainType.RegFile =>
                  chainFile write s"$id $path.$name $width ${s.depth}\n"
                  width
                case ChainType.Regs => (((0 until s.depth.toInt) foldLeft 0){ (sum, i) =>
                  chainFile write s"$id $path.$name[$i] $width -1\n"
                  sum + width
                })
              }
            case s: DefRegister =>
              val name = verilogRenameN(s.name)
              val width = bitWidth(s.tpe).toInt
              chainFile write s"$id $path.$name $width -1\n"
              width
          })
          val cw = (Stream from 0 map (chainWidth + _ * daisyWidth) dropWhile (_ < dw)).head
          (cw, dw)
        }
        addPad(chainFile, cw, dw)(chainType)
      case _ =>
    }
    meta.childInsts(mod) foreach (child => loop(
      chainFile, meta.instModMap(child, mod), s"${path}.${child}")(chainType))
  }

  def run(c: Circuit) = {
    implicit val daisyWidth = param(core.DaisyWidth)
    val chainFile = new FileWriter(new File(dir, s"${c.main}.chain"))
    ChainType.values.toList foreach loop(chainFile, c.main, c.main)
    chainFile.close
    c
  }
} 
Example 20
Source File: Compiler.scala    From midas   with BSD 3-Clause "New" or "Revised" License
// See LICENSE for license details.

package strober
package replay

import firrtl._
import firrtl.ir.Circuit
import firrtl.passes.memlib._
import firrtl.CompilerUtils.getLoweringTransforms
import barstools.macros._
import java.io.{File, FileWriter}

private class Compiler(conf: File, json: File, lib: File, macros: File, paths: File) extends firrtl.Compiler {
  def transforms =
    getLoweringTransforms(ChirrtlForm, MidForm) ++
    Seq(new InferReadWrite, new ReplSeqMem) ++
    getLoweringTransforms(MidForm, LowForm) ++
    Seq(new LowFirrtlOptimization)
  def emitter = new StroberVerilogEmitter(lib, macros, paths)
}

object Compiler {
  def apply(chirrtl: Circuit, io: Seq[chisel3.Data], dir: File, lib: Option[File]): Circuit = {
    dir.mkdirs
    val confFile = new File(dir, s"${chirrtl.main}.conf")
    val jsonFile = new File(dir, s"${chirrtl.main}.macros.json")
    val macroFile = new File(dir, s"${chirrtl.main}.macros.v")
    val pathFile = new File(dir, s"${chirrtl.main}.macros.path")
    val annotations = Seq(
      InferReadWriteAnnotation,
      ReplSeqMemAnnotation(chirrtl.main, confFile.getPath))
    val verilog = new FileWriter(new File(dir, s"${chirrtl.main}.v"))
    val result = new Compiler(confFile, jsonFile, lib getOrElse jsonFile, macroFile, pathFile) compile (
      CircuitState(chirrtl, ChirrtlForm, annotations), verilog)
    genVerilogFragment(chirrtl.main, io, new FileWriter(new File(dir, s"${chirrtl.main}.vfrag")))
    verilog.close
    result.circuit
  }

  def apply[T <: chisel3.core.UserModule](
      w: => T, dir: File, lib: Option[File] = None): Circuit = {
    lazy val dut = w
    val chirrtl = Parser.parse(chisel3.Driver.emit(() => dut))
    val io = dut.getPorts map (_.id)
    apply(chirrtl, io, dir, lib)
  }
} 
Example 21
Source File: TsvRetrieverFromFile.scala    From CM-Well   with Apache License 2.0
package cmwell.dc.stream

import java.io.{BufferedWriter, File, FileWriter}

import akka.actor.ActorSystem
import akka.stream.{KillSwitch, KillSwitches, Materializer}
import akka.stream.Supervision.Decider
import akka.stream.contrib.SourceGen
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
import akka.util.ByteString
import cmwell.dc.LazyLogging
import cmwell.dc.stream.MessagesTypesAndExceptions.{DcInfo, InfotonData}
import cmwell.dc.stream.TsvRetriever.{logger, TsvFlowOutput}
import cmwell.util.resource._

import scala.concurrent.Future
import scala.util.{Failure, Success}
import scala.concurrent.ExecutionContext.Implicits.global


object TsvRetrieverFromFile extends LazyLogging {

  def apply(dcInfo: DcInfo)(implicit mat: Materializer,
                            system: ActorSystem): Source[InfotonData, (KillSwitch, Future[Seq[Option[String]]])] = {
    val persistFile = dcInfo.tsvFile.get + ".persist"

    def appendToPersistFile(str: String): Unit = {
      val bw = new BufferedWriter(new FileWriter(persistFile, true))
      bw.write(str)
      bw.close()
    }

    val linesToDrop = dcInfo.positionKey.fold {
      if (!new File(persistFile).exists) 0L
      else using(scala.io.Source.fromFile(persistFile))(_.getLines.toList.last.toLong)
    }(pos => pos.toLong)
    val positionKeySink = Flow[InfotonData]
      .recover {
        case e: Throwable => InfotonData(null, null, -1)
      }
      .scan(linesToDrop) {
        case (count, InfotonData(null, null, -1)) => {
          appendToPersistFile("crash at: " + count + "\n" + count.toString + "\n")
          count
        }
        case (count, _) => {
          val newCount = count + 1
          if (newCount % 10000 == 0) appendToPersistFile(newCount.toString + "\n")
          newCount
        }
      }
      .toMat(Sink.last)(
        (_, right) =>
          right.map { count =>
            appendToPersistFile(count.toString + "\n")
            Seq.fill(2)(Option(count.toString))
        }
      )

    Source
      .fromIterator(() => scala.io.Source.fromFile(dcInfo.tsvFile.get).getLines())
      .drop {
        logger.info(s"Dropping $linesToDrop initial lines from file ${dcInfo.tsvFile.get} for sync ${dcInfo.key}")
        linesToDrop
      }
      .viaMat(KillSwitches.single)(Keep.right)
      .map(line => TsvRetriever.parseTSVAndCreateInfotonDataFromIt(ByteString(line)))
      .alsoToMat(positionKeySink)(Keep.both)
  }
} 
Example 22
Source File: CliLogger.scala    From Argus-SAF   with Apache License 2.0
package org.argus.saf.cli.util

import java.io.{File, FileWriter, PrintWriter}
import java.text.SimpleDateFormat
import java.util.Date

 
object CliLogger {
  
  def timeStamp: String = new SimpleDateFormat("yyyyMMdd-HHmmss").format(new Date)
  
  def outPrint(s : String) {
    scala.Console.out.print(s)
    scala.Console.out.flush()
  }

  def outPrintln(s : String) {
    scala.Console.out.println(s)
    scala.Console.out.flush()
  }

  def outPrintln() {
    scala.Console.out.println()
    scala.Console.out.flush()
  }

  def errPrintln(s : String) {
    scala.Console.err.println(s)
    scala.Console.err.flush()
  }

  def errPrintln() {
    scala.Console.err.println()
    scala.Console.err.flush()
  }
  
  def logError(dir: File, text: String, e: Throwable) {
    outPrintln()
    errPrintln(text + e.getMessage)
    val f = new File(dir, ".errorlog")
    f.getParentFile.mkdirs
    val fw = new FileWriter(f)
    try {
      val pw = new PrintWriter(fw)
      pw.println("An error occurred on " + timeStamp)
      e.printStackTrace(pw)
      fw.close()
      outPrintln("Written: " + f.getAbsolutePath)
    } catch {
      case e : Throwable =>
        errPrintln("Error: " + e.getMessage)
    }
  }
} 
Example 23
Source File: CliLogger.scala    From Argus-SAF   with Apache License 2.0
package org.argus.jnsaf.native_statistics

import java.io.{File, FileWriter, PrintWriter}
import java.text.SimpleDateFormat
import java.util.Date

 
object CliLogger {
  
  def timeStamp = new SimpleDateFormat("yyyyMMdd-HHmmss").format(new Date)
  
  def outPrint(s : String) {
    scala.Console.out.print(s)
    scala.Console.out.flush()
  }

  def outPrintln(s : String) {
    scala.Console.out.println(s)
    scala.Console.out.flush()
  }

  def outPrintln() {
    scala.Console.out.println()
    scala.Console.out.flush()
  }

  def errPrintln(s : String) {
    scala.Console.err.println(s)
    scala.Console.err.flush()
  }

  def errPrintln() {
    scala.Console.err.println()
    scala.Console.err.flush()
  }
  
  def logError(dir: File, text: String, e: Throwable) {
    outPrintln()
    errPrintln(text + e.getMessage)
    val f = new File(dir, ".errorlog")
    f.getParentFile.mkdirs
    val fw = new FileWriter(f)
    try {
      val pw = new PrintWriter(fw)
      pw.println("An error occurred on " + timeStamp)
      e.printStackTrace(pw)
      fw.close()
      outPrintln("Written: " + f.getAbsolutePath)
    } catch {
      case e : Throwable =>
        errPrintln("Error: " + e.getMessage)
    }
  }
} 
Example 24
Source File: MnistManager.scala    From dl4scala   with MIT License
package org.dl4scala.datasets.mnist

import java.io.{BufferedWriter, FileWriter, IOException}

import org.dl4scala.datasets.fetchers.MnistDataFetcher


  def close(): Unit = {
    if (images != null) {
      try
        images.close()
      catch {
        case e: IOException =>

      }
      images = null
    }
    if (labels != null) {
      try
        labels.close()
      catch {
        case e: IOException =>
      }
      labels = null
    }
  }
}

object MnistManager {
  private val HEADER_SIZE = 8

  @throws(classOf[IOException])
  def writeImageToPpm(image: Array[Array[Int]], ppmFileName: String): Unit = {
    try {
      val ppmOut = new BufferedWriter(new FileWriter(ppmFileName))
      val rows = image.length
      val cols = image(0).length
      ppmOut.write("P3\n")
      ppmOut.write("" + rows + " " + cols + " 255\n")

      (0 until rows).foreach{ i =>
        val s = new StringBuilder
        (0 until cols).foreach(j => s.append(image(i)(j) + " " + image(i)(j) + " " + image(i)(j) + "  "))
        ppmOut.write(s.toString)
      }
      ppmOut.close()
    } catch {
      case e: Exception => println("BufferedWriter error" + e.printStackTrace())
    }
  }
} 
Example 25
Source File: Storage.scala    From exhibitor-mesos-framework   with Apache License 2.0
package ly.stealth.mesos.exhibitor

import java.io.{File, FileWriter}

import play.api.libs.json.{Json, Reads, Writes}

import scala.io.Source

trait Storage[T] {
  def save(value: T)(implicit writes: Writes[T])

  def load(implicit reads: Reads[T]): Option[T]
}

case class FileStorage[T](file: String) extends Storage[T] {
  override def save(value: T)(implicit writes: Writes[T]) {
    val writer = new FileWriter(file)
    try {
      writer.write(Json.stringify(Json.toJson(value)))
    } finally {
      writer.close()
    }
  }

  override def load(implicit reads: Reads[T]): Option[T] = {
    if (!new File(file).exists()) None
    else Json.parse(Source.fromFile(file).mkString).asOpt[T]
  }
} 
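A minimal usage sketch for FileStorage, assuming play-json is on the classpath; the ClusterState case class and file name are made up for illustration:

import play.api.libs.json.{Format, Json}
import ly.stealth.mesos.exhibitor.FileStorage

case class ClusterState(name: String, port: Int)

object ClusterState {
  implicit val format: Format[ClusterState] = Json.format[ClusterState]
}

object FileStorageDemo extends App {
  val storage = FileStorage[ClusterState]("cluster-state.json") // hypothetical file name
  storage.save(ClusterState("exhibitor", 8080))                 // serialized via the implicit Writes
  println(storage.load)                                         // Some(ClusterState(exhibitor,8080))
}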
Example 26
Source File: KafkaCollector.scala    From Swallow   with Apache License 2.0 5 votes vote down vote up
package com.intel.hibench.common.streaming.metrics

import java.io.{FileWriter, File}
import java.util.Date
import java.util.concurrent.{TimeUnit, Future, Executors}

import com.codahale.metrics.{UniformReservoir, Histogram}
import kafka.utils.{ZKStringSerializer, ZkUtils}
import org.I0Itec.zkclient.ZkClient

import scala.collection.mutable.ArrayBuffer


class KafkaCollector(zkConnect: String, metricsTopic: String,
    outputDir: String, sampleNumber: Int, desiredThreadNum: Int) extends LatencyCollector {

  private val histogram = new Histogram(new UniformReservoir(sampleNumber))
  private val threadPool = Executors.newFixedThreadPool(desiredThreadNum)
  private val fetchResults = ArrayBuffer.empty[Future[FetchJobResult]]

  def start(): Unit = {
    val partitions = getPartitions(metricsTopic, zkConnect)

    println("Starting MetricsReader for kafka topic: " + metricsTopic)

    partitions.foreach(partition => {
      val job = new FetchJob(zkConnect, metricsTopic, partition, histogram)
      val fetchFeature = threadPool.submit(job)
      fetchResults += fetchFeature
    })

    threadPool.shutdown()
    threadPool.awaitTermination(30, TimeUnit.MINUTES)

    val finalResults = fetchResults.map(_.get()).reduce((a, b) => {
      val minTime = Math.min(a.minTime, b.minTime)
      val maxTime = Math.max(a.maxTime, b.maxTime)
      val count = a.count + b.count
      new FetchJobResult(minTime, maxTime, count)
    })

    report(finalResults.minTime, finalResults.maxTime, finalResults.count)
  }

  private def getPartitions(topic: String, zkConnect: String): Seq[Int] = {
    val zkClient = new ZkClient(zkConnect, 6000, 6000, ZKStringSerializer)
    try {
      ZkUtils.getPartitionsForTopics(zkClient, Seq(topic)).flatMap(_._2).toSeq
    } finally {
      zkClient.close()
    }
  }


  private def report(minTime: Long, maxTime: Long, count: Long): Unit = {
    val outputFile = new File(outputDir, metricsTopic + ".csv")
    println(s"written out metrics to ${outputFile.getCanonicalPath}")
    val header = "time,count,throughput(msgs/s),max_latency(ms),mean_latency(ms),min_latency(ms)," +
        "stddev_latency(ms),p50_latency(ms),p75_latency(ms),p95_latency(ms),p98_latency(ms)," +
        "p99_latency(ms),p999_latency(ms)\n"
    val fileExists = outputFile.exists()
    if (!fileExists) {
      val parent = outputFile.getParentFile
      if (!parent.exists()) {
        parent.mkdirs()
      }
      outputFile.createNewFile()
    }
    val outputFileWriter = new FileWriter(outputFile, true)
    if (!fileExists) {
      outputFileWriter.append(header)
    }
    val time = new Date(System.currentTimeMillis()).toString
    val count = histogram.getCount
    val snapshot = histogram.getSnapshot
    val throughput = count * 1000 / (maxTime - minTime)
    outputFileWriter.append(s"$time,$count,$throughput," +
        s"${formatDouble(snapshot.getMax)}," +
        s"${formatDouble(snapshot.getMean)}," +
        s"${formatDouble(snapshot.getMin)}," +
        s"${formatDouble(snapshot.getStdDev)}," +
        s"${formatDouble(snapshot.getMedian)}," +
        s"${formatDouble(snapshot.get75thPercentile())}," +
        s"${formatDouble(snapshot.get95thPercentile())}," +
        s"${formatDouble(snapshot.get98thPercentile())}," +
        s"${formatDouble(snapshot.get99thPercentile())}," +
        s"${formatDouble(snapshot.get999thPercentile())}\n")
    outputFileWriter.close()
  }

  private def formatDouble(d: Double): String = {
    "%.3f".format(d)
  }

} 
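The report method above opens the FileWriter in append mode and writes the CSV header only when the file is new; here is a standalone sketch of just that pattern (hypothetical path and columns):

import java.io.{File, FileWriter}

object CsvAppendDemo extends App {
  val out = new File("/tmp/metrics-demo.csv") // hypothetical output path
  val isNew = !out.exists()
  val writer = new FileWriter(out, true)      // second argument true => append instead of truncate
  try {
    if (isNew) writer.append("time,count\n")  // header is written only on the first run
    writer.append(s"${System.currentTimeMillis()},42\n")
  } finally writer.close()
}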
Example 27
Source File: LogActor.scala    From coral   with Apache License 2.0 5 votes vote down vote up
package io.coral.actors.connector

import java.io.FileWriter
import akka.actor.{ActorLogging, Props}
import io.coral.actors.CoralActor
import org.json4s.JsonAST.JObject
import org.json4s._
import org.json4s.jackson.JsonMethods._
import scala.concurrent.Future


object LogActor {
	implicit val formats = org.json4s.DefaultFormats

	def getParams(json: JValue) = {
		val file = (json \ "params" \ "file").extractOpt[String]
		val append = (json \ "params" \ "append").extractOpt[Boolean]
		Some((file, append getOrElse false))
	}

	def apply(json: JValue): Option[Props] = {
		getParams(json).map(_ => Props(classOf[LogActor], json))
	}
}

class LogActor(json: JObject) extends CoralActor(json) with ActorLogging {
	val (file, append) = LogActor.getParams(json).get
	var fileWriter: Option[FileWriter] = None

	override def preStart() = {
		if (file.isDefined) {
			fileWriter = Some(new FileWriter(file.get, append))
		}
	}

	override def postStop() = {
		fileWriter match {
			case None =>
			case Some(f) => f.close()

		}
	}

	override def trigger = {
		json => Future {
			fileWriter match {
				case None =>
					log.info(compact(json))
				case Some(f) =>
					f.write(compact(json) + "\n")
					f.flush()
			}

			Some(JNothing)
		}
	}
} 
Example 28
Source File: DevelopmentEmailNotifications.scala    From sundial   with MIT License 5 votes vote down vote up
package service.notifications

import java.io.{BufferedWriter, File, FileWriter}

import dao.SundialDaoFactory
import dto.DisplayModels
import model.{EmailNotification, ProcessStatus}
import software.amazon.awssdk.services.ses.SesClient

import scala.sys.process._

class DevelopmentEmailNotifications(daoFactory: SundialDaoFactory,
                                    displayModels: DisplayModels,
                                    sesClient: SesClient)
    extends EmailNotifications(daoFactory,
                               "[email protected]",
                               displayModels,
                               sesClient) {

  override def sendEmail(processStatus: ProcessStatus,
                         previousProcessStatus: Option[ProcessStatus],
                         teams: Seq[EmailNotification],
                         subject: String,
                         body: String): Unit = {
    val outfile = File.createTempFile("sundial", ".html")
    val bw = new BufferedWriter(new FileWriter(outfile))
    bw.write(body)
    bw.close()

    Seq("open", outfile.getAbsolutePath()).!
  }

} 
Example 29
Source File: ElevationSpec.scala    From geotrellis-osm-elevation   with Apache License 2.0 5 votes vote down vote up
package geotrellis.osme.core

import java.io.{FileWriter, BufferedWriter, File}

import geotrellis.raster.io.geotiff.SinglebandGeoTiff
import geotrellis.vector.{Feature, Line, LineFeature}
import geotrellis.vector.io.json.GeoJson._
import spray.json.DefaultJsonProtocol._
import geotrellis.vector.io.json.{JsonFeatureCollection, GeoJson}
import spray.json.JsonReader
import scala.io.Source
import org.scalatest._


class ElevationSpec extends FunSpec with Matchers {

  def sharedData = {

    val geojson = Source.fromFile("data/imgn36w100vector.geojson").getLines.mkString
    val gjCol = parse[JsonFeatureCollection](geojson)

    new {
      val geotiff = SinglebandGeoTiff("data/imgn36w100_13_3_3.tif")
      val multiLine = gjCol.getAllLines().toMultiLine
      val elevationGeoJson = ElevationOverlay(geotiff, multiLine)
    }

  }

  describe("Core spec") {

    val numInputLines = sharedData.multiLine.lines.size
    val numOutputLines = sharedData.elevationGeoJson.size
    val ratio = numOutputLines.toDouble / numInputLines
    println(s"Ratio of output lines to input lines: $ratio : 1")

    it("returned geojson should contain the MEANV property") {
      val elevationFeatures =  sharedData.elevationGeoJson
      val hasMeanV = elevationFeatures.forall(feat => feat.data.contains("MEANV"))
      assert(hasMeanV)
    }

    it("should produce a geojson file that can be put into geocolor.io") {
      val elevationFeatures =  sharedData.elevationGeoJson
      val jsonFeatures = JsonFeatureCollection(elevationFeatures)

      val file = new File("geocolor_test.json")
      val bw = new BufferedWriter(new FileWriter(file))
      bw.write(jsonFeatures.toJson.prettyPrint)
      bw.close()
    }

    it("Every feature should intersect the tile extent") {
      val elevationFeatures =  sharedData.elevationGeoJson
      val rasterPoly =  sharedData.geotiff.rasterExtent.extent.toPolygon()
      val doesIntersect = elevationFeatures.forall(feat => rasterPoly.intersects(feat.geom))
      assert(doesIntersect)
    }


  }
} 
Example 30
Source File: ElevationOverlay.scala    From geotrellis-osm-elevation   with Apache License 2.0 5 votes vote down vote up
package geotrellis.osme.core

import java.io.{BufferedWriter, FileWriter, File}

import com.vividsolutions.jts.geom.{LineString, MultiLineString}
import geotrellis.raster.io.geotiff.SinglebandGeoTiff
import geotrellis.vector.io.json.{GeoJson, JsonFeatureCollection}
import scala.collection.immutable.Map
import spray.json._
import DefaultJsonProtocol._
import geotrellis.vector.io.json.FeatureFormats.writeFeatureJson
import geotrellis.vector.io.json.GeometryFormats._
import geotrellis.vector.densify.DensifyMethods
import geotrellis.vector.dissolve.DissolveMethods
import geotrellis.vector._



    val segmentsFeatures = segments.map { segment =>
       val center = segment.centroid match {
         case PointResult(p) => p
         case NoResult => throw new Exception("No result found in PointOrNoResult")
       }
       val (col, row) = rasterExtent.mapToGrid(center)
       val elevation = geotiff.tile.getDouble(col, row)
       val meanvMap: Map[String, Double] = Map("MEANV" -> elevation)
       LineFeature(segment, meanvMap)
     }

    return segmentsFeatures.toTraversable


  }
} 
Example 31
Source File: Storage.scala    From zipkin-mesos-framework   with Apache License 2.0 5 votes vote down vote up
package net.elodina.mesos.zipkin.storage

import java.io.{File, FileWriter}

import org.I0Itec.zkclient.ZkClient
import org.I0Itec.zkclient.exception.ZkNodeExistsException
import org.I0Itec.zkclient.serialize.ZkSerializer
import play.api.libs.json.{Json, Reads, Writes}

import scala.io.Source

trait Storage[T] {
  def save(value: T)(implicit writes: Writes[T])

  def load(implicit reads: Reads[T]): Option[T]
}

case class FileStorage[T](file: String) extends Storage[T] {
  override def save(value: T)(implicit writes: Writes[T]) {
    val writer = new FileWriter(file)
    try {
      writer.write(Json.stringify(Json.toJson(value)))
    } finally {
      writer.close()
    }
  }

  override def load(implicit reads: Reads[T]): Option[T] = {
    if (!new File(file).exists()) None
    else Json.parse(Source.fromFile(file).mkString).asOpt[T]
  }
}

case class ZkStorage[T](zk: String) extends Storage[T] {
  val (zkConnect, path) = zk.span(_ != '/')
  createChrootIfRequired()

  private def createChrootIfRequired() {
    if (path != "") {
      val client = zkClient
      try {
        client.createPersistent(path, true)
      }
      finally {
        client.close()
      }
    }
  }

  private def zkClient: ZkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer)

  override def save(value: T)(implicit writes: Writes[T]) {
    val client = zkClient
    val json = Json.stringify(Json.toJson(value))
    try {
      client.createPersistent(path, json)
    }
    catch {
      case e: ZkNodeExistsException => client.writeData(path, json)
    }
    finally {
      client.close()
    }
  }

  override def load(implicit reads: Reads[T]): Option[T] = {
    val client = zkClient
    try {
      Option(client.readData(path, true).asInstanceOf[String]).flatMap(Json.parse(_).asOpt[T])
    }
    finally {
      client.close()
    }
  }
}

private object ZKStringSerializer extends ZkSerializer {
  def serialize(data: Object): Array[Byte] = data.asInstanceOf[String].getBytes("UTF-8")

  def deserialize(bytes: Array[Byte]): Object = {
    if (bytes == null) null
    else new String(bytes, "UTF-8")
  }
} 
Example 32
Source File: DocExportV3.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.utils.doc

import java.io.FileWriter
import java.nio.file.{Files, Paths}

import com.github.mustachejava.DefaultMustacheFactory
import com.wavesplatform.DocSource
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.lang.directives.DirectiveSet
import com.wavesplatform.lang.directives.values.{Account, Expression, V3}

import scala.jdk.CollectionConverters._

object DocExportV3 {
  def main(args: Array[String]): Unit = {
      val funcV3Template = if (args.length == 1) args(0) else "lang/func-doc.template.md"
      val path = Paths.get("target/funcs")
      Files.createDirectories(path)

      val ds = DirectiveSet(V3, Account, Expression).explicitGet()
      RideFullContext.build(ds)
        .functions
        .map(
          f => {
            val argTypes = f.signature.args.map(_._2.toString).toList
            val docKey = (f.name, argTypes, V3.value.asInstanceOf[Int])
            val (doc, paramsDoc, category) = DocSource.categorizedfuncDataV3(docKey)
            val varDocs =
              (f.args, f.signature.args, paramsDoc)
                .zipped
                .toList
                .map { arg => VarDoc(arg._1, TypeDoc(arg._2._2), arg._3.replace("\n", "<br>")) }
                .asJava

            val cost = f.costByLibVersion(V3).toString
            val funcDoc = FuncDoc(f.name, TypeDoc(f.signature.result), doc.replace("\n", "<br>"), varDocs, cost)
            (funcDoc, category)
          }
        )
        .groupBy(_._2)
        .map { case (category, funcs) =>
          val indexedDocs = funcs
            .zipWithIndex
            .map { case ((func, _), index) => FuncDocV3(func, index + 1) }
            .toList
            .asJava
          val title = category.replace("-", " ").capitalize
          val writer = new FileWriter(path.resolve(category + ".md").toFile)
          val docs = CategorizedFuncsDoc(indexedDocs, title)
          (writer, docs)
        }
        .foreach { case (writer, docs) =>
          new DefaultMustacheFactory().compile(funcV3Template).execute(writer, docs)
          writer.close()
        }
    }
} 
Example 33
Source File: FilePersistenceActor.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.persistence

import java.io.{ File, FileWriter }

import akka.actor.Actor
import io.vamp.common.notification.NotificationErrorException
import io.vamp.common.{ Artifact, ClassMapper, Config, ConfigMagnet }
import io.vamp.persistence.AccessGuard.LoadAll
import io.vamp.persistence.notification.{ CorruptedDataException, UnknownDataFormatException }

import scala.io.Source

class FilePersistenceActorMapper extends ClassMapper {
  val name = "file"
  val clazz: Class[_] = classOf[FilePersistenceActor]
}

object FilePersistenceActor {
  val directory: ConfigMagnet[String] = Config.string("vamp.persistence.file.directory")
}

class FilePersistenceActor
    extends PersistenceActor
    with PersistenceRepresentation
    with PersistenceMarshaller
    with PersistenceDataReader {

  import FilePersistenceActor._

  private lazy val file = {
    val dir = directory()
    val file = new File(
      if (dir.endsWith(File.separator)) s"$dir${namespace.name}.db" else s"$dir${File.separator}${namespace.name}.db"
    )
    file.getParentFile.mkdirs()
    file.createNewFile()
    file
  }

  override def receive: Receive = ({
    case LoadAll ⇒ read()
  }: Actor.Receive) orElse super.receive

  override def preStart(): Unit = self ! LoadAll

  override protected def info(): Map[String, Any] = super.info() + ("type" → "file") + ("file" → file.getAbsolutePath)

  protected def read(): Unit = this.synchronized {
    for (line ← Source.fromFile(file).getLines().map(_.trim)) {
      if (line.nonEmpty && !line.startsWith("#") && !line.startsWith("//")) try dataRead(line) catch {
        case NotificationErrorException(_: UnknownDataFormatException, _) ⇒ // already logged, skip to the next line
        case c: CorruptedDataException ⇒
          reportException(c)
          validData = false
          throw c
      }
    }
    removeGuard()
  }

  override protected def set[T <: Artifact](artifact: T, kind: String): T = {
    def store(): T = {
      write(PersistenceRecord(artifact.name, artifact.kind, marshall(artifact)))
      super.set[T](artifact, kind)
    }

    super.get[T](artifact.name, kind) match {
      case Some(a) if a != artifact ⇒ store()
      case Some(_)                  ⇒ artifact
      case None                     ⇒ store()
    }
  }

  override protected def delete[T <: Artifact](name: String, kind: String): Option[T] = {
    super.get[T](name, kind) match {
      case Some(_) ⇒
        write(PersistenceRecord(name, kind))
        super.delete(name, kind)
      case _ ⇒ None
    }
  }

  override protected def dataSet(artifact: Artifact, kind: String): Artifact = super.set(artifact, kind)

  override protected def dataDelete(name: String, kind: String): Unit = super.delete(name, kind)

  private def write(record: PersistenceRecord): Unit = {
    guard()
    val writer = new FileWriter(file, true)
    try {
      writer.write(s"${marshallRecord(record)}\n")
      writer.flush()
    }
    finally writer.close()
  }
} 
Example 34
Source File: Output.scala    From Clustering4Ever   with Apache License 2.0 5 votes vote down vote up
package org.clustering4ever.spark.clustering.mtm
import java.io._
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.linalg.DenseVector
import scala.sys.process._
import java.util.Calendar
import java.text.SimpleDateFormat
import java.io.File
import java.io.FileWriter

object Output extends Serializable
{

  def saveStr(savingPath: String, value: String, fileName: String = "") =
  {
    s"mkdir -p ${savingPath}".!
    val finalPath = savingPath + fileName
    val fw = new FileWriter(finalPath, true)
    fw.write(value + "\n")
    fw.close()
  }

  def write(outputDir: String, datas: RDD[Array[Double]], model: AbstractModel, nbRowSOM:Int, nbColSOM: Int): String =
  {
      val now = Calendar.getInstance().getTime()
      val format = new SimpleDateFormat("yyyy-MM-dd-HH-mm-ss")
      val time = format.format(now)
      val dim = datas.first.size
      val datasWithIndex = datas.zipWithIndex.map(_.swap)

      val path: String = outputDir + "/EXP-" + time + "/"
      s"mkdir -p ${path}".!
    
      val mapMin = Array.fill[Byte](dim)(0).mkString(",")
      var header = "# mapDim=2 mapSize={"+ nbRowSOM +"," + nbColSOM + "}"
      header += " pointDim=" + dim + " pointRealDim=" + dim + " mapMin={" + mapMin + "}"
    
      val prototypes = model.prototypes.map( d => (d.id, d.point)).sortBy(_._1).map(_._2)
      println("Write Prototypes...")
      val protosString = prototypes.map( d => d.toArray.mkString(",")).mkString("\n")

      // Use FileWriter
      saveStr(path, header + "\n" + protosString, "maps")

      val sumAffectedDatas = datas.map( d => (model.findClosestPrototype(d).id, 1)).reduceByKey{ case (sum1, sum2) => sum1 + sum2 }.collectAsMap 
    
      // fill in all the prototypes that have 0 observations
      val card = (0 until prototypes.length).map( d => if (sumAffectedDatas.contains(d)) sumAffectedDatas(d) + "" else "0" )
    
      println("Write Cardinalities...")
      var cardHeader = "# mapDim=2 mapSize={"+ nbRowSOM +"," + nbColSOM + "}" 
      cardHeader += " pointDim=1 pointRealDim=0 mapMin={0} mapMax={0}"
      val cardStr = card.mkString("\n")
      saveStr(path, cardHeader + "\n" + cardStr, "cards")

      val affHeader = "# mapDim=1 mapSize={" + datas.count() + "} pointDim=1 pointRealDim=0 mapMin={0} mapMax={0}"
      val aff = datasWithIndex.map(d => (d._1, model.findClosestPrototype(d._2).id + "")).sortByKey().values.collect.mkString("\n")

      println("Write Affiliate...")
      saveStr(path, affHeader + "\n" + aff, "affs")    
      println("Write Maps...")

      val maps = prototypes.zip(card).map(d => d._1.toArray.mkString(",") + "," + d._2).mkString("\n")
      saveStr(path, maps, "mapscard")
      println("Write successfully...")
      time
  }
} 
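A minimal usage sketch for saveStr above (hypothetical path and content; note that saveStr shells out to mkdir -p, so a Unix-like shell is assumed, and the FileWriter appends to any existing file):

import org.clustering4ever.spark.clustering.mtm.Output

object OutputDemo extends App {
  Output.saveStr("/tmp/c4e-demo/", "prototype 0: 0.1,0.2,0.3", "notes.txt")
}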
Example 35
Source File: ParseCSVwithHTML.scala    From CSYE7200_Old   with MIT License 5 votes vote down vote up
package edu.neu.coe.csye7200.parse

import java.io.{BufferedWriter, File, FileWriter}

import scala.collection.mutable
import scala.io.{BufferedSource, Source}
import scala.util.{Failure, Success, Try}


object ParseCSVwithHTML extends App {
  val parser = ParseCSVwithHTML(CsvParser(delimiter = '\t' + ""))
  val title = "Report"
  if (args.length > 0) {
    val filename = args.head
    val source: BufferedSource = Source.fromFile(filename, "UTF-16")
    val w = parser.parseStreamIntoHTMLTable(source.getLines.toStream, title)
    val file = new File("output.html")
    val bw = new BufferedWriter(new FileWriter(file))
    bw.write(w)
    bw.close()
    println(s"Successfully written $file")
  }
  else
    System.err.println("syntax: ParseCSVwithHTML filename")

} 
Example 36
Source File: BlockchainSimulationSpecification.scala    From sigmastate-interpreter   with MIT License 5 votes vote down vote up
package sigmastate.utxo.blockchain

import java.io.{File, FileWriter}

import org.scalacheck.Gen
import sigmastate.Values.{BooleanConstant, ErgoTree, GetVarBoolean, TrueLeaf}
import sigmastate.helpers.{ContextEnrichingTestProvingInterpreter, ErgoLikeTestProvingInterpreter}
import sigmastate.interpreter.ContextExtension
import sigmastate.utxo.blockchain.BlockchainSimulationTestingCommons._

import scala.collection.concurrent.TrieMap
import scala.util.Random


class BlockchainSimulationSpecification extends BlockchainSimulationTestingCommons {

  implicit lazy val IR = new TestingIRContext

  property("apply one valid block") {
    val state = ValidationState.initialState()
    val miner = new ErgoLikeTestProvingInterpreter()
    val block = generateBlock(state, miner, 0)
    val updStateTry = state.applyBlock(block)
    updStateTry.isSuccess shouldBe true
  }

  property("too costly block") {
    val state = ValidationState.initialState()
    val miner = new ErgoLikeTestProvingInterpreter()
    val block = generateBlock(state, miner, 0)
    val updStateTry = state.applyBlock(block, maxCost = 1)
    updStateTry.isSuccess shouldBe false
  }

  property("apply many blocks") {
    val state = ValidationState.initialState()
    val miner = new ErgoLikeTestProvingInterpreter()
    checkState(state, miner, 0, randomDeepness)
  }

  property("apply many blocks with enriched context") {
    val state = ValidationState.initialState()
    val miner = new ErgoLikeTestProvingInterpreter()
    val varId = 1.toByte
    val prop = GetVarBoolean(varId).get.toSigmaProp
    // unable to spend boxes without correct context extension
    an[RuntimeException] should be thrownBy checkState(state, miner, 0, randomDeepness, Some(prop))

    // spend boxes with context extension
    val contextExtension = ContextExtension(Map(varId -> TrueLeaf))
    checkState(state, miner, 0, randomDeepness, Some(prop), contextExtension)
  }

  ignore(s"benchmarking applying many blocks (!!! ignored)") {
    val results = new TrieMap[Int, Long]

    def bench(numberOfBlocks: Int): Unit = {

      val state = ValidationState.initialState()
      val miner = new ContextEnrichingTestProvingInterpreter()

      val (_, time) = (0 until numberOfBlocks).foldLeft(state -> 0L) { case ((s, timeAcc), h) =>
        val b = generateBlock(state, miner, h)

        val t0 = System.currentTimeMillis()
        val updStateTry = s.applyBlock(b)
        val t = System.currentTimeMillis()

        updStateTry shouldBe 'success
        updStateTry.get -> (timeAcc + (t - t0))
      }

      println(s"Total time for $numberOfBlocks blocks: $time ms")
      results.put(numberOfBlocks, time)
    }

    bench(100)
    bench(200)
    bench(300)
    bench(400)

    printResults(results.toMap)

    def printResults(results: Map[Int, Long]): Unit = {
      val file = new File("target/bench")
      file.mkdirs()
      val writer = new FileWriter(s"target/bench/result.csv", false)
      val sorted = results.toList.sortBy { case (i, _) => i }
      val header = sorted.map(_._1).mkString(",")
      writer.write(s"$header\n")
      val values = sorted.map(_._2).mkString(",")
      writer.write(s"$values\n")
      writer.flush()
      writer.close()
    }
  }
} 
Example 37
Source File: SpatialSpanClustering.scala    From traj-sim-spark   with Apache License 2.0 5 votes vote down vote up
package edu.utah.cs.trajectory

import java.io.{BufferedWriter, File, FileWriter}

import com.vividsolutions.jts.geom.{GeometryCollection, GeometryFactory}
import edu.utah.cs.partitioner.STRMBRPartition
import edu.utah.cs.spatial.{LineSegment, MBR, Point, Polygon}
import edu.utah.cs.util._
import org.apache.spark.{SparkConf, SparkContext}
import org.geotools.geojson.geom.GeometryJSON


object SpatialSpanClustering {
  final val max_entries_per_node = 25

  def getMBR(x: (Int, Array[(Int, LineSegment)])): (MBR, Int) = {
    val pts = x._2.flatMap(p => Array(p._2.start, p._2.end))
    var maxx = Double.MinValue
    var maxy = Double.MinValue
    var minx = Double.MaxValue
    var miny = Double.MaxValue
    pts.foreach(x => {
      maxx = Math.max(x.coord(0), maxx)
      maxy = Math.max(x.coord(1), maxy)
      minx = Math.min(x.coord(0), minx)
      miny = Math.min(x.coord(1), miny)
    })
    (MBR(Point(Array(minx, miny)), Point(Array(maxx, maxy))), x._1)
  }

  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("SpatialSpanClustering"))

    if (args.length < 2) {
      println("usage: SpatialSpanClustering <input_file_path> <output_file_path>")
      System.exit(1)
    }

    val input_file_path = args(0)
    val output_file_path = args(1)

    val bf_meta = BloomFilterMeta(10000, 1)
    val bc_bf_meta = sc.broadcast(bf_meta)
    BloomFilter.meta = bf_meta

    val mbrs = sc.textFile(input_file_path).mapPartitions(iter => {
      iter.map(x => {
        val splitted = x.split("\t")
        (splitted(0).toInt,
          LineSegment(Point(Array(splitted(2).toDouble, splitted(1).toDouble)),
            Point(Array(splitted(4).toDouble, splitted(3).toDouble))))
      }).toArray.groupBy(_._1).map(now => getMBR(now)).iterator
    })

    val num_partitions = mbrs.getNumPartitions * 4

    val partitioned_rdd = STRMBRPartition(mbrs, num_partitions, 0.01, max_entries_per_node)

    val part_bounds = partitioned_rdd.mapPartitions(iter => {
      if (iter.nonEmpty) {
        var maxx = Double.MinValue
        var maxy = Double.MinValue
        var minx = Double.MaxValue
        var miny = Double.MaxValue
        iter.map(_._1).foreach(x => {
          maxx = Math.max(x.high.coord(0), maxx)
          maxy = Math.max(x.high.coord(1), maxy)
          minx = Math.min(x.low.coord(0), minx)
          miny = Math.min(x.low.coord(1), miny)
        })
        Array(MBR(Point(Array(minx, miny)), Point(Array(maxx, maxy)))).iterator
      } else Array().iterator
    }).collect()

    val file = new File(output_file_path)
    val bw = new BufferedWriter(new FileWriter(file))

    val collection = new GeometryCollection(part_bounds.map(x =>
      Polygon(Array(x.low, Point(Array(x.low.coord(0), x.high.coord(1))),
        x.high, Point(Array(x.high.coord(0), x.low.coord(1))), x.low)).content), new GeometryFactory)

    new GeometryJSON().writeGeometryCollection(collection, bw)

    bw.close()

    sc.stop()
  }
} 
Example 38
Source File: LineSegmentClustering.scala    From traj-sim-spark   with Apache License 2.0 5 votes vote down vote up
package edu.utah.cs.trajectory

import java.io.{BufferedWriter, File, FileWriter}

import com.vividsolutions.jts.geom.{GeometryCollection, GeometryFactory}
import edu.utah.cs.partitioner.STRSegPartition
import edu.utah.cs.spatial.{LineSegment, MBR, Point, Polygon}
import edu.utah.cs.util.{BloomFilter, BloomFilterMeta}
import org.apache.spark.{SparkConf, SparkContext}
import org.geotools.geojson.geom.GeometryJSON


object LineSegmentClustering {
  final val max_entries_per_node = 25
  final val k = 10
  final val N = 34085

  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("LineSegmentClustering"))

    if (args.length < 2) {
      println("usage: SpatialSpanClustering <input_file_path> <output_file_path>")
      System.exit(1)
    }

    val input_file_path = args(0)
    val output_file_path = args(1)

    val dataRDD = sc.textFile(input_file_path)
      .map(x => x.split('\t'))
      .map(x => (LineSegment(Point(Array(x(2).toDouble, x(1).toDouble)),
        Point(Array(x(4).toDouble, x(3).toDouble))),
        TrajMeta(x(0).toInt, x(5).toInt)))

    val bf_meta = BloomFilterMeta(N, 1)
    val bc_bf_meta = sc.broadcast(bf_meta)
    BloomFilter.meta = bf_meta

    val num_partitions = dataRDD.getNumPartitions
    val (partitioned_rdd, part_mbrs) = STRSegPartition(dataRDD, num_partitions, 0.01, max_entries_per_node)

    val part_bounds = partitioned_rdd.mapPartitions(iter => {
      if (iter.nonEmpty) {
        var maxx = Double.MinValue
        var maxy = Double.MinValue
        var minx = Double.MaxValue
        var miny = Double.MaxValue
        iter.map(_._1).foreach(x => {
          maxx = Math.max(Math.max(x.start.coord(0), x.end.coord(0)), maxx)
          maxy = Math.max(Math.max(x.start.coord(1), x.end.coord(1)), maxy)
          minx = Math.min(Math.min(x.start.coord(0), x.end.coord(0)), minx)
          miny = Math.min(Math.min(x.start.coord(1), x.end.coord(1)), miny)
        })
        Array(MBR(Point(Array(minx, miny)), Point(Array(maxx, maxy)))).iterator
      } else Array().iterator
    }).collect()

    val file = new File(output_file_path)
    val bw = new BufferedWriter(new FileWriter(file))

    val collection = new GeometryCollection(part_bounds.map(x =>
      Polygon(Array(x.low, Point(Array(x.low.coord(0), x.high.coord(1))),
        x.high, Point(Array(x.high.coord(0), x.low.coord(1))), x.low)).content), new GeometryFactory)

    new GeometryJSON().writeGeometryCollection(collection, bw)

    bw.close()

    sc.stop()
  }
} 
Example 39
Source File: TrajSampling.scala    From traj-sim-spark   with Apache License 2.0 5 votes vote down vote up
package edu.utah.cs.trajectory

import java.io.{BufferedWriter, File, FileWriter}

import edu.utah.cs.spatial.{LineSegment, Point}
import org.apache.spark.{SparkConf, SparkContext}


object TrajSampling {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("TrajSampling")
    val sc = new SparkContext(sparkConf)

    Thread.sleep(3000)

    if (args.length < 3) {
      println("usage: TrajSampling <input_file_path> <output_file_path> <sample_count>")
      System.exit(1)
    }

    val input_file_path = args(0)
    val output_file_path = args(1)
    val cnt = args(2).toInt

    val sampled_trajs = sc.textFile(input_file_path).mapPartitions(iter => {
      iter.map(x => {
        val splitted = x.split("\t")
        (splitted(0).toInt,
          LineSegment(Point(Array(splitted(1).toDouble, splitted(2).toDouble)),
            Point(Array(splitted(3).toDouble, splitted(4).toDouble))))
      }).toArray.groupBy(_._1).map(now => (now._1, now._2.sortBy(_._1).map(_._2))).iterator
    }).takeSample(withReplacement = false, cnt, System.currentTimeMillis())

    val file = new File(output_file_path)
    val bw = new BufferedWriter(new FileWriter(file))

    for (i <- sampled_trajs.indices) {
      val cur_traj = sampled_trajs(i)._2
      cur_traj.foreach(x => bw.write(i + "\t" + x.toTSV + "\n"))
    }

    bw.close()

    sc.stop()
  }
} 
Example 40
Source File: convertOutput.scala    From SparkAndMPIFactorizations   with MIT License 5 votes vote down vote up
package org.apache.spark.mllib.linalg.distributed
import breeze.linalg.{DenseMatrix, DenseVector}
import java.io.{DataInputStream, FileInputStream, FileWriter, File}

object ConvertDump { 

  type DM = DenseMatrix[Double]
  type DDV = DenseVector[Double]
  type DIV = DenseVector[Int]

  def loadDoubleVector( inf: DataInputStream) : DDV = {
    val len = inf.readInt()
    val v = DenseVector.zeros[Double](len)
    for (i <- 0 until len) {
      v(i) = inf.readDouble()
    }
    v
  }
  
  def loadIntVector( inf: DataInputStream) : DIV = {
    val len = inf.readInt()
    val v = DenseVector.zeros[Int](len)
    for (i <- 0 until len) {
      v(i) = inf.readInt()
    }
    v
  }

  def loadMatrix( inf: DataInputStream) : DM = {
    val (r,c) = Tuple2(inf.readInt(), inf.readInt())
    val m = DenseMatrix.zeros[Double](r,c)
    for (i <- 0 until r; j <- 0 until c) {
      m(i,j) = inf.readDouble()
    }
    m 
  }

  def loadDump(infname: String) : Tuple4[DM, DM, DDV, DDV] = {

    val inf = new DataInputStream( new FileInputStream(infname))

    val eofsU = loadMatrix(inf)
    val eofsV = loadMatrix(inf)
    val evals = loadDoubleVector(inf)
    val mean = loadDoubleVector(inf)

    inf.close()
    (eofsU, eofsV, evals, mean)
  }

  def writeDoubleMatrix(mat: DM, fn: String) = {
    val writer = new FileWriter(new File(fn))
    writer.write("%%MatrixMarket matrix coordinate real general\n")
    writer.write(s"${mat.rows} ${mat.cols} ${mat.rows*mat.cols}\n")
    for(i <- 0 until mat.rows) {
      for(j <- 0 until mat.cols) {
        writer.write(f"${i+1} ${j+1} ${mat(i, j)}%f\n")
      }
    }
    writer.close
  }

  def writeIntVector(vec: DIV, fn: String) = {
    val mat = vec.asDenseMatrix
    val writer = new FileWriter(new File(fn))
    writer.write("%%MatrixMarket matrix coordinate real general\n")
    writer.write(s"${mat.rows} ${mat.cols} ${mat.rows*mat.cols}\n")
    for(i <- 0 until mat.rows) {
      for(j <- 0 until mat.cols) {
        writer.write(s"${i+1} ${j+1} ${mat(i, j)}\n")
      }
    }
    writer.close
  }

  def main(args: Array[String]) {
    val (eofsU, eofsV, eofsS, mean) = loadDump(args(0))
    writeDoubleMatrix(eofsU, s"${args(1)}/colEOFs")
    writeDoubleMatrix(eofsV, s"${args(1)}/rowEOFs")
    writeDoubleMatrix(eofsS.asDenseMatrix, s"${args(1)}/evalEOFs")
    writeDoubleMatrix(mean.asDenseMatrix, s"${args(1)}/rowMeans")
  }
} 
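A minimal sketch of writeDoubleMatrix above on a small Breeze matrix (the demo object and output path are hypothetical):

import breeze.linalg.DenseMatrix
import org.apache.spark.mllib.linalg.distributed.ConvertDump

object ConvertDumpDemo extends App {
  val m = DenseMatrix((1.0, 2.0), (3.0, 4.0))
  // Writes the 2x2 matrix in the coordinate text format produced above.
  ConvertDump.writeDoubleMatrix(m, "/tmp/demo-matrix.mm")
}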
Example 41
Source File: DiskBlockManagerSuite.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.storage

import java.io.{File, FileWriter}

import scala.language.reflectiveCalls

import org.mockito.Mockito.{mock, when}
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.util.Utils

class DiskBlockManagerSuite extends SparkFunSuite with BeforeAndAfterEach with BeforeAndAfterAll {
  private val testConf = new SparkConf(false)
  private var rootDir0: File = _
  private var rootDir1: File = _
  private var rootDirs: String = _

  val blockManager = mock(classOf[BlockManager])
  when(blockManager.conf).thenReturn(testConf)
  var diskBlockManager: DiskBlockManager = _

  override def beforeAll() {
    super.beforeAll()
    rootDir0 = Utils.createTempDir()
    rootDir1 = Utils.createTempDir()
    rootDirs = rootDir0.getAbsolutePath + "," + rootDir1.getAbsolutePath
  }

  override def afterAll() {
    super.afterAll()
    Utils.deleteRecursively(rootDir0)
    Utils.deleteRecursively(rootDir1)
  }

  override def beforeEach() {
    val conf = testConf.clone
    conf.set("spark.local.dir", rootDirs)
    diskBlockManager = new DiskBlockManager(blockManager, conf)
  }

  override def afterEach() {
    diskBlockManager.stop()
  }

  test("basic block creation") {
    val blockId = new TestBlockId("test")
    val newFile = diskBlockManager.getFile(blockId)
    writeToFile(newFile, 10)
    assert(diskBlockManager.containsBlock(blockId))
    newFile.delete()
    assert(!diskBlockManager.containsBlock(blockId))
  }

  test("enumerating blocks") {
    val ids = (1 to 100).map(i => TestBlockId("test_" + i))
    val files = ids.map(id => diskBlockManager.getFile(id))
    files.foreach(file => writeToFile(file, 10))
    assert(diskBlockManager.getAllBlocks.toSet === ids.toSet)
  }

  def writeToFile(file: File, numBytes: Int) {
    val writer = new FileWriter(file, true)
    for (i <- 0 until numBytes) writer.write(i)
    writer.close()
  }
} 
Example 42
Source File: LogPageSuite.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.deploy.worker.ui

import java.io.{File, FileWriter}

import org.mockito.Mockito.{mock, when}
import org.scalatest.PrivateMethodTester

import org.apache.spark.SparkFunSuite

class LogPageSuite extends SparkFunSuite with PrivateMethodTester {

  test("get logs simple") {
    val webui = mock(classOf[WorkerWebUI])
    val tmpDir = new File(sys.props("java.io.tmpdir"))
    val workDir = new File(tmpDir, "work-dir")
    workDir.mkdir()
    when(webui.workDir).thenReturn(workDir)
    val logPage = new LogPage(webui)

    // Prepare some fake log files to read later
    val out = "some stdout here"
    val err = "some stderr here"
    val tmpOut = new File(workDir, "stdout")
    val tmpErr = new File(workDir, "stderr")
    val tmpErrBad = new File(tmpDir, "stderr") // outside the working directory
    val tmpOutBad = new File(tmpDir, "stdout")
    val tmpRand = new File(workDir, "random")
    write(tmpOut, out)
    write(tmpErr, err)
    write(tmpOutBad, out)
    write(tmpErrBad, err)
    write(tmpRand, "1 6 4 5 2 7 8")

    // Get the logs. All log types other than "stderr" or "stdout" will be rejected
    val getLog = PrivateMethod[(String, Long, Long, Long)]('getLog)
    val (stdout, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "stdout", None, 100)
    val (stderr, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "stderr", None, 100)
    val (error1, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "random", None, 100)
    val (error2, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "does-not-exist.txt", None, 100)
    // These files exist, but live outside the working directory
    val (error3, _, _, _) =
      logPage invokePrivate getLog(tmpDir.getAbsolutePath, "stderr", None, 100)
    val (error4, _, _, _) =
      logPage invokePrivate getLog(tmpDir.getAbsolutePath, "stdout", None, 100)
    assert(stdout === out)
    assert(stderr === err)
    assert(error1.startsWith("Error: Log type must be one of "))
    assert(error2.startsWith("Error: Log type must be one of "))
    assert(error3.startsWith("Error: invalid log directory"))
    assert(error4.startsWith("Error: invalid log directory"))
  }

  
  private def write(f: File, s: String): Unit = {
    val writer = new FileWriter(f)
    try {
      writer.write(s)
    } finally {
      writer.close()
    }
  }

} 
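The write helper above uses the classic try/finally idiom; on Scala 2.13+ the same thing can be written with scala.util.Using, which closes the writer even if the write fails (a sketch, not part of the original suite):

import java.io.{File, FileWriter}
import scala.util.Using

object SafeWrite {
  def write(f: File, s: String): Unit =
    Using.resource(new FileWriter(f))(_.write(s)) // FileWriter is AutoCloseable, so it is released automatically
}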
Example 43
Source File: LibSVMTransformationLocalFunctionalTests.scala    From sagemaker-spark   with Apache License 2.0 5 votes vote down vote up
package com.amazonaws.services.sagemaker.sparksdk.transformation

import java.io.{File, FileWriter}

import collection.JavaConverters._
import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers}
import org.scalatest.mock.MockitoSugar

import org.apache.spark.sql.SparkSession

import com.amazonaws.services.sagemaker.sparksdk.transformation.deserializers.LibSVMResponseRowDeserializer
import com.amazonaws.services.sagemaker.sparksdk.transformation.serializers.LibSVMRequestRowSerializer

class LibSVMTransformationLocalFunctionalTests extends FlatSpec with Matchers with MockitoSugar
  with BeforeAndAfter {

  val spark = SparkSession.builder
    .master("local")
    .appName("spark session")
    .getOrCreate()

  var libsvmDataFile : File = _
  val libsvmdata =
    "1.0 1:1.5 2:3.0 28:-39.935 55:0.01\n" +
      "0.0 2:3.0 28:-39.935 55:0.01\n" +
      "-1.0 23:-39.935 55:0.01\n" +
      "3.0 1:1.5 2:3.0"
  before {
    libsvmDataFile = File.createTempFile("temp", "temp")
    val fw = new FileWriter(libsvmDataFile)
    fw.write(libsvmdata)
    fw.close()
  }

  "LibSVMSerialization" should "serialize Spark loaded libsvm file to same contents" in {
    import spark.implicits._

    val df = spark.read.format("libsvm").load(libsvmDataFile.getPath)
    val libsvmSerializer = new LibSVMRequestRowSerializer(Some(df.schema))
    val result = df.map(row => new String(libsvmSerializer.serializeRow(row))).collect().mkString
    assert (libsvmdata.trim == result.trim)
  }

  "LibSVMDeserialization" should "deserialize serialized lib svm records" in {

    val libsvmdata =
      "1.0 1:1.5 2:3.0 28:-39.935 55:0.01\n" +
        "0.0 2:3.0 28:-39.935 55:0.01\n" +
        "-1.0 23:-39.935 55:0.01\n" +
        "3.0 1:1.5 2:3.0"

    val libsvmDeserializer = new LibSVMResponseRowDeserializer (55)
    val rowList = libsvmDeserializer.deserializeResponse(libsvmdata.getBytes).toBuffer.asJava
    val deserializedDataFrame = spark.createDataFrame(rowList, libsvmDeserializer.schema)
    val sparkProducedDataFrame = spark.read.format("libsvm").load(libsvmDataFile.getPath)

    val deserializedRows = deserializedDataFrame.collectAsList()
    val sparkRows = sparkProducedDataFrame.collectAsList()

    assert (deserializedRows == sparkRows)
  }
} 
Example 44
Source File: LoadDistLabelKb.scala    From rex   with Apache License 2.0 5 votes vote down vote up
package org.rex.dl

import java.io.{BufferedWriter, File, FileWriter}

import scala.io.Source
import scala.util.Try

object LoadDistLabelKb {

  import org.rex.dl.DistLabelStr._

  type Triple = (Query, Answer, Label)

  def parseTripleFromLine(l: String): Triple = {
    val bits = l.split("\t")
    (bits(0), bits(1), bits(2))
  }

  def apply(f: File): Try[KnowledgeBase] =
    Try {
      Source
        .fromFile(f)
        .getLines()
        .map(parseTripleFromLine)
        .foldLeft(Map.empty[Query, Map[Answer, Labels]]) {

          case (m, (q, a, r)) =>
            if (m contains q) {
              val answersForQ = m(q)
              (m - q) + (q -> (
                if (answersForQ contains a)
                  (answersForQ - a) + (a -> (answersForQ(a) + r))
                else
                  answersForQ + (a -> Set(r))
              ))

            } else
              m + (q -> Map(a -> Set(r)))
        }
    }

  def apply(kb: KnowledgeBase)(f: File): Try[Unit] =
    Try {
      val w = new BufferedWriter(new FileWriter(f))
      try {
        kb.foreach {
          case (q, amap) =>
            amap.foreach {
              case (a, labels) =>
                labels.foreach { l =>
                  w.write(s"${writeTripleToLine(q, a, l)}\n")
                }
            }
        }
      } finally {
        w.close()
      }
    }

  @inline def writeTripleToLine(t: Triple): String =
    writeTripleToLine(t._1, t._2, t._3)

  @inline def writeTripleToLine(q: Query, a: Answer, l: Label): String =
    s"$q\t$q\t$l"

} 
Example 45
Source File: DiskBlockManagerSuite.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.storage

import java.io.{File, FileWriter}
import java.util.UUID

import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.util.Utils

class DiskBlockManagerSuite extends SparkFunSuite with BeforeAndAfterEach with BeforeAndAfterAll {
  private val testConf = new SparkConf(false)
  private var rootDir0: File = _
  private var rootDir1: File = _
  private var rootDirs: String = _

  var diskBlockManager: DiskBlockManager = _

  override def beforeAll() {
    super.beforeAll()
    rootDir0 = Utils.createTempDir()
    rootDir1 = Utils.createTempDir()
    rootDirs = rootDir0.getAbsolutePath + "," + rootDir1.getAbsolutePath
  }

  override def afterAll() {
    try {
      Utils.deleteRecursively(rootDir0)
      Utils.deleteRecursively(rootDir1)
    } finally {
      super.afterAll()
    }
  }

  override def beforeEach() {
    super.beforeEach()
    val conf = testConf.clone
    conf.set("spark.local.dir", rootDirs)
    diskBlockManager = new DiskBlockManager(conf, deleteFilesOnStop = true)
  }

  override def afterEach() {
    try {
      diskBlockManager.stop()
    } finally {
      super.afterEach()
    }
  }

  test("basic block creation") {
    val blockId = new TestBlockId("test")
    val newFile = diskBlockManager.getFile(blockId)
    writeToFile(newFile, 10)
    assert(diskBlockManager.containsBlock(blockId))
    newFile.delete()
    assert(!diskBlockManager.containsBlock(blockId))
  }

  test("enumerating blocks") {
    val ids = (1 to 100).map(i => TestBlockId("test_" + i))
    val files = ids.map(id => diskBlockManager.getFile(id))
    files.foreach(file => writeToFile(file, 10))
    assert(diskBlockManager.getAllBlocks.toSet === ids.toSet)
  }

  test("SPARK-22227: non-block files are skipped") {
    val file = diskBlockManager.getFile("unmanaged_file")
    writeToFile(file, 10)
    assert(diskBlockManager.getAllBlocks().isEmpty)
  }

  def writeToFile(file: File, numBytes: Int) {
    val writer = new FileWriter(file, true)
    for (i <- 0 until numBytes) writer.write(i)
    writer.close()
  }
} 
Example 46
Source File: CsvAlarm.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.alarm

import java.io.FileWriter

class CsvAlarm extends Alarm {
  import CsvAlarm._
  override val name: String = "csv"
  private lazy val path = options.getOrElse(PATH, "test.csv")
  lazy val fw = new FileWriter(path, true)

  
  override def alarm(msg: AlertMessage): AlertResp = {
    fw.write(msg.toCsv() + "\n")
    AlertResp.success("")
  }

  override def finalAlarm(msg: AlertMessage): AlertResp = {
    val content = msg.toCsv()
    if (!content.isEmpty) {
      fw.write(content + "\n")
    }
    fw.flush()
    fw.close()
    AlertResp.success("")
  }
}

object CsvAlarm {
  val PATH = "path"
} 
Example 47
Source File: OutputCategoryList.scala    From jigg   with Apache License 2.0 5 votes vote down vote up
package jigg.nlp.ccg



import java.io.FileWriter

import scala.collection.mutable.ArrayBuffer
import scala.sys.process.Process
import scala.collection.mutable.HashMap

import lexicon._

import breeze.config.CommandLineParser

object OutputCategoryList {

  case class Params(
    bank: Opts.BankInfo,
    dict: Opts.DictParams
  )

  case class CategoryInfo(sentence: GoldSuperTaggedSentence, position: Int, num: Int = 1) {
    def increment(): CategoryInfo = this.copy(num = num + 1)
    def replace(_sentence: GoldSuperTaggedSentence, _p: Int) =
      CategoryInfo(_sentence, _p, num + 1)
  }

  def main(args:Array[String]) = {

    val params = CommandLineParser.readIn[Params](args)

    val dict = new JapaneseDictionary(params.dict.categoryDictinoary)
    val bank = CCGBank.select(params.bank, dict)

    val trainSentences: Array[GoldSuperTaggedSentence] = bank.trainSentences

    val stats = new HashMap[Category, CategoryInfo]

    trainSentences foreach { sentence =>
      (0 until sentence.size) foreach { i =>
        val cat = sentence.cat(i)
        stats.get(cat) match {
          case Some(info) =>
            if (sentence.size > info.sentence.size)
              stats += ((cat, info.replace(sentence, i)))
            else
              stats += ((cat, info.increment()))
          case None => stats += ((cat, CategoryInfo(sentence, i)))
          case _ =>
        }
      }
    }
    def highlight(sentence: Sentence, i: Int) = {
      val tokens = sentence.wordSeq
      // tokens.take(i).mkString("") + s"\\x1b[1;31m{${tokens(i)}}\\x1b[0m" + tokens.drop(i+1).mkString("")
      tokens.slice(i-5, i).mkString("") + s"${tokens(i)}" + tokens.slice(i+1, i+6).mkString("")
    }

    var fw = new FileWriter("./category.lst")
    stats.toSeq.sortBy(_._2.num).reverse.foreach {
      case (cat, CategoryInfo(sentence, i, num)) =>
        fw.write("%s\t%s\t%s\t%s\n"
          .format(num, cat, sentence.pos(i), highlight(sentence, i)))
    }
    fw.flush
    fw.close

    val noFeatureCategories = new HashMap[String, CategoryInfo]
    stats foreach { case (cat, CategoryInfo(sentence, i, numWithFeat)) =>
      val noFeature = cat.toStringNoFeature
      noFeatureCategories.get(noFeature) match {
        case Some(exist) =>
          val newNum = numWithFeat + exist.num
          val newInfo = exist.copy(num = newNum)
          noFeatureCategories += (noFeature -> newInfo)
        case None =>
          noFeatureCategories += (noFeature -> CategoryInfo(sentence, i, numWithFeat))
        case _ =>
      }
    }

    fw = new FileWriter("./category.nofeature.lst")
    noFeatureCategories.toSeq.sortBy(_._2.num).reverse.foreach {
      case (cat, CategoryInfo(sentence, i, num)) =>
        fw.write("%s\t%s\t%s\t%s\n"
          .format(num, cat, sentence.pos(i), highlight(sentence, i)))
    }
    fw.flush
    fw.close
  }
} 
Example 48
Source File: SshClientSpec.scala    From scala-ssh   with Apache License 2.0 5 votes vote down vote up
package com.decodified.scalassh

import org.specs2.Specification
import java.io.File
import java.io.FileWriter
import io.Source
import Source.{ fromFile ⇒ open }
import org.specs2.execute.{ Failure, FailureException }

class SshClientSpec extends Specification {
  sequential

  def is =
    "The SshClient should be able to" ^
      "properly connect to the test host and fetch a directory listing" ! simpleTest ^
      "properly connect to the test host and execute three independent commands" ! threeCommandsTest ^
      "properly upload to the test host" ! fileUploadTest ^
      "properly download to the test host" ! fileDownloadTest

  def simpleTest = {
    SSH(testHostName) { client ⇒
      client.exec("ls -a").right.map { result ⇒
        result.stdOutAsString() + "|" + result.stdErrAsString()
      }
    }.right.get must startWith(".\n..\n")
  }

  def threeCommandsTest = {
    SSH(testHostName) { client ⇒
      client.exec("ls").right.flatMap { res1 ⇒
        println("OK 1")
        client.exec("dfssgsdg").right.flatMap { res2 ⇒
          println("OK 2")
          client.exec("uname").right.map { res3 ⇒
            println("OK 3")
            (res1.exitCode, res2.exitCode, res3.exitCode)
          }
        }
      }
    } mustEqual Right((Some(0), Some(127), Some(0)))
  }

  def fileUploadTest = {
    val testFile = make(new File(testFileName)) { file ⇒
      val writer = new FileWriter(file)
      writer.write(testText)
      writer.close()
    }

    SSH(testHostName) { client ⇒
      try client.upload(testFile.getAbsolutePath, testFileName).right.flatMap { _ ⇒
        client.exec("cat " + testFileName).right.map { result ⇒
          testFile.delete()
          result.stdOutAsString()
        }
      }
      finally client.close()
    } mustEqual Right(testText)
  }

  def fileDownloadTest = {
    SSH(testHostName) { client ⇒
      try client.download(testFileName, testFileName).right.map { _ ⇒
        make(open(testFileName).getLines.mkString) { _ ⇒
          new File(testFileName).delete()
        }
      }
      finally client.close()
    } mustEqual Right(testText)
  }

  lazy val testFileName = "testUpload.txt"
  lazy val testText = "Hello, Scala SSH!"

  lazy val testHostName = {
    val fileName = HostFileConfig.DefaultHostFileDir + File.separator + ".testhost"
    try {
      Source.fromFile(fileName).getLines().toList.head
    } catch {
      case e: Exception ⇒ throw FailureException(Failure(("Could not find file '%s', you need to create it holding " +
        "nothing but the name of the test host you would like to run your tests against!").format(fileName), e.toString))
    }
  }
} 
Example 49
Source File: JsonFileTest.scala    From temperature-machine   with Apache License 2.0 5 votes vote down vote up
package bad.robot.temperature.server

import java.io.{BufferedWriter, FileWriter}

import org.specs2.matcher.DisjunctionMatchers.be_\/-
import org.specs2.mutable.Specification

class JsonFileTest extends Specification {

  val exampleJson =
    """
      |[
      |  {
      |    "label": "bedroom1-sensor-1",
      |    "data": [
      |      {
      |        "x": 1507709610000,
      |        "y": "NaN"
      |      },
      |      {
      |        "x": 1507709640000,
      |        "y": "+2.2062500000E01"
      |      },
      |      {
      |        "x": 1507709680000,
      |        "y": "+2.2262500000E01"
      |      }
      |    ]
      |  }
      |]
    """.stripMargin

  "Load a file" >> {
    createFile()
    JsonFile.load must be_\/-(exampleJson)
  }
  
  private def createFile() = {
    val writer = new BufferedWriter(new FileWriter(JsonFile.file))
    writer.write(exampleJson)
    writer.close()
  }

} 
Example 50
Source File: Xml.scala    From temperature-machine   with Apache License 2.0 5 votes vote down vote up
package bad.robot.temperature.rrd

import java.io.{BufferedWriter, FileWriter}

import bad.robot.temperature.{FileOps, Files, JsonOps, encode}
import bad.robot.temperature.rrd.ChartJson._
import org.rrd4j.ConsolFun._
import org.rrd4j.core.RrdDb
import bad.robot.temperature.Files._
import scala.collection.JavaConverters._
import scala.xml.{Elem, XML}

case class Xml(xml: Elem) {
  def exportXml(filename: String) = {
    XML.save(Files.path / filename, xml)
  }

  def exportJson(filename: String) = {
    val writer = new BufferedWriter(new FileWriter(Files.path / filename))
    writer.write(toJson())
    writer.close()
  }

  def toJson(): String = {
    val series = parse(xml)
    encode(series).spaces2ps
  }

}

object Xml {
  def apply(start: Seconds, end: Seconds, hosts: List[Host]): Xml = {
    val database = new RrdDb(RrdFile.file)
    val request = database.createFetchRequest(AVERAGE, start, end)
    val sensors = for {
      host   <- hosts
      sensor <- 1 to RrdFile.MaxSensors
    } yield {
      s"${host.name}-sensor-$sensor"
    }
    request.setFilter(nonEmpty(sensors, database).asJava)
    val data = request.fetchData()
    val xml = data.exportXml()
    new Xml(XML.loadString(xml))
  }

  def nonEmpty(sensors: List[String], database: RrdDb) = sensors.filter(database.hasValuesFor).toSet

} 
Example 51
Source File: HTMLReportGenerator.scala    From regressr   with Apache License 2.0 5 votes vote down vote up
package org.ebayopensource.regression.internal.reportGenerator

import java.io.{BufferedWriter, File, FileWriter}

import org.fusesource.scalate.{TemplateEngine, TemplateSource}

import scala.io.Source
import scala.util.{Failure, Success, Try}


class HTMLReportGenerator extends ReportGenerator {

  val scalateEngine = new TemplateEngine

  def getContent(reportEntries: Seq[ReportEntry]) : Try[String] = Try {

    if (reportEntries.size==0) {
      throw new IllegalArgumentException("Cannot generate report with 0 reportEntries.")
    }

    val templateText = Source.fromInputStream(getClass.getResourceAsStream("/report/index.html")).mkString
    scalateEngine.escapeMarkup = false

    val regressionCount :Seq[Int] = reportEntries.flatMap {
      reportEntry => {
        reportEntry.requestReportEntries.map {
          requestReportEntry => {
            requestReportEntry.reqMessages.size
          }
        }
      }
    }

    val renderedContent = scalateEngine.layout(TemplateSource.fromText("/com/ebay/n/regression/text.ssp", templateText),
      Map("reportEntries" -> reportEntries, "regressionCount" -> regressionCount.sum))
    renderedContent
  }

  def writeAndGetFile(content: String, reportFilePath: String) : Try[File] = Try {
    val outputFile = new File(reportFilePath)
    val bw = new BufferedWriter(new FileWriter(outputFile))
    bw.write(content)
    bw.close()
    outputFile
  }

  override def generate(reportEntries: Seq[ReportEntry], reportFilePath: String): Try[File] = Try {
    getContent(reportEntries).flatMap {
      content => writeAndGetFile(content, reportFilePath)
    } match {
      case Success(file) => file
      case Failure(t) => throw t
    }
  }
} 
Example 52
Source File: WhiskConfigTests.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.core

import java.io.BufferedWriter
import java.io.File
import java.io.FileWriter

import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.Matchers
import org.scalatest.junit.JUnitRunner

import common.StreamLogging

@RunWith(classOf[JUnitRunner])
class WhiskConfigTests extends FlatSpec with Matchers with StreamLogging {

  behavior of "WhiskConfig"

  it should "get required property" in {
    val config = new WhiskConfig(WhiskConfig.edgeHost)
    assert(config.isValid)
    assert(config.edgeHost.nonEmpty)
  }

  it should "be valid when a prop file is provided defining required props" in {
    val file = File.createTempFile("cxt", ".txt")
    file.deleteOnExit()

    val bw = new BufferedWriter(new FileWriter(file))
    bw.write("a=A\n")
    bw.close()

    val config = new WhiskConfig(Map("a" -> null), Set.empty, file)
    assert(config.isValid && config("a") == "A")
  }

  it should "not be valid when a prop file is provided but does not define required props" in {
    val file = File.createTempFile("cxt", ".txt")
    file.deleteOnExit()

    val bw = new BufferedWriter(new FileWriter(file))
    bw.write("a=A\n")
    bw.close()

    val config = new WhiskConfig(Map("a" -> null, "b" -> null), Set.empty, file)
    assert(!config.isValid && config("b") == null)
  }

  it should "be valid when a prop file is provided defining required props and optional properties" in {
    val file = File.createTempFile("cxt", ".txt")
    file.deleteOnExit()

    val bw = new BufferedWriter(new FileWriter(file))
    bw.write("a=A\n")
    bw.write("b=B\n")
    bw.write("c=C\n")
    bw.close()

    val config = new WhiskConfig(Map("a" -> null, "b" -> "???"), Set("c", "d"), file, env = Map.empty)
    assert(config.isValid && config("a") == "A" && config("b") == "B")
    assert(config("c") == "C")
    assert(config("d") == "")
    assert(config("a", "c") == "C")
    assert(config("a", "d") == "A")
    assert(config("d", "a") == "A")
    assert(config("c", "a") == "A")
  }
} 
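The property-file tests above repeat the same create-temp-file-and-write steps; a small helper along these lines (illustrative only, not part of the OpenWhisk test code) would factor the pattern out:

import java.io.{BufferedWriter, File, FileWriter}

object TempProps {
  // Illustrative helper: write the given lines to a self-deleting temporary properties file.
  def tempPropsFile(lines: String*): File = {
    val file = File.createTempFile("cxt", ".txt")
    file.deleteOnExit()
    val bw = new BufferedWriter(new FileWriter(file))
    try lines.foreach(line => bw.write(line + "\n"))
    finally bw.close()
    file
  }
}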
Example 53
Source File: ApiGwRestEndToEndTests.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package apigw.healthtests

import java.io.BufferedWriter
import java.io.File
import java.io.FileWriter

import akka.http.scaladsl.model.StatusCodes.OK

import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner

import common.TestUtils._
import common.rest.WskRestOperations
import common.rest.RestResult
import common.WskActorSystem

@RunWith(classOf[JUnitRunner])
class ApiGwRestEndToEndTests extends ApiGwEndToEndTests with WskActorSystem {

  override lazy val wsk = new WskRestOperations
  override val createCode = OK.intValue

  override def verifyAPICreated(rr: RunResult): Unit = {
    val apiResultRest = rr.asInstanceOf[RestResult]
    apiResultRest.statusCode shouldBe OK
    val apiurl = apiResultRest.getField("gwApiUrl") + "/path"
    println(s"apiurl: '$apiurl'")
  }

  override def verifyAPIList(rr: RunResult,
                             actionName: String,
                             testurlop: String,
                             testapiname: String,
                             testbasepath: String,
                             testrelpath: String): Unit = {
    val apiResultRest = rr.asInstanceOf[RestResult]
    val apiValue = RestResult.getFieldJsObject(apiResultRest.getFieldListJsObject("apis")(0), "value")
    val apidoc = RestResult.getFieldJsObject(apiValue, "apidoc")
    val basepath = RestResult.getField(apidoc, "basePath")
    basepath shouldBe testbasepath

    val paths = RestResult.getFieldJsObject(apidoc, "paths")
    paths.fields.contains(testrelpath) shouldBe true

    val info = RestResult.getFieldJsObject(apidoc, "info")
    val title = RestResult.getField(info, "title")
    title shouldBe testapiname

    val relpath = RestResult.getFieldJsObject(paths, testrelpath)
    val urlop = RestResult.getFieldJsObject(relpath, testurlop)
    val openwhisk = RestResult.getFieldJsObject(urlop, "x-openwhisk")
    val actionN = RestResult.getField(openwhisk, "action")
    actionN shouldBe actionName
  }

  override def verifyAPISwaggerCreated(rr: RunResult): Unit = {
    val apiResultRest = rr.asInstanceOf[RestResult]
    apiResultRest.statusCode shouldBe OK
  }

  override def writeSwaggerFile(rr: RunResult): File = {
    val swaggerfile = File.createTempFile("api", ".json")
    swaggerfile.deleteOnExit()
    val bw = new BufferedWriter(new FileWriter(swaggerfile))
    val apiResultRest = rr.asInstanceOf[RestResult]
    val apiValue = RestResult.getFieldJsObject(apiResultRest.getFieldListJsObject("apis")(0), "value")
    val apidoc = RestResult.getFieldJsObject(apiValue, "apidoc")
    bw.write(apidoc.toString())
    bw.close()
    swaggerfile
  }

  override def getSwaggerApiUrl(rr: RunResult): String = {
    val apiResultRest = rr.asInstanceOf[RestResult]
    apiResultRest.getField("gwApiUrl") + "/path"
  }
} 
Example 54
Source File: WriteSupport.scala    From CodeAnalyzerTutorial   with Apache License 2.0 5 votes vote down vote up
package tutor.utils

import java.io.{BufferedWriter, File, FileWriter, Writer}

trait WriteSupport {

  def withWriter(path: String)(f: Writer => Unit): Unit ={
    var writer: Writer = null
    try {
      val file = new File(path)
      if (!file.exists()) file.createNewFile()
      writer = new BufferedWriter(new FileWriter(file))
      f(writer)
      writer.flush()
    } finally {
      if (writer != null) writer.close()
    }
  }
} 
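A hypothetical consumer of the WriteSupport loan pattern above (the object name and file path are illustrative); withWriter creates and flushes the underlying FileWriter, and the close in its finally block still runs if the body throws:

import tutor.utils.WriteSupport

object ReportWriter extends App with WriteSupport {
  // The body only writes; withWriter handles file creation, flushing and closing.
  withWriter("report.txt") { writer =>
    writer.write("line one\n")
    writer.write("line two\n")
  }
}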
Example 55
Source File: gen4tests.scala    From tscfg   with Apache License 2.0 5 votes vote down vote up
package tscfg

import java.io.{File, FileWriter, PrintWriter}

import tscfg.generators.java.JavaGen
import tscfg.generators.{GenOpts, Generator}
import tscfg.generators.scala.ScalaGen

object gen4tests {
  def main(args: Array[String]): Unit = {
    val sourceDir = new File("src/main/tscfg/example")
    sourceDir.listFiles().filter {
      _.getName.endsWith(".spec.conf")
    } foreach generate
  }

  private def generate(confFile: File): Unit = {
    //println(s"gen4tests: confFile=$confFile")

    val bufSource = io.Source.fromFile(confFile)
    val source = bufSource.mkString
    bufSource.close

    val opts = {
      val linePat = """\s*//\s*GenOpts:(.*)""".r
      source.split("\n")
        .collect { case linePat(xs) => xs.trim }
        .flatMap(_.split("\\s+"))
    }
    if (opts.contains("--skip-gen4tests")) {
      println(s"gen4tests: skipping $confFile")
      return
    }

    val baseGenOpts: GenOpts = {
      var genOpts = GenOpts("tscfg.example", "?")
      opts foreach {
        case "--scala:2.12"      => genOpts = genOpts.copy(s12 = true)
        case "--scala:bt"        => genOpts = genOpts.copy(useBackticks = true)
        case "--java:getters"    => genOpts = genOpts.copy(genGetters = true)
        case "--java:optionals"  => genOpts = genOpts.copy(useOptionals = true)
        case "--durations"       => genOpts = genOpts.copy(useDurations = true)
        case "--all-required"    => genOpts = genOpts.copy(assumeAllRequired = true)

        // $COVERAGE-OFF$
        case opt => println(s"WARN: $confFile: unrecognized GenOpts argument: `$opt'")
        // $COVERAGE-ON$
      }
      genOpts
    }

    val buildResult = ModelBuilder(source, assumeAllRequired = baseGenOpts.assumeAllRequired)
    val objectType = buildResult.objectType

    val name = confFile.getName
    val (base, _) = name.span(_ != '.')
    val classNameSuffix = util.upperFirst(base.replace('-', '_')) + "Cfg"

    List("Scala", "Java") foreach { lang =>
      val targetScalaDir = new File("src/test/" + lang.toLowerCase + "/tscfg/example")
      targetScalaDir.mkdirs()

      val className = lang + classNameSuffix

      val fileName = className + "." + lang.toLowerCase
      val targetFile = new File(targetScalaDir, fileName)
      // $COVERAGE-OFF$
      if (true||confFile.lastModified >= targetFile.lastModified) {
        val genOpts = baseGenOpts.copy(className = className)
        //println(s"generating for $name -> $fileName")
        val generator: Generator = lang match {
          case "Scala" => new ScalaGen(genOpts)
          case "Java" =>  new JavaGen(genOpts)
        }

        val results = generator.generate(objectType)
        val out = new PrintWriter(new FileWriter(targetFile), true)
        out.println(results.code)
      }
      // $COVERAGE-ON$
    }
  }
} 
Example 56
Source File: WriteOpenApi.scala    From udash-core   with Apache License 2.0 5 votes vote down vote up
package io.udash
package rest

import java.io.FileWriter

import com.avsystem.commons.serialization.json.{JsonOptions, JsonStringOutput}
import io.udash.rest.openapi.{Info, Server}

object WriteOpenApi {
  def main(args: Array[String]): Unit = {
    val openapi = RestTestApi.openapiMetadata.openapi(
      Info("Test API", "0.1", description = "Some test REST API"),
      servers = List(Server("http://localhost"))
    )
    val fw = new FileWriter("/home/ghik/api.js")
    fw.write("apiSpec = ")
    fw.write(JsonStringOutput.write(openapi, JsonOptions.Pretty))
    fw.close()
  }
} 
Example 57
Source File: Painter.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.painter

import java.awt.Font
import java.io.{File, FileWriter}

import org.jfree.chart.{ChartFactory, StandardChartTheme}
import org.jfree.data.general.Dataset

abstract class Painter(dataPath: String, picturePath: String) {
  initialize()
  var fw: FileWriter = _

  def initialize(): Unit = {
    val dataFile = new File(dataPath)
    if (dataFile.exists()) {
      dataFile.delete()
    }
    fw = new FileWriter(dataPath, true)
    val standardChartTheme = new StandardChartTheme("CN")
    standardChartTheme.setExtraLargeFont(new Font("Monospaced", Font.BOLD, 20))
    standardChartTheme.setRegularFont(new Font("Monospaced", Font.PLAIN, 15))
    standardChartTheme.setLargeFont(new Font("Monospaced", Font.PLAIN, 15))
    ChartFactory.setChartTheme(standardChartTheme)
  }

  def addPoint(xAxis: Any, yAxis: Any): Unit = {
    fw.write(s"${xAxis},${yAxis}\n")
  }

  def addPoint(xAxis: Any, yAxis: Any, zAxis: Any): Unit = {
    fw.write(s"${xAxis},${yAxis},${zAxis}\n")
  }

  def createDataset(): Dataset

  def paint(
      width: Int,
      height: Int,
      chartTitle: String,
      categoryAxisLabel: String,
      valueAxisLabel: String): Unit
} 
Example 58
Source File: CCGBankToCabochaFormat.scala    From jigg   with Apache License 2.0 5 votes vote down vote up
package jigg.nlp.ccg

import java.io.File

import scala.sys.process.Process

// Remaining types (ParseTree, NodeLabel, JapaneseDictionary, TreeExtractor, CCGBankReader,
// CommandLineParser, Help, ...) come from project-specific imports not shown here.

object CCGBankToCabochaFormat {

  case class Opts(
    @Help(text="Path to CCGBank file") ccgbank: File = new File(""),
    @Help(text="Path to output") output: File = new File(""),
    @Help(text="Cabocha command (path to cabocha)") cabocha: String = "cabocha"
  )

  type Tree = ParseTree[NodeLabel]

  def main(args:Array[String]) = {
    val opts = CommandLineParser.readIn[Opts](args)

    val dict = new JapaneseDictionary()
    val extractors = TreeExtractor(
      new JapaneseParseTreeConverter(dict),
      new CCGBankReader)

    val trees = extractors.readTrees(opts.ccgbank, -1, true)
    val rawString = trees map (extractors.treeConv.toSentenceFromLabelTree) map (_.wordSeq.mkString("")) mkString ("\n")
    val is = new java.io.ByteArrayInputStream(rawString.getBytes("UTF-8"))
    val out = (Process(s"${opts.cabocha} -f1") #< is).lineStream_!

    val os = jigg.util.IOUtil.openOut(opts.output.getPath)
    out foreach { line =>
      os.write(line + "\n")
    }
    os.flush
    os.close
  }
} 
Example 59
Source File: Preprocess.scala    From Scala-Machine-Learning-Projects   with MIT License 5 votes vote down vote up
package com.packt.ScalaML.BitCoin

import java.io.{ BufferedWriter, File, FileWriter }
import org.apache.spark.sql.types.{ DoubleType, IntegerType, StructField, StructType }
import org.apache.spark.sql.{ DataFrame, Row, SparkSession }
import scala.collection.mutable.ListBuffer

object Preprocess {
  // how many of the first rows are omitted
  val dropFirstCount: Int = 612000

  def rollingWindow(data: DataFrame, window: Int, xFilename: String, yFilename: String): Unit = {
    var i = 0
    val xWriter = new BufferedWriter(new FileWriter(new File(xFilename)))
    val yWriter = new BufferedWriter(new FileWriter(new File(yFilename)))

    val zippedData = data.rdd.zipWithIndex().collect()
    System.gc()
    val dataStratified = zippedData.drop(dropFirstCount) // todo: slice first 614K rows
    while (i < (dataStratified.length - window)) {
      val x = dataStratified
        .slice(i, i + window)
        .map(r => r._1.getAs[Double]("Delta")).toList
      val y = dataStratified.apply(i + window)._1.getAs[Integer]("label")
      val stringToWrite = x.mkString(",")
      xWriter.write(stringToWrite + "\n")
      yWriter.write(y + "\n")

      i += 1
      if (i % 10 == 0) {
        xWriter.flush()
        yWriter.flush()
      }
    }

    xWriter.close()
    yWriter.close()
  }

  def main(args: Array[String]): Unit = {
    //todo modify these variables to match desirable files
    val priceDataFileName: String = "C:/Users/admin-karim/Desktop/bitstampUSD_1-min_data_2012-01-01_to_2017-10-20.csv/bitstampUSD_1-min_data_2012-01-01_to_2017-10-20.csv"
    val outputDataFilePath: String = "output/scala_test_x.csv"
    val outputLabelFilePath: String = "output/scala_test_y.csv"

    val spark = SparkSession
      .builder()
      .master("local[*]")
      .config("spark.sql.warehouse.dir", "E:/Exp/")
      .appName("Bitcoin Preprocessing")
      .getOrCreate()

    val data = spark.read.format("com.databricks.spark.csv").option("header", "true").load(priceDataFileName)
    data.show(10)
    println((data.count(), data.columns.size))

    val dataWithDelta = data.withColumn("Delta", data("Close") - data("Open"))

    import org.apache.spark.sql.functions._
    import spark.sqlContext.implicits._

    val dataWithLabels = dataWithDelta.withColumn("label", when($"Close" - $"Open" > 0, 1).otherwise(0))
    rollingWindow(dataWithLabels, 22, outputDataFilePath, outputLabelFilePath)    
    spark.stop()
  }
} 
Example 60
Source File: RedisBenchmarks.scala    From spark-redis   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.redislabs.provider.redis

import java.io.{File, FileWriter, PrintWriter}
import java.time.{Duration => JDuration}

import com.redislabs.provider.redis.util.Logging


trait RedisBenchmarks extends Logging {

  val benchmarkReportDir = new File("target/reports/benchmarks/")
  benchmarkReportDir.mkdirs()

  def time[R](tag: String)(block: => R): R = {
    val t0 = System.nanoTime()
    val result = block // call-by-name
    val t1 = System.nanoTime()
    new PrintWriter(new FileWriter(s"$benchmarkReportDir/results.txt", true)) {
      // scalastyle:off
      this.println(s"$tag, ${JDuration.ofNanos(t1 - t0)}")
      close()
    }
    result
  }
} 
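In the FileWriter(..., true) call above, the second argument is the append flag, which is what lets successive time calls accumulate lines in the same report file. A standalone sketch of the same idea (the file name is illustrative):

import java.io.{FileWriter, PrintWriter}

object TimingLog {
  // Passing true to FileWriter opens the file in append mode, so each call adds one line.
  def logTiming(tag: String, nanos: Long): Unit = {
    val out = new PrintWriter(new FileWriter("target/timings.txt", true))
    try out.println(s"$tag,$nanos")
    finally out.close()
  }
}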
Example 61
Source File: ConfigSpec.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness

import java.io.{BufferedWriter, File, FileWriter}
import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorSystem, Props}
import akka.testkit.TestProbe
import com.typesafe.config.ConfigFactory
import com.webtrends.harness.app.HarnessActor.ConfigChange
import com.webtrends.harness.config.ConfigWatcherActor
import com.webtrends.harness.health.{ComponentState, HealthComponent}
import com.webtrends.harness.service.messages.CheckHealth
import org.specs2.mutable.SpecificationWithJUnit

import scala.concurrent.ExecutionContextExecutor
import scala.concurrent.duration.FiniteDuration
import scala.reflect.io.{Directory, Path}

class ConfigSpec extends SpecificationWithJUnit {
  implicit val dur = FiniteDuration(2, TimeUnit.SECONDS)
  new File("services/test/conf").mkdirs()
  implicit val sys = ActorSystem("system", ConfigFactory.parseString( """
    akka.actor.provider = "akka.actor.LocalActorRefProvider"
    services { path = "services" }
    """).withFallback(ConfigFactory.load))

  implicit val ec: ExecutionContextExecutor =  sys.dispatcher

  val probe = TestProbe()
  val parent = sys.actorOf(Props(new Actor {
    val child = context.actorOf(ConfigWatcherActor.props, "child")
    def receive = {
      case x if sender == child => probe.ref forward x
      case x => child forward x
    }
  }))

  sequential

  "config " should {
    "be in good health" in {
      probe.send(parent, CheckHealth)
      val msg = probe.expectMsgClass(classOf[HealthComponent])
      msg.state equals ComponentState.NORMAL
    }

    "detect changes in config" in {
      val file = new File("services/test/conf/test.conf")
      val bw = new BufferedWriter(new FileWriter(file))
      bw.write("test = \"value\"")
      bw.close()
      val msg = probe.expectMsgClass(classOf[ConfigChange])
      msg.isInstanceOf[ConfigChange]
    }
  }

  step {
    sys.terminate().onComplete { _ =>
        Directory(Path(new File("services"))).deleteRecursively()
    }
  }
} 
Example 62
Source File: ForgerBoxMerklePathInfoTest.scala    From Sidechains-SDK   with MIT License 5 votes vote down vote up
package com.horizen.validation

import java.io.{BufferedReader, BufferedWriter, FileReader, FileWriter}
import java.lang.{Byte => JByte}
import java.util
import java.util.{ArrayList => JArrayList}

import com.horizen.box.ForgerBox
import com.horizen.fixtures.BoxFixture
import com.horizen.utils.{BytesUtils, ForgerBoxMerklePathInfo, ForgerBoxMerklePathInfoSerializer, MerklePath, Pair}
import com.horizen.vrf.VrfGeneratedDataProvider
import org.junit.Assert.{assertEquals, assertNotEquals, assertTrue}
import org.junit.Test
import org.scalatest.junit.JUnitSuite

class ForgerBoxMerklePathInfoTest extends JUnitSuite with BoxFixture {
  val vrfGenerationSeed = 907
  val vrfGenerationPrefix = "ForgerBoxMerklePathInfoTest"

  // uncomment if you want to update the vrf-related data
  if (false) {
    VrfGeneratedDataProvider.updateVrfPublicKey(vrfGenerationPrefix, vrfGenerationSeed)
  }

  val forgerBox: ForgerBox = getForgerBox(
    getPrivateKey25519("123".getBytes()).publicImage(),
    1000L,
    100L,
    getPrivateKey25519("456".getBytes()).publicImage(),
    VrfGeneratedDataProvider.getVrfPublicKey(vrfGenerationPrefix, vrfGenerationSeed)
  )
  val emptyMerklePath: MerklePath = new MerklePath(new JArrayList())

  val nonEmptyMerklePath: MerklePath = new MerklePath(util.Arrays.asList(
    new Pair[JByte, Array[Byte]](0.toByte, BytesUtils.fromHexString("29d000eee85f08b6482026be2d92d081d6f9418346e6b2e9fe2e9b985f24ed1e")),
    new Pair[JByte, Array[Byte]](1.toByte, BytesUtils.fromHexString("61bfbdf7038dc7f21e2bcf193faef8e6caa8222af016a6ed86b9e9d860f046df"))
  ))

  @Test
  def comparison(): Unit = {
    assertNotEquals("Box merkle path info expected to be different.", emptyMerklePath, nonEmptyMerklePath)
  }

  @Test
  def serialization(): Unit = {
    // Test 1: empty merkle path (single element in merkle tree)
    val boxWithEmptyPath = ForgerBoxMerklePathInfo(forgerBox, emptyMerklePath)
    var boxBytes = boxWithEmptyPath.bytes
    var deserializedBox = ForgerBoxMerklePathInfoSerializer.parseBytes(boxBytes)
    assertEquals("Deserialized box merkle path info hashCode expected to be equal to the original one.", boxWithEmptyPath.hashCode(), deserializedBox.hashCode())
    assertEquals("Deserialized box merkle path info expected to be equal to the original one.", boxWithEmptyPath, deserializedBox)


    // Test 2: non empty merkle path
    val boxWithNonEmptyPath = ForgerBoxMerklePathInfo(forgerBox, nonEmptyMerklePath)
    boxBytes = boxWithNonEmptyPath.bytes
    deserializedBox = ForgerBoxMerklePathInfoSerializer.parseBytes(boxBytes)
    assertEquals("Deserialized box merkle path info hashCode expected to be equal to the original one.", boxWithNonEmptyPath.hashCode(), deserializedBox.hashCode())
    assertEquals("Deserialized box merkle path info expected to be equal to the original one.", boxWithNonEmptyPath, deserializedBox)

    // Set to true and run if you want to update regression data.
    if (false) {
      val out = new BufferedWriter(new FileWriter("src/test/resources/boxmerklepathinfo_hex"))
      out.write(BytesUtils.toHexString(boxBytes))
      out.close()
    }

    // Test 3: try to deserialize broken bytes.
    assertTrue("ForgerBoxMerklePathInfo expected to be not parsed due to broken data.", ForgerBoxMerklePathInfoSerializer.parseBytesTry("broken bytes".getBytes).isFailure)
  }

  @Test
  def serializationRegression(): Unit = {
    var bytes: Array[Byte] = null
    try {
      val classLoader = getClass.getClassLoader
      val file = new FileReader(classLoader.getResource("boxmerklepathinfo_hex").getFile)
      bytes = BytesUtils.fromHexString(new BufferedReader(file).readLine())
    }
    catch {
      case e: Exception =>
        fail(e.toString)
    }

    val boxMerklePathInfoTry = ForgerBoxMerklePathInfoSerializer.parseBytesTry(bytes)
    assertTrue("ForgerBoxMerklePathInfo expected to by parsed.", boxMerklePathInfoTry.isSuccess)

    val boxWithNonEmptyPath = ForgerBoxMerklePathInfo(forgerBox, nonEmptyMerklePath)
    assertEquals("Parsed info is different to original.", boxWithNonEmptyPath, boxMerklePathInfoTry.get)
  }
} 
Example 63
Source File: TupleWriterFile.scala    From sddf   with GNU General Public License v3.0 5 votes vote down vote up
package de.unihamburg.vsis.sddf.writing

import java.io.File
import java.io.FileWriter

import org.apache.spark.rdd.RDD

import com.opencsv.CSVWriter

import de.unihamburg.vsis.sddf.reading.Tuple


class TupleWriterFile(file: File, separator: Char = ',') {

  val writer = new CSVWriter(new FileWriter(file), separator);

  def writeTuple[A <: Tuple](tuple: A): Unit = {
    writer.writeNext(tuple.id.toString +: tuple.toSeq.map(_._2).toArray)
  }

  def close() = {
    writer.close()
  }
  
  def blankLine() = {
    writer.writeNext(Array())
  }
  
  def writeTuple[A <: Tuple](tuples: Traversable[A]): Unit = {
    tuples.foreach(tuple => {
      writer.writeNext(tuple.id.toString +: tuple.toSeq.map(_._2).toArray)
    })
  }

  def writeTuple[A <: Tuple](tuples: RDD[A]): Unit = {
    val collectedTuples = tuples.collect()
    collectedTuples.foreach(tuple => {
      writer.writeNext(tuple.id.toString +: tuple.toSeq.map(_._2).toArray)
    })
  }
} 
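A minimal sketch of the same CSVWriter-over-FileWriter pairing without the project's Tuple type. It mirrors the two-argument CSVWriter constructor used above; newer opencsv releases may prefer a builder instead, so treat the constructor choice as an assumption:

import java.io.FileWriter

import com.opencsv.CSVWriter

object CsvExample extends App {
  // CSVWriter takes care of quoting and separators; FileWriter provides the character stream.
  val writer = new CSVWriter(new FileWriter("people.csv"), ',')
  try {
    writer.writeNext(Array("id", "name"))
    writer.writeNext(Array("1", "Ada"))
  } finally writer.close()
}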
Example 64
Source File: ClusterWriterCsvFile.scala    From sddf   with GNU General Public License v3.0 5 votes vote down vote up
package de.unihamburg.vsis.sddf.writing

import java.io.File
import java.io.FileWriter

import org.apache.spark.rdd.RDD

import com.opencsv.CSVWriter

import de.unihamburg.vsis.sddf.reading.Tuple

class ClusterWriterCsvFile(file: File, separator: Char = ',') {

  // create folders
  file.getParentFile().mkdirs()

  def this(path: String) = {
    this(new File(path))
  }

  def this(folder: String, file: String) = {
    this(new File(folder, file))
  }

  def write(clusterRdd: RDD[Set[Tuple]]): Unit = {
    val collectedClusters = clusterRdd.collect()
    val writer = new CSVWriter(new FileWriter(file), separator);
    // feed in your array (or convert your data to an array)
    collectedClusters.foreach(set => {
      val tupleIdSet: Set[String] = set.map(tuple => tuple.id.toString())
      val tupleIdArray: Array[String] = tupleIdSet.toArray
      writer.writeNext(tupleIdArray)
    })
    writer.close()
  }
  
} 
Example 65
Source File: TablesInitiation.scala    From crm-seed   with Apache License 2.0 5 votes vote down vote up
package com.dataengi.crm.identities.slick.tables

import java.io.{File, FileWriter}

import play.api.{Configuration, Logger}
import play.api.db.slick.HasDatabaseConfigProvider
import slick.lifted.TableQuery
import com.dataengi.crm.common.context.types._
import com.dataengi.crm.common.extensions.awaits._
import com.dataengi.crm.common.extensions.logging._

import scala.concurrent.ExecutionContext

trait TablesInitiation extends HasDatabaseConfigProvider[slick.jdbc.JdbcProfile] {

  implicit val executionContext: ExecutionContext

  val configuration: Configuration

  val DDLTag: String = super.getClass.getSimpleName

  def printMigrationDDL(tables: List[slick.lifted.TableQuery[_ <: slick.relational.RelationalProfile#Table[_]]]) = {
    val allowPrintDDL = configuration.getOptional[Boolean]("play.db.ddl.print").getOrElse(false)
    Logger.info(s"[initiate-table][print-ddl] allowPrintDDL=$allowPrintDDL")
    if (allowPrintDDL) {
      createDDL(tables, DDLTag)
    }
  }

  private def createDDL(tables: List[TableQuery[_ <: slick.relational.RelationalProfile#Table[_]]], DDLPath: String) = {
    import profile.api._
    val schema    = tables.map(_.schema).reduce(_ ++ _)
    val directory = new File(s"./db/statements/${DDLPath}/")
    if (!directory.exists()) directory.mkdirs()
    val migrationFile = new File(directory.getPath + "/migration_ddl.sql")
    val writer        = new FileWriter(migrationFile.getAbsoluteFile)
    writer.write("# --- !Ups\n\n")
    schema.createStatements.foreach { s =>
      writer.write(s + ";\n")
    }
    writer.write("\n\n# --- !Downs\n\n")
    schema.dropStatements.foreach { s =>
      writer.write(s + ";\n")
    }
    writer.close()
  }

  def createTables(tables: List[slick.lifted.TableQuery[_ <: slick.relational.RelationalProfile#Table[_]]]) = {
    import profile.api._
    val allowCreateTables     = configuration.getOptional[Boolean]("play.db.create.dynamic").getOrElse(false)
    val printStatementsTables = configuration.getOptional[Boolean]("play.db.print.statements").getOrElse(false)
    Logger.info(s"[initiate-table][create] allowCreateTables=$allowCreateTables")
    if (allowCreateTables) {
      val schema             = tables.map(_.schema).reduce(_ ++ _)
      val schemaCreateResult = db.run(schema.create).toOr.await()
      if (printStatementsTables) Logger.info(s"[initiate-table][${DDLTag}] create query: ${schema.create.statements}")
      if (!List("already exist", "not found").exists(schemaCreateResult.logResult.contains)) {
        Logger.info(s"[initiate-table][${DDLTag}] create tables: ${schemaCreateResult.logResult}")
      }
    }
  }

} 
Example 66
Source File: Predictor.scala    From low-rank-logic   with MIT License 5 votes vote down vote up
package uclmr.util

import java.io.FileWriter

import uclmr.hack.EntityHackNormalization
import uclmr.{DefaultIx, TensorKB}


object Predictor extends App {
  val pathToMatrix = args.lift(0).getOrElse("./data/out/bbc/serialized/")
  val outFile = args.lift(1).getOrElse("./data/out/bbc/predictions.txt")
  val relations = if (args.size > 2) args.tail else Array(
    "REL$/location/administrative_division/country",
    "REL$/base/biblioness/bibs_location/country",
    "REL$/location/location/contains",
    "REL$/people/person/nationality",
    "REL$/base/aareas/schema/administrative_area/administrative_parent",
    "REL$/location/country/first_level_divisions",
    "REL$/location/country/capital"
  )

  println("Loading db...")
  val kb = new TensorKB(100)
  kb.deserialize(pathToMatrix)

  println(kb.toInfoString)

  println("Predicting facts...")
  val predictions = relations.map(rel => rel -> kb.keys2
    .filterNot(t => kb.getFact(rel, t, DefaultIx).exists(_.train))
    .map(t => {
      (kb.prob(rel, t), t)
    }).sortBy(-_._1)
  ).toMap

  println("Reporting predictions...")

  if (true || args.size > 1) {

    val writer = new FileWriter(outFile)

    EntityHackNormalization.init()

    predictions.foreach(t => t._2.take(100).foreach { case (score, es) =>
      val Array(e1, e2) = es.toString.tail.init.split(",")
      val can1 = if (e1.startsWith("/m/")) EntityHackNormalization.getCanonical(e1) else e1
      val can2 = if (e2.startsWith("/m/")) EntityHackNormalization.getCanonical(e2) else e2

      writer.write(s"$score\t$e1\t$can1\t$e2\t$can2\t${ t._1 }\n")
    })
    writer.close()
  } else {
    predictions.foreach(t => t._2.take(100).foreach { case (score, es) =>
      val Array(e1, e2) = es.toString.tail.init.split(",")
      println(s"$score\t$e1\t$e2\t${ t._1 }")
    })
  }
} 
Example 67
Source File: DiskBlockManagerSuite.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.storage

import java.io.{File, FileWriter}

import scala.language.reflectiveCalls

import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.util.Utils

class DiskBlockManagerSuite extends SparkFunSuite with BeforeAndAfterEach with BeforeAndAfterAll {
  private val testConf = new SparkConf(false)
  private var rootDir0: File = _
  private var rootDir1: File = _
  private var rootDirs: String = _

  var diskBlockManager: DiskBlockManager = _

  override def beforeAll() {
    super.beforeAll()
    rootDir0 = Utils.createTempDir()
    rootDir1 = Utils.createTempDir()
    rootDirs = rootDir0.getAbsolutePath + "," + rootDir1.getAbsolutePath
  }

  override def afterAll() {
    try {
      Utils.deleteRecursively(rootDir0)
      Utils.deleteRecursively(rootDir1)
    } finally {
      super.afterAll()
    }
  }

  override def beforeEach() {
    super.beforeEach()
    val conf = testConf.clone
    conf.set("spark.local.dir", rootDirs)
    diskBlockManager = new DiskBlockManager(conf, deleteFilesOnStop = true)
  }

  override def afterEach() {
    try {
      diskBlockManager.stop()
    } finally {
      super.afterEach()
    }
  }

  test("basic block creation") {
    val blockId = new TestBlockId("test")
    val newFile = diskBlockManager.getFile(blockId)
    writeToFile(newFile, 10)
    assert(diskBlockManager.containsBlock(blockId))
    newFile.delete()
    assert(!diskBlockManager.containsBlock(blockId))
  }

  test("enumerating blocks") {
    val ids = (1 to 100).map(i => TestBlockId("test_" + i))
    val files = ids.map(id => diskBlockManager.getFile(id))
    files.foreach(file => writeToFile(file, 10))
    assert(diskBlockManager.getAllBlocks.toSet === ids.toSet)
  }

  def writeToFile(file: File, numBytes: Int) {
    val writer = new FileWriter(file, true)
    for (i <- 0 until numBytes) writer.write(i)
    writer.close()
  }
} 
Example 68
Source File: LogPageSuite.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.deploy.worker.ui

import java.io.{File, FileWriter}

import org.mockito.Mockito.{mock, when}
import org.scalatest.PrivateMethodTester

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.deploy.worker.Worker

class LogPageSuite extends SparkFunSuite with PrivateMethodTester {

  test("get logs simple") {
    val webui = mock(classOf[WorkerWebUI])
    val worker = mock(classOf[Worker])
    val tmpDir = new File(sys.props("java.io.tmpdir"))
    val workDir = new File(tmpDir, "work-dir")
    workDir.mkdir()
    when(webui.workDir).thenReturn(workDir)
    when(webui.worker).thenReturn(worker)
    when(worker.conf).thenReturn(new SparkConf())
    val logPage = new LogPage(webui)

    // Prepare some fake log files to read later
    val out = "some stdout here"
    val err = "some stderr here"
    val tmpOut = new File(workDir, "stdout")
    val tmpErr = new File(workDir, "stderr")
    val tmpErrBad = new File(tmpDir, "stderr") // outside the working directory
    val tmpOutBad = new File(tmpDir, "stdout")
    val tmpRand = new File(workDir, "random")
    write(tmpOut, out)
    write(tmpErr, err)
    write(tmpOutBad, out)
    write(tmpErrBad, err)
    write(tmpRand, "1 6 4 5 2 7 8")

    // Get the logs. All log types other than "stderr" or "stdout" will be rejected
    val getLog = PrivateMethod[(String, Long, Long, Long)]('getLog)
    val (stdout, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "stdout", None, 100)
    val (stderr, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "stderr", None, 100)
    val (error1, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "random", None, 100)
    val (error2, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "does-not-exist.txt", None, 100)
    // These files exist, but live outside the working directory
    val (error3, _, _, _) =
      logPage invokePrivate getLog(tmpDir.getAbsolutePath, "stderr", None, 100)
    val (error4, _, _, _) =
      logPage invokePrivate getLog(tmpDir.getAbsolutePath, "stdout", None, 100)
    assert(stdout === out)
    assert(stderr === err)
    assert(error1.startsWith("Error: Log type must be one of "))
    assert(error2.startsWith("Error: Log type must be one of "))
    assert(error3.startsWith("Error: invalid log directory"))
    assert(error4.startsWith("Error: invalid log directory"))
  }

  
  private def write(f: File, s: String): Unit = {
    val writer = new FileWriter(f)
    try {
      writer.write(s)
    } finally {
      writer.close()
    }
  }

} 
Example 69
Source File: WriteTSToFiles.scala    From scala-tsi   with MIT License 5 votes vote down vote up
package com.scalatsi.output

import java.io.{FileWriter, IOException}

import com.scalatsi.TypescriptType.TypescriptNamedType
import com.scalatsi.TypescriptTypeSerializer

import scala.util.Try

object WriteTSToFiles {
  def write(options: OutputOptions)(types: Set[TypescriptNamedType]): Unit = {
    try {
      val targetFile = options.targetFile
      val output     = TypescriptTypeSerializer.emits(options.styleOptions, types)

      Try {
        Option(targetFile.getParentFile).foreach(_.mkdirs())
        targetFile.createNewFile()
      } // createNewFile will continue if file exists
      .recover {
        case e: SecurityException =>
          reportFailure(s"Could not create file '$targetFile' due to JVM security stopping it", code = 2, e = e)
        case e: IOException => reportFailure(s"Could not create file '$targetFile' due to I/O problem", code = 2, e = e)
      }.get

      // TODO: For some reason scala.util.Using isn't working in 2.12, even though we have the compat library
      //      Using(new FileWriter(targetFile)) { writer =>
      //        writer.write(output)
      //      }.recover {
      //        case e: IOException => reportFailure(s"Could not write typescript to file '$targetFile' due to I/O problem", code = 2, e = e)
      //      }.get
      (for {
        writer <- Try(new FileWriter(targetFile))
        _ <- Try {
          try { writer.write(output) }
          finally { writer.close() }
        }
      } yield ()).recover {
        case e: IOException => reportFailure(s"Could not write typescript to file '$targetFile' due to I/O problem", code = 2, e = e)
      }.get

      ()
    } catch {
      case e: Throwable =>
        reportFailure(
          """Uncaught exception in scala-tsi output writer.
            |Please file a bug report at https://github.com/scala-tsi/scala-tsi/issues""".stripMargin,
          e = e
        )
    }
  }

  def reportFailure(msg: String, code: Int = 1, e: Throwable = null): Nothing = {
    require(code > 0, "Should exist with a non-zero exit code on failure")
    System.err.println(msg)
    Option(e).foreach(_.printStackTrace())
    // This will not stop SBT, and the non-zero exit will mark the task as unsuccessful
    sys.exit(code)
  }
} 
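The commented-out block above points at the intended scala.util.Using version. A minimal sketch of that variant, assuming a Scala version where Using is available (2.13+, or 2.12 with a working scala-collection-compat):

import java.io.{File, FileWriter, IOException}

import scala.util.Using

object UsingSketch {
  // Using closes the FileWriter automatically, whether the write succeeds or throws.
  def writeOutput(targetFile: File, output: String): Unit =
    Using(new FileWriter(targetFile)) { writer =>
      writer.write(output)
    }.recover {
      case e: IOException => sys.error(s"Could not write typescript to file '$targetFile' due to I/O problem: ${e.getMessage}")
    }.get
}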
Example 70
Source File: TestS3.scala    From aws-spi-akka-http   with Apache License 2.0 5 votes vote down vote up
package com.github.matsluni.akkahttpspi.s3

import java.io.{File, FileWriter}

import com.dimafeng.testcontainers.GenericContainer
import com.github.matsluni.akkahttpspi.testcontainers.TimeoutWaitStrategy
import com.github.matsluni.akkahttpspi.{AkkaHttpAsyncHttpService, BaseAwsClientTest}
import software.amazon.awssdk.auth.credentials.AnonymousCredentialsProvider
import software.amazon.awssdk.core.async.{AsyncRequestBody, AsyncResponseTransformer}
import software.amazon.awssdk.services.s3.{S3AsyncClient, S3Configuration}
import software.amazon.awssdk.services.s3.model._

import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.Random

class TestS3 extends BaseAwsClientTest[S3AsyncClient] {

  "Async S3 client" should {
    "create bucket" in {
      val bucketName = createBucket()
      val buckets = client.listBuckets().join
      buckets.buckets() should have size (1)
      buckets.buckets().asScala.toList.head.name() should be(bucketName)
    }

    "upload and download a file to a bucket" in {
      val bucketName = createBucket()
      val fileContent = 0 to 1000 mkString

      client.putObject(PutObjectRequest.builder().bucket(bucketName).key("my-file").contentType("text/plain").build(), AsyncRequestBody.fromString(fileContent)).join

      val result = client.getObject(GetObjectRequest.builder().bucket(bucketName).key("my-file").build(),
        AsyncResponseTransformer.toBytes[GetObjectResponse]()).join

      result.asUtf8String() should be(fileContent)
    }

    "multipart upload" ignore {
      val bucketName = createBucket()
      val randomFile = File.createTempFile("aws1", Random.alphanumeric.take(5).mkString)
      val fileContent = Random.alphanumeric.take(1000).mkString
      val fileWriter = new FileWriter(randomFile)
      fileWriter.write(fileContent)
      fileWriter.flush()
      val createMultipartUploadResponse = client.createMultipartUpload(CreateMultipartUploadRequest.builder().bucket(bucketName).key("bar").contentType("text/plain").build()).join()

      val p1 = client.uploadPart(UploadPartRequest.builder().bucket(bucketName).key("bar").partNumber(1).uploadId(createMultipartUploadResponse.uploadId()).build(), randomFile.toPath).join
      val p2 = client.uploadPart(UploadPartRequest.builder().bucket(bucketName).key("bar").partNumber(2).uploadId(createMultipartUploadResponse.uploadId()).build(), randomFile.toPath).join

      client.completeMultipartUpload(CompleteMultipartUploadRequest
        .builder()
        .bucket(bucketName)
        .key("bar")
        .multipartUpload(CompletedMultipartUpload
          .builder()
          .parts(CompletedPart.builder().partNumber(1).eTag(p1.eTag()).build(), CompletedPart.builder().partNumber(2).eTag(p2.eTag()).build())
          .build())
        .uploadId(createMultipartUploadResponse.uploadId())
        .build()).join

      val result = client.getObject(GetObjectRequest.builder().bucket(bucketName).key("bar").build(),
        AsyncResponseTransformer.toBytes[GetObjectResponse]()).join
      result.asUtf8String() should be(fileContent + fileContent)
    }

  }

  def createBucket(): String = {
    val bucketName = Random.alphanumeric.take(7).map(_.toLower).mkString
    client.createBucket(CreateBucketRequest.builder().bucket(bucketName).build()).join
    bucketName
  }

  override def client: S3AsyncClient = {
    S3AsyncClient
      .builder()
      .serviceConfiguration(S3Configuration.builder().checksumValidationEnabled(false).build())
      .region(defaultRegion)
      .credentialsProvider(AnonymousCredentialsProvider.create)
      .endpointOverride(endpoint)
      .httpClient(new AkkaHttpAsyncHttpService().createAsyncHttpClientFactory().build())
      .build()
  }

  override def exposedServicePort: Int = 9090

  override lazy val container: GenericContainer = new GenericContainer(
    dockerImage = "adobe/s3mock:2.1.19",
    exposedPorts = Seq(exposedServicePort),
    waitStrategy = Some(TimeoutWaitStrategy(10 seconds))
  )
} 
Example 71
Source File: ITTestS3.scala    From aws-spi-akka-http   with Apache License 2.0 5 votes vote down vote up
package com.github.matsluni.akkahttpspi.s3

import java.io.{File, FileWriter}

import com.github.matsluni.akkahttpspi.{AkkaHttpAsyncHttpService, TestBase}
import org.scalatest.wordspec.AnyWordSpec
import org.scalatest.matchers.should.Matchers
import software.amazon.awssdk.core.async.{AsyncRequestBody, AsyncResponseTransformer}
import software.amazon.awssdk.services.s3.{S3AsyncClient, S3Configuration}
import software.amazon.awssdk.services.s3.model._

import scala.util.Random

class ITTestS3 extends AnyWordSpec with Matchers with TestBase {

  def withClient(testCode: S3AsyncClient => Any): Any = {

    val akkaClient = new AkkaHttpAsyncHttpService().createAsyncHttpClientFactory().build()

    val client = S3AsyncClient
      .builder()
      .serviceConfiguration(S3Configuration.builder().checksumValidationEnabled(false).build())
      .credentialsProvider(credentialProviderChain)
      .region(defaultRegion)
      .httpClient(akkaClient)
      .build()

    try {
      testCode(client)
    }
    finally { // clean up
      akkaClient.close()
      client.close()
    }
  }

  "S3 async client" should {

    "upload and download a file to a bucket + cleanup" in withClient { implicit client =>
      val bucketName = "aws-spi-test-" + Random.alphanumeric.take(10).filterNot(_.isUpper).mkString
      createBucket(bucketName)
      val randomFile = File.createTempFile("aws", Random.alphanumeric.take(5).mkString)
      val fileContent = Random.alphanumeric.take(1000).mkString
      val fileWriter = new FileWriter(randomFile)
      fileWriter.write(fileContent)
      fileWriter.flush()
      client.putObject(PutObjectRequest.builder().bucket(bucketName).key("my-file").build(), randomFile.toPath).join

      val result = client.getObject(GetObjectRequest.builder().bucket(bucketName).key("my-file").build(),
        AsyncResponseTransformer.toBytes[GetObjectResponse]()).join
      result.asUtf8String() should be(fileContent)

      client.deleteObject(DeleteObjectRequest.builder().bucket(bucketName).key("my-file").build()).join()

      client.deleteBucket(DeleteBucketRequest.builder().bucket(bucketName).build()).join()
    }

    "multipart upload" in withClient { implicit client =>
      val bucketName = "aws-spi-test-" + Random.alphanumeric.take(5).map(_.toLower).mkString
      createBucket(bucketName)
      val fileContent = (0 to 1000000).mkString
      val createMultipartUploadResponse = client.createMultipartUpload(CreateMultipartUploadRequest.builder().bucket(bucketName).key("bar").contentType("text/plain").build()).join()

      val p1 = client.uploadPart(UploadPartRequest.builder().bucket(bucketName).key("bar").partNumber(1).uploadId(createMultipartUploadResponse.uploadId()).build(), AsyncRequestBody.fromString(fileContent)).join
      val p2 = client.uploadPart(UploadPartRequest.builder().bucket(bucketName).key("bar").partNumber(2).uploadId(createMultipartUploadResponse.uploadId()).build(), AsyncRequestBody.fromString(fileContent)).join

      client.completeMultipartUpload(CompleteMultipartUploadRequest
        .builder()
        .bucket(bucketName)
        .key("bar")
        .uploadId(createMultipartUploadResponse.uploadId())
        .multipartUpload(CompletedMultipartUpload
          .builder()
          .parts(CompletedPart.builder().partNumber(1).eTag(p1.eTag()).build(), CompletedPart.builder().partNumber(2).eTag(p2.eTag()).build())
          .build())
        .build()).join

      val result = client.getObject(GetObjectRequest.builder().bucket(bucketName).key("bar").build(),
        AsyncResponseTransformer.toBytes[GetObjectResponse]()).join
      result.asUtf8String() should be(fileContent + fileContent)

      client.deleteObject(DeleteObjectRequest.builder().bucket(bucketName).key("bar").build()).join()
      client.deleteBucket(DeleteBucketRequest.builder().bucket(bucketName).build()).join()
    }
  }

  def createBucket(name: String)(implicit client: S3AsyncClient): Unit = {
    client.createBucket(CreateBucketRequest.builder().bucket(name).build()).join
  }

} 
Example 72
Source File: MultiLinePipeline.scala    From scrapy4s   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.scrapy4s.pipeline

import java.io.FileWriter

import com.scrapy4s.http.Response
import org.slf4j.LoggerFactory


class MultiLinePipeline(filePath: String,
                        linePaser: Response => Seq[String]
                       ) extends Pipeline {
  val logger = LoggerFactory.getLogger(classOf[MultiLinePipeline])

  val writer = new FileWriter(filePath)

  def pipe(response: Response): Unit = {
    val line = linePaser(response)
    line.foreach { l =>
      this.synchronized {
        writer.write(s"$l\n")
      }
    }
  }

  override def close(): Unit = {
    logger.info(s"save to -> $filePath")
    writer.close()
  }
}

object MultiLinePipeline {
  def apply(filePath: String)
           (implicit linePaser: Response => Seq[String] = r => Seq(s"${r.body}")): Pipeline = {
    new MultiLinePipeline(filePath, linePaser = linePaser)
  }
} 
Example 73
Source File: NerHelper.scala    From spark-nlp   with Apache License 2.0 5 votes vote down vote up
package com.johnsnowlabs.benchmarks.spark

import java.io.{BufferedWriter, File, FileWriter}

import com.johnsnowlabs.nlp.annotators.common.NerTagged
import com.johnsnowlabs.nlp.training.CoNLL
import com.johnsnowlabs.nlp.{Annotation, SparkAccessor}
import com.johnsnowlabs.nlp.util.io.ExternalResource
import org.apache.spark.ml.PipelineModel

import scala.collection.mutable


object NerHelper {

  
  def saveNerSpanTags(annotations: Array[Array[Annotation]], file: String): Unit = {
    val bw = new BufferedWriter(new FileWriter(new File(file)))

    bw.write(s"start\tend\ttag\ttext\n")
    for (i <- 0 until annotations.length) {
      for (a <- annotations(i))
        bw.write(s"${a.begin}\t${a.end}\t${a.result}\t${a.metadata("entity").replace("\n", " ")}\n")
    }
    bw.close()
  }

  def calcStat(correct: Int, predicted: Int, predictedCorrect: Int): (Float, Float, Float) = {
    // prec = (predicted & correct) / predicted
    // rec = (predicted & correct) / correct
    val prec = predictedCorrect.toFloat / predicted
    val rec = predictedCorrect.toFloat / correct
    val f1 = 2 * prec * rec / (prec + rec)

    (prec, rec, f1)
  }

  def measureExact(nerReader: CoNLL, model: PipelineModel, file: ExternalResource, printErrors: Int = 0): Unit = {
    val df = nerReader.readDataset(SparkAccessor.benchmarkSpark, file.path).toDF()
    val transformed = model.transform(df)
    val rows = transformed.select("ner_span", "label_span").collect()

    val correctPredicted = mutable.Map[String, Int]()
    val predicted = mutable.Map[String, Int]()
    val correct = mutable.Map[String, Int]()
    var toPrintErrors = printErrors

    for (row <- rows) {

      val predictions = NerTagged.getAnnotations(row, 0).filter(a => a.result != "O")
      val labels = NerTagged.getAnnotations(row, 1).filter(a => a.result != "O")

      for (p <- predictions) {
        val tag = p.metadata("entity")
        predicted(tag) = predicted.getOrElse(tag, 0) + 1
      }

      for (l <- labels) {
        val tag = l.metadata("entity")
        correct(tag) = correct.getOrElse(tag, 0) + 1
      }

      val correctPredictions = labels.toSet.intersect(predictions.toSet)

      for (a <- correctPredictions) {
        val tag = a.metadata("entity")
        correctPredicted(tag) = correctPredicted.getOrElse(tag, 0) + 1
      }

      if (toPrintErrors > 0) {
        for (p <- predictions) {
          if (toPrintErrors > 0 && !correctPredictions.contains(p)) {
            System.out.println(s"Predicted\t${p.result}\t${p.begin}\t${p.end}\t${p.metadata("text")}")
            toPrintErrors -= 1
          }
        }

        for (p <- labels) {
          if (toPrintErrors > 0 && !correctPredictions.contains(p)) {
            System.out.println(s"Correct\t${p.result}\t${p.begin}\t${p.end}\t${p.metadata("text")}")
            toPrintErrors -= 1
          }
        }
      }
    }

    val (prec, rec, f1) = calcStat(correct.values.sum, predicted.values.sum, correctPredicted.values.sum)
    System.out.println(s"$prec\t$rec\t$f1")

    val tags = (correct.keys ++ predicted.keys ++ correctPredicted.keys).toList.distinct

    for (tag <- tags) {
      val (prec, rec, f1) = calcStat(correct.getOrElse(tag, 0), predicted.getOrElse(tag, 0), correctPredicted.getOrElse(tag, 0))
      System.out.println(s"$tag\t$prec\t$rec\t$f1")
    }
  }
} 
Example 74
Source File: CliTestLib.scala    From coursier   with Apache License 2.0 5 votes vote down vote up
package coursier.cli

import java.io.{File, FileWriter}
import java.nio.file.Files


trait CliTestLib {

  def withFile(content: String = "",
               fileName: String = "hello",
               suffix: String = "world")(testCode: (File, FileWriter) => Any) {
    val file = File.createTempFile(fileName, suffix) // create the fixture
    val writer = new FileWriter(file)
    writer.write(content)
    writer.flush()
    try {
      testCode(file, writer) // "loan" the fixture to the test
    } finally {
      writer.close()
      file.delete()
    }
  }

  def withTempDir(
      prefix: String
  )(testCode: File => Any) {
    val dir = Files.createTempDirectory(prefix).toFile // create the fixture
    try {
      testCode(dir) // "loan" the fixture to the test
    } finally {
      cleanDir(dir)
    }
  }

  def cleanDir(tmpDir: File): Unit = {
    def delete(f: File): Boolean =
      if (f.isDirectory) {
        val removedContent =
          Option(f.listFiles()).toSeq.flatten.map(delete).forall(x => x)
        val removedDir = f.delete()

        removedContent && removedDir
      } else
        f.delete()

    if (!delete(tmpDir))
      Console.err.println(
        s"Warning: unable to remove temporary directory $tmpDir")
  }
} 
Example 75
Source File: CliUnitTest.scala    From coursier   with Apache License 2.0 5 votes vote down vote up
package coursier.cli

import java.io.{File, FileWriter}

import coursier.moduleString
import coursier.cli.options.DependencyOptions
import coursier.cli.params.DependencyParams
import coursier.parse.JavaOrScalaModule
import org.junit.runner.RunWith
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatestplus.junit.JUnitRunner


@RunWith(classOf[JUnitRunner])
class CliUnitTest extends AnyFlatSpec {

  def withFile(content: String)(testCode: (File, FileWriter) => Any) {
    val file = File.createTempFile("hello", "world") // create the fixture
    val writer = new FileWriter(file)
    writer.write(content)
    writer.flush()
    try {
      testCode(file, writer) // "loan" the fixture to the test
    }
    finally {
      writer.close()
      file.delete()
    }
  }

  "Normal text" should "parse correctly" in withFile(
    "org1:name1--org2:name2") { (file, _) =>
    val options = DependencyOptions(localExcludeFile = file.getAbsolutePath)
    val params = DependencyParams(options, None)
      .fold(e => sys.error(e.toString), identity)
    val expected = Map(JavaOrScalaModule.JavaModule(mod"org1:name1") -> Set(JavaOrScalaModule.JavaModule(mod"org2:name2")))
    assert(params.perModuleExclude.equals(expected), s"got ${params.perModuleExclude}")
  }

  "Multiple excludes" should "be combined" in withFile(
    "org1:name1--org2:name2\n" +
      "org1:name1--org3:name3\n" +
      "org4:name4--org5:name5") { (file, _) =>

    val options = DependencyOptions(localExcludeFile = file.getAbsolutePath)
    val params = DependencyParams(options, None)
      .fold(e => sys.error(e.toString), identity)
    val expected = Map(
      JavaOrScalaModule.JavaModule(mod"org1:name1") -> Set(JavaOrScalaModule.JavaModule(mod"org2:name2"), JavaOrScalaModule.JavaModule(mod"org3:name3")),
      JavaOrScalaModule.JavaModule(mod"org4:name4") -> Set(JavaOrScalaModule.JavaModule(mod"org5:name5"))
    )
    assert(params.perModuleExclude.equals(expected))
  }

  "extra --" should "error" in withFile(
    "org1:name1--org2:name2--xxx\n" +
      "org1:name1--org3:name3\n" +
      "org4:name4--org5:name5") { (file, _) =>
    val options = DependencyOptions(localExcludeFile = file.getAbsolutePath)
    DependencyParams(options, None).toEither match {
      case Left(errors) =>
        assert(errors.exists(_.startsWith("Failed to parse ")))
      case Right(p) =>
        sys.error(s"Should have errored (got $p)")
    }
  }

  "child has no name" should "error" in withFile(
    "org1:name1--org2:") { (file, _) =>
    val options = DependencyOptions(localExcludeFile = file.getAbsolutePath)
    DependencyParams(options, None).toEither match {
      case Left(errors) =>
        assert(errors.exists(_.startsWith("Failed to parse ")))
      case Right(p) =>
        sys.error(s"Should have errored (got $p)")
    }
  }

  "child has nothing" should "error" in withFile(
    "org1:name1--:") { (file, _) =>
    val options = DependencyOptions(localExcludeFile = file.getAbsolutePath)
    DependencyParams(options, None).toEither match {
      case Left(errors) =>
        assert(errors.exists(_.startsWith("Failed to parse ")))
      case Right(p) =>
        sys.error(s"Should have errored (got $p)")
    }
  }

} 
Example 76
Source File: RuleStatLocator.scala    From apalache   with Apache License 2.0 5 votes vote down vote up
package at.forsyte.apalache.tla.bmcmt.profiler

import java.io.{BufferedWriter, FileWriter, PrintWriter}

import scala.collection.immutable.SortedMap


class RuleStatLocator {
  private var ruleStats: Map[String, RuleStat] = Map()

  def getRuleStat(ruleName: String): RuleStat = {
    ruleStats.get(ruleName) match {
      case Some(r) => r
      case None =>
        val newRule = new RuleStat(ruleName)
        ruleStats += ruleName -> newRule
        newRule
    }
  }

  def getStats = SortedMap(ruleStats.toSeq :_*)

  def writeStats(filename: String): Unit = {
    val writer = new PrintWriter(new FileWriter(filename, false))
    writer.println("Rule profiling statistics")
    val hrule = List.fill(80)('-').mkString
    writer.println(hrule)
    writer.println("%20s %9s %9s %9s %9s %9s"
      .format("name", "calls", "cells", "smt-consts", "smt-asserts", "smt-avg-size"))
    writer.println(hrule)
    val stats = ruleStats.values.toSeq.sortWith(_.nCalls > _.nCalls)
    for (rs <- stats) {
      writer.println("%-20s %9d %9d %9d %9d %9d"
        .format(rs.ruleName, rs.nCalls, rs.nCellsSelf, rs.nSmtConstsSelf, rs.nSmtAssertsSelf, rs.smtAssertsSizeAvg))
    }
    writer.close()
  }
} 
Example 77
Source File: BfsStrategyStopWatchDecorator.scala    From apalache   with Apache License 2.0 5 votes vote down vote up
package at.forsyte.apalache.tla.bmcmt.search

import java.io.{FileWriter, PrintWriter, Writer}
import java.time.{Duration, LocalDateTime}

import at.forsyte.apalache.tla.bmcmt.search.SearchStrategy.{Finish, FinishOnDeadlock, NextStep}


class BfsStrategyStopWatchDecorator(strategy: SearchStrategy, filename: String) extends SearchStrategy {
  private var currentStep: Int = 0
  private var printWriter: Option[PrintWriter] = None
  private var startTime: LocalDateTime = LocalDateTime.now()

  override def getCommand: SearchStrategy.Command = {
    val command = strategy.getCommand
    command match {
      case NextStep(stepNo, _, _) =>
        if (stepNo == 0) {
          currentStep = 0
          // create a log file and add a header
          printWriter = Some(new PrintWriter(new FileWriter(filename, false)))
          printWriter.get.println("step,total_sec,nanosec_adjustment")
          // start the timer
          startTime = LocalDateTime.now()
        } else {
          appendCsvEntry()
          currentStep = stepNo
        }

      case Finish() | FinishOnDeadlock() =>
        appendCsvEntry()
        printWriter.get.close()
    }
    command
  }

  private def appendCsvEntry(): Unit = {
    val currentTime = LocalDateTime.now()
    val duration = Duration.between(startTime, currentTime)
    printWriter.get.println("%d,%d,%d".format(currentStep, duration.getSeconds, duration.getNano))
    printWriter.get.flush() // get the results as soon as possible
  }

  override def registerResponse(response: SearchStrategy.Response): Unit = {
    strategy.registerResponse(response)
  }
} 
Example 78
Source File: IO.scala    From scala-commons   with MIT License 5 votes vote down vote up
package com.avsystem.commons
package testutil

import java.io.{BufferedReader, File, FileWriter, InputStreamReader}

import scala.annotation.tailrec

object IO {
  def readTestResource(path: String): String = {
    val reader = new BufferedReader(new InputStreamReader(getClass.getResourceAsStream(path)))
    val buf = new Array[Char](2048)
    @tailrec def loop(sb: JStringBuilder): String = {
      reader.read(buf) match {
        case -1 => sb.toString
        case count => loop(sb.append(buf, 0, count))
      }
    }
    try loop(new JStringBuilder) finally reader.close()
  }

  def writeTestResource(path: String, data: String): Unit = {
    // assuming working dir used by intellij
    val writer = new FileWriter(s"src/test/resources$path".replaceAllLiterally("/", File.separator))
    try writer.write(data) finally writer.close()
  }
} 
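A hypothetical round-trip with the helpers above (the resource path is illustrative): read a bundled test resource from the classpath, normalise it, and write it back under src/test/resources:

import com.avsystem.commons.testutil.IO

object RegenerateFixture extends App {
  // readTestResource loads from the classpath; writeTestResource rewrites the matching source file.
  val original = IO.readTestResource("/expected-output.json")
  IO.writeTestResource("/expected-output.json", original.trim + "\n")
}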
Example 79
Source File: TreeNode.scala    From AppCrawler   with Apache License 2.0 5 votes vote down vote up
package com.testerhome.appcrawler

import java.io.{BufferedWriter, FileWriter}

import scala.collection.mutable.ListBuffer


case class TreeNode[T](
                        value: T,
                        children: ListBuffer[TreeNode[T]] = ListBuffer[TreeNode[T]]()
                      ) {

  def equals(node: TreeNode[T]): Boolean = {
    node.value == this.value
  }


  def find(tree: TreeNode[T], node: TreeNode[T]): Option[TreeNode[T]] = {
    if (tree.equals(node)) {
      return Some(tree)
    }
    tree.children.foreach(t => {
      find(t, node) match {
        case Some(v) => return Some(v)
        case None => {}
      }
    })
    None
  }

  def appendNode(currentTree: TreeNode[T], node: TreeNode[T]): TreeNode[T] = {
    find(currentTree, node) match {
      case Some(v) => {
        v
      }
      case None => {
        this.children.append(node)
        node
      }
    }
  }


  def toXml(tree: TreeNode[T]): String = {
    val s=new StringBuffer()
    val before = (tree: TreeNode[T]) => {
      s.append(s"""<node TEXT="${xml.Utility.escape(tree.value.toString)}">""")
      //todo: add an image link attribute, e.g. LINK="file:///Users/seveniruby/projects/LBSRefresh/Android_20160216105737/946_StockDetail-Back--.png"
    }
    val after = (tree: TreeNode[T]) => {
      s.append("</node>")
      s.append("\n")
    }

    s.append("""<map version="1.0.1">""")
    s.append("\n")
    traversal[T](tree, before, after)
    s.append("</map>")
    s.toString
  }

  def traversal[T](tree: TreeNode[T],
                   before: (TreeNode[T]) => Any = (x: TreeNode[T]) => Unit,
                   after: (TreeNode[T]) => Any = (x: TreeNode[T]) => Unit): Unit = {
    before(tree)
    tree.children.foreach(t => {
      traversal(t, before, after)
    })
    after(tree)
  }

  def generateFreeMind(list: ListBuffer[T], path:String=null): String = {
    if(list.isEmpty){
      return ""
    }
    val root=TreeNode(list.head)
    var currentNode=root
    list.slice(1, list.size).foreach(e=>{
      currentNode=currentNode.appendNode(root, TreeNode(e))
    })
    val xml=toXml(root)
    if(path!=null){
      val file = new java.io.File(path)
      val bw = new BufferedWriter(new FileWriter(file))
      bw.write(xml)
      bw.close()
    }
    xml
  }

} 
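
A small usage sketch of the class above, placed in the same package as TreeNode: it builds a FreeMind mind map from a crawl path and lets generateFreeMind write it through the BufferedWriter/FileWriter pair shown in the example. The page names and output path are illustrative.

import scala.collection.mutable.ListBuffer

object TreeNodeExample {
  def main(args: Array[String]): Unit = {
    // Duplicate values are merged by appendNode, so "Detail" appears only once in the map
    val visited = ListBuffer("Home", "List", "Detail", "Detail", "Comments")
    val xml = TreeNode(visited.head).generateFreeMind(visited, "/tmp/crawl.mm")
    println(xml)
  }
}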
Example 80
Source File: PerformanceDataMetricsFile.scala    From ohara   with Apache License 2.0 5 votes vote down vote up
package oharastream.ohara.it.performance

import java.io.{File, FileWriter}
import oharastream.ohara.common.util.{CommonUtils, Releasable}

class PerformanceDataMetricsFile {
  private[this] val logDataInfoFileName = s"${CommonUtils.randomString(5)}.csv"

  private[this] val reportOutputFolderKey = PerformanceTestingUtils.REPORT_OUTPUT_KEY
  private[this] val reportOutputFolder: File = mkdir(
    new File(
      value(reportOutputFolderKey).getOrElse("/tmp/performance")
    )
  )

  def logMeters(report: PerformanceReport): Unit = if (report.records.nonEmpty) {
    // We have to fix the order of the key-value pairs:
    // if each line were generated from map.keys and map.values separately, the ordering could differ between rows.
    val headers = report.records.head._2.keySet.toList
    report.records.values.foreach { items =>
      headers.foreach(
        name =>
          if (!items.contains(name))
            throw new RuntimeException(s"$name disappear?? current:${items.keySet.mkString(",")}")
      )
    }
    val file = path(report.className, s"${report.key.group()}-${report.key.name()}.csv")
    if (file.exists() && !file.delete()) throw new RuntimeException(s"failed to remove file:$file")
    val fileWriter = new FileWriter(file)
    try {
      fileWriter.write("duration," + headers.map(s => s"""\"$s\"""").mkString(","))
      fileWriter.write("\n")
      report.records.foreach {
        case (duration, item) =>
          val line = s"$duration," + headers.map(header => f"${item(header)}%.3f").mkString(",")
          fileWriter.write(line)
          fileWriter.write("\n")
      }
    } finally Releasable.close(fileWriter)
  }

  def logDataInfos(inputDataInfos: Seq[DataInfo]): Unit = {
    val file = path("inputdata", logDataInfoFileName)
    if (file.exists() && !file.delete()) throw new RuntimeException(s"failed to remove file:$file")
    val fileWriter = new FileWriter(file)
    try {
      fileWriter.write("duration,messageNumber,messageSize\n")
      inputDataInfos
        .sortBy(_.duration)(Ordering[Long].reverse)
        .foreach(inputDataInfo => {
          fileWriter.write(
            s"${inputDataInfo.duration / 1000},${inputDataInfo.messageNumber},${inputDataInfo.messageSize}\n"
          )
        })
    } finally Releasable.close(fileWriter)
  }

  private[this] def simpleName(className: String): String = {
    val index = className.lastIndexOf(".")
    if (index != -1) className.substring(index + 1)
    else className
  }

  private[this] def path(className: String, fileName: String): File = {
    new File(
      mkdir(new File(mkdir(new File(reportOutputFolder, simpleName(className))), this.getClass.getSimpleName)),
      fileName
    )
  }

  private[this] def mkdir(folder: File): File = {
    if (!folder.exists() && !folder.mkdirs()) throw new AssertionError(s"failed to create folder on $folder")
    if (folder.exists() && !folder.isDirectory) throw new AssertionError(s"$folder is not a folder")
    folder
  }

  private[this] def value(key: String): Option[String] = sys.env.get(key)
} 
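
The logMeters method above pins the column order by taking the header list from the first record and then indexing every row's map with that same list. A stripped-down sketch of this fixed-header CSV pattern follows; the record data and output path are illustrative.

import java.io.FileWriter

object FixedHeaderCsvSketch {
  def main(args: Array[String]): Unit = {
    val records = Seq(
      10L -> Map("inBytes" -> 1.0, "outBytes" -> 2.0),
      20L -> Map("inBytes" -> 3.0, "outBytes" -> 4.0)
    )
    val headers = records.head._2.keySet.toList            // one authoritative column order
    val writer  = new FileWriter("/tmp/metrics-sketch.csv")
    try {
      writer.write("duration," + headers.map(h => "\"" + h + "\"").mkString(",") + "\n")
      records.foreach { case (duration, row) =>
        // index every row with the same header list so columns never shift
        writer.write(s"$duration," + headers.map(h => f"${row(h)}%.3f").mkString(",") + "\n")
      }
    } finally writer.close()
  }
}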
Example 81
Source File: SessionDataFileWriter.scala    From spark_training   with Apache License 2.0 5 votes vote down vote up
package com.malaska.spark.training.streaming.dstream.sessionization

import java.io.BufferedWriter
import java.io.FileWriter

object SessionDataFileWriter {
  
  val eol = System.getProperty("line.separator")
  
  def main(args: Array[String]) {
    if (args.length == 0) {
        println("SessionDataFileWriter {numberOfRecords} {outputFile} ");
        return;
    }
    
    val writer = new BufferedWriter(new FileWriter(args(1)))
    val loops = args(0).toInt
    
    for (i <- 1 to loops) {
      writer.write(SessionDataGenerator.getNextEvent + eol)
    }
    
    writer.close
  }
} 
Example 82
Source File: SessionDataFileHDFSWriter.scala    From spark_training   with Apache License 2.0 5 votes vote down vote up
package com.malaska.spark.training.streaming.dstream.sessionization

import java.io.BufferedWriter
import java.io.FileWriter
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.conf.Configuration
import java.io.OutputStreamWriter
import org.apache.hadoop.fs.Path
import java.util.Random

object SessionDataFileHDFSWriter {
  
  val eol = System.getProperty("line.separator")
  
  def main(args: Array[String]) {
    if (args.length == 0) {
        println("SessionDataFileWriter {tempDir} {distDir} {numberOfFiles} {numberOfEventsPerFile} {waitBetweenFiles}");
        return;
    }
    val conf = new Configuration
    conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"))
    conf.addResource(new Path("/etc/hadoop/conf/mapred-site.xml"))
    conf.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"))
    
    val fs = FileSystem.get(conf) // use the configuration that has the cluster resource files loaded
    val rootTempDir = args(0)
    val rootDistDir = args(1)
    val files = args(2).toInt
    val loops = args(3).toInt
    val waitBetweenFiles = args(4).toInt
    val r = new Random
    for (f <- 1 to files) {
      val rootName = "/weblog." + System.currentTimeMillis()
      val tmpPath = new Path(rootTempDir + rootName + ".tmp")
      val writer = new BufferedWriter(new OutputStreamWriter(fs.create(tmpPath)))
      
      print(f + ": [")
      
      val randomLoops = loops + r.nextInt(loops)
      
      for (i <- 1 to randomLoops) {
        writer.write(SessionDataGenerator.getNextEvent + eol)
        if (i%100 == 0) {
          print(".")
        }
      }
      println("]")
      writer.close
      
      val distPath = new Path(rootDistDir + rootName + ".dat")
      
      fs.rename(tmpPath, distPath)
      Thread.sleep(waitBetweenFiles)
    }
    println("Done")
  }
} 
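
The HDFS writer above publishes each file atomically: it streams events into a .tmp path and only renames it to the final .dat name after the writer is closed, so a downstream reader watching the directory never sees a half-written file. Here is a local-filesystem analogue of the same write-then-rename pattern using java.nio; the paths are illustrative.

import java.io.{BufferedWriter, FileWriter}
import java.nio.file.{Files, Paths, StandardCopyOption}

object WriteThenRenameSketch {
  def main(args: Array[String]): Unit = {
    val stamp = System.currentTimeMillis()
    val tmp   = Paths.get(s"/tmp/weblog.$stamp.tmp")
    val done  = Paths.get(s"/tmp/weblog.$stamp.dat")
    val writer = new BufferedWriter(new FileWriter(tmp.toFile))
    try (1 to 1000).foreach(i => writer.write(s"event-$i\n"))
    finally writer.close()
    // Publish only once the file is complete; ATOMIC_MOVE fails fast if the filesystem cannot do it atomically
    Files.move(tmp, done, StandardCopyOption.ATOMIC_MOVE)
  }
}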
Example 83
Source File: MapAndMapPartitions_7_3.scala    From LearningSparkV2   with Apache License 2.0 5 votes vote down vote up
package main.scala.chapter7

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types._
import org.apache.spark.sql.functions._
import scala.math.sqrt
import java.io.FileWriter

object MapAndMapPartitions_7_3 {

  // simulate a connection to a FS
  def getConnection (f: String): FileWriter = {
    new FileWriter(f, true)
  }

  // function used with map(): opens and closes a connection for every element
  def func(v: Long) = {
    // open a connection to the simulated sink (a file standing in for a DB)
    val conn = getConnection("/tmp/sqrt.txt")
    val sr = sqrt(v)
    // write the value out to the sink
    conn.write(sr.toString())
    conn.write(System.lineSeparator())
    conn.close()
    sr
  }
  // function used with mapPartitions(): the connection is created once per partition and passed in
  def funcMapPartitions(conn: FileWriter, v: Long) = {
    val sr = sqrt(v)
    conn.write(sr.toString())
    conn.write(System.lineSeparator())
    sr
  }

  // curried function to benchmark any code or function
  def benchmark(name: String)(f: => Unit) {
    val startTime = System.nanoTime
    f
    val endTime = System.nanoTime
    println(s"Time taken in $name: " + (endTime - startTime).toDouble / 1000000000 + " seconds")
  }


  def main (args: Array[String] ) {

    val spark = SparkSession
    .builder
    .appName("MapAndMapPartitions")
    .getOrCreate ()

    import spark.implicits._

    val df = spark.range(1 * 10000000).toDF("id").withColumn("square", $"id" * $"id").repartition(16)
    df.show(5)

    // Benchmark Map function
    benchmark("map function") {
      df.map(r => (func(r.getLong(1)))).show(10)
    }

    // Benchmark mapPartitions function
    benchmark("mapPartitions function") {

      df.mapPartitions(iterator => {
        val conn = getConnection("/tmp/sqrt.txt")
        val result = iterator.map(data => funcMapPartitions(conn, data.getLong(1))).toList
        conn.close()
        result.iterator
      }
      ).toDF().show(10)
    }
  }
} 
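
The mapPartitions branch above wins because the FileWriter is opened once per partition instead of once per row. The same amortization can be seen without Spark by grouping a plain iterator into chunks; in this minimal sketch the chunk size and output path are illustrative.

import java.io.{BufferedWriter, FileWriter}
import scala.math.sqrt

object PerChunkConnectionSketch {
  def main(args: Array[String]): Unit = {
    val values = (1L to 100L).iterator
    values.grouped(25).foreach { chunk =>
      // one "partition" at a time: the writer is opened once per chunk, not once per value
      val conn = new BufferedWriter(new FileWriter("/tmp/sqrt-sketch.txt", true))
      try chunk.foreach(v => { conn.write(sqrt(v.toDouble).toString); conn.newLine() })
      finally conn.close()
    }
  }
}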
Example 84
Source File: TestLoadDataWithJunkChars.scala    From carbondata   with Apache License 2.0 5 votes vote down vote up
package org.apache.carbondata.integration.spark.testsuite.dataload

import java.io.{BufferedWriter, File, FileWriter}
import java.util.Random

import org.apache.spark.sql.Row
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll

class TestLoadDataWithJunkChars extends QueryTest with BeforeAndAfterAll {
  var filePath = ""
  val junkchars = "ǍǎǏǐǑǒǓǔǕǖǗǘǙǚǛǜǝǞǟǠǡǢǣǤǥǦǧǨǩǪǫǬǭǮǯǰ"

  def buildTestData() = {
    filePath = s"$integrationPath/spark/target/junkcharsdata.csv"
    val file = new File(filePath)
    val writer = new BufferedWriter(new FileWriter(file))
    writer.write("c1,c2\n")
    val random = new Random
    for (i <- 1 until 1000) {
      writer.write("a" + i + "," + junkchars + "\n")
      if ( i % 100 == 0) {
        writer.flush()
      }
    }
    writer.write("a1000000," + junkchars)
    writer.close
  }

  test("[bug]fix bug of duplicate rows in UnivocityCsvParser #877") {
    buildTestData()
    sql("drop table if exists junkcharsdata")
    sql("""create table if not exists junkcharsdata
             (c1 string, c2 string)
             STORED AS carbondata""")
    sql(s"LOAD DATA LOCAL INPATH '$filePath' into table junkcharsdata")
    checkAnswer(sql("select count(*) from junkcharsdata"), Seq(Row(1000)))
    sql("drop table if exists junkcharsdata")
    new File(filePath).delete()
  }
} 
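
One caveat when generating test data with non-ASCII characters such as the junk string above: the FileWriter constructors used in these examples pick up the platform default charset. If the fixture must be encoding-stable across machines, an OutputStreamWriter with an explicit UTF-8 charset is the safer choice; a small sketch follows, with an illustrative output path.

import java.io.{BufferedWriter, File, FileOutputStream, OutputStreamWriter}
import java.nio.charset.StandardCharsets

object Utf8CsvSketch {
  def main(args: Array[String]): Unit = {
    val file = new File("/tmp/junkchars-utf8.csv")
    // Explicit UTF-8 instead of the platform default charset
    val writer = new BufferedWriter(
      new OutputStreamWriter(new FileOutputStream(file), StandardCharsets.UTF_8))
    try {
      writer.write("c1,c2\n")
      writer.write("a1,ǍǎǏǐǑǒ\n")
    } finally writer.close()
  }
}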
Example 85
Source File: DiskBlockManagerSuite.scala    From SparkCore   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.storage

import java.io.{File, FileWriter}

import scala.language.reflectiveCalls

import org.mockito.Mockito.{mock, when}
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FunSuite}

import org.apache.spark.SparkConf
import org.apache.spark.util.Utils

class DiskBlockManagerSuite extends FunSuite with BeforeAndAfterEach with BeforeAndAfterAll {
  private val testConf = new SparkConf(false)
  private var rootDir0: File = _
  private var rootDir1: File = _
  private var rootDirs: String = _

  val blockManager = mock(classOf[BlockManager])
  when(blockManager.conf).thenReturn(testConf)
  var diskBlockManager: DiskBlockManager = _

  override def beforeAll() {
    super.beforeAll()
    rootDir0 = Utils.createTempDir()
    rootDir1 = Utils.createTempDir()
    rootDirs = rootDir0.getAbsolutePath + "," + rootDir1.getAbsolutePath
  }

  override def afterAll() {
    super.afterAll()
    Utils.deleteRecursively(rootDir0)
    Utils.deleteRecursively(rootDir1)
  }

  override def beforeEach() {
    val conf = testConf.clone
    conf.set("spark.local.dir", rootDirs)
    diskBlockManager = new DiskBlockManager(blockManager, conf)
  }

  override def afterEach() {
    diskBlockManager.stop()
  }

  test("basic block creation") {
    val blockId = new TestBlockId("test")
    val newFile = diskBlockManager.getFile(blockId)
    writeToFile(newFile, 10)
    assert(diskBlockManager.containsBlock(blockId))
    newFile.delete()
    assert(!diskBlockManager.containsBlock(blockId))
  }

  test("enumerating blocks") {
    val ids = (1 to 100).map(i => TestBlockId("test_" + i))
    val files = ids.map(id => diskBlockManager.getFile(id))
    files.foreach(file => writeToFile(file, 10))
    assert(diskBlockManager.getAllBlocks.toSet === ids.toSet)
  }

  def writeToFile(file: File, numBytes: Int) {
    val writer = new FileWriter(file, true)
    for (i <- 0 until numBytes) writer.write(i)
    writer.close()
  }
} 
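
The writeToFile helper above relies on two details of the java.io API: passing true as the second FileWriter argument opens the file in append mode, and Writer.write(int) writes the single character whose code point is the given int, so the loop produces numBytes characters rather than numeric text. A tiny sketch making both points explicit; the path is illustrative.

import java.io.FileWriter

object AppendModeSketch {
  def main(args: Array[String]): Unit = {
    val path = "/tmp/append-sketch.txt"
    val first = new FileWriter(path, true)   // append = true: existing content is kept
    first.write(65)                          // writes the character 'A', not the text "65"
    first.close()
    val second = new FileWriter(path, true)
    second.write("BC")                       // appended after 'A'
    second.close()
  }
}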
Example 86
Source File: HashShuffleManagerSuite.scala    From SparkCore   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.shuffle.hash

import java.io.{File, FileWriter}

import scala.language.reflectiveCalls

import org.scalatest.FunSuite

import org.apache.spark.{SparkEnv, SparkContext, LocalSparkContext, SparkConf}
import org.apache.spark.executor.ShuffleWriteMetrics
import org.apache.spark.network.buffer.{FileSegmentManagedBuffer, ManagedBuffer}
import org.apache.spark.serializer.JavaSerializer
import org.apache.spark.shuffle.FileShuffleBlockManager
import org.apache.spark.storage.{ShuffleBlockId, FileSegment}

class HashShuffleManagerSuite extends FunSuite with LocalSparkContext {
  private val testConf = new SparkConf(false)

  private def checkSegments(expected: FileSegment, buffer: ManagedBuffer) {
    assert(buffer.isInstanceOf[FileSegmentManagedBuffer])
    val segment = buffer.asInstanceOf[FileSegmentManagedBuffer]
    assert(expected.file.getCanonicalPath === segment.getFile.getCanonicalPath)
    assert(expected.offset === segment.getOffset)
    assert(expected.length === segment.getLength)
  }

  test("consolidated shuffle can write to shuffle group without messing existing offsets/lengths") {

    val conf = new SparkConf(false)
    // Reset after EACH object write. This ensures that bytes are appended after
    // an object is written, so if a code path assumes writeObject marks the end of the data,
    // this should flush those bugs out. This was a common bug in ExternalAppendOnlyMap, etc.
    conf.set("spark.serializer.objectStreamReset", "1")
    conf.set("spark.serializer", "org.apache.spark.serializer.JavaSerializer")
    conf.set("spark.shuffle.manager", "org.apache.spark.shuffle.hash.HashShuffleManager")

    sc = new SparkContext("local", "test", conf)

    val shuffleBlockManager =
      SparkEnv.get.shuffleManager.shuffleBlockManager.asInstanceOf[FileShuffleBlockManager]

    val shuffle1 = shuffleBlockManager.forMapTask(1, 1, 1, new JavaSerializer(conf),
      new ShuffleWriteMetrics)
    for (writer <- shuffle1.writers) {
      writer.write("test1")
      writer.write("test2")
    }
    for (writer <- shuffle1.writers) {
      writer.commitAndClose()
    }

    val shuffle1Segment = shuffle1.writers(0).fileSegment()
    shuffle1.releaseWriters(success = true)

    val shuffle2 = shuffleBlockManager.forMapTask(1, 2, 1, new JavaSerializer(conf),
      new ShuffleWriteMetrics)

    for (writer <- shuffle2.writers) {
      writer.write("test3")
      writer.write("test4")
    }
    for (writer <- shuffle2.writers) {
      writer.commitAndClose()
    }
    val shuffle2Segment = shuffle2.writers(0).fileSegment()
    shuffle2.releaseWriters(success = true)

    // Now comes the test:
    // Write to shuffle 3 and close it, but before registering it, check that the file lengths
    // recorded for the earlier writers are still the same as their segments. Earlier, the length
    // of a block was inferred from the remaining data in the file, which could mess things up when
    // concurrent reads and writes happen to the same shuffle group.

    val shuffle3 = shuffleBlockManager.forMapTask(1, 3, 1, new JavaSerializer(testConf),
      new ShuffleWriteMetrics)
    for (writer <- shuffle3.writers) {
      writer.write("test3")
      writer.write("test4")
    }
    for (writer <- shuffle3.writers) {
      writer.commitAndClose()
    }
    // check before we register.
    checkSegments(shuffle2Segment, shuffleBlockManager.getBlockData(ShuffleBlockId(1, 2, 0)))
    shuffle3.releaseWriters(success = true)
    checkSegments(shuffle2Segment, shuffleBlockManager.getBlockData(ShuffleBlockId(1, 2, 0)))
    shuffleBlockManager.removeShuffle(1)
  }

  def writeToFile(file: File, numBytes: Int) {
    val writer = new FileWriter(file, true)
    for (i <- 0 until numBytes) writer.write(i)
    writer.close()
  }
} 
Example 87
Source File: DiskBlockManagerSuite.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.storage

import java.io.{File, FileWriter}

import scala.language.reflectiveCalls

import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.util.Utils

class DiskBlockManagerSuite extends SparkFunSuite with BeforeAndAfterEach with BeforeAndAfterAll {
  private val testConf = new SparkConf(false)
  private var rootDir0: File = _
  private var rootDir1: File = _
  private var rootDirs: String = _

  var diskBlockManager: DiskBlockManager = _

  override def beforeAll() {
    super.beforeAll()
    rootDir0 = Utils.createTempDir()
    rootDir1 = Utils.createTempDir()
    rootDirs = rootDir0.getAbsolutePath + "," + rootDir1.getAbsolutePath
  }

  override def afterAll() {
    try {
      Utils.deleteRecursively(rootDir0)
      Utils.deleteRecursively(rootDir1)
    } finally {
      super.afterAll()
    }
  }

  override def beforeEach() {
    super.beforeEach()
    val conf = testConf.clone
    conf.set("spark.local.dir", rootDirs)
    diskBlockManager = new DiskBlockManager(conf, deleteFilesOnStop = true)
  }

  override def afterEach() {
    try {
      diskBlockManager.stop()
    } finally {
      super.afterEach()
    }
  }

  test("basic block creation") {
    val blockId = new TestBlockId("test")
    val newFile = diskBlockManager.getFile(blockId)
    writeToFile(newFile, 10)
    assert(diskBlockManager.containsBlock(blockId))
    newFile.delete()
    assert(!diskBlockManager.containsBlock(blockId))
  }

  test("enumerating blocks") {
    val ids = (1 to 100).map(i => TestBlockId("test_" + i))
    val files = ids.map(id => diskBlockManager.getFile(id))
    files.foreach(file => writeToFile(file, 10))
    assert(diskBlockManager.getAllBlocks.toSet === ids.toSet)
  }

  def writeToFile(file: File, numBytes: Int) {
    val writer = new FileWriter(file, true)
    for (i <- 0 until numBytes) writer.write(i)
    writer.close()
  }
} 
Example 88
Source File: LogPageSuite.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.deploy.worker.ui

import java.io.{File, FileWriter}

import org.mockito.Mockito.{mock, when}
import org.scalatest.PrivateMethodTester

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.deploy.worker.Worker

class LogPageSuite extends SparkFunSuite with PrivateMethodTester {

  test("get logs simple") {
    val webui = mock(classOf[WorkerWebUI])
    val worker = mock(classOf[Worker])
    val tmpDir = new File(sys.props("java.io.tmpdir"))
    val workDir = new File(tmpDir, "work-dir")
    workDir.mkdir()
    when(webui.workDir).thenReturn(workDir)
    when(webui.worker).thenReturn(worker)
    when(worker.conf).thenReturn(new SparkConf())
    val logPage = new LogPage(webui)

    // Prepare some fake log files to read later
    val out = "some stdout here"
    val err = "some stderr here"
    val tmpOut = new File(workDir, "stdout")
    val tmpErr = new File(workDir, "stderr")
    val tmpErrBad = new File(tmpDir, "stderr") // outside the working directory
    val tmpOutBad = new File(tmpDir, "stdout")
    val tmpRand = new File(workDir, "random")
    write(tmpOut, out)
    write(tmpErr, err)
    write(tmpOutBad, out)
    write(tmpErrBad, err)
    write(tmpRand, "1 6 4 5 2 7 8")

    // Get the logs. All log types other than "stderr" or "stdout" will be rejected
    val getLog = PrivateMethod[(String, Long, Long, Long)]('getLog)
    val (stdout, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "stdout", None, 100)
    val (stderr, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "stderr", None, 100)
    val (error1, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "random", None, 100)
    val (error2, _, _, _) =
      logPage invokePrivate getLog(workDir.getAbsolutePath, "does-not-exist.txt", None, 100)
    // These files exist, but live outside the working directory
    val (error3, _, _, _) =
      logPage invokePrivate getLog(tmpDir.getAbsolutePath, "stderr", None, 100)
    val (error4, _, _, _) =
      logPage invokePrivate getLog(tmpDir.getAbsolutePath, "stdout", None, 100)
    assert(stdout === out)
    assert(stderr === err)
    assert(error1.startsWith("Error: Log type must be one of "))
    assert(error2.startsWith("Error: Log type must be one of "))
    assert(error3.startsWith("Error: invalid log directory"))
    assert(error4.startsWith("Error: invalid log directory"))
  }

  
  private def write(f: File, s: String): Unit = {
    val writer = new FileWriter(f)
    try {
      writer.write(s)
    } finally {
      writer.close()
    }
  }

} 
Example 89
Source File: DistServiceExecutor.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.experiments.distributeservice

import java.io.{File, FileWriter}
import java.net.InetAddress
import scala.collection.JavaConverters._
import scala.io.Source
import scala.sys.process._
import scala.util.{Failure, Success, Try}

import akka.actor.Actor
import org.apache.commons.io.FileUtils
import org.apache.commons.lang.text.StrSubstitutor
import org.slf4j.Logger

import org.apache.gearpump.cluster.{ExecutorContext, UserConfig}
import org.apache.gearpump.experiments.distributeservice.DistServiceAppMaster.InstallService
import org.apache.gearpump.util.{ActorUtil, LogUtil}

class DistServiceExecutor(executorContext: ExecutorContext, userConf: UserConfig) extends Actor {
  import executorContext._
  private val LOG: Logger = LogUtil.getLogger(getClass, executor = executorId, app = appId)

  override def receive: Receive = {
    case InstallService(url, zipFileName, targetPath, scriptData, serviceName, serviceSettings) =>
      LOG.info(s"Executor $executorId receive command to install " +
        s"service $serviceName to $targetPath")
      unzipFile(url, zipFileName, targetPath)
      installService(scriptData, serviceName, serviceSettings)
  }

  private def unzipFile(url: String, zipFileName: String, targetPath: String) = {
    val zipFile = File.createTempFile(System.currentTimeMillis().toString, zipFileName)
    val dir = new File(targetPath)
    if (dir.exists()) {
      FileUtils.forceDelete(dir)
    }
    val bytes = FileServer.newClient.get(url).get
    FileUtils.writeByteArrayToFile(zipFile, bytes)
    val result = Try(s"unzip ${zipFile.getAbsolutePath} -d $targetPath".!!)
    result match {
      case Success(msg) => LOG.info(s"Executor $executorId unzip file to $targetPath")
      case Failure(ex) => throw ex
    }
  }

  private def installService(
      scriptData: Array[Byte], serviceName: String, serviceSettings: Map[String, Any]) = {
    val tempFile = File.createTempFile("gearpump", serviceName)
    FileUtils.writeByteArrayToFile(tempFile, scriptData)
    val script = new File("/etc/init.d", serviceName)
    writeFileWithEnvVariables(tempFile, script, serviceSettings ++ getEnvSettings)
    val result = Try(s"chkconfig --add $serviceName".!!)
    result match {
      case Success(msg) => LOG.info(s"Executor install service $serviceName successfully!")
      case Failure(ex) => throw ex
    }
  }

  private def getEnvSettings: Map[String, Any] = {
    Map("workerId" -> worker,
      "localhost" -> ActorUtil.getSystemAddress(context.system).host.get,
      "hostname" -> InetAddress.getLocalHost.getHostName)
  }

  private def writeFileWithEnvVariables(source: File, target: File, envs: Map[String, Any]) = {
    val writer = new FileWriter(target)
    val sub = new StrSubstitutor(envs.asJava)
    sub.setEnableSubstitutionInVariables(true)
    val lines = Source.fromFile(source)
    try lines.getLines().foreach(line => writer.write(sub.replace(line) + "\r\n"))
    finally {
      lines.close()
      writer.close()
    }
  }
} 
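
writeFileWithEnvVariables above uses commons-lang's StrSubstitutor to expand ${...} placeholders in the service script, with substitution-in-variables enabled so placeholders may even appear inside variable names. A standalone sketch of that substitution step; the variable names and values are illustrative.

import org.apache.commons.lang.text.StrSubstitutor
import scala.collection.JavaConverters._

object SubstitutionSketch {
  def main(args: Array[String]): Unit = {
    val vars: Map[String, Any] = Map(
      "hostname" -> "worker-1.example.org",
      "endpoint" -> "http://${hostname}:8080"    // placeholders inside values are resolved recursively
    )
    val sub = new StrSubstitutor(vars.asJava)
    sub.setEnableSubstitutionInVariables(true)   // additionally allows placeholders inside variable names
    println(sub.replace("service listens on ${endpoint}"))
  }
}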
Example 90
Source File: GoldBunsetsuDepInCabocha.scala    From jigg   with Apache License 2.0 5 votes vote down vote up
package jigg.nlp.ccg


object GoldBunsetsuDepInCoNLL {

  case class Opts(
    @Help(text="Path to Cabocha file (same sentences with the CCGBank file)") cabocha: File = new File("")
  )

  def main(args:Array[String]) = {
    val opts = CommandLineParser.readIn[Opts](args)

    val dict = new JapaneseDictionary(new Word2CategoryDictionary)

    val conv = new JapaneseParseTreeConverter(dict)
    val parseTrees = new CCGBankReader()
      .readParseTrees(IOUtil.openStandardIterator, -1, true)
      .map(conv.toLabelTree _).toSeq
    val goldDerivs = parseTrees.map(conv.toDerivation)
    val sentences = parseTrees.map(conv.toSentenceFromLabelTree)

    val bunsetsuSentencesWithPredHead =
      new CabochaReader(sentences).readSentences(opts.cabocha.getPath)

    val bunsetsuSentencesWithGoldHead =
      bunsetsuSentencesWithPredHead zip goldDerivs map { case (sentence, deriv) =>
        BunsetsuSentence(sentence.bunsetsuSeq).parseWithCCGDerivation(deriv)
      }
    for (sentence <- bunsetsuSentencesWithGoldHead) {
      println(sentence.renderInCoNLL)
    }
  }
}