java.nio.file.StandardOpenOption Scala Examples

The following examples show how to use java.nio.file.StandardOpenOption. They are taken from open-source Scala projects; the project, source file, and license are listed above each example.
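Before the project examples, here is a minimal, self-contained sketch (not from any of the projects below; the file name is made up) showing the option combinations that appear most often: truncate-or-create for full rewrites, append-or-create for incremental output, and CREATE_NEW when overwriting must be an error.

import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Paths, StandardOpenOption}

object StandardOpenOptionSketch extends App {
  val path = Paths.get("example.txt") // hypothetical file name

  // Overwrite (or create) the file: CREATE + TRUNCATE_EXISTING + WRITE.
  Files.write(path, "first line\n".getBytes(StandardCharsets.UTF_8),
    StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.WRITE)

  // Append a second line, creating the file if it does not exist yet.
  Files.write(path, "second line\n".getBytes(StandardCharsets.UTF_8),
    StandardOpenOption.CREATE, StandardOpenOption.APPEND)

  // CREATE_NEW would fail with FileAlreadyExistsException because the file now exists:
  // Files.write(path, "x".getBytes, StandardOpenOption.CREATE_NEW)
}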
Example 1
Source File: FileIO.scala    From swave   with Mozilla Public License 2.0
package swave.core.io.files

import java.io.File
import java.nio.channels.FileChannel
import java.nio.file.{FileSystems, Files, Path, StandardOpenOption}

import scala.util.control.NonFatal
import com.typesafe.config.Config
import swave.core.impl.util.SettingsCompanion
import swave.core.io.Bytes
import swave.core.macros._

object FileIO extends SpoutFromFiles with DrainToFiles {

  lazy val userHomePath: Path = FileSystems.getDefault.getPath(System getProperty "user.home")

  def resolveFileSystemPath(pathName: String): Path =
    if (pathName.length >= 2 && pathName.charAt(0) == '~' && pathName.charAt(1) == File.separatorChar) {
      userHomePath.resolve(pathName substring 2)
    } else FileSystems.getDefault.getPath(pathName)

  val WriteCreateOptions: Set[StandardOpenOption] = {
    import StandardOpenOption._
    Set(CREATE, TRUNCATE_EXISTING, WRITE)
  }

  final case class Settings(defaultFileReadingChunkSize: Int, defaultFileWritingChunkSize: Int) {
    requireArg(defaultFileReadingChunkSize > 0, "`defaultFileReadingChunkSize` must be > 0")
    requireArg(defaultFileWritingChunkSize >= 0, "`defaultFileWritingChunkSize` must be >= 0")

    def withDefaultFileReadingChunkSize(defaultFileReadingChunkSize: Int) =
      copy(defaultFileReadingChunkSize = defaultFileReadingChunkSize)
    def withDefaultFileWritingChunkSize(defaultFileWritingChunkSize: Int) =
      copy(defaultFileWritingChunkSize = defaultFileWritingChunkSize)
  }

  object Settings extends SettingsCompanion[Settings]("swave.core.file-io") {
    def fromSubConfig(c: Config): Settings =
      Settings(
        defaultFileReadingChunkSize = c getInt "default-file-reading-chunk-size",
        defaultFileWritingChunkSize = c getInt "default-file-writing-chunk-size")
  }

  def writeFile[T: Bytes](fileName: String, data: T): Unit = writeFile(resolveFileSystemPath(fileName), data)
  def writeFile[T: Bytes](file: File, data: T): Unit       = writeFile(file.toPath, data)
  def writeFile[T: Bytes](path: Path, data: T, options: StandardOpenOption*): Unit = {
    implicit def decorator(value: T): Bytes.Decorator[T] = Bytes.decorator(value)
    Files.write(path, data.toArray, options: _*)
    ()
  }

  def readFile[T: Bytes](fileName: String): T = readFile(resolveFileSystemPath(fileName))
  def readFile[T: Bytes](file: File): T       = readFile(file.toPath)
  def readFile[T: Bytes](path: Path): T       = implicitly[Bytes[T]].apply(Files.readAllBytes(path))

  private[io] def quietClose(channel: FileChannel): Unit =
    try channel.close()
    catch { case NonFatal(_) ⇒ }
} 
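A side note on `WriteCreateOptions`: when `Files.write` is given no options at all, the JDK behaves as if CREATE, TRUNCATE_EXISTING and WRITE were passed, which is exactly the set defined above. A minimal sketch of that equivalence, independent of swave's `Bytes` type class (the path is made up):

import java.nio.file.{Files, Paths, StandardOpenOption}

object WriteDefaultsSketch extends App {
  val path = Paths.get("defaults.bin") // hypothetical path
  // These two calls behave identically: both create the file if needed and truncate it first.
  Files.write(path, Array[Byte](1, 2, 3))
  Files.write(path, Array[Byte](1, 2, 3),
    StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.WRITE)
}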
Example 2
Source File: StringWriter.scala    From ScalaClean   with Apache License 2.0
package org.scalaclean.analysis

import java.nio.file.{Files, Path, StandardOpenOption}

import scala.collection.mutable


class StringWriter(targetPath: Path) {
  private val target = Files.newBufferedWriter(targetPath,
    StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.WRITE, StandardOpenOption.CREATE)

  def writeLine(line: String): Boolean = {
    target.write(line)
    target.newLine()
    true
  }

  def flush(): Unit = {
    target.flush()
  }

  def close(): Unit = {
    flush()
    target.close()
  }
} 
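Note that `Files.newBufferedWriter` treats its vararg options as a set, so the order `TRUNCATE_EXISTING, WRITE, CREATE` used here is equivalent to the more common `CREATE, WRITE, TRUNCATE_EXISTING`. A hypothetical use of the class above (the output path is made up):

import java.nio.file.Paths
import org.scalaclean.analysis.StringWriter // the class defined above

object StringWriterUsage extends App {
  val writer = new StringWriter(Paths.get("elements.csv")) // hypothetical output file
  writer.writeLine("id,symbol")
  writer.writeLine("1,H")
  writer.close() // flush() is called before the underlying writer is closed
}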
Example 3
Source File: SortedStringWriter.scala    From ScalaClean   with Apache License 2.0
package org.scalaclean.analysis

import java.io.BufferedWriter
import java.nio.file.{Files, Path, StandardOpenOption}

import scala.collection.mutable

// TODO This class should not be needed - use StringWriter instead
// however ElementWriter appears to be very sensitive to write order right now
class SortedStringWriter(targetPath: Path) {
  val target: BufferedWriter = Files.newBufferedWriter(targetPath,
    StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.WRITE, StandardOpenOption.CREATE)

  private val strings: mutable.Set[String] = mutable.SortedSet[String]()

  def writeLine(s: String): Boolean = {
    strings.add(s)
  }

  def flush(): Unit = {
    strings.toVector.foreach { line =>
      target.write(line)
      target.newLine()
    }
    strings.clear()
    target.flush()
  }

  def close(): Unit = {
    flush()
    target.close()
  }
} 
Example 4
Source File: BlobLengthParsers.scala    From incubator-daffodil   with Apache License 2.0
package org.apache.daffodil.processors.parsers

import java.nio.file.Files
import java.nio.file.StandardOpenOption

import org.apache.daffodil.processors.ElementRuntimeData
import org.apache.daffodil.processors.LengthInBitsEv
import org.apache.daffodil.util.MaybeULong

sealed abstract class BlobLengthParser(override val context: ElementRuntimeData)
  extends PrimParser {

  protected def getLengthInBits(pstate: PState): Long

  override final def parse(start: PState): Unit = {
    val dis = start.dataInputStream
    val currentElement = start.simpleElement
    val nBits = getLengthInBits(start)

    val blobPath = try {
      val blobDir = start.output.getBlobDirectory
      Files.createDirectories(blobDir)
      Files.createTempFile(blobDir, start.output.getBlobPrefix, start.output.getBlobSuffix)
    } catch {
      case e: Exception => start.SDE("Unable to create blob file: ", e.getMessage)
    }
    val blobStream = Files.newOutputStream(blobPath, StandardOpenOption.WRITE)

    var remainingBitsToGet = nBits

    val array = new Array[Byte](start.tunable.blobChunkSizeInBytes)
    val blobChunkSizeInBits = start.tunable.blobChunkSizeInBytes * 8

    while (remainingBitsToGet > 0) {
      val bitsToGet = Math.min(remainingBitsToGet, blobChunkSizeInBits).toInt
      if (dis.isDefinedForLength(bitsToGet)) {
        start.dataInputStream.getByteArray(bitsToGet, start, array)
        val bytesToPut = (bitsToGet + 7) / 8
        blobStream.write(array, 0, bytesToPut)
        remainingBitsToGet -= bitsToGet
      } else {
        val remainingBits =
          if (dis.remainingBits.isDefined) {
            val totalBitsRead = nBits - remainingBitsToGet
            MaybeULong(dis.remainingBits.get + totalBitsRead)
          } else {
            MaybeULong.Nope
          }
        PENotEnoughBits(start, nBits, remainingBits)
        remainingBitsToGet = 0 // break out of the loop
      }
    }

    blobStream.close()

    if (start.isSuccess) {
      currentElement.setDataValue(blobPath.toUri)
      start.addBlobPath(blobPath)
    } else {
      Files.delete(blobPath)
    }

  }
}

final class BlobSpecifiedLengthParser(erd: ElementRuntimeData, lengthEv: LengthInBitsEv)
  extends BlobLengthParser(erd) {

  override val runtimeDependencies = Vector(lengthEv)

  override def getLengthInBits(pstate: PState): Long = {
    lengthEv.evaluate(pstate).get
  }

} 
Example 5
Source File: WriteTree.scala    From guardrail   with MIT License
package com.twilio.guardrail

import cats.data.{ Writer, WriterT }
import cats.implicits._
import java.nio.charset.StandardCharsets.UTF_8
import java.nio.file.{ Files, Path, StandardOpenOption }
import scala.io.AnsiColor
import scala.concurrent.Future
import scala.concurrent.Await
import scala.concurrent.duration.Duration

sealed trait WriteTreeState
case object FileAbsent    extends WriteTreeState
case object FileDifferent extends WriteTreeState
case object FileIdentical extends WriteTreeState
case class WriteTree(path: Path, contents: Future[Array[Byte]])
object WriteTree {
  val unsafeWriteTreeLogged: WriteTree => Writer[List[String], Path] = {
    case WriteTree(path, dataF) =>
      val _    = Files.createDirectories(path.getParent)
      val data = Await.result(dataF, Duration.Inf)
      for {
        writeState <- if (Files.exists(path)) {
          val exists: Array[Byte] = Files.readAllBytes(path)
          val diffIdx: Option[Int] =
            exists
              .zip(data)
              .zipWithIndex
              .find({ case ((a, b), _) => a != b })
              .map(_._2)
              .orElse(Some(Math.max(exists.length, data.length)))
              .filterNot(Function.const(data.length == exists.length))

          diffIdx.fold[Writer[List[String], WriteTreeState]](Writer.value(FileIdentical))({ diffIdx =>
            val existSample = new String(exists, UTF_8)
              .slice(Math.max(diffIdx - 10, diffIdx), Math.max(diffIdx - 10, diffIdx) + 50)
              .replace("\n", "\\n")
            val newSample = new String(data, UTF_8)
              .slice(Math.max(diffIdx - 10, diffIdx), Math.max(diffIdx - 10, diffIdx) + 50)
              .replace("\n", "\\n")

            Writer.tell(List(s"""|
            |${AnsiColor.RED}Warning:${AnsiColor.RESET}
            |  The file $path contained different content than was expected.
            |
            |  Existing file: $existSample
            |  New file     : $newSample
            |""".stripMargin)) >> Writer.value(FileDifferent)
          })
        } else Writer.value[List[String], WriteTreeState](FileAbsent)
      } yield writeState match {
        case FileAbsent    => Files.write(path, data, StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING)
        case FileDifferent => Files.write(path, data, StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING)
        case FileIdentical => path
      }
  }

  val unsafeWriteTree: WriteTree => Path =
    unsafeWriteTreeLogged.map {
      case WriterT((lines, path)) =>
        lines.foreach(System.err.println(_))
        path
    }
} 
Example 6
Source File: TimelineHook.scala    From tensorflow_scala   with Apache License 2.0
package org.platanios.tensorflow.api.learn.hooks

import org.platanios.tensorflow.api.core.client.{Session, Timeline}
import org.platanios.tensorflow.api.ops.{Output, UntypedOp}
import org.platanios.tensorflow.api.tensors.Tensor
import org.platanios.tensorflow.proto.RunOptions

import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

import java.nio.file.{Files, Path, StandardOpenOption}


class TimelineHook protected (
    val workingDir: Path,
    val showDataFlow: Boolean = false,
    val showMemory: Boolean = false,
    val prettyJson: Boolean = false,
    val trigger: HookTrigger = StepHookTrigger(1000),
    val triggerAtEnd: Boolean = true
) extends TriggeredHook(trigger, triggerAtEnd) {
  override protected def fetches: Seq[Output[Any]] = Seq.empty
  override protected def targets: Set[UntypedOp] = Set.empty

  override protected def runOptions: Option[RunOptions] = {
    Some(RunOptions.newBuilder().setTraceLevel(RunOptions.TraceLevel.FULL_TRACE).build())
  }

  override protected def wantMetadata: Boolean = true

  override protected def onTrigger(
      step: Long,
      elapsed: Option[(Double, Int)],
      runResult: Hook.SessionRunResult[Seq[Tensor[Any]]],
      session: Session
  ): Unit = {
    TimelineHook.logger.info("Saving timeline.")
    val file = workingDir.resolve(s"trace$step.json")
    val stepStatistics = runResult.runMetadata.get.getStepStats
    val chromeTraceJSON = Timeline.generateChromeTrace(stepStatistics, showDataFlow, showMemory, prettyJson)
    val fileWriter = Files.newBufferedWriter(file, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE)
    fileWriter.write(chromeTraceJSON)
    fileWriter.flush()
    fileWriter.close()
    TimelineHook.logger.info(s"Saved timeline to '$file'.")
  }
}

object TimelineHook {
  private[TimelineHook] val logger = Logger(LoggerFactory.getLogger("Learn / Hooks / Timeline"))

  def apply(
      workingDir: Path,
      showDataFlow: Boolean = false,
      showMemory: Boolean = false,
      prettyJson: Boolean = false,
      trigger: HookTrigger = StepHookTrigger(1000),
      triggerAtEnd: Boolean = true
  ): TimelineHook = {
    new TimelineHook(workingDir, showDataFlow, showMemory, prettyJson, trigger, triggerAtEnd)
  }
} 
Example 7
Source File: TFRecordReader.scala    From tensorflow_scala   with Apache License 2.0
package org.platanios.tensorflow.api.io

import org.platanios.tensorflow.api.core.exception.{DataLossException, OutOfRangeException}
import org.platanios.tensorflow.api.utilities.{CRC32C, Coding}
import org.platanios.tensorflow.proto.Example

import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

import java.io.BufferedInputStream
import java.nio.file.{Files, Path, StandardOpenOption}


      private[this] def readNext(): Example = {
        try {
          // Read the header data.
          val encLength = new Array[Byte](12)
          fileStream.read(encLength)
          val recordLength = Coding.decodeFixedInt64(encLength).toInt
          val encLengthMaskedCrc = CRC32C.mask(CRC32C.value(encLength.take(8)))
          if (Coding.decodeFixedInt32(encLength, offset = 8) != encLengthMaskedCrc) {
            throw DataLossException("Encountered corrupted TensorFlow record.")
          }

          // Read the data.
          val encData = new Array[Byte](recordLength + 4)
          fileStream.read(encData)
          val recordData = encData.take(recordLength)
          val encDataMaskedCrc = CRC32C.mask(CRC32C.value(encData.take(recordLength)))
          if (Coding.decodeFixedInt32(encData, offset = recordLength) != encDataMaskedCrc) {
            throw DataLossException("Encountered corrupted TensorFlow record.")
          }

          Example.parseFrom(recordData)
        } catch {
          case _: OutOfRangeException | _: DataLossException =>
            // We ignore partial read exceptions, because a record may be truncated. The record reader holds the offset
            // prior to the failed read, and so retrying will succeed.
            TFRecordReader.logger.info(s"No more TF records stored at '${filePath.toAbsolutePath}'.")
            null
        }
      }

      override def hasNext: Boolean = {
        if (nextExample == null)
          nextExample = readNext()
        nextExample != null
      }

      override def next(): Example = {
        val example = {
          if (nextExample == null)
            readNext()
          else
            nextExample
        }
        if (example != null)
          nextExample = readNext()
        example
      }
    }
  }
}

private[io] object TFRecordReader {
  private[TFRecordReader] val logger: Logger = Logger(LoggerFactory.getLogger("TF Record Reader"))
} 
Example 8
Source File: EventFileReader.scala    From tensorflow_scala   with Apache License 2.0
package org.platanios.tensorflow.api.io.events

import org.platanios.tensorflow.api.core.exception.{DataLossException, OutOfRangeException}
import org.platanios.tensorflow.api.io.{CompressionType, Loader, NoCompression}
import org.platanios.tensorflow.api.utilities.{CRC32C, Coding}
import org.platanios.tensorflow.proto.Event

import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

import java.io.BufferedInputStream
import java.nio.file.{Files, Path, StandardOpenOption}


      private[this] def readNext(): Event = {
        try {
          // Read the header data.
          val encLength = new Array[Byte](12)
          fileStream.read(encLength)
          val recordLength = Coding.decodeFixedInt64(encLength).toInt
          val encLengthMaskedCrc = CRC32C.mask(CRC32C.value(encLength.take(8)))
          if (Coding.decodeFixedInt32(encLength, offset = 8) != encLengthMaskedCrc) {
            throw DataLossException("Encountered corrupted TensorFlow record.")
          }

          // Read the data.
          val encData = new Array[Byte](recordLength + 4)
          fileStream.read(encData)
          val recordData = encData.take(recordLength)
          val encDataMaskedCrc = CRC32C.mask(CRC32C.value(encData.take(recordLength)))
          if (Coding.decodeFixedInt32(encData, offset = recordLength) != encDataMaskedCrc) {
            throw DataLossException("Encountered corrupted TensorFlow record.")
          }

          Event.parseFrom(recordData)
        } catch {
          case _: OutOfRangeException | _: DataLossException =>
            // We ignore partial read exceptions, because a record may be truncated. The record reader holds the offset
            // prior to the failed read, and so retrying will succeed.
            EventFileReader.logger.info(s"No more TF records stored at '${filePath.toAbsolutePath}'.")
            null
        }
      }

      override def hasNext: Boolean = {
        if (nextEvent == null)
          nextEvent = readNext()
        nextEvent != null
      }

      override def next(): Event = {
        val event = {
          if (nextEvent == null)
            readNext()
          else
            nextEvent
        }
        if (event != null)
          nextEvent = readNext()
        event
      }
    }
  }
}

private[io] object EventFileReader {
  private[EventFileReader] val logger: Logger = Logger(LoggerFactory.getLogger("Event File Reader"))
} 
Example 9
Source File: EventFileReaderSuite.scala    From tensorflow_scala   with Apache License 2.0
package org.platanios.tensorflow.api.io.events

import org.platanios.tensorflow.proto.Event

import org.junit.{Rule, Test}
import org.junit.rules.TemporaryFolder
import org.scalatestplus.junit.JUnitSuite

import java.nio.file.{Files, Path, StandardOpenOption}


class EventFileReaderSuite extends JUnitSuite {
  private[this] val record: Array[Byte] = Array(
    0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0xa3, 0x7f, 0x4b, 0x22, 0x09, 0x00, 0x00, 0xc0,
    0x25, 0xdd, 0x75, 0xd5, 0x41, 0x1a, 0x0d, 0x62,
    0x72, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x76, 0x65,
    0x6e, 0x74, 0x3a, 0x31, 0xec, 0xf3, 0x32, 0x8d).map(_.toByte)

  private[this] val _tempFolder: TemporaryFolder = new TemporaryFolder

  @Rule def tempFolder: TemporaryFolder = _tempFolder

  private[this] def writeToFile(filePath: Path, data: Array[Byte]): Unit = {
    Files.write(filePath, data, StandardOpenOption.APPEND)
  }

  @Test def testEmptyEventFile(): Unit = {
    val filePath = tempFolder.newFile().toPath
    writeToFile(filePath, Array.empty[Byte])
    val reader = EventFileReader(filePath)
    assert(reader.load().toSeq === Seq.empty[Event])
  }

  @Test def testSingleWrite(): Unit = {
    val filePath = tempFolder.newFile().toPath
    writeToFile(filePath, record)
    val reader = EventFileReader(filePath)
    val events = reader.load().toSeq
    assert(events.size === 1)
    assert(events.head.getWallTime === 1440183447.0)
  }

  @Test def testMultipleWrites(): Unit = {
    val filePath = tempFolder.newFile().toPath
    writeToFile(filePath, record)
    val reader = EventFileReader(filePath)
    assert(reader.load().toSeq.size === 1)
    writeToFile(filePath, record)
    assert(reader.load().toSeq.size === 1)
  }

  @Test def testMultipleLoads(): Unit = {
    val filePath = tempFolder.newFile().toPath
    writeToFile(filePath, record)
    val reader = EventFileReader(filePath)
    reader.load()
    reader.load()
    assert(reader.load().toSeq.size === 1)
  }

  @Test def testMultipleWritesAtOnce(): Unit = {
    val filePath = tempFolder.newFile().toPath
    writeToFile(filePath, record)
    writeToFile(filePath, record)
    val reader = EventFileReader(filePath)
    assert(reader.load().toSeq.size === 2)
  }

  @Test def testMultipleWritesWithBadWrite(): Unit = {
    val filePath = tempFolder.newFile().toPath
    writeToFile(filePath, record)
    writeToFile(filePath, record)
    // Test that we ignore partial record writes at the end of the file.
    writeToFile(filePath, Array(1, 2, 3).map(_.toByte))
    val reader = EventFileReader(filePath)
    assert(reader.load().toSeq.size === 2)
  }
} 
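The `writeToFile` helper above opens the file with APPEND only, which works because `TemporaryFolder.newFile()` has already created it; with APPEND alone, `Files.write` throws `NoSuchFileException` for a missing file. Standalone code usually pairs APPEND with CREATE, as in this small sketch (the file name is made up):

import java.nio.file.{Files, Paths, StandardOpenOption}

object AppendSketch extends App {
  val path = Paths.get("records.bin") // hypothetical file
  // CREATE + APPEND: the first write creates the file, later writes append to it.
  Files.write(path, Array[Byte](1, 2), StandardOpenOption.CREATE, StandardOpenOption.APPEND)
  Files.write(path, Array[Byte](3, 4), StandardOpenOption.CREATE, StandardOpenOption.APPEND)
  println(Files.readAllBytes(path).length) // 4 on a fresh run
}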
Example 10
Source File: GenerateSources.scala    From seed   with Apache License 2.0
import java.io.File
import java.nio.file.{Files, Paths, StandardOpenOption}

object GenerateSources {
  def main(args: Array[String]): Unit = {
    val modulePath = sys.env("MODULE_PATH")
    val moduleSourcePaths =
      sys.env("MODULE_SOURCE_PATHS").split(File.pathSeparatorChar)
    val generatedPath =
      Paths.get(modulePath).resolve("demo").resolve("Generated.scala")
    val pathsScala = moduleSourcePaths.mkString("\"", "\", \"", "\"")
    val output     = s"object Generated { val modulePaths = List($pathsScala) }"

    Files.write(generatedPath, output.getBytes)
  }
} 
Example 11
Source File: FileChannelSuite.scala    From scalaz-nio   with Apache License 2.0
package zio.nio

import java.nio.file.{ Files, Paths, StandardOpenOption }

import zio.{ Chunk, DefaultRuntime }
import testz.{ Harness, assert }
import zio.nio.channels.AsynchronousFileChannel

import scala.io.Source

object FileChannelSuite extends DefaultRuntime {

  def tests[T](harness: Harness[T]): T = {
    import harness._

    section(
      test("asynchronous file read") { () =>
        val path = Paths.get("src/test/resources/async_file_read_test.txt")

        val testProgram = for {
          channel <- AsynchronousFileChannel.open(path, Set(StandardOpenOption.READ))
          buffer  <- Buffer.byte(16)
          _       <- channel.readBuffer(buffer, 0)
          _       <- buffer.flip
          array   <- buffer.array
          text    = array.takeWhile(_ != 10).map(_.toChar).mkString.trim
          _       <- channel.close
        } yield text

        val result = unsafeRun(testProgram)

        assert(result == "Hello World")
      },
      test("asynchronous file write") { () =>
        val path    = Paths.get("src/test/resources/async_file_write_test.txt")
        val options = Set(StandardOpenOption.CREATE, StandardOpenOption.WRITE)

        val testProgram = for {
          channel <- AsynchronousFileChannel.open(path, options)
          buffer  <- Buffer.byte(Chunk.fromArray("Hello World".getBytes))
          _       <- channel.writeBuffer(buffer, 0)
          _       <- channel.close
        } yield ()

        unsafeRun(testProgram)

        val result = Source.fromFile(path.toFile()).getLines.toSeq
        Files.delete(path)

        assert(result.size == 1)
        assert(result.head == "Hello World")
      }
    )
  }

} 
Example 12
Source File: RecoverLog.scala    From polynote   with Apache License 2.0
package polynote

import java.nio.channels.FileChannel
import java.nio.file.{Files, Paths, StandardOpenOption}
import java.time.Instant

import cats.effect.Effect
import polynote.app.{Args, MainArgs}
import polynote.kernel.logging.Logging
import polynote.messages.{Message, Notebook, NotebookUpdate, ShortList}
import polynote.server.AppEnv
import zio.{Ref, Runtime, Task, UIO, ZIO}
import zio.ZIO.effectTotal
import zio.blocking.effectBlocking
import fs2.Stream
import polynote.server.repository.{FileBasedRepository, NotebookContent}
import polynote.server.repository.format.ipynb.IPythonFormat
import polynote.server.repository.fs.WAL
import polynote.server.taskConcurrent
import scodec.bits.ByteVector
import scodec.stream.decode
import scodec.codecs
import scodec.stream.decode.StreamDecoder

object RecoverLog {

  def replay(messages: Stream[Task, (Instant, Message)], ref: Ref[Notebook], log: Logging.Service): UIO[Unit] = messages.map(_._2).evalMap {
    case nb: Notebook => ref.set(nb)
    case upd: NotebookUpdate => ref.update {
      nb => try {
        upd.applyTo(nb)
      } catch {
        case err: Throwable =>
          log.errorSync(Some("Dropped update because an error occurred when applying it"), err)
          nb
      }
    }
    case _ => ZIO.unit
  }.compile.drain.catchAll {
    err =>
      log.error(Some("Error occurred while replaying the log; printing the final state anyway."), err)
  }

  def main(implicit ev: Effect[Task]): ZIO[AppEnv, String, Int] = for {
    args     <- ZIO.access[MainArgs](_.get[Args].rest)
    path     <- ZIO(args.head).flatMap(pathStr => effectBlocking(Paths.get(pathStr).toRealPath())).orDie
    is       <- effectBlocking(FileChannel.open(path, StandardOpenOption.READ)).orDie
    log      <- Logging.access
    _        <- Logging.info(s"Reading log entries from ${path}...")
    messages  = WAL.decoder.decodeMmap(is)
    ref      <- Ref.make(Notebook("", ShortList.Nil, None))
    _        <- replay(messages, ref, log)
    format    = new IPythonFormat
    result   <- ref.get
    encoded  <- format.encodeNotebook(NotebookContent(result.cells, result.config)).orDie
    _        <- effectTotal(println(encoded))
  } yield 0
} 
Example 13
Source File: PlanWriter.scala    From piglet   with Apache License 2.0
package dbis.piglet.tools

import java.nio.file.{Files, Path, StandardOpenOption}

import dbis.piglet.op.{PigOperator, TimingOp}
import dbis.piglet.plan.DataflowPlan
import dbis.piglet.tools.logging.PigletLogging
//import guru.nidi.graphviz.engine.{Format, Graphviz}
//import guru.nidi.graphviz.parse.Parser

import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.concurrent.duration.Duration


case class Node(id: String, var time: Option[Duration] = None, var label: String = "") {
  
  private def mkLabel = {
    val t = if(time.isDefined) s"\n${time.get.toMillis}ms (${BigDecimal(time.get.toMillis / 1000.0).setScale(2,BigDecimal.RoundingMode.HALF_UP).toDouble}s)" else ""
    val l = s"$label\n$id\n$t" 
    PlanWriter.quote(l)
  }
  
  override def toString = s"op$id ${if(label.trim().nonEmpty) s"[label=$mkLabel]" else ""}"
}

case class Edge(from: String, to: String, var label: String = "") {
  override def toString = s"op$from -> op$to ${if(label.trim().nonEmpty) s"[label=$label]" else "" }"
}


  private def writeDotFile(file: Path, graph: String): Unit = {
    logger.debug(s"writing dot file to $file")
    if(Files.notExists(file.getParent)) {
      Files.createDirectories(file.getParent)
    }
    Files.write(file, List(graph).asJava, StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING)
  }
  
  
} 
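`writeDotFile` uses the `Files.write` overload that takes an `Iterable` of `CharSequence`: each element is encoded as UTF-8 and terminated with the platform line separator. A minimal sketch of the same overload outside piglet (the output file is made up):

import java.nio.file.{Files, Paths, StandardOpenOption}
import scala.collection.JavaConverters._

object WriteLinesSketch extends App {
  val lines = List("digraph plan {", "  op1 -> op2", "}")
  Files.write(Paths.get("plan.dot"), lines.asJava, // hypothetical output file
    StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING)
}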
Example 14
Source File: ResourceUtils.scala    From sbt-lagom-descriptor-generator   with Apache License 2.0
package com.lightbend.lagom.spec

import java.io.{ File, InputStream }
import java.nio.file.{ Files, Paths, StandardOpenOption }

import scala.io.{ BufferedSource, Source }


  def writeFile(folder: File, relativeFile: File, fileContents: String): File = {
    val path = Paths.get(folder.getAbsolutePath, relativeFile.getPath)
    // `path` is the absolute path to the file, so only its parent directories need to be created
    Files.createDirectories(path.getParent)
    Files.write(
      path,
      fileContents.getBytes,
      StandardOpenOption.CREATE,
      StandardOpenOption.SYNC,
      StandardOpenOption.TRUNCATE_EXISTING
    ).toFile

  }
} 
Example 15
Source File: SwaggerSpecRunner.scala    From play-swagger   with Apache License 2.0
package com.iheart.playSwagger

import java.nio.file.{ Files, Paths, StandardOpenOption }

import play.api.libs.json.{ JsValue, Json }

import scala.util.{ Failure, Success, Try }

object SwaggerSpecRunner extends App {
  implicit def cl: ClassLoader = getClass.getClassLoader

  val targetFile :: routesFile :: domainNameSpaceArgs :: outputTransformersArgs :: swaggerV3String :: apiVersion :: swaggerPrettyJson :: namingStrategy :: Nil = args.toList
  private def fileArg = Paths.get(targetFile)
  private def swaggerJson = {
    val swaggerV3 = java.lang.Boolean.parseBoolean(swaggerV3String)
    val domainModelQualifier = PrefixDomainModelQualifier(domainNameSpaceArgs.split(","): _*)
    val transformersStrs: Seq[String] = if (outputTransformersArgs.isEmpty) Seq() else outputTransformersArgs.split(",")
    val transformers = transformersStrs.map { clazz ⇒
      Try(cl.loadClass(clazz).asSubclass(classOf[OutputTransformer]).newInstance()) match {
        case Failure(ex: ClassCastException) ⇒
          throw new IllegalArgumentException("Transformer should be a subclass of com.iheart.playSwagger.OutputTransformer:" + clazz, ex)
        case Failure(ex) ⇒ throw new IllegalArgumentException("Could not create transformer", ex)
        case Success(el) ⇒ el
      }
    }

    val swaggerSpec: JsValue = SwaggerSpecGenerator(
      NamingStrategy.from(namingStrategy),
      domainModelQualifier,
      outputTransformers = transformers,
      swaggerV3 = swaggerV3,
      apiVersion = Some(apiVersion)).generate(routesFile).get

    if (swaggerPrettyJson.toBoolean) Json.prettyPrint(swaggerSpec)
    else swaggerSpec.toString
  }

  Files.write(fileArg, swaggerJson.getBytes, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)
} 
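Unlike the CREATE + TRUNCATE_EXISTING combination used in most of the examples here, CREATE_NEW insists on creating a fresh file and fails with `FileAlreadyExistsException` if the target already exists, so the runner above refuses to silently overwrite an existing spec. A minimal sketch of the difference (the target name is made up):

import java.nio.file.{FileAlreadyExistsException, Files, Paths, StandardOpenOption}

object CreateNewSketch extends App {
  val path = Paths.get("swagger.json") // hypothetical target
  Files.write(path, "{}".getBytes, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)
  try Files.write(path, "{}".getBytes, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)
  catch { case _: FileAlreadyExistsException => println("second CREATE_NEW write was rejected") }
}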
Example 16
Source File: ClientSpec.scala    From scala-ipfs-api   with MIT License
package io.ipfs.api

import java.io.{ByteArrayOutputStream, InputStream}
import java.nio.file.{Files, Paths, StandardOpenOption}
import java.util

import io.ipfs.api.ClientSpec._
import org.specs2.mutable._

import scala.util.Random

class ClientSpec extends Specification {
  isolated

  val client = new Client("localhost")
  "IPFS client" should {

    "show the version" in  {
      client.version mustEqual "0.4.2"
    }

    "have an ID" in {
      client.id.ID.length mustNotEqual 0
    }

    "store data" in {
      val name = randomName
      val add = store(name = name)
      add.length mustEqual 1
      val added = add(0)
      added.Name mustEqual name
      added.Hash.length mustNotEqual 0
    }

    "cat data" in {
      val data = randomBytes
      val added = store(data = data)(0)

      val in: InputStream = client.cat(added.Hash)
      util.Arrays.equals(toArray(in), data) mustEqual true
    }

    "dht put and get" in {
      val (key, value) = (random.nextString(10), random.nextString(10))
      val puts: Array[DHTResponse] = client.dhtPut(key, value)
      puts.length mustNotEqual 0

      client.dhtGet(key).Extra mustEqual value
    }
  }

  private def randomBytes = {
    val buffer = new Array[Byte](0x1500)
    random.nextBytes(buffer)
    buffer
  }

  private def store(name: String = randomName, data: Array[Byte] = randomBytes): Array[Add] = {
    val storePath = Paths.get(name)
    Files.write(storePath, data, StandardOpenOption.CREATE)
    client.add(Array(storePath))
  }
}

object ClientSpec {
  val random = new Random(666)
  def randomName: String = random.nextInt()+".test.dat"

  def toArray(in: InputStream): Array[Byte] = {
    val out = new ByteArrayOutputStream()
    try {
      val buff  = new Array[Byte](0x1000)
      var nRead = 0
      while ( {nRead = in.read(buff);nRead} != -1)
        out.write(buff, 0, nRead)
    } finally {
      in.close()
    }
    out.toByteArray
  }
} 
Example 17
Source File: FileChannelSpec.scala    From zio-nio   with Apache License 2.0
package zio.nio.core.channels

import java.nio.charset.StandardCharsets
import java.nio.file.{ Files, StandardOpenOption }

import zio.nio.core.file.Path
import zio.test._
import zio.test.Assertion._
import zio.test.environment.TestEnvironment
import zio.nio.core.{ BaseSpec, Buffer }
import zio.{ Chunk, ZIO }

import scala.io.Source

object FileChannelSpec extends BaseSpec {

  override def spec = suite("FileChannelSpec")(
    testM("asynchronous file buffer read") {
      val path = Path("nio-core/src/test/resources/async_file_read_test.txt")
      for {
        channel <- AsynchronousFileChannel.open(path, StandardOpenOption.READ)
        buffer  <- Buffer.byte(16)
        _       <- channel.readBuffer(buffer, 0)
        _       <- buffer.flip
        array   <- buffer.array
        text    = array.takeWhile(_ != 10).map(_.toChar).mkString.trim
      } yield assert(text)(equalTo("Hello World"))
    },
    testM("asynchronous file chunk read") {
      val path = Path("nio-core/src/test/resources/async_file_read_test.txt")
      for {
        channel <- AsynchronousFileChannel.open(path, StandardOpenOption.READ)
        bytes   <- channel.read(500, 0L)
      } yield assert(bytes)(equalTo(Chunk.fromArray("Hello World".getBytes(StandardCharsets.UTF_8))))
    },
    testM("asynchronous file write") {
      val path = Path("nio-core/src/test/resources/async_file_write_test.txt")
      val zChannel = AsynchronousFileChannel
        .open(
          path,
          StandardOpenOption.CREATE,
          StandardOpenOption.WRITE
        )
      for {
        channel <- zChannel
        buffer  <- Buffer.byte(Chunk.fromArray("Hello World".getBytes))
        _       <- channel.writeBuffer(buffer, 0)
        path    <- ZIO.effectTotal(Path("nio-core/src/test/resources/async_file_write_test.txt"))
        result  <- ZIO.effect(Source.fromFile(path.toFile).getLines().toSeq)
        _       <- ZIO.effect(Files.delete(path.javaPath))
      } yield assert(result.size)(equalTo(1)) && assert(result.head)(equalTo("Hello World"))
    },
    testM("memory mapped buffer") {
      val path = Path("nio-core/src/test/resources/async_file_read_test.txt")
      for {
        env <- ZIO.environment[TestEnvironment]
        result <- FileChannel
                   .open(path, StandardOpenOption.READ)
                   .provide(env)
                   .bracket(_.close.ignore) { channel =>
                     for {
                       buffer <- channel.map(FileChannel.MapMode.READ_ONLY, 0L, 6L)
                       bytes  <- buffer.getChunk()
                     } yield assert(bytes)(equalTo(Chunk.fromArray("Hello ".getBytes(StandardCharsets.UTF_8))))
                   }
      } yield result
    }
  )
} 
Example 18
Source File: FileChannelSpec.scala    From zio-nio   with Apache License 2.0
package zio.nio.channels

import java.nio.charset.StandardCharsets
import java.nio.file.StandardOpenOption

import zio.{ Chunk, ZIO }
import zio.nio.BaseSpec
import zio.nio.core.Buffer
import zio.nio.core.file.Path
import zio.nio.file.Files
import zio.test._
import zio.test.Assertion._

import scala.io.Source

object FileChannelSpec extends BaseSpec {

  override def spec = suite("FileChannelSpec")(
    testM("asynchronous file buffer read") {
      val path = Path("nio/src/test/resources/async_file_read_test.txt")
      AsynchronousFileChannel
        .open(path, StandardOpenOption.READ)
        .use { channel =>
          for {
            buffer <- Buffer.byte(16)
            _      <- channel.readBuffer(buffer, 0)
            _      <- buffer.flip
            array  <- buffer.array
            text   = array.takeWhile(_ != 10).map(_.toChar).mkString.trim
          } yield assert(text)(equalTo("Hello World"))
        }
    },
    testM("asynchronous file chunk read") {
      val path = Path("nio/src/test/resources/async_file_read_test.txt")
      AsynchronousFileChannel
        .open(path, StandardOpenOption.READ)
        .use { channel =>
          for {
            bytes <- channel.read(500, 0L)
          } yield assert(bytes)(equalTo(Chunk.fromArray("Hello World".getBytes(StandardCharsets.UTF_8))))
        }
    },
    testM("asynchronous file write") {
      val path = Path("nio/src/test/resources/async_file_write_test.txt")
      AsynchronousFileChannel
        .open(
          path,
          StandardOpenOption.CREATE,
          StandardOpenOption.WRITE
        )
        .use { channel =>
          for {
            buffer <- Buffer.byte(Chunk.fromArray("Hello World".getBytes))
            _      <- channel.writeBuffer(buffer, 0)
            path   <- ZIO.effectTotal(Path("nio/src/test/resources/async_file_write_test.txt"))
            result <- ZIO.effect(Source.fromFile(path.toFile).getLines().toSeq)
            _      <- Files.delete(path)
          } yield assert(result.size)(equalTo(1)) && assert(result.head)(equalTo("Hello World"))
        }
    },
    testM("memory mapped buffer") {
      val path = Path("nio/src/test/resources/async_file_read_test.txt")
      FileChannel
        .open(path, StandardOpenOption.READ)
        .use { channel =>
          for {
            buffer <- channel.map(FileChannel.MapMode.READ_ONLY, 0L, 6L)
            bytes  <- buffer.getChunk()
          } yield assert(bytes)(equalTo(Chunk.fromArray("Hello ".getBytes(StandardCharsets.UTF_8))))
        }
    }
  )
} 
Example 19
Source File: RecentlyFilesHandler.scala    From MoVE   with Mozilla Public License 2.0
package de.thm.move.controllers

import java.nio.file.{Files, Path, StandardOpenOption}
import javafx.event.ActionEvent
import javafx.scene.control.MenuItem

import de.thm.move.Global
import de.thm.move.implicits.FxHandlerImplicits._
import de.thm.move.util.JFxUtils
import de.thm.recent._
import spray.json.JsonFormat

class RecentlyFilesHandler(recent:Recent[Path], pathClicked: Path => Unit) {

  private def menuItem(path:Path): MenuItem = {
    val item = new MenuItem(path.toString)
    JFxUtils.addFontIcon(item, "\uf1c9")
    item.setOnAction { _:ActionEvent =>
      incrementPriorityOf(path)
      println(recent.recentValuesByPriority)
      pathClicked(path)
    }
    item
  }

  def incrementPriorityOf(path:Path): Unit =
    recent.incrementPriority(path)

  def getMenuItems:Seq[MenuItem] =
    recent.recentElementsByPriority.map(menuItem)

  def writeTo(outputFile:Path)(implicit pathFormat:JsonFormat[Path]): Unit = {
    val jsonString = recent.toJson
    Files.write(outputFile, jsonString.getBytes(Global.encoding))
  }
} 
Example 20
Source File: ClassShader.scala    From sbt-idea-plugin   with Apache License 2.0
package org.jetbrains.sbtidea.packaging.artifact

import java.nio.file.{Files, Path, StandardOpenOption}

import org.jetbrains.sbtidea.packaging.PackagingKeys.ShadePattern
import org.pantsbuild.jarjar.{NiceJJProcessor, _}
import org.pantsbuild.jarjar.util.EntryStruct
import sbt.Keys.TaskStreams

class ClassShader(patterns: Seq[ShadePattern])(implicit val streams: TaskStreams) {

  private val processor = new NiceJJProcessor(patterns.map {
    case ShadePattern(pat, res) =>
      val jRule = new Rule()
      jRule.setPattern(pat)
      jRule.setResult(res)
      jRule
  })

  private val entry = new EntryStruct

  if (streams!=null)
    streams.log.info(s"Initialized shader with ${patterns.size} patterns")

  def applyShading(from: Path, to: Path)(cont: => Unit): Unit = {
    entry.data = Files.readAllBytes(from)
    entry.name = from.toString.substring(1).replace('\\', '/') // a leading '/' cannot be used in ZFS; also fixes class names produced under Windows
    entry.time = -1
    if (processor.process(entry)) {
      val newPath = to.getFileSystem.getPath(entry.name)
      val parent = newPath.getParent
      if (parent != null && !Files.exists(parent))
        Files.createDirectories(parent)
      Files.write(newPath, entry.data, StandardOpenOption.CREATE)
    }
  }

}

class NoOpClassShader() extends ClassShader(Seq())(null) {
  override def applyShading(from: Path, to: Path)(cont: => Unit): Unit = cont
} 
Example 21
Source File: ResumingEventFilter.scala    From 006877   with MIT License
package aia.stream

import java.nio.file.{ Path, Paths }
import java.nio.file.StandardOpenOption
import java.nio.file.StandardOpenOption._

import scala.concurrent.Future

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, IOResult }
import akka.util.ByteString

import spray.json._
import com.typesafe.config.{ Config, ConfigFactory }

object ResumingEventFilter extends App with EventMarshalling {
  val config = ConfigFactory.load() 
  val maxLine = config.getInt("log-stream-processor.max-line")
  
  if(args.length != 3) {
    System.err.println("Provide args: input-file output-file state")
    System.exit(1)
  }

  val inputFile = FileArg.shellExpanded(args(0))
  val outputFile = FileArg.shellExpanded(args(1))

  val filterState = args(2) match {
    case State(state) => state
    case unknown => 
      System.err.println(s"Unknown state $unknown, exiting.") 
      System.exit(1)
  }
  import akka.stream.scaladsl._

  val source: Source[ByteString, Future[IOResult]] = 
    FileIO.fromPath(inputFile)

  val sink: Sink[ByteString, Future[IOResult]] = 
    FileIO.toPath(outputFile, Set(CREATE, WRITE, APPEND))

  val frame: Flow[ByteString, String, NotUsed] =  
    Framing.delimiter(ByteString("\n"), maxLine) 
      .map(_.decodeString("UTF8"))


  import akka.stream.ActorAttributes
  import akka.stream.Supervision

  import LogStreamProcessor.LogParseException

  val decider : Supervision.Decider = {
    case _: LogParseException => Supervision.Resume
    case _                    => Supervision.Stop
  }

  val parse: Flow[String, Event, NotUsed] = 
    Flow[String].map(LogStreamProcessor.parseLineEx) 
      .collect { case Some(e) => e }
      .withAttributes(ActorAttributes.supervisionStrategy(decider))


  val filter: Flow[Event, Event, NotUsed] =   
    Flow[Event].filter(_.state == filterState)
  
  val serialize: Flow[Event, ByteString, NotUsed] =  
    Flow[Event].map(event => ByteString(event.toJson.compactPrint))

  implicit val system = ActorSystem() 
  implicit val ec = system.dispatcher


  val graphDecider : Supervision.Decider = { 
    case _: LogParseException => Supervision.Resume
    case _                    => Supervision.Stop
  }

  import akka.stream.ActorMaterializerSettings
  implicit val materializer = ActorMaterializer(
    ActorMaterializerSettings(system)
      .withSupervisionStrategy(graphDecider)
  )



  val composedFlow: Flow[ByteString, ByteString, NotUsed] =  
    frame.via(parse)
      .via(filter)
      .via(serialize)

  val runnableGraph: RunnableGraph[Future[IOResult]] = 
    source.via(composedFlow).toMat(sink)(Keep.right)

  runnableGraph.run().foreach { result =>
    println(s"Wrote ${result.count} bytes to '$outputFile'.")
    system.terminate()
  }  

} 
Example 22
Source File: ReportGenerator.scala    From scala-serialization   with MIT License
package com.komanov.serialization.converters

import java.io.{ByteArrayOutputStream, File}
import java.nio.file.{Files, StandardOpenOption}
import java.util.zip.GZIPOutputStream

import com.komanov.serialization.converters.IoUtils._


object ReportGenerator extends App {

  val flush = true

  val dir = new File(System.getProperty("user.home"), "123")
  require(!flush || dir.exists() || dir.mkdirs())

  val (raws, gzips) = (Seq.newBuilder[(String, Seq[Int])], Seq.newBuilder[(String, Seq[Int])])

  for ((converterName, converter) <- Converters.all if converter ne ScalaPbConverter if converter ne ScroogeConverter) {
    val results = Seq.newBuilder[(Int, Int)]
    for ((name, site) <- TestData.sites) {
      val bytes = converter.toByteArray(site)
      val gzipLen = getGzipByteLength(bytes)

      results += bytes.length -> gzipLen

      if (flush) {
        val normalizedConverterName = converterName.toLowerCase().replace(" ", "-")
        Files.write(dir.toPath.resolve(s"site_${name}_$normalizedConverterName.bin"), bytes, StandardOpenOption.CREATE)
      }
    }

    raws += converterName -> results.result().map(_._1)
    gzips += converterName -> results.result().map(_._2)
  }

  println("Data Sizes (raw)")
  printHeaders
  printSizes(raws.result())

  println("Data Sizes (gzip)")
  printHeaders
  printSizes(gzips.result())

  private def printHeaders: Any = {
    println("Converter," + TestData.sites.map(_._1).mkString(","))
  }

  private def printSizes(all: Seq[(String, Seq[Int])]): Unit = {
    for ((name, list) <- all) {
      println(name + "," + list.mkString(","))
    }
  }

  private def getGzipByteLength(bytes: Array[Byte]): Int = {
    using(new ByteArrayOutputStream()) { baos =>
      using(new GZIPOutputStream(baos)) { os =>
        os.write(bytes)
      }
      baos.toByteArray.length
    }
  }

} 
Example 23
Source File: Metadata.scala    From sonar-scala   with GNU Lesser General Public License v3.0
package com.mwz.sonar.scala

import java.nio.file.Paths
import java.nio.file.StandardOpenOption

import cats.data.NonEmptyChain
import cats.effect.Blocker
import cats.effect.ExitCode
import cats.effect.IO
import cats.effect.IOApp
import cats.instances.string._
import com.mwz.sonar.scala.metadata._
import com.mwz.sonar.scala.metadata.scalastyle.ScalastyleRules
import com.mwz.sonar.scala.metadata.scalastyle.ScalastyleRulesRepository
import com.mwz.sonar.scala.metadata.scapegoat.ScapegoatRules
import com.mwz.sonar.scala.metadata.scapegoat.ScapegoatRulesRepository
import fs2.Stream
import fs2.io.file._
import fs2.text
import io.circe.Printer
import io.circe.generic.JsonCodec
import io.circe.syntax._

@JsonCodec
final case class SonarScalaMetadata(
  rules: Rules,
  repositories: Map[String, RulesRepository]
)

@JsonCodec
final case class Rules(
  scalastyle: NonEmptyChain[Rule],
  scapegoat: NonEmptyChain[Rule]
)

object Metadata extends IOApp {
  private val metadata: SonarScalaMetadata =
    SonarScalaMetadata(
      rules = Rules(sort(ScalastyleRules.rules), sort(ScapegoatRules.rules)),
      repositories = Map(
        ScalastyleRulesRepository.RepositoryKey ->
        ScalastyleRulesRepository.rulesRepository
          .copy(rules = sort(ScalastyleRulesRepository.rulesRepository.rules)),
        ScapegoatRulesRepository.RepositoryKey ->
        ScapegoatRulesRepository.rulesRepository
          .copy(rules = sort(ScapegoatRulesRepository.rulesRepository.rules))
      )
    )
  private val printer: Printer =
    Printer.spaces2SortKeys.copy(
      colonLeft = "",
      lbraceLeft = "",
      rbraceRight = "",
      lbracketLeft = "",
      lrbracketsEmpty = "",
      rbracketRight = "",
      arrayCommaLeft = "",
      objectCommaLeft = ""
    )

  // Chain is missing sortBy, which should be added in 2.2.0.
  private def sort(rules: NonEmptyChain[Rule]): NonEmptyChain[Rule] =
    NonEmptyChain.fromNonEmptyList(rules.toNonEmptyList.sortBy(_.name))

  def run(args: List[String]): IO[ExitCode] = {
    val write: Stream[IO, Unit] = Stream.resource(Blocker[IO]).flatMap { blocker =>
      Stream[IO, String](metadata.asJson.printWith(printer))
        .through(text.utf8Encode)
        .through(
          writeAll(
            Paths.get("sonar-scala-metadata.json"),
            blocker,
            List(StandardOpenOption.TRUNCATE_EXISTING)
          )
        )
    }
    write.compile.drain.as(ExitCode.Success)
  }
} 
Example 24
Source File: SourcesPackageWriter.scala    From exodus   with MIT License
package com.wix.bazel.migrator

import java.nio.file.{Files, Path, StandardOpenOption}

import com.wix.bazel.migrator.model.{Package, Target}

class SourcesPackageWriter(repoRoot: Path, bazelPackages: Set[Package]) {
  def write(): Unit = {
    bazelPackages
      .flatMap(jvmTargetsAndRelativePathFromMonoRepoRoot)
      .flatMap(sourceDirAndRelativePackagePaths)
      .foreach(writeSourcesTarget)
  }

  private def jvmTargetsAndRelativePathFromMonoRepoRoot(bazelPackage: Package): Set[JvmTargetAndRelativePath] = {
    val r = bazelPackage.targets.collect {
      case jvm: Target.Jvm => (jvm, bazelPackage.relativePathFromMonoRepoRoot)
    }

    r.map(JvmTargetAndRelativePathFromMonoRepoRoot(_))
  }

  def sourceDirAndRelativePackagePaths(jvmTargetAndRelativePath: JvmTargetAndRelativePath): Set[SourceDirPathAndRelativePackagePath] = {
    val basePackagePath = repoRoot.resolve(jvmTargetAndRelativePath.relativePath)
    jvmTargetAndRelativePath.jvm.sources.map { source =>
      val sourceDirPath = basePackagePath.resolve(adjustSource(source))
      SourceDirPathAndRelativePackagePath(sourceDirPath, jvmTargetAndRelativePath.relativePath)
    }
  }

  private def writeSourcesTarget(s: SourceDirPathAndRelativePackagePath) =
    Files.write(
      s.sourceDirBuildPath,
      s.sourcesTarget.getBytes,
      StandardOpenOption.CREATE, StandardOpenOption.APPEND
    )


  private def adjustSource(source: String) = {
    if (source.startsWith("/"))
      source.drop(1)
    else
      source
  }

  private[migrator] case class SourcesTargetAndSourceDirPath(sourceDirBuildPath: Path, sourcesTarget: Array[Byte])

  private[migrator] case class JvmTargetAndRelativePath(jvm: Target.Jvm, relativePath: String)

  private[migrator] object JvmTargetAndRelativePathFromMonoRepoRoot {
    def apply(targetAndRelativePath: (Target.Jvm, String)) =
      JvmTargetAndRelativePath(targetAndRelativePath._1, targetAndRelativePath._2)
  }

}

private[migrator] case class SourceDirPathAndRelativePackagePath(sourceDirPath: Path, relativePackagePath: String){
  def sourcesTarget: String = {
    if (sourceDirPath.endsWith(relativePackagePath))
      """
        |sources()
        |""".stripMargin
    else
      s"""
         |sources(
         |    visibility = ["//$relativePackagePath:__pkg__"]
         |)
         |""".stripMargin
  }

  def sourceDirBuildPath: Path = sourceDirPath.resolve("BUILD.bazel")
} 
Example 25
Source File: BazelRcManagedDevEnvWriter.scala    From exodus   with MIT License
package com.wix.bazel.migrator

import java.nio.file.{Files, Path, StandardOpenOption}


class BazelRcManagedDevEnvWriter(repoRoot: Path, defaultOptions: List[String]) {

  private val bazelRcManagedDevEnvPath = repoRoot.resolve("tools/bazelrc/.bazelrc.managed.dev.env")

  def resetFileWithDefaultOptions(): Unit = {
    deleteIfExists()
    appendLines(defaultOptions)
  }

  def appendLine(line: String): Unit = appendLines(List(line))

  def appendLines(lines: List[String]): Unit = writeToDisk(lines.mkString("", System.lineSeparator(), System.lineSeparator()))

  private def deleteIfExists(): Unit = Files.deleteIfExists(bazelRcManagedDevEnvPath)

  private def writeToDisk(contents: String): Unit = {
    Files.createDirectories(bazelRcManagedDevEnvPath.getParent)
    Files.write(bazelRcManagedDevEnvPath, contents.getBytes, StandardOpenOption.APPEND, StandardOpenOption.CREATE)
  }

}

object BazelRcManagedDevEnvWriter {
  val defaultExodusOptions: List[String] = List(
    "# fetch",
    "fetch --experimental_multi_threaded_digest=true",
    "",
    "# query",
    "query --experimental_multi_threaded_digest=true",
    "",
    "# test",
    "test --test_tmpdir=/tmp",
    "test --test_output=errors",
    "",
    "# build",
    "build:bazel16uplocal --action_env=PLACE_HOLDER=SO_USING_CONFIG_GROUP_WILL_WORK_BW_CMPTBL",
    "build --strategy=Scalac=worker",
    "build --strict_java_deps=off",
    "build --strict_proto_deps=off",
    "build --experimental_multi_threaded_digest=true",
    "",
    "# this flag makes Bazel keep the analysis cache when test flags such as 'test_arg' (and other 'test_xxx' flags) change",
    "build --trim_test_configuration=true",
    "",
    "# the following flags serve tests but associated with the build command in order to avoid mutual analysis cache",
    "# invalidation between test commands and build commands (see https://github.com/bazelbuild/bazel/issues/7450)",
    "build --test_env=BUILD_TOOL=BAZEL",
    "build --test_env=DISPLAY",
    "build --test_env=LC_ALL=en_US.UTF-8",
  )
} 
Example 26
Source File: RowCSVWriter.scala    From maha   with Apache License 2.0
// Copyright 2017, Yahoo Holdings Inc.
// Licensed under the terms of the Apache License 2.0. Please see LICENSE file in project root for terms.
package com.yahoo.maha.report

import java.io.{BufferedWriter, File, FileOutputStream, OutputStreamWriter}
import java.nio.charset.StandardCharsets
import java.nio.file.{Files, StandardOpenOption}

  def close() {
    csvWriter.close()
  }

}

trait RowCSVWriterProvider {
  def newRowCSVWriter: RowCSVWriter
}

case class FileRowCSVWriterProvider(file: File) extends RowCSVWriterProvider {
  def newRowCSVWriter: RowCSVWriter = {
    if(file.exists() && file.length() > 0) {
      Files.write(file.toPath, Array[Byte](), StandardOpenOption.TRUNCATE_EXISTING) // Clear file
    }
    val fos = new FileOutputStream(file.getAbsoluteFile, true)
    val writerTry = safeCloseable(fos)(new OutputStreamWriter(_, StandardCharsets.UTF_8))
      .flatMap(safeCloseable(_)(new BufferedWriter(_)))
      .flatMap(safeCloseable(_)(new RowCSVWriter(_, RowCSVWriter.DEFAULT_SEPARATOR)))
    require(writerTry.isSuccess, s"Failed to create RowCSVWriter safely : $writerTry")
    writerTry.get
  }
} 
Example 27
Source File: FileUtils.scala    From skeuomorph   with Apache License 2.0
package higherkindness.skeuomorph

import java.io.{File, FileOutputStream, InputStream}
import java.nio.file.{Files, Paths, StandardOpenOption}

import cats.effect.{Resource, Sync}

object FileUtils {
  def fileHandle[F[_]: Sync](name: String): Resource[F, File] =
    Resource.make(
      Sync[F].delay(new File(name))
    )(file => Sync[F].delay(file.deleteOnExit()))

  def fileOutputStream[F[_]: Sync](file: File): Resource[F, FileOutputStream] =
    Resource.make(
      Sync[F].delay(new FileOutputStream(file))
    )(fos => Sync[F].delay(fos.close()))

  def fileInputStream[F[_]: Sync](name: String): Resource[F, InputStream] =
    Resource.make(
      Sync[F].delay(Files.newInputStream(Paths.get(name), StandardOpenOption.DELETE_ON_CLOSE))
    )(is => Sync[F].delay(is.close()))
} 
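DELETE_ON_CLOSE ties the file's lifetime to the stream: the file is removed when the `InputStream` is closed, which here happens when the `Resource` is released. A minimal sketch without cats-effect (the file name is made up):

import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Paths, StandardOpenOption}

object DeleteOnCloseSketch extends App {
  val path = Files.write(Paths.get("once.txt"), "temp".getBytes(StandardCharsets.UTF_8)) // hypothetical temp file
  val in   = Files.newInputStream(path, StandardOpenOption.DELETE_ON_CLOSE)
  try { while (in.read() != -1) () } // drain the stream
  finally in.close()                 // closing the stream also deletes once.txt
  println(Files.exists(path))        // false
}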
Example 28
Source File: FlinkGenerator.scala    From milan   with Apache License 2.0
package com.amazon.milan.compiler.flink.generator

import java.io.{ByteArrayOutputStream, OutputStream}
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Path, StandardOpenOption}

import com.amazon.milan.application.{Application, ApplicationConfiguration, ApplicationInstance}
import com.amazon.milan.compiler.flink.internal.FlinkTypeEmitter
import com.amazon.milan.lang.StreamGraph
import com.amazon.milan.program.{Cycle, StreamExpression}
import com.amazon.milan.{Id, SemanticVersion}
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory


case class GeneratorConfig(preventGenericTypeInformation: Boolean = false)


object FlinkGenerator {
  val default = new FlinkGenerator(GeneratorConfig())
}


class FlinkGenerator(classLoader: ClassLoader, generatorConfig: GeneratorConfig) {

  private val generatorTypeLifter = new FlinkTypeLifter(new FlinkTypeEmitter, this.generatorConfig.preventGenericTypeInformation)

  private val logger = Logger(LoggerFactory.getLogger(getClass))

  def this(generatorConfig: GeneratorConfig) {
    this(getClass.getClassLoader, generatorConfig)
  }

  def generateScala(graph: StreamGraph,
                    appConfig: ApplicationConfiguration,
                    packageName: String,
                    className: String): String = {
    val application = new Application(Id.newId(), graph, SemanticVersion.ZERO)
    val instance = new ApplicationInstance(Id.newId(), application, appConfig)
    this.generateScala(instance, packageName, className)
  }

  def generateScala(instance: ApplicationInstance,
                    outputPath: Path,
                    packageName: String,
                    className: String): Unit = {
    val scalaCode = this.generateScala(instance, packageName, className)
    val contents = scalaCode.getBytes(StandardCharsets.UTF_8)
    Files.write(outputPath, contents, StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING)
  }

  def generateScala(instance: ApplicationInstance,
                    packageName: String,
                    className: String): String = {
    val output = new ByteArrayOutputStream()
    this.generateScala(instance, output, packageName, className)

    output.flush()
    StandardCharsets.UTF_8.decode(ByteBuffer.wrap(output.toByteArray)).toString
  }

  def generateScala(instance: ApplicationInstance,
                    output: OutputStream,
                    packageName: String,
                    className: String): Unit = {
    val finalGraph = instance.application.graph.getDereferencedGraph
    finalGraph.typeCheckGraph()

    val outputs = new GeneratorOutputs(this.generatorTypeLifter)
    val context = GeneratorContext.createEmpty(instance.instanceDefinitionId, finalGraph, instance.config, outputs, this.generatorTypeLifter)

    // Ensure that every data stream is generated.
    finalGraph
      .getStreams
      .foreach(stream => this.ensureStreamIsGenerated(context, stream))

    // Close any cycles.
    finalGraph
      .getStreams
      .filter(_.isInstanceOf[Cycle])
      .map(_.asInstanceOf[Cycle])
      .foreach(context.closeCycle)

    // Add all sinks at the end.
    instance.config.dataSinks.foreach(sink => context.generateSink(sink))

    val generated = context.output.generateScala(packageName, className)
    output.write(generated.getBytes(StandardCharsets.UTF_8))
  }

  private def ensureStreamIsGenerated(context: GeneratorContext,
                                      stream: StreamExpression): Unit = {
    context.getOrGenerateDataStream(stream)
  }
} 
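A minimal usage sketch for the generator above, assuming a StreamGraph and ApplicationConfiguration have already been built elsewhere (the values and names below are hypothetical placeholders):

import java.nio.file.Paths

val graph: StreamGraph = ???                    // hypothetical, built with the Milan language API
val appConfig: ApplicationConfiguration = ???   // hypothetical source/sink configuration

// Produce the Flink program as Scala source text.
val scalaSource: String =
  FlinkGenerator.default.generateScala(graph, appConfig, "com.example.generated", "GeneratedFlinkApp")

// The Path-based overload takes an ApplicationInstance and writes the file directly,
// creating or truncating it as needed.
val instance = new ApplicationInstance(
  Id.newId(), new Application(Id.newId(), graph, SemanticVersion.ZERO), appConfig)
FlinkGenerator.default.generateScala(instance, Paths.get("GeneratedFlinkApp.scala"), "com.example.generated", "GeneratedFlinkApp")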
Example 29
Source File: FileSinkFunction.scala    From milan   with Apache License 2.0    5 votes
package com.amazon.milan.compiler.flink.runtime

import java.io.OutputStream
import java.nio.file.{Files, Paths, StandardOpenOption}

import com.amazon.milan.dataformats.DataOutputFormat
import org.apache.flink.streaming.api.functions.sink.SinkFunction


class FileSinkFunction[T](path: String, dataFormat: DataOutputFormat[T]) extends SinkFunction[T] {
  @transient private lazy val outputStream = this.openOutputStream()

  override def invoke(value: T, context: SinkFunction.Context[_]): Unit = {
    this.dataFormat.writeValue(value, this.outputStream)
    this.outputStream.flush()
  }

  private def openOutputStream(): OutputStream = {
    Files.newOutputStream(Paths.get(this.path), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING)
  }
} 
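A hedged usage sketch: attaching the sink to a Flink DataStream. The stream and the DataOutputFormat implementation are placeholders, not part of the original example:

import org.apache.flink.streaming.api.scala.DataStream
import com.amazon.milan.dataformats.DataOutputFormat

val lines: DataStream[String] = ???          // hypothetical stream
val format: DataOutputFormat[String] = ???   // hypothetical output format implementation

// Each record is serialized by the format and flushed to the file immediately.
lines.addSink(new FileSinkFunction[String]("/tmp/output.dat", format))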
Example 30
Source File: DbService.scala    From sns   with Apache License 2.0    5 votes
package me.snov.sns.service

import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Paths, StandardOpenOption}

import akka.event.LoggingAdapter
import me.snov.sns.model.{Configuration, Subscription, Topic}
import spray.json._

trait DbService {
  def load(): Option[Configuration]

  def save(configuration: Configuration)
}

class MemoryDbService extends DbService {
  override def load(): Option[Configuration] = {
    Some(Configuration(subscriptions = List[Subscription](), topics = List[Topic]()))
  }

  override def save(configuration: Configuration): Unit = {}
}

class FileDbService(dbFilePath: String)(implicit log: LoggingAdapter) extends DbService {

  val subscriptionsName = "subscriptions"
  val topicsName = "topics"
  
  val path = Paths.get(dbFilePath)
  
  def load(): Option[Configuration] = {
    if (Files.exists(path)) {
      log.debug("Loading DB")
      try {
        val configuration = read().parseJson.convertTo[Configuration]
        log.info("Loaded DB")
        return Some(configuration)
      } catch {
        case e: DeserializationException => log.error(e, "Unable to parse configuration")
        case e: RuntimeException => log.error(e, "Unable to load configuration")
      }
    }
    None
  }
  
  def save(configuration: Configuration) = {
    log.debug("Saving DB")
    write(configuration.toJson.prettyPrint)
  }

  private def write(contents: String) = {
    Files.write(path, contents.getBytes(StandardCharsets.UTF_8), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING)
  }

  private def read(): String = {
    new String(Files.readAllBytes(path))
  }
} 
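A small usage sketch, assuming an ActorSystem is available to provide the implicit LoggingAdapter (the system name and file path are hypothetical):

import akka.actor.ActorSystem
import akka.event.Logging

val system = ActorSystem("sns")
implicit val log = Logging(system, "db")

val db: DbService = new FileDbService("/var/lib/sns/db.json")
val loaded: Option[Configuration] = db.load()   // None if the file is missing or cannot be parsed
loaded.foreach(db.save)                         // rewrites the file with CREATE + TRUNCATE_EXISTING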
Example 31
Source File: EventsReportGenerator.scala    From scala-serialization   with MIT License    5 votes
package com.komanov.serialization.converters

import java.io.{ByteArrayOutputStream, File}
import java.nio.file.{Files, StandardOpenOption}
import java.util.zip.GZIPOutputStream

import com.komanov.serialization.converters.IoUtils._


object EventsReportGenerator extends App {

  val flush = false

  val dir = new File(new File(System.getProperty("user.home"), "123"), "events")
  require(!flush || dir.exists() || dir.mkdirs())

  val (raws, gzips, both) = (Seq.newBuilder[(String, Seq[Int])], Seq.newBuilder[(String, Seq[Int])], Seq.newBuilder[(String, Seq[Int])])

  for ((converterName, converter) <- Converters.all if converter ne ScroogeConverter if converter ne ScalaPbConverter) {
    val results = Seq.newBuilder[(Int, Int)]
    for ((name, site, events) <- TestData.all) {
      val bytes = converter.toByteArray(site)
      val gzipLen = getGzipByteLength(bytes)

      val eventsAndBytes = events.map(e => e -> converter.toByteArray(e.event))
      val eventsLen = eventsAndBytes.map(_._2.length).sum
      val eventsGzipLen = eventsAndBytes.map(_._2).map(getGzipByteLength).sum

      results += bytes.length -> gzipLen
      results += eventsLen -> eventsGzipLen

      if (flush) {
        val normalizedConverterName = converterName.toLowerCase().replace(" ", "-")
        Files.write(dir.getParentFile.toPath.resolve(s"site_${name}_$normalizedConverterName.bin"), bytes, StandardOpenOption.CREATE)
        for ((event, eventBytes) <- eventsAndBytes) {
          Files.write(dir.toPath.resolve(s"${name}_${normalizedConverterName}_${event.event.getClass.getSimpleName}.bin"), eventBytes, StandardOpenOption.CREATE)
        }
      }
    }

    raws += converterName -> results.result().map(_._1)
    gzips += converterName -> results.result().map(_._2)
    both += (converterName + " (rw)") -> results.result().map(_._1)
    both += (converterName + " (gz)") -> results.result().map(_._2)
  }

  println("Data Sizes (raw)")
  printHeaders
  printSizes(raws.result())

  println("Data Sizes (gzip)")
  printHeaders
  printSizes(gzips.result())

  println("Data Sizes")
  printHeaders
  printSizes(both.result())

  private def printHeaders: Any = {
    println("Converter," + TestData.sites.flatMap(t => Seq(t._1, "ev " + t._1)).mkString(","))
  }

  private def printSizes(all: Seq[(String, Seq[Int])]): Unit = {
    for ((name, list) <- all) {
      println(name + "," + list.mkString(","))
    }
  }

  private def getGzipByteLength(bytes: Array[Byte]): Int = {
    using(new ByteArrayOutputStream()) { baos =>
      using(new GZIPOutputStream(baos)) { os =>
        os.write(bytes)
      }
      baos.toByteArray.length
    }
  }

} 
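The interesting piece above is measuring the gzip-compressed size entirely in memory. A standalone sketch of the same idea without the IoUtils.using helper:

import java.io.ByteArrayOutputStream
import java.util.zip.GZIPOutputStream

def gzipByteLength(bytes: Array[Byte]): Int = {
  val baos = new ByteArrayOutputStream()
  val gzip = new GZIPOutputStream(baos)
  try gzip.write(bytes)
  finally gzip.close()        // closing flushes the gzip trailer into the buffer
  baos.toByteArray.length     // compressed size, including gzip header and trailer
}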
Example 32
Source File: BidiEventFilter.scala    From 006877   with MIT License    5 votes
package aia.stream

import java.nio.file.{ Path, Paths }
import java.nio.file.StandardOpenOption
import java.nio.file.StandardOpenOption._


import scala.concurrent.Future

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, IOResult }
import akka.stream.scaladsl._
import akka.stream.scaladsl.JsonFraming
import akka.util.ByteString

import spray.json._
import com.typesafe.config.{ Config, ConfigFactory }

object BidiEventFilter extends App with EventMarshalling {
  val config = ConfigFactory.load() 
  val maxLine = config.getInt("log-stream-processor.max-line")
  val maxJsonObject = config.getInt("log-stream-processor.max-json-object")

  if(args.length != 5) {
    System.err.println("Provide args: input-format output-format input-file output-file state")
    System.exit(1)
  }

  val inputFile = FileArg.shellExpanded(args(2))
  val outputFile = FileArg.shellExpanded(args(3))
  val filterState = args(4) match {
    case State(state) => state
    case unknown => 
      System.err.println(s"Unknown state $unknown, exiting.") 
      System.exit(1)
  }


  val inFlow: Flow[ByteString, Event, NotUsed] = 
    if(args(0).toLowerCase == "json") {
      JsonFraming.objectScanner(maxJsonObject)
      .map(_.decodeString("UTF8").parseJson.convertTo[Event])
    } else {
      Framing.delimiter(ByteString("\n"), maxLine)
        .map(_.decodeString("UTF8"))
        .map(LogStreamProcessor.parseLineEx)
        .collect { case Some(event) => event }
    }

  val outFlow: Flow[Event, ByteString, NotUsed] = 
    if(args(1).toLowerCase == "json") {
      Flow[Event].map(event => ByteString(event.toJson.compactPrint))
    } else {
      Flow[Event].map{ event => 
        ByteString(LogStreamProcessor.logLine(event))
      }
    }
  val bidiFlow = BidiFlow.fromFlows(inFlow, outFlow)

    
  val source: Source[ByteString, Future[IOResult]] = 
    FileIO.fromPath(inputFile)

  val sink: Sink[ByteString, Future[IOResult]] = 
    FileIO.toPath(outputFile, Set(CREATE, WRITE, APPEND))
  

  val filter: Flow[Event, Event, NotUsed] =   
    Flow[Event].filter(_.state == filterState)

  val flow = bidiFlow.join(filter)


  val runnableGraph: RunnableGraph[Future[IOResult]] = 
    source.via(flow).toMat(sink)(Keep.right)

  implicit val system = ActorSystem() 
  implicit val ec = system.dispatcher
  implicit val materializer = ActorMaterializer()

  runnableGraph.run().foreach { result =>
    println(s"Wrote ${result.count} bytes to '$outputFile'.")
    system.terminate()
  }  
} 
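The core pattern here is BidiFlow.fromFlows plus join: the first flow decodes bytes into events, the second encodes events back into bytes, and join plugs the filter in between. A minimal sketch of that shape with simplified types (String/Int stand in for ByteString/Event):

import akka.NotUsed
import akka.stream.scaladsl.{ BidiFlow, Flow }

val decode: Flow[String, Int, NotUsed] = Flow[String].map(_.trim.toInt)
val encode: Flow[Int, String, NotUsed] = Flow[Int].map(n => s"$n\n")
val codec = BidiFlow.fromFlows(decode, encode)

// `join` closes the bidi shape around the filter, yielding a Flow[String, String, NotUsed],
// exactly like `bidiFlow.join(filter)` in the example above.
val onlyEven: Flow[String, String, NotUsed] = codec.join(Flow[Int].filter(_ % 2 == 0))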
Example 33
Source File: GenerateLogFile.scala    From 006877   with MIT License    5 votes
package aia.stream

import java.nio.file.{ Path, Paths }
import java.nio.file.StandardOpenOption
import java.nio.file.StandardOpenOption._
import java.time.ZonedDateTime
import java.time.format.DateTimeFormatter

import scala.concurrent.Future

import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, IOResult }
import akka.stream.scaladsl._
import akka.util.ByteString

object GenerateLogFile extends App {
  val filePath = args(0)
  val numberOfLines = args(1).toInt
  val rnd = new java.util.Random()
  val sink = FileIO.toPath(FileArg.shellExpanded(filePath), Set(CREATE, WRITE, APPEND))
  def line(i: Int) = {
    val host = "my-host"
    val service = "my-service"
    val time = ZonedDateTime.now.format(DateTimeFormatter.ISO_INSTANT)
    val state = if( i % 10 == 0) "warning" 
      else if(i % 101 == 0) "error" 
      else if(i % 1002 == 0) "critical"
      else "ok"
    val description = "Some description of what has happened."
    val tag = "tag"
    val metric = rnd.nextDouble() * 100
    s"$host | $service | $state | $time | $description | $tag | $metric \n"
  }

  val graph = Source.fromIterator{() => 
    Iterator.tabulate(numberOfLines)(line)
  }.map(l=> ByteString(l)).toMat(sink)(Keep.right)

  implicit val system = ActorSystem() 
  implicit val ec = system.dispatcher
  implicit val materializer = ActorMaterializer()

  graph.run().foreach { result =>
    println(s"Wrote ${result.count} bytes to '$filePath'.")
    system.terminate()
  }  
} 
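Note the explicit option set passed to FileIO.toPath: by default the sink opens the file with CREATE, WRITE and TRUNCATE_EXISTING, so passing Set(CREATE, WRITE, APPEND) is what makes repeated runs append to the same log file instead of overwriting it. A short sketch of the two variants (the file name is illustrative):

import java.nio.file.Paths
import java.nio.file.StandardOpenOption._
import akka.stream.scaladsl.FileIO

val overwritingSink = FileIO.toPath(Paths.get("app.log"))                         // CREATE, WRITE, TRUNCATE_EXISTING
val appendingSink   = FileIO.toPath(Paths.get("app.log"), Set(CREATE, WRITE, APPEND))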
Example 34
Source File: NIOLogFileWriter.scala    From scribe   with MIT License    5 votes
package scribe.writer.file

import java.nio.ByteBuffer
import java.nio.channels.FileChannel
import java.nio.file.{OpenOption, StandardOpenOption}

import scala.annotation.tailrec

class NIOLogFileWriter(lf: LogFile) extends LogFileWriter {
  private lazy val options: List[OpenOption] = if (lf.append) {
    List(StandardOpenOption.WRITE, StandardOpenOption.APPEND, StandardOpenOption.CREATE)
  } else {
    List(StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE)
  }
  private lazy val channel: FileChannel = FileChannel.open(lf.path, options: _*)

  override def write(output: String): Unit = {
    val bytes = output.getBytes(lf.charset)
    val buffer = ByteBuffer.wrap(bytes)
    writeBuffer(buffer)
    buffer.clear()
  }

  @tailrec
  private def writeBuffer(buffer: ByteBuffer): Unit = if (buffer.hasRemaining) {
    channel.write(buffer)
    writeBuffer(buffer)
  }

  override def flush(): Unit = channel.force(false)

  override def dispose(): Unit = if (channel.isOpen) {
    channel.close()
  }
} 
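The same channel-level technique in isolation: open a FileChannel with either append or truncate semantics, write a buffer in a loop (a single write may be partial), then force the content to disk. This is a minimal sketch with an illustrative path, not part of the scribe API:

import java.nio.ByteBuffer
import java.nio.channels.FileChannel
import java.nio.file.{ Paths, StandardOpenOption }

val append = true
val options =
  if (append) List(StandardOpenOption.WRITE, StandardOpenOption.APPEND, StandardOpenOption.CREATE)
  else List(StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE)

val channel = FileChannel.open(Paths.get("app.log"), options: _*)
val buffer = ByteBuffer.wrap("hello\n".getBytes("UTF-8"))
while (buffer.hasRemaining) channel.write(buffer)  // loop until the whole buffer is written
channel.force(false)                               // flush data (not metadata) to disk
channel.close()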
Example 35
Source File: BarLoader.scala    From lagom   with Apache License 2.0    5 votes
package impl

import java.nio.file.Files
import java.nio.file.StandardOpenOption
import java.util.Date

import com.lightbend.lagom.scaladsl.api.ServiceLocator.NoServiceLocator
import com.lightbend.lagom.scaladsl.server._
import com.lightbend.lagom.scaladsl.devmode.LagomDevModeComponents
import play.api.libs.ws.ahc.AhcWSComponents
import api.BarService
import api.FooService
import com.softwaremill.macwire._

class BarLoader extends LagomApplicationLoader {

  override def load(context: LagomApplicationContext): LagomApplication =
    new BarApplication(context) {
      override def serviceLocator = NoServiceLocator
    }

  override def loadDevMode(context: LagomApplicationContext): LagomApplication =
    new BarApplication(context) with LagomDevModeComponents
}

abstract class BarApplication(context: LagomApplicationContext) extends LagomApplication(context) with AhcWSComponents {

  override lazy val lagomServer = serverFor[BarService](wire[BarServiceImpl])

  lazy val fooService = serviceClient.implement[FooService]

  Files.write(
    environment.getFile("target/reload.log").toPath,
    s"${new Date()} - reloaded\n".getBytes("utf-8"),
    StandardOpenOption.CREATE,
    StandardOpenOption.APPEND
  )
} 
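The Files.write call above (repeated in Examples 36 and 37 below) combines CREATE with APPEND, so the reload log is created on the first write and then grows by one line per dev-mode reload. The same behaviour in isolation, with a relative path used only for illustration:

import java.nio.charset.StandardCharsets
import java.nio.file.{ Files, Paths, StandardOpenOption }
import java.util.Date

// Creates target/reload.log if missing, otherwise appends a new line to it.
Files.write(
  Paths.get("target/reload.log"),
  s"${new Date()} - reloaded\n".getBytes(StandardCharsets.UTF_8),
  StandardOpenOption.CREATE,
  StandardOpenOption.APPEND
)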
Example 36
Source File: BazLoader.scala    From lagom   with Apache License 2.0    5 votes
package impl

import java.nio.file.Files
import java.nio.file.StandardOpenOption
import java.util.Date

import com.lightbend.lagom.scaladsl.api.ServiceLocator.NoServiceLocator
import com.lightbend.lagom.scaladsl.server._
import com.lightbend.lagom.scaladsl.devmode.LagomDevModeComponents
import play.api.libs.ws.ahc.AhcWSComponents
import api.BazService
import com.softwaremill.macwire._

class BazLoader extends LagomApplicationLoader {

  override def load(context: LagomApplicationContext): LagomApplication =
    new BazApplication(context) {
      override def serviceLocator = NoServiceLocator
    }

  override def loadDevMode(context: LagomApplicationContext): LagomApplication =
    new BazApplication(context) with LagomDevModeComponents
}

abstract class BazApplication(context: LagomApplicationContext) extends LagomApplication(context) with AhcWSComponents {

  override lazy val lagomServer = serverFor[BazService](wire[BazServiceImpl])

  Files.write(
    environment.getFile("target/reload.log").toPath,
    s"${new Date()} - reloaded\n".getBytes("utf-8"),
    StandardOpenOption.CREATE,
    StandardOpenOption.APPEND
  )
} 
Example 37
Source File: FooLoader.scala    From lagom   with Apache License 2.0    5 votes
package impl

import java.nio.file.Files
import java.nio.file.StandardOpenOption
import java.util.Date

import com.lightbend.lagom.scaladsl.api.ServiceLocator.NoServiceLocator
import com.lightbend.lagom.scaladsl.server._
import com.lightbend.lagom.scaladsl.devmode.LagomDevModeComponents
import play.api.libs.ws.ahc.AhcWSComponents
import api.FooService
import com.softwaremill.macwire._

class FooLoader extends LagomApplicationLoader {

  override def load(context: LagomApplicationContext): LagomApplication =
    new FooApplication(context) {
      override def serviceLocator = NoServiceLocator
    }

  override def loadDevMode(context: LagomApplicationContext): LagomApplication =
    new FooApplication(context) with LagomDevModeComponents
}

abstract class FooApplication(context: LagomApplicationContext) extends LagomApplication(context) with AhcWSComponents {

  override lazy val lagomServer = serverFor[FooService](wire[FooServiceImpl])

  Files.write(
    environment.getFile("target/reload.log").toPath,
    s"${new Date()} - reloaded\n".getBytes("utf-8"),
    StandardOpenOption.CREATE,
    StandardOpenOption.APPEND
  )
} 
Example 38
Source File: file.scala    From monix-nio   with Apache License 2.0    5 votes
package monix.nio

import java.nio.ByteBuffer
import java.nio.file.WatchEvent.Kind
import java.nio.file.{ Path, StandardOpenOption }

import monix.execution.Scheduler

package object file {
  def readAsync(path: Path, chunkSize: Int)(implicit s: Scheduler): AsyncFileChannelObservable = {
    require(chunkSize > 1)

    val channel = TaskFileChannel(path, StandardOpenOption.READ)
    new AsyncFileChannelObservable(channel, chunkSize)
  }

  def writeAsync(
    path: Path,
    flags: Seq[StandardOpenOption] = Seq.empty)(implicit s: Scheduler): AsyncFileChannelConsumer = {

    appendAsync(path, 0, flags)
  }

  def appendAsync(
    path: Path,
    startPosition: Long,
    flags: Seq[StandardOpenOption] = Seq.empty)(implicit s: Scheduler): AsyncFileChannelConsumer = {

    val flagsWithWriteOptions = flags :+ StandardOpenOption.WRITE :+ StandardOpenOption.CREATE
    val channel = TaskFileChannel(path, flagsWithWriteOptions: _*)
    new AsyncFileChannelConsumer(channel, startPosition)
  }

  def watchAsync(
    path: Path,
    events: Seq[Kind[_]] = Seq.empty)(implicit s: Scheduler): WatchServiceObservable = {
    val watcher = TaskWatchService(path, events: _*)
    new AsyncWatchServiceObservable(watcher)
  }

  private[file] def asyncChannelWrapper(taskFileChannel: TaskFileChannel) = new AsyncChannel {
    override val closeOnComplete: Boolean = true

    override def read(dst: ByteBuffer, position: Long) = taskFileChannel.read(dst, position)
    override def write(b: ByteBuffer, position: Long) = taskFileChannel.write(b, position)
    override def close() = taskFileChannel.close()
  }
} 
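A small copy sketch for the helpers above (paths and chunk size are illustrative). Note that writeAsync delegates to appendAsync at position 0 and always adds WRITE and CREATE but not TRUNCATE_EXISTING, so writing over a longer existing file leaves its old tail in place:

import java.nio.file.Paths
import monix.execution.Scheduler.Implicits.global
import monix.nio.file

val copyTask = file.readAsync(Paths.get("in.dat"), 8192)
  .consumeWith(file.writeAsync(Paths.get("out.dat")))

copyTask.runToFuture   // Task[Long]: number of bytes written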
Example 39
Source File: IntegrationTest.scala    From monix-nio   with Apache License 2.0    5 votes
package monix.nio.file

import java.nio.file.{ Files, Paths, StandardOpenOption }
import java.util

import minitest.SimpleTestSuite
import monix.execution.Callback
import monix.nio.file

import scala.concurrent.duration._
import scala.concurrent.{ Await, Promise }
import scala.util.control.NonFatal

object IntegrationTest extends SimpleTestSuite {
  test("same file generated") {
    implicit val ctx = monix.execution.Scheduler.Implicits.global

    val from = Paths.get(this.getClass.getResource("/testFiles/file.txt").toURI)
    val to = Paths.get("src/test/resources/out.txt")
    val consumer = file.writeAsync(to)
    val p = Promise[Boolean]()
    val callback = new Callback[Throwable, Long] {
      override def onSuccess(value: Long): Unit = p.success(true)
      override def onError(ex: Throwable): Unit = p.failure(ex)
    }

    readAsync(from, 3)
      .consumeWith(consumer)
      .runAsync(callback)

    val result = Await.result(p.future, 3.second)
    assert(result)

    val f1 = Files.readAllBytes(from)
    val f2 = Files.readAllBytes(to)
    Files.delete(to) // clean
    assert(util.Arrays.equals(f1, f2))
  }

  test("add data to existing file") {
    implicit val ctx = monix.execution.Scheduler.Implicits.global

    val from = Paths.get(this.getClass.getResource("/testFiles/file.txt").toURI)
    val to = Paths.get("src/test/resources/existing.txt")
    val strSeq = Seq("A", "\u0024", "\u00A2", "\u20AC", new String(Array(0xF0, 0x90, 0x8D, 0x88).map(_.toByte)), "B")

    try {
      Files.write(to, strSeq.flatMap(_.getBytes).toArray, StandardOpenOption.WRITE, StandardOpenOption.CREATE, StandardOpenOption.APPEND)
    } catch {
      case NonFatal(e) => fail(s"got error: $e")
    }
    val consumer = file.appendAsync(to, Files.size(to))
    val p = Promise[Boolean]()
    val callback = new Callback[Throwable, Long] {
      override def onSuccess(value: Long): Unit = p.success(true)
      override def onError(ex: Throwable): Unit = p.failure(ex)
    }

    readAsync(from, 3)
      .consumeWith(consumer)
      .runAsync(callback)

    val result = Await.result(p.future, 3.second)
    assert(result)

    val f1 = Files.readAllBytes(from)
    val f2 = Files.readAllBytes(to)
    Files.delete(to) // clean

    val all1: Seq[Byte] = strSeq.flatMap(_.getBytes) ++ f1.toSeq
    assertEquals(all1, f2.toSeq)
  }
}