java.io.RandomAccessFile Scala Examples

The following examples show how to use java.io.RandomAccessFile in Scala. They are drawn from open-source projects; the source file, originating project, and license are listed above each example.
Example 1
Source File: PortLock.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.testing.postgresql

import java.io.RandomAccessFile
import java.nio.channels.{
  ClosedChannelException,
  FileChannel,
  FileLock,
  OverlappingFileLockException
}
import java.nio.file.{Files, Path, Paths}

import com.daml.ports.Port

private[postgresql] object PortLock {

  // We can't use `sys.props("java.io.tmpdir")` because Bazel changes this for each test run.
  // For this to be useful, it needs to be shared across concurrent runs.
  private val portLockDirectory: Path = {
    val tempDirectory =
      if (sys.props("os.name").startsWith("Windows")) {
        Paths.get(sys.props("user.home"), "AppData", "Local", "Temp")
      } else {
        Paths.get("/tmp")
      }
    tempDirectory.resolve(Paths.get("daml", "build", "postgresql-testing", "ports"))
  }

  def lock(port: Port): Either[FailedToLock, Locked] = {
    Files.createDirectories(portLockDirectory)
    val portLockFile = portLockDirectory.resolve(port.toString)
    val file = new RandomAccessFile(portLockFile.toFile, "rw")
    val channel = file.getChannel
    try {
      val lock = channel.tryLock()
      val locked = new Locked(port, lock, channel, file)
      if (lock != null) {
        Right(locked)
      } else {
        locked.unlock()
        Left(FailedToLock(port))
      }
    } catch {
      case _: OverlappingFileLockException =>
        channel.close()
        file.close()
        Left(FailedToLock(port))
    }
  }

  final class Locked(val port: Port, lock: FileLock, channel: FileChannel, file: RandomAccessFile) {
    def unlock(): Unit = {
      try {
        lock.release()
      } catch {
        // ignore
        case _: ClosedChannelException =>
      }
      channel.close()
      file.close()
    }
  }

  case class FailedToLock(port: Port) extends RuntimeException(s"Failed to lock port $port.")

} 
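As a usage sketch, PortLock.lock either returns a Locked handle that the caller must eventually release, or a FailedToLock error. The following is illustrative only and assumes Port wraps a plain port number, as in com.daml.ports:

PortLock.lock(Port(5432)) match {
  case Right(locked) =>
    try {
      // the port is now held exclusively across processes, e.g. for a test database
    } finally locked.unlock()
  case Left(failure) =>
    // another process holds the lock for this port; try a different one
    println(failure.getMessage)
}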
Example 2
Source File: DiskStore.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.storage

import java.io.{FileOutputStream, IOException, RandomAccessFile}
import java.nio.ByteBuffer
import java.nio.channels.FileChannel.MapMode

import com.google.common.io.Closeables

import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils
import org.apache.spark.util.io.ChunkedByteBuffer

private[spark] class DiskStore(conf: SparkConf, diskManager: DiskBlockManager) extends Logging {

  // Files at least this large are memory-mapped in getBytes instead of read into the heap.
  private val minMemoryMapBytes = conf.getSizeAsBytes("spark.storage.memoryMapThreshold", "2m")

  def put(blockId: BlockId)(writeFunc: FileOutputStream => Unit): Unit = {
    if (contains(blockId)) {
      throw new IllegalStateException(s"Block $blockId is already present in the disk store")
    }
    logDebug(s"Attempting to put block $blockId")
    val startTime = System.currentTimeMillis
    val file = diskManager.getFile(blockId)
    val fileOutputStream = new FileOutputStream(file)
    var threwException: Boolean = true
    try {
      writeFunc(fileOutputStream)
      threwException = false
    } finally {
      try {
        Closeables.close(fileOutputStream, threwException)
      } finally {
        if (threwException) {
          remove(blockId)
        }
      }
    }
    val finishTime = System.currentTimeMillis
    logDebug("Block %s stored as %s file on disk in %d ms".format(
      file.getName,
      Utils.bytesToString(file.length()),
      finishTime - startTime))
  }

  def putBytes(blockId: BlockId, bytes: ChunkedByteBuffer): Unit = {
    put(blockId) { fileOutputStream =>
      val channel = fileOutputStream.getChannel
      Utils.tryWithSafeFinally {
        bytes.writeFully(channel)
      } {
        channel.close()
      }
    }
  }

  def getBytes(blockId: BlockId): ChunkedByteBuffer = {
    val file = diskManager.getFile(blockId.name)
    val channel = new RandomAccessFile(file, "r").getChannel
    Utils.tryWithSafeFinally {
      // For small files, directly read rather than memory map
      if (file.length < minMemoryMapBytes) {
        val buf = ByteBuffer.allocate(file.length.toInt)
        channel.position(0)
        while (buf.remaining() != 0) {
          if (channel.read(buf) == -1) {
            throw new IOException("Reached EOF before filling buffer\n" +
              s"offset=0\nfile=${file.getAbsolutePath}\nbuf.remaining=${buf.remaining}")
          }
        }
        buf.flip()
        new ChunkedByteBuffer(buf)
      } else {
        new ChunkedByteBuffer(channel.map(MapMode.READ_ONLY, 0, file.length))
      }
    } {
      channel.close()
    }
  }

  def remove(blockId: BlockId): Boolean = {
    val file = diskManager.getFile(blockId.name)
    if (file.exists()) {
      val ret = file.delete()
      if (!ret) {
        logWarning(s"Error deleting ${file.getPath()}")
      }
      ret
    } else {
      false
    }
  }

  def contains(blockId: BlockId): Boolean = {
    val file = diskManager.getFile(blockId.name)
    file.exists()
  }
} 
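The core pattern in getBytes, reading small files into a heap buffer and memory-mapping large ones, can be sketched without the Spark helpers. The 2 MB threshold below is an assumption standing in for minMemoryMapBytes, which Spark reads from spark.storage.memoryMapThreshold:

import java.io.{IOException, RandomAccessFile}
import java.nio.ByteBuffer
import java.nio.channels.FileChannel.MapMode

def readFile(path: String, mapThreshold: Long = 2L * 1024 * 1024): ByteBuffer = {
  val channel = new RandomAccessFile(path, "r").getChannel
  try {
    val length = channel.size()
    if (length < mapThreshold) {
      // Small file: read it fully into a heap buffer.
      val buf = ByteBuffer.allocate(length.toInt)
      while (buf.remaining() != 0) {
        if (channel.read(buf) == -1)
          throw new IOException("Reached EOF before filling buffer")
      }
      buf.flip()
      buf
    } else {
      // Large file: memory-map it; the mapping stays valid after the channel is closed.
      channel.map(MapMode.READ_ONLY, 0, length)
    }
  } finally {
    channel.close()
  }
}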
Example 3
Source File: RokkuFixtures.scala    From rokku   with Apache License 2.0
package com.ing.wbaa.testkit

import java.io.{File, RandomAccessFile}

import com.amazonaws.services.s3.AmazonS3
import com.ing.wbaa.testkit.awssdk.S3SdkHelpers
import org.scalatest.Assertion

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Random, Try}

trait RokkuFixtures extends S3SdkHelpers {

  def withHomeBucket(s3Client: AmazonS3, objects: Seq[String])(testCode: String => Future[Assertion])(implicit exCtx: ExecutionContext): Future[Assertion] = {
    val testBucket = "home"
    Try(s3Client.createBucket(testBucket))
    objects.foreach(obj => s3Client.putObject(testBucket, obj, ""))
    testCode(testBucket).andThen {
      case _ =>
        cleanBucket(s3Client, testBucket)
    }
  }

  private def cleanBucket(s3Client: AmazonS3, bucketName: String) = {
    import scala.collection.JavaConverters._
    s3Client.listObjectsV2(bucketName).getObjectSummaries.asScala.toList.map(_.getKey).foreach { key =>
      s3Client.deleteObject(bucketName, key)
    }
  }
} 
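The RandomAccessFile import above belongs to a file fixture elided from this snippet. A minimal sketch of such a fixture (the name withTestFile is hypothetical) uses setLength, which grows a file to a given size without writing its contents:

def withTestFile(sizeBytes: Long)(testCode: File => Assertion): Assertion = {
  val file = File.createTempFile("rokku-test", ".bin")
  val raf = new RandomAccessFile(file, "rw")
  try raf.setLength(sizeBytes) // extend the file to the requested size
  finally raf.close()
  try testCode(file)
  finally file.delete()
}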
Example 4
Source File: FileOps.scala    From sbt-aws-lambda   with Apache License 2.0
package com.gilt.aws.lambda

import java.io.{ File, RandomAccessFile }
import java.nio.ByteBuffer

object FileOps {

  def fileToBuffer(file: File): ByteBuffer = {
    val buffer = ByteBuffer.allocate(file.length().toInt)
    val aFile = new RandomAccessFile(file, "r")
    val inChannel = aFile.getChannel()
    while (inChannel.read(buffer) > 0) {}
    inChannel.close()
    buffer.rewind()
    buffer
  }

} 
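A usage sketch (the jar path is hypothetical):

val buffer = FileOps.fileToBuffer(new File("target/lambda.jar"))

Note that rewind() suffices here only because the buffer is allocated to exactly file.length(), so the limit already equals the number of bytes read; a partially filled buffer would need flip() instead. The toInt cast also restricts this helper to files under 2 GB.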
Example 5
Source File: LogEventBroadcaster.scala    From netty-in-action-scala   with Apache License 2.0
package nia.chapter13

import io.netty.bootstrap.Bootstrap
import io.netty.channel.{ ChannelOption, EventLoopGroup }
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio.NioDatagramChannel
import java.io.File
import java.io.RandomAccessFile
import java.net.InetSocketAddress
import java.lang.{ Boolean ⇒ JBoolean }
import java.util.Objects

import scala.util.control.Breaks._


object LogEventBroadcaster {

  @throws[Exception]
  def main(args: Array[String]): Unit = {
    if (args.length != 2)
      throw new IllegalArgumentException

    // Create and start a new LogEventBroadcaster instance
    val broadcaster =
      new LogEventBroadcaster(new InetSocketAddress("255.255.255.255", args(0).toInt), new File(args(1)))

    try {
      broadcaster.run()
    } finally {
      broadcaster.stop()
    }
  }
}

class LogEventBroadcaster(address: InetSocketAddress, file: File) {
  val group: EventLoopGroup = new NioEventLoopGroup
  val bootstrap = new Bootstrap

  // Bootstrap the (connectionless) NioDatagramChannel
  bootstrap
    .group(group)
    .channel(classOf[NioDatagramChannel])
    // Set the SO_BROADCAST socket option
    .option[JBoolean](ChannelOption.SO_BROADCAST, true)
    .handler(new LogEventEncoder(address))

  @throws[Exception]
  def run(): Unit = { // Bind the channel
    val ch = bootstrap.bind(0).sync.channel
    var pointer: Long = 0
    // Start the main processing loop

    breakable {
      while (true) {
        val len = file.length
        if (len < pointer) { // file was reset
          // If necessary, reset the file pointer to the last byte of the file
          pointer = len
        } else if (len > pointer) { // Content was added
          val raf = new RandomAccessFile(file, "r")
          // Set the current file pointer so that no old logs are re-sent
          raf.seek(pointer)
          Iterator.continually(raf.readLine())
            .takeWhile(Objects.nonNull)
            .foreach { line ⇒
              ch.writeAndFlush(LogEvent(file.getAbsolutePath, line))
            }
          // Remember the current position in the file
          pointer = raf.getFilePointer
          raf.close()
        }
        try {
          // Sleep for one second; if interrupted, exit the loop, otherwise repeat
          Thread.sleep(1000)
        } catch {
          case e: InterruptedException ⇒
            Thread.interrupted
            break
        }
      }
    }
  }

  def stop(): Unit = {
    group.shutdownGracefully()
  }
} 
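The heart of run() is the classic tail-follow idiom: remember the offset after the last read and, on each pass, read only the bytes appended since. A standalone sketch with a hypothetical helper name:

import java.io.RandomAccessFile

def readNewLines(path: String, lastPointer: Long): (List[String], Long) = {
  val raf = new RandomAccessFile(path, "r")
  try {
    raf.seek(lastPointer) // skip everything already seen
    val lines = Iterator.continually(raf.readLine()).takeWhile(_ != null).toList
    (lines, raf.getFilePointer) // offset to remember for the next pass
  } finally raf.close()
}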
Example 6
Source File: ScatterGatherChannelSuite.scala    From scalaz-nio   with Apache License 2.0
package zio.nio

import java.io.{ File, RandomAccessFile }

import zio.nio.channels.{ GatheringByteChannel, ScatteringByteChannel }
import zio.{ Chunk, DefaultRuntime, IO }
import testz.{ Harness, assert }

import scala.io.Source

object ScatterGatherChannelSuite extends DefaultRuntime {

  def tests[T](harness: Harness[T]): T = {
    import harness._

    section(
      test("scattering read") { () =>
        val raf         = new RandomAccessFile("src/test/resources/scattering_read_test.txt", "r")
        val fileChannel = raf.getChannel()

        val readLine: Buffer[Byte] => IO[Exception, String] = buffer =>
          for {
            _     <- buffer.flip
            array <- buffer.array
            text  = array.takeWhile(_ != 10).map(_.toChar).mkString.trim
          } yield text

        val testProgram = for {
          buffs   <- IO.collectAll(Seq(Buffer.byte(5), Buffer.byte(5)))
          channel = new ScatteringByteChannel(fileChannel)
          _       <- channel.readBuffer(buffs)
          list    <- IO.collectAll(buffs.map(readLine))
          _       <- channel.close
        } yield list

        val t1 :: t2 :: Nil = unsafeRun(testProgram)

        assert(t1 == "Hello")
        assert(t2 == "World")
      },
      test("gathering write") { () =>
        val file        = new File("src/test/resources/gathering_write_test.txt")
        val raf         = new RandomAccessFile(file, "rw")
        val fileChannel = raf.getChannel()

        val testProgram = for {
          buffs <- IO.collectAll(
                    Seq(
                      Buffer.byte(Chunk.fromArray("Hello".getBytes)),
                      Buffer.byte(Chunk.fromArray("World".getBytes))
                    )
                  )
          channel = new GatheringByteChannel(fileChannel)
          _       <- channel.writeBuffer(buffs)
          _       <- channel.close
        } yield ()

        unsafeRun(testProgram)

        val result = Source.fromFile(file).getLines.toSeq
        file.delete()

        assert(result.size == 1)
        assert(result.head == "HelloWorld")
      }
    )
  }

} 
Example 7
Source File: DevMode.scala    From korolev   with Apache License 2.0
package korolev.internal

import java.io.{File, RandomAccessFile}
import java.nio.ByteBuffer

import levsha.impl.DiffRenderContext

private[korolev] object DevMode {

  private val DevModeKey = "korolev.dev"
  private val DevModeDirectoryKey = "korolev.dev.directory"
  private val DevModeDefaultDirectory = "target/korolev/"

  class ForRenderContext(identifier: String) {

    lazy val file = new File(DevMode.renderStateDirectory, identifier)

    lazy val saved: Boolean =
      DevMode.isActive && file.exists

    def isActive = DevMode.isActive

    def loadRenderContext() =
      if (saved) {
        val nioFile = new RandomAccessFile(file, "r")
        val channel = nioFile.getChannel
        try {
          val buffer = ByteBuffer.allocate(channel.size.toInt)
          channel.read(buffer)
          buffer.position(0)
          Some(buffer)
        } finally {
          nioFile.close()
          channel.close()
        }
      } else {
        None
      }

    def saveRenderContext(renderContext: DiffRenderContext[_]): Unit = {
      val nioFile = new RandomAccessFile(file, "rw")
      val channel = nioFile.getChannel
      try {
        val buffer = renderContext.save()
        channel.write(buffer)
        ()
      } finally {
        nioFile.close()
        channel.close()
      }
    }
  }

  val isActive = sys.env.get(DevModeKey)
    .orElse(sys.props.get(DevModeKey))
    .fold(false)(_ == "true")

  lazy val workDirectory = {
    val directoryPath = sys.env.get(DevModeDirectoryKey)
      .orElse(sys.props.get(DevModeDirectoryKey))
      .getOrElse(DevModeDefaultDirectory)

    val file = new File(directoryPath)
    if (!file.exists()) {
      file.mkdirs()
    } else if (!file.isDirectory) {
      throw new ExceptionInInitializerError(s"$directoryPath should be directory")
    }
    file
  }

  lazy val sessionsDirectory = {
    val file = new File(workDirectory, "sessions")
    file.mkdir()
    file
  }

  lazy val renderStateDirectory = {
    val file = new File(workDirectory, "render-contexts")
    file.mkdir()
    file
  }
} 
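The save/load pair relies on RandomAccessFile mode semantics: "rw" creates the file if it does not exist, while "r" requires an existing file (hence the saved guard). A minimal standalone sketch of the same round trip:

import java.io.{File, RandomAccessFile}
import java.nio.ByteBuffer

def save(file: File, buffer: ByteBuffer): Unit = {
  val raf = new RandomAccessFile(file, "rw") // creates the file if missing
  try raf.getChannel.write(buffer)
  finally raf.close()
}

def load(file: File): ByteBuffer = {
  val raf = new RandomAccessFile(file, "r")
  try {
    val buffer = ByteBuffer.allocate(raf.getChannel.size.toInt)
    raf.getChannel.read(buffer)
    buffer.position(0) // rewind so the caller reads from the start
    buffer
  } finally raf.close()
}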
Example 8
Source File: ScatterGatherChannelSpec.scala    From zio-nio   with Apache License 2.0
package zio.nio.core.channels

import java.io.{ File, RandomAccessFile }

import zio.nio.core.{ BaseSpec, Buffer }
import zio.test.Assertion._
import zio.test._
import zio.{ Chunk, IO, ZIO }

import scala.io.Source

object ScatterGatherChannelSpec extends BaseSpec {

  override def spec = suite("ScatterGatherChannelSpec")(
    testM("scattering read") {
      for {
        raf         <- ZIO.effectTotal(new RandomAccessFile("nio-core/src/test/resources/scattering_read_test.txt", "r"))
        fileChannel = raf.getChannel
        readLine = (buffer: Buffer[Byte]) =>
          for {
            _     <- buffer.flip
            array <- buffer.array
            text  = array.takeWhile(_ != 10).map(_.toChar).mkString.trim
          } yield text
        buffs   <- IO.collectAll(Seq(Buffer.byte(5), Buffer.byte(5)))
        channel = new FileChannel(fileChannel)
        _       <- channel.readBuffer(buffs)
        list    <- IO.collectAll(buffs.map(readLine))
        _       <- channel.close
      } yield assert(list)(equalTo("Hello" :: "World" :: Nil))
    },
    testM("gathering write") {
      for {
        file        <- ZIO.effect(new File("nio-core/src/test/resources/gathering_write_test.txt"))
        raf         = new RandomAccessFile(file, "rw")
        fileChannel = raf.getChannel

        buffs <- IO.collectAll(
                  Seq(
                    Buffer.byte(Chunk.fromArray("Hello".getBytes)),
                    Buffer.byte(Chunk.fromArray("World".getBytes))
                  )
                )
        channel = new FileChannel(fileChannel)
        _       <- channel.writeBuffer(buffs)
        _       <- channel.close
        result  = Source.fromFile(file).getLines().toSeq
        _       = file.delete()
      } yield assert(result)(equalTo(Seq("HelloWorld")))
    }
  )
} 
Example 9
Source File: ScatterGatherChannelSpec.scala    From zio-nio   with Apache License 2.0
package zio.nio.channels

import java.io.{ File, RandomAccessFile }

import zio.nio.core.Buffer
import zio.nio.BaseSpec
import zio.test.Assertion._
import zio.test._
import zio.{ Chunk, IO, ZIO }

import scala.io.Source

object ScatterGatherChannelSpec extends BaseSpec {

  override def spec = suite("ScatterGatherChannelSpec")(
    testM("scattering read") {
      for {
        raf         <- ZIO.effectTotal(new RandomAccessFile("nio/src/test/resources/scattering_read_test.txt", "r"))
        fileChannel = raf.getChannel
        readLine = (buffer: Buffer[Byte]) =>
          for {
            _     <- buffer.flip
            array <- buffer.array
            text  = array.takeWhile(_ != 10).map(_.toChar).mkString.trim
          } yield text
        buffs <- IO.collectAll(Seq(Buffer.byte(5), Buffer.byte(5)))
        list <- FileChannel(fileChannel).use { channel =>
                 for {
                   _    <- channel.readBuffer(buffs)
                   list <- IO.collectAll(buffs.map(readLine))
                 } yield list
               }
      } yield assert(list)(equalTo("Hello" :: "World" :: Nil))
    },
    testM("gathering write") {
      for {
        file        <- ZIO.effect(new File("nio/src/test/resources/gathering_write_test.txt"))
        raf         = new RandomAccessFile(file, "rw")
        fileChannel = raf.getChannel

        buffs <- IO.collectAll(
                  Seq(
                    Buffer.byte(Chunk.fromArray("Hello".getBytes)),
                    Buffer.byte(Chunk.fromArray("World".getBytes))
                  )
                )
        _      <- FileChannel(fileChannel).use(_.writeBuffer(buffs).unit)
        result = Source.fromFile(file).getLines().toSeq
        _      = file.delete()
      } yield assert(result)(equalTo(Seq("HelloWorld")))
    }
  )
}
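Both specs ultimately exercise plain NIO scatter/gather I/O on the channel of a RandomAccessFile; the same behavior can be shown without ZIO (the file path is hypothetical):

import java.io.RandomAccessFile
import java.nio.ByteBuffer

// Gathering write: a single call drains both buffers in sequence.
val out = new RandomAccessFile("/tmp/gather.txt", "rw").getChannel
try out.write(Array(ByteBuffer.wrap("Hello".getBytes), ByteBuffer.wrap("World".getBytes)))
finally out.close()

// Scattering read: a single call fills the buffers in order.
val in = new RandomAccessFile("/tmp/gather.txt", "r").getChannel
val bufs = Array(ByteBuffer.allocate(5), ByteBuffer.allocate(5))
try in.read(bufs)
finally in.close()
bufs.foreach(_.flip()) // each buffer now holds one five-byte word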