org.iq80.leveldb.DB Scala Examples

The following examples show how to use org.iq80.leveldb.DB in Scala. They are drawn from open-source projects; each example's header names the project, license, and source file it comes from.
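Before the project examples, here is a minimal standalone sketch of the basic DB lifecycle — open, put, get, close — using the pure-Java Iq80DBFactory. The database path and key are illustrative, not taken from any project below.

import java.io.File
import java.nio.charset.StandardCharsets.UTF_8

import org.iq80.leveldb.{DB, Options}
import org.iq80.leveldb.impl.Iq80DBFactory

object LevelDbBasics {
  def main(args: Array[String]): Unit = {
    // Create the database directory on first use.
    val options = new Options().createIfMissing(true)
    val db: DB = Iq80DBFactory.factory.open(new File("target/example-db"), options)
    try {
      db.put("greeting".getBytes(UTF_8), "hello".getBytes(UTF_8))
      val value = db.get("greeting".getBytes(UTF_8)) // null when the key is absent
      println(Option(value).map(new String(_, UTF_8)))
    } finally db.close()
  }
}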
Example 1
Source File: CommonAssetsApi.scala    From Waves   with MIT License
package com.wavesplatform.api.common

import com.wavesplatform.account.Address
import com.wavesplatform.api.common.CommonAssetsApi.AssetInfo
import com.wavesplatform.crypto
import com.wavesplatform.database.{AddressId, KeyTags}
import com.wavesplatform.state.{AssetDescription, Blockchain, Diff, Portfolio}
import com.wavesplatform.transaction.Asset.IssuedAsset
import com.wavesplatform.transaction.assets.IssueTransaction
import monix.reactive.Observable
import org.iq80.leveldb.DB

trait CommonAssetsApi {
  def description(assetId: IssuedAsset): Option[AssetDescription]

  def fullInfo(assetId: IssuedAsset): Option[AssetInfo]

  def wavesDistribution(height: Int, after: Option[Address]): Observable[(Address, Long)]

  def assetDistribution(asset: IssuedAsset, height: Int, after: Option[Address]): Observable[(Address, Long)]
}

object CommonAssetsApi {
  final case class AssetInfo(description: AssetDescription, issueTransaction: Option[IssueTransaction], sponsorBalance: Option[Long])

  def apply(diff: => Diff, db: DB, blockchain: Blockchain): CommonAssetsApi = new CommonAssetsApi {
    def description(assetId: IssuedAsset): Option[AssetDescription] = {
      blockchain.assetDescription(assetId)
    }

    def fullInfo(assetId: IssuedAsset): Option[AssetInfo] =
      for {
        assetInfo <- blockchain.assetDescription(assetId)
        sponsorBalance = if (assetInfo.sponsorship != 0) Some(blockchain.wavesPortfolio(assetInfo.issuer.toAddress).spendableBalance) else None
      } yield AssetInfo(assetInfo, blockchain.transactionInfo(assetId.id).collect { case (_, it: IssueTransaction, true) => it }, sponsorBalance)

    override def wavesDistribution(height: Int, after: Option[Address]): Observable[(Address, Long)] =
      balanceDistribution(
        db,
        height,
        after,
        if (height == blockchain.height) diff.portfolios else Map.empty[Address, Portfolio],
        KeyTags.WavesBalance.prefixBytes,
        bs => AddressId.fromByteArray(bs.slice(2, bs.length - 4)),
        _.balance
      )

    override def assetDistribution(asset: IssuedAsset, height: Int, after: Option[Address]): Observable[(Address, Long)] =
      balanceDistribution(
        db,
        height,
        after,
        if (height == blockchain.height) diff.portfolios else Map.empty[Address, Portfolio],
        KeyTags.AssetBalance.prefixBytes ++ asset.id.arr,
        bs => AddressId.fromByteArray(bs.slice(2 + crypto.DigestLength, bs.length - 4)),
        _.assets.getOrElse(asset, 0L)
      )
  }
} 
Example 2
Source File: LeveldbNumericIdentifierStore.scala    From eventuate   with Apache License 2.0
package com.rbmhtechnology.eventuate.log.leveldb

import java.nio.ByteBuffer

import org.iq80.leveldb.{ DB, DBIterator }

private class LeveldbNumericIdentifierStore(leveldb: DB, classifier: Int) {
  private var idMap: Map[String, Int] =
    Map.empty

  private val idKeyEnd: Int =
    Int.MaxValue

  private val idKeyEndBytes: Array[Byte] =
    idKeyBytes(idKeyEnd)

  leveldb.put(idKeyEndBytes, Array.empty[Byte])

  def numericId(id: String): Int = idMap.get(id) match {
    case None    => writeIdMapping(id, idMap.size + 1)
    case Some(v) => v
  }

  def findId(numericId: Int): Option[String] =
    idMap.find { case (_, nid) => nid == numericId }.map(_._1)

  def readIdMap(iter: DBIterator): Unit = {
    iter.seek(idKeyBytes(0))
    idMap = readIdMap(Map.empty, iter)
  }

  private def readIdMap(idMap: Map[String, Int], iter: DBIterator): Map[String, Int] = {
    if (!iter.hasNext) idMap else {
      val nextEntry = iter.next()
      val nextKey = idKey(nextEntry.getKey)
      if (nextKey == idKeyEnd) idMap else {
        val nextVal = new String(nextEntry.getValue, "UTF-8")
        readIdMap(idMap + (nextVal -> nextKey), iter)
      }
    }
  }

  private def writeIdMapping(id: String, nid: Int): Int = {
    idMap = idMap + (id -> nid)
    leveldb.put(idKeyBytes(nid), id.getBytes("UTF-8"))
    nid
  }

  private def idKeyBytes(nid: Int): Array[Byte] = {
    val bb = ByteBuffer.allocate(8)
    bb.putInt(classifier)
    bb.putInt(nid)
    bb.array
  }

  private def idKey(a: Array[Byte]): Int = {
    val bb = ByteBuffer.wrap(a)
    bb.getInt
    bb.getInt
  }
} 
Example 3
Source File: LeveldbDeletionMetadataStore.scala    From eventuate   with Apache License 2.0
package com.rbmhtechnology.eventuate.log.leveldb

import java.nio.ByteBuffer

import com.rbmhtechnology.eventuate.log.DeletionMetadata
import com.rbmhtechnology.eventuate.log.leveldb.LeveldbEventLog.WithBatch
import com.rbmhtechnology.eventuate.log.leveldb.LeveldbEventLog.longBytes
import org.iq80.leveldb.DB
import org.iq80.leveldb.WriteOptions

private class LeveldbDeletionMetadataStore(val leveldb: DB, val leveldbWriteOptions: WriteOptions, classifier: Int) extends WithBatch {
  private val DeletedToSequenceNrKey: Int = 1
  private val RemoteLogIdsKey: Int = 2

  private val StringSetSeparatorChar = '\u0000'

  def writeDeletionMetadata(info: DeletionMetadata): Unit = withBatch { batch =>
    batch.put(idKeyBytes(DeletedToSequenceNrKey), longBytes(info.toSequenceNr))
    batch.put(idKeyBytes(RemoteLogIdsKey), stringSetBytes(info.remoteLogIds))
  }

  def readDeletionMetadata(): DeletionMetadata = {
    val toSequenceNr = longFromBytes(leveldb.get(idKeyBytes(DeletedToSequenceNrKey)))
    val remoteLogIds = stringSetFromBytes(leveldb.get(idKeyBytes(RemoteLogIdsKey)))
    DeletionMetadata(toSequenceNr, remoteLogIds)
  }

  private def idKeyBytes(key: Int): Array[Byte] = {
    val bb = ByteBuffer.allocate(8)
    bb.putInt(classifier)
    bb.putInt(key)
    bb.array
  }

  private def longFromBytes(longBytes: Array[Byte]): Long =
    if (longBytes == null) 0 else LeveldbEventLog.longFromBytes(longBytes)

  private def stringSetBytes(set: Set[String]): Array[Byte] =
    set.mkString(StringSetSeparatorChar.toString).getBytes("UTF-8")

  private def stringSetFromBytes(setBytes: Array[Byte]): Set[String] =
    if (setBytes == null || setBytes.length == 0)
      Set.empty
    else
      new String(setBytes, "UTF-8").split(StringSetSeparatorChar).toSet
} 
Example 4
Source File: LeveldbReplicationProgressStore.scala    From eventuate   with Apache License 2.0
package com.rbmhtechnology.eventuate.log.leveldb

import java.nio.ByteBuffer

import org.iq80.leveldb.{ DB, DBIterator, WriteBatch }

private class LeveldbReplicationProgressStore(leveldb: DB, classifier: Int, numericId: String => Int, findId: Int => Option[String]) {
  private val rpKeyEnd: Int =
    Int.MaxValue

  private val rpKeyEndBytes: Array[Byte] =
    rpKeyBytes(rpKeyEnd)

  leveldb.put(rpKeyEndBytes, Array.empty[Byte])

  def writeReplicationProgress(logId: String, logSnr: Long, batch: WriteBatch): Unit = {
    val nid = numericId(logId)
    batch.put(rpKeyBytes(nid), LeveldbEventLog.longBytes(logSnr))
  }

  def readReplicationProgress(logId: String): Long = {
    val nid = numericId(logId)
    val progress = leveldb.get(rpKeyBytes(nid))
    if (progress == null) 0L else LeveldbEventLog.longFromBytes(progress)
  }

  def readReplicationProgresses(iter: DBIterator): Map[String, Long] = {
    iter.seek(rpKeyBytes(0))
    readReplicationProgresses(Map.empty, iter).foldLeft(Map.empty[String, Long]) {
      case (acc, (nid, progress)) => findId(nid) match {
        case Some(id) => acc + (id -> progress)
        case None     => acc
      }
    }
  }

  private def readReplicationProgresses(rpMap: Map[Int, Long], iter: DBIterator): Map[Int, Long] = {
    if (!iter.hasNext) rpMap else {
      val nextEntry = iter.next()
      val nextKey = rpKey(nextEntry.getKey)
      if (nextKey == rpKeyEnd) rpMap else {
        val nextVal = LeveldbEventLog.longFromBytes(nextEntry.getValue)
        readReplicationProgresses(rpMap + (nextKey -> nextVal), iter)
      }
    }
  }

  private def rpKeyBytes(nid: Int): Array[Byte] = {
    val bb = ByteBuffer.allocate(8)
    bb.putInt(classifier)
    bb.putInt(nid)
    bb.array
  }

  private def rpKey(a: Array[Byte]): Int = {
    val bb = ByteBuffer.wrap(a)
    bb.getInt
    bb.getInt
  }
} 
Example 5
Source File: LeveldbDeletionActor.scala    From eventuate   with Apache License 2.0
package com.rbmhtechnology.eventuate.log.leveldb

import java.io.Closeable

import akka.actor.Actor
import akka.actor.PoisonPill
import akka.actor.Props
import com.rbmhtechnology.eventuate.log.leveldb.LeveldbEventLog._

import org.iq80.leveldb.DB
import org.iq80.leveldb.ReadOptions
import org.iq80.leveldb.WriteOptions

import scala.annotation.tailrec
import scala.concurrent.Promise

private object LeveldbDeletionActor {
  case object DeleteBatch

  def props(leveldb: DB, leveldbReadOptions: ReadOptions, leveldbWriteOptions: WriteOptions, batchSize: Int, toSequenceNr: Long, promise: Promise[Unit]): Props =
    Props(new LeveldbDeletionActor(leveldb, leveldbReadOptions, leveldbWriteOptions, batchSize, toSequenceNr, promise))
}

private class LeveldbDeletionActor(
  val leveldb: DB,
  val leveldbReadOptions: ReadOptions,
  val leveldbWriteOptions: WriteOptions,
  batchSize: Int,
  toSequenceNr: Long,
  promise: Promise[Unit])
  extends Actor with WithBatch {

  import LeveldbDeletionActor._

  val eventKeyIterator: CloseableIterator[EventKey] = newEventKeyIterator

  override def preStart() = self ! DeleteBatch

  override def postStop() = eventKeyIterator.close()

  override def receive = {
    case DeleteBatch =>
      withBatch { batch =>
        eventKeyIterator.take(batchSize).foreach { eventKey =>
          batch.delete(eventKeyBytes(eventKey.classifier, eventKey.sequenceNr))
        }
      }
      if (eventKeyIterator.hasNext) {
        self ! DeleteBatch
      } else {
        promise.success(())
        self ! PoisonPill
      }
  }

  private def newEventKeyIterator: CloseableIterator[EventKey] = {
    new Iterator[EventKey] with Closeable {
      val iterator = leveldb.iterator(leveldbReadOptions.snapshot(leveldb.getSnapshot))
      iterator.seek(eventKeyBytes(EventKey.DefaultClassifier, 1L))

      @tailrec
      override def hasNext: Boolean = {
        val key = eventKey(iterator.peekNext().getKey)
        key != eventKeyEnd &&
          (key.sequenceNr <= toSequenceNr || {
            iterator.seek(eventKeyBytes(key.classifier + 1, 1L))
            hasNext
          })
      }

      override def next() = eventKey(iterator.next().getKey)
      override def close() = {
        iterator.close()
        leveldbReadOptions.snapshot().close()
      }
    }
  }
} 
Example 6
Source File: ShuffleTestAccessor.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark.network.shuffle

import java.io.File
import java.util.concurrent.ConcurrentMap

import org.apache.hadoop.yarn.api.records.ApplicationId
import org.fusesource.leveldbjni.JniDBFactory
import org.iq80.leveldb.{DB, Options}

import org.apache.spark.network.shuffle.ExternalShuffleBlockResolver.AppExecId
import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo


object ShuffleTestAccessor {

  def getBlockResolver(handler: ExternalShuffleBlockHandler): ExternalShuffleBlockResolver = {
    handler.blockManager
  }

  def getExecutorInfo(
      appId: ApplicationId,
      execId: String,
      resolver: ExternalShuffleBlockResolver
  ): Option[ExecutorShuffleInfo] = {
    val id = new AppExecId(appId.toString, execId)
    Option(resolver.executors.get(id))
  }

  def registeredExecutorFile(resolver: ExternalShuffleBlockResolver): File = {
    resolver.registeredExecutorFile
  }

  def shuffleServiceLevelDB(resolver: ExternalShuffleBlockResolver): DB = {
    resolver.db
  }

  def reloadRegisteredExecutors(
    file: File): ConcurrentMap[ExternalShuffleBlockResolver.AppExecId, ExecutorShuffleInfo] = {
    val options: Options = new Options
    options.createIfMissing(true)
    val factory = new JniDBFactory
    val db = factory.open(file, options)
    val result = ExternalShuffleBlockResolver.reloadRegisteredExecutors(db)
    db.close()
    result
  }

  def reloadRegisteredExecutors(
      db: DB): ConcurrentMap[ExternalShuffleBlockResolver.AppExecId, ExecutorShuffleInfo] = {
    ExternalShuffleBlockResolver.reloadRegisteredExecutors(db)
  }
} 
Example 7
Source File: ShuffleTestAccessor.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.network.shuffle

import java.io.File
import java.util.concurrent.ConcurrentMap

import org.apache.hadoop.yarn.api.records.ApplicationId
import org.fusesource.leveldbjni.JniDBFactory
import org.iq80.leveldb.{DB, Options}

import org.apache.spark.network.shuffle.ExternalShuffleBlockResolver.AppExecId
import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo


object ShuffleTestAccessor {

  def getBlockResolver(handler: ExternalShuffleBlockHandler): ExternalShuffleBlockResolver = {
    handler.blockManager
  }

  def getExecutorInfo(
      appId: ApplicationId,
      execId: String,
      resolver: ExternalShuffleBlockResolver
  ): Option[ExecutorShuffleInfo] = {
    val id = new AppExecId(appId.toString, execId)
    Option(resolver.executors.get(id))
  }

  def registeredExecutorFile(resolver: ExternalShuffleBlockResolver): File = {
    resolver.registeredExecutorFile
  }

  def shuffleServiceLevelDB(resolver: ExternalShuffleBlockResolver): DB = {
    resolver.db
  }

  def reloadRegisteredExecutors(
    file: File): ConcurrentMap[ExternalShuffleBlockResolver.AppExecId, ExecutorShuffleInfo] = {
    val options: Options = new Options
    options.createIfMissing(true)
    val factory = new JniDBFactory
    val db = factory.open(file, options)
    val result = ExternalShuffleBlockResolver.reloadRegisteredExecutors(db)
    db.close()
    result
  }

  def reloadRegisteredExecutors(
      db: DB): ConcurrentMap[ExternalShuffleBlockResolver.AppExecId, ExecutorShuffleInfo] = {
    ExternalShuffleBlockResolver.reloadRegisteredExecutors(db)
  }
} 
Example 8
Source File: ShuffleTestAccessor.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.network.shuffle

import java.io.File
import java.util.concurrent.ConcurrentMap

import org.apache.hadoop.yarn.api.records.ApplicationId
import org.fusesource.leveldbjni.JniDBFactory
import org.iq80.leveldb.{DB, Options}

import org.apache.spark.network.shuffle.ExternalShuffleBlockResolver.AppExecId
import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo


object ShuffleTestAccessor {

  def getBlockResolver(handler: ExternalShuffleBlockHandler): ExternalShuffleBlockResolver = {
    handler.blockManager
  }

  def getExecutorInfo(
      appId: ApplicationId,
      execId: String,
      resolver: ExternalShuffleBlockResolver
  ): Option[ExecutorShuffleInfo] = {
    val id = new AppExecId(appId.toString, execId)
    Option(resolver.executors.get(id))
  }

  def registeredExecutorFile(resolver: ExternalShuffleBlockResolver): File = {
    resolver.registeredExecutorFile
  }

  def shuffleServiceLevelDB(resolver: ExternalShuffleBlockResolver): DB = {
    resolver.db
  }

  def reloadRegisteredExecutors(
    file: File): ConcurrentMap[ExternalShuffleBlockResolver.AppExecId, ExecutorShuffleInfo] = {
    val options: Options = new Options
    options.createIfMissing(true)
    val factory = new JniDBFactory
    val db = factory.open(file, options)
    val result = ExternalShuffleBlockResolver.reloadRegisteredExecutors(db)
    db.close()
    result
  }

  def reloadRegisteredExecutors(
      db: DB): ConcurrentMap[ExternalShuffleBlockResolver.AppExecId, ExecutorShuffleInfo] = {
    ExternalShuffleBlockResolver.reloadRegisteredExecutors(db)
  }
} 
Example 9
Source File: ShuffleTestAccessor.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.network.shuffle

import java.io.{IOException, File}
import java.util.concurrent.ConcurrentMap

import org.apache.hadoop.yarn.api.records.ApplicationId
import org.fusesource.leveldbjni.JniDBFactory
import org.iq80.leveldb.{DB, Options}

import org.apache.spark.network.shuffle.ExternalShuffleBlockResolver.AppExecId
import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo


object ShuffleTestAccessor {

  def getBlockResolver(handler: ExternalShuffleBlockHandler): ExternalShuffleBlockResolver = {
    handler.blockManager
  }

  def getExecutorInfo(
      appId: ApplicationId,
      execId: String,
      resolver: ExternalShuffleBlockResolver
  ): Option[ExecutorShuffleInfo] = {
    val id = new AppExecId(appId.toString, execId)
    Option(resolver.executors.get(id))
  }

  def registeredExecutorFile(resolver: ExternalShuffleBlockResolver): File = {
    resolver.registeredExecutorFile
  }

  def shuffleServiceLevelDB(resolver: ExternalShuffleBlockResolver): DB = {
    resolver.db
  }

  def reloadRegisteredExecutors(
    file: File): ConcurrentMap[ExternalShuffleBlockResolver.AppExecId, ExecutorShuffleInfo] = {
    val options: Options = new Options
    options.createIfMissing(true)
    val factory = new JniDBFactory
    val db = factory.open(file, options)
    val result = ExternalShuffleBlockResolver.reloadRegisteredExecutors(db)
    db.close()
    result
  }

  def reloadRegisteredExecutors(
      db: DB): ConcurrentMap[ExternalShuffleBlockResolver.AppExecId, ExecutorShuffleInfo] = {
    ExternalShuffleBlockResolver.reloadRegisteredExecutors(db)
  }
} 
Example 10
Source File: DBState.scala    From Waves   with MIT License
package com.wavesplatform.state

import java.io.File

import com.wavesplatform.Application
import com.wavesplatform.account.AddressScheme
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.database.{LevelDBWriter, openDB}
import com.wavesplatform.lang.directives.DirectiveSet
import com.wavesplatform.settings.WavesSettings
import com.wavesplatform.transaction.smart.WavesEnvironment
import com.wavesplatform.utils.ScorexLogging
import monix.eval.Coeval
import org.iq80.leveldb.DB
import org.openjdk.jmh.annotations.{Param, Scope, State, TearDown}

@State(Scope.Benchmark)
abstract class DBState extends ScorexLogging {
  @Param(Array("waves.conf"))
  var configFile = ""

  lazy val settings: WavesSettings = Application.loadApplicationConfig(Some(new File(configFile)).filter(_.exists()))

  lazy val db: DB = openDB(settings.dbSettings.directory)

  lazy val levelDBWriter: LevelDBWriter =
    LevelDBWriter.readOnly(
      db,
      settings.copy(dbSettings = settings.dbSettings.copy(maxCacheSize = 1))
    )

  AddressScheme.current = new AddressScheme { override val chainId: Byte = 'W' }

  lazy val environment = new WavesEnvironment(
    AddressScheme.current.chainId,
    Coeval.raiseError(new NotImplementedError("`tx` is not implemented")),
    Coeval(levelDBWriter.height),
    levelDBWriter,
    null,
    DirectiveSet.contractDirectiveSet,
    ByteStr.empty
  )

  @TearDown
  def close(): Unit = {
    db.close()
  }
} 
Example 11
Source File: BalanceDistribution.scala    From Waves   with MIT License
package com.wavesplatform.api.common

import com.google.common.collect.AbstractIterator
import com.google.common.primitives.{Ints, Longs}
import com.wavesplatform.account.Address
import com.wavesplatform.database.{AddressId, DBExt, DBResource, Keys}
import com.wavesplatform.state.Portfolio
import com.wavesplatform.state.Portfolio.longSemigroup
import monix.eval.Task
import monix.reactive.Observable
import org.iq80.leveldb.DB

import scala.annotation.tailrec
import scala.jdk.CollectionConverters._

trait BalanceDistribution {
  import BalanceDistribution._
  def balanceDistribution(
      db: DB,
      height: Int,
      after: Option[Address],
      overrides: Map[Address, Portfolio],
      globalPrefix: Array[Byte],
      addressId: Array[Byte] => AddressId,
      balanceOf: Portfolio => Long
  ): Observable[(Address, Long)] =
    db.resourceObservable
      .flatMap { resource =>
        resource.iterator.seek(
          globalPrefix ++ after.flatMap(address => resource.get(Keys.addressId(address))).fold(Array.emptyByteArray)(id => Longs.toByteArray(id.toLong + 1))
        )
        Observable.fromIterator(Task(new BalanceIterator(resource, globalPrefix, addressId, balanceOf, height, overrides).asScala.filter(_._2 > 0)))
      }

}

object BalanceDistribution {
  class BalanceIterator(
      resource: DBResource,
      globalPrefix: Array[Byte],
      addressId: Array[Byte] => AddressId,
      balanceOf: Portfolio => Long,
      height: Int,
      private var pendingPortfolios: Map[Address, Portfolio]
  ) extends AbstractIterator[(Address, Long)] {
    @inline
    private def stillSameAddress(expected: AddressId): Boolean = resource.iterator.hasNext && {
      val maybeNext = resource.iterator.peekNext().getKey
      maybeNext.startsWith(globalPrefix) && addressId(maybeNext) == expected
    }
    @tailrec
    private def findNextBalance(): Option[(Address, Long)] = {
      if (!resource.iterator.hasNext) None
      else {
        val current = resource.iterator.next()
        if (!current.getKey.startsWith(globalPrefix)) None
        else {
          val aid           = addressId(current.getKey)
          val address       = resource.get(Keys.idToAddress(aid))
          var balance       = Longs.fromByteArray(current.getValue)
          var currentHeight = Ints.fromByteArray(current.getKey.takeRight(4))

          while (stillSameAddress(aid)) {
            val next       = resource.iterator.next()
            val nextHeight = Ints.fromByteArray(next.getKey.takeRight(4))
            if (nextHeight <= height) {
              currentHeight = nextHeight
              balance = Longs.fromByteArray(next.getValue)
            }
          }

          pendingPortfolios -= address
          val adjustedBalance = longSemigroup.combine(balance, pendingPortfolios.get(address).fold(0L)(balanceOf))

          if (currentHeight <= height && adjustedBalance > 0) Some(address -> adjustedBalance)
          else findNextBalance()
        }
      }
    }

    override def computeNext(): (Address, Long) = findNextBalance() match {
      case Some(balance) => balance
      case None =>
        if (pendingPortfolios.nonEmpty) {
          val (address, portfolio) = pendingPortfolios.head
          pendingPortfolios -= address
          address -> balanceOf(portfolio)
        } else {
          endOfData()
        }
    }
  }
} 
Example 12
Source File: SnapshotAssemblerBench.scala    From EncryCore   with GNU General Public License v3.0
package benches

import java.io.File
import java.util.concurrent.TimeUnit

import benches.SnapshotAssemblerBench.SnapshotAssemblerBenchState
import encry.view.state.avlTree.utils.implicits.Instances._
import benches.StateBenches.{StateBenchState, benchSettings}
import benches.Utils.{getRandomTempDir, utxoFromBoxHolder}
import encry.settings.Settings
import encry.storage.{RootNodesStorage, VersionalStorage}
import encry.storage.VersionalStorage.{StorageKey, StorageValue, StorageVersion}
import encry.storage.levelDb.versionalLevelDB.{LevelDbFactory, VLDBWrapper, VersionalLevelDBCompanion}
import encry.utils.FileHelper
import encry.view.fast.sync.SnapshotHolder
import encry.view.state.UtxoState
import encry.view.state.avlTree.AvlTree
import org.encryfoundation.common.utils.TaggedTypes.Height
import org.iq80.leveldb.{DB, Options}
import org.openjdk.jmh.annotations.{Benchmark, Mode, Scope, State}
import org.openjdk.jmh.infra.Blackhole
import org.openjdk.jmh.profile.GCProfiler
import org.openjdk.jmh.runner.{Runner, RunnerException}
import org.openjdk.jmh.runner.options.{OptionsBuilder, TimeValue, VerboseMode}
import scorex.utils.Random

class SnapshotAssemblerBench {

  
  @Benchmark
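  // NOTE: the measured body below is commented out, so this benchmark currently
  // exercises only the JMH harness and Blackhole overhead.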
  def createTree(stateBench: SnapshotAssemblerBenchState, bh: Blackhole): Unit = {
    bh.consume {
      //stateBench.a.initializeSnapshotData(stateBench.block1)
    }
  }
}
object SnapshotAssemblerBench {

  @throws[RunnerException]
  def main(args: Array[String]): Unit = {
    val opt = new OptionsBuilder()
      .include(".*" + classOf[SnapshotAssemblerBench].getSimpleName + ".*")
      .forks(1)
      .threads(1)
      .warmupIterations(benchSettings.benchesSettings.warmUpIterations)
      .measurementIterations(benchSettings.benchesSettings.measurementIterations)
      .mode(Mode.AverageTime)
      .timeUnit(TimeUnit.SECONDS)
      .verbosity(VerboseMode.EXTRA)
      .addProfiler(classOf[GCProfiler])
      .warmupTime(TimeValue.milliseconds(benchSettings.benchesSettings.warmUpTime))
      .measurementTime(TimeValue.milliseconds(benchSettings.benchesSettings.measurementTime))
      .build
    new Runner(opt).run
  }

  @State(Scope.Benchmark)
  class SnapshotAssemblerBenchState extends Settings {

    val a: AvlTree[StorageKey, StorageValue] =
      createAvl("9gKDVmfsA6J4b78jDBx6JmS86Zph98NnjnUqTJBkW7zitQMReia", 0, 500000)
    val block1                              = Utils.generateGenesisBlock(Height @@ 1)


    def createAvl(address: String, from: Int, to: Int): AvlTree[StorageKey, StorageValue] = {
      val firstDir: File = FileHelper.getRandomTempDir
      val firstStorage: VLDBWrapper = {
        val levelDBInit = LevelDbFactory.factory.open(firstDir, new Options)
        VLDBWrapper(VersionalLevelDBCompanion(levelDBInit, settings.levelDB, keySize = 32))
      }
      val dir: File = FileHelper.getRandomTempDir
      val levelDb: DB = LevelDbFactory.factory.open(dir, new Options)
      val rootNodesStorage = RootNodesStorage[StorageKey, StorageValue](levelDb, 10, dir)

      val firstAvl: AvlTree[StorageKey, StorageValue] = AvlTree[StorageKey, StorageValue](firstStorage, rootNodesStorage)
      val avlNew = (from to to).foldLeft(firstAvl) { case (avl, i) =>
        val bx = Utils.genAssetBox(address, i, nonce = i)
        val b = (StorageKey !@@ bx.id, StorageValue @@ bx.bytes)
        avl.insertAndDeleteMany(StorageVersion @@ Random.randomBytes(), List(b), List.empty)
      }
      avlNew
    }

    def tmpDir: File = FileHelper.getRandomTempDir
  }

} 
Example 13
Source File: package.scala    From Waves   with MIT License
package com.wavesplatform.api
import com.wavesplatform.account.Address
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.database.{DBExt, Keys}
import com.wavesplatform.state.{Diff, Height}
import com.wavesplatform.transaction.CreateAliasTransaction
import com.wavesplatform.transaction.lease.LeaseTransaction
import monix.reactive.Observable
import org.iq80.leveldb.DB

package object common extends BalanceDistribution with AddressTransactions {
  def aliasesOfAddress(db: DB, maybeDiff: => Option[(Height, Diff)], address: Address): Observable[(Height, CreateAliasTransaction)] = {
    val disabledAliases = db.get(Keys.disabledAliases)
    addressTransactions(db, maybeDiff, address, Some(address), Set(CreateAliasTransaction.typeId), None)
      .collect { case (height, cat: CreateAliasTransaction, true) if disabledAliases.isEmpty || !disabledAliases(cat.alias) => height -> cat }
  }

  def activeLeases(
      db: DB,
      maybeDiff: Option[(Height, Diff)],
      address: Address,
      leaseIsActive: ByteStr => Boolean
  ): Observable[(Height, LeaseTransaction)] =
    addressTransactions(db, maybeDiff, address, None, Set(LeaseTransaction.typeId), None)
      .collect { case (h, lt: LeaseTransaction, true) if leaseIsActive(lt.id()) => h -> lt }
} 
Example 14
Source File: ReadOnlyDB.scala    From Waves   with MIT License
package com.wavesplatform.database

import com.wavesplatform.metrics.LevelDBStats
import com.wavesplatform.metrics.LevelDBStats.DbHistogramExt
import org.iq80.leveldb.{DB, DBIterator, ReadOptions}

import scala.annotation.tailrec

class ReadOnlyDB(db: DB, readOptions: ReadOptions) {
  def get[V](key: Key[V]): V = {
    val bytes = db.get(key.keyBytes, readOptions)
    LevelDBStats.read.recordTagged(key, bytes)
    key.parse(bytes)
  }

  def has[V](key: Key[V]): Boolean = {
    val bytes = db.get(key.keyBytes, readOptions)
    LevelDBStats.read.recordTagged(key, bytes)
    bytes != null
  }

  def iterator: DBIterator = db.iterator(readOptions)

  def iterateOver(tag: KeyTags.KeyTag)(f: DBEntry => Unit): Unit = iterateOver(tag.prefixBytes)(f)

  def iterateOver(prefix: Array[Byte])(f: DBEntry => Unit): Unit = {
    val iterator = db.iterator(readOptions)
    try {
      iterator.seek(prefix)
      while (iterator.hasNext && iterator.peekNext().getKey.startsWith(prefix)) f(iterator.next())
    } finally iterator.close()
  }

  def read[T](keyName: String, prefix: Array[Byte], seek: Array[Byte], n: Int)(deserialize: DBEntry => T): Vector[T] = {
    val iter = iterator
    @tailrec def loop(aux: Vector[T], restN: Int, totalBytesRead: Long): (Vector[T], Long) =
      if (restN > 0 && iter.hasNext) {
        val elem = iter.next()
        if (elem.getKey.startsWith(prefix)) loop(aux :+ deserialize(elem), restN - 1, totalBytesRead + elem.getValue.length)
        else (aux, totalBytesRead)
      } else (aux, totalBytesRead)

    try {
      iter.seek(seek)
      val (r, totalBytesRead) = loop(Vector.empty, n, 0)
      LevelDBStats.read.recordTagged(keyName, totalBytesRead)
      r
    } finally iter.close()
  }
} 
Example 15
Source File: DBResource.scala    From Waves   with MIT License
package com.wavesplatform.database

import org.iq80.leveldb.{DB, DBIterator, ReadOptions}

trait DBResource extends AutoCloseable {
  def get[V](key: Key[V]): V
  def iterator: DBIterator
}

object DBResource {
  def apply(db: DB): DBResource = new DBResource {
    private[this] val snapshot = db.getSnapshot
    private[this] val readOptions = new ReadOptions().snapshot(snapshot)

    override def get[V](key: Key[V]): V = key.parse(db.get(key.keyBytes, readOptions))

    override val iterator: DBIterator = db.iterator(readOptions)

    override def close(): Unit = {
      iterator.close()
      snapshot.close()
    }
  }
} 
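The pattern for consuming a DBResource (see also Example 20 below) is acquire, use, close. A minimal helper, as a sketch built only on the API shown above:

import com.wavesplatform.database.DBResource
import org.iq80.leveldb.DB

// Runs f against a consistent snapshot of db and always releases the
// snapshot and its iterator, even if f throws.
def withResource[A](db: DB)(f: DBResource => A): A = {
  val resource = DBResource(db)
  try f(resource)
  finally resource.close()
}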
Example 16
Source File: LevelDBJNADBFactory.scala    From Waves   with MIT License
package com.wavesplatform.database.jna

import java.io.File

import com.protonail.leveldb.jna.{LevelDB, LevelDBCompressionType, LevelDBOptions}
import org.iq80.leveldb.{CompressionType, DB, DBFactory, Options}

class LevelDBJNADBFactory extends DBFactory {
  private[this] def openJnaDatabase(path: File, options: Options): LevelDB = {
    val opts = LevelDBJNADBFactory.toJNAOptions(options)
    try new LevelDB(path.toString, opts) finally opts.close()
  }

  override def open(path: File, options: Options): DB = {
    val db = openJnaDatabase(path, options)
    new LevelDBJNADB(db)
  }

  override def destroy(path: File, options: Options): Unit = {
    val options1 = LevelDBJNADBFactory.toJNAOptions(options)
    try LevelDB.destroy(path.toString, options1) finally options1.close()
  }

  override def repair(path: File, options: Options): Unit = {
    val options1 = LevelDBJNADBFactory.toJNAOptions(options)
    try LevelDB.repair(path.toString, options1) finally options1.close()
  }
}

private object LevelDBJNADBFactory {
  def toJNAOptions(o1: Options): LevelDBOptions = {
    val opts = new LevelDBOptions
    opts.setBlockRestartInterval(o1.blockRestartInterval())
    opts.setBlockSize(o1.blockSize())
    opts.setCompressionType(o1.compressionType() match {
      case CompressionType.NONE => LevelDBCompressionType.NoCompression
      case CompressionType.SNAPPY => LevelDBCompressionType.SnappyCompression
    })
    opts.setCreateIfMissing(o1.createIfMissing())
    opts.setErrorIfExists(o1.errorIfExists())
    opts.setMaxOpenFiles(o1.maxOpenFiles())
    opts.setParanoidChecks(o1.paranoidChecks())
    opts.setWriteBufferSize(o1.writeBufferSize())
    opts
  }
} 
Example 17
Source File: History.scala    From Waves   with MIT License
package com.wavesplatform.history

import com.wavesplatform.block.{Block, MicroBlock}
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.database
import com.wavesplatform.database.DBExt
import com.wavesplatform.state.{Blockchain, Height}
import org.iq80.leveldb.DB

trait History {
  def loadBlockBytes(id: ByteStr): Option[(Byte, Array[Byte])]
  def loadMicroBlock(id: ByteStr): Option[MicroBlock]
  def blockIdsAfter(candidates: Seq[ByteStr], count: Int): Seq[ByteStr]
}

object History {
  private def versionedBytes(block: Block): (Byte, Array[Byte]) = block.header.version -> block.bytes()

  def apply(blockchain: Blockchain, liquidBlock: ByteStr => Option[Block], microBlock: ByteStr => Option[MicroBlock], db: DB): History = new History {
    override def loadBlockBytes(id: ByteStr): Option[(Byte, Array[Byte])] =
      liquidBlock(id)
        .orElse(blockchain.heightOf(id).flatMap { h =>
          db.readOnly { ro =>
            database.loadBlock(Height(h), ro)
          }
        })
        .map(versionedBytes)

    override def loadMicroBlock(id: ByteStr): Option[MicroBlock] = microBlock(id)

    override def blockIdsAfter(candidates: Seq[ByteStr], count: Int): Seq[ByteStr] =
      candidates.view.flatMap(blockchain.heightOf).headOption.fold[Seq[ByteStr]](Seq.empty) { firstCommonHeight =>
        (firstCommonHeight to firstCommonHeight + count).flatMap(blockchain.blockId)
      }
  }
} 
Example 18
Source File: StorageFactory.scala    From Waves   with MIT License
package com.wavesplatform.history

import com.wavesplatform.account.Address
import com.wavesplatform.database.{DBExt, Keys, LevelDBWriter, loadActiveLeases}
import com.wavesplatform.events.BlockchainUpdateTriggers
import com.wavesplatform.mining.Miner
import com.wavesplatform.settings.WavesSettings
import com.wavesplatform.state.BlockchainUpdaterImpl
import com.wavesplatform.transaction.Asset
import com.wavesplatform.utils.{ScorexLogging, Time, UnsupportedFeature, forceStopApplication}
import monix.reactive.Observer
import org.iq80.leveldb.DB

object StorageFactory extends ScorexLogging {
  private val StorageVersion = 5

  def apply(
      settings: WavesSettings,
      db: DB,
      time: Time,
      spendableBalanceChanged: Observer[(Address, Asset)],
      blockchainUpdateTriggers: BlockchainUpdateTriggers,
      miner: Miner = _ => ()
  ): (BlockchainUpdaterImpl, AutoCloseable) = {
    checkVersion(db)
    val levelDBWriter = LevelDBWriter(db, spendableBalanceChanged, settings)
    val bui = new BlockchainUpdaterImpl(
      levelDBWriter,
      spendableBalanceChanged,
      settings,
      time,
      blockchainUpdateTriggers,
      (minHeight, maxHeight) => loadActiveLeases(db, minHeight, maxHeight),
      miner
    )
    (bui, levelDBWriter)
  }

  private def checkVersion(db: DB): Unit = db.readWrite { rw =>
    val version = rw.get(Keys.version)
    val height  = rw.get(Keys.height)
    if (version != StorageVersion) {
      if (height == 0) {
        // The storage is empty, set current version
        rw.put(Keys.version, StorageVersion)
      } else {
        // Here we've detected that the storage is not empty and doesn't contain version
        log.error(
          s"Storage version $version is not compatible with expected version $StorageVersion! Please, rebuild node's state, use import or sync from scratch."
        )
        log.error("FOR THIS REASON THE NODE STOPPED AUTOMATICALLY")
        forceStopApplication(UnsupportedFeature)
      }
    }
  }
} 
Example 19
Source File: TestStorageFactory.scala    From Waves   with MIT License
package com.wavesplatform.database

import com.google.common.hash.{Funnels, BloomFilter => GBloomFilter}
import com.wavesplatform.account.Address
import com.wavesplatform.events.BlockchainUpdateTriggers
import com.wavesplatform.settings.WavesSettings
import com.wavesplatform.state.BlockchainUpdaterImpl
import com.wavesplatform.transaction.Asset
import com.wavesplatform.utils.Time
import monix.reactive.Observer
import org.iq80.leveldb.DB

object TestStorageFactory {
  private def wrappedFilter(): BloomFilter = new Wrapper(GBloomFilter.create(Funnels.byteArrayFunnel(), 1000L))

  def apply(
      settings: WavesSettings,
      db: DB,
      time: Time,
      spendableBalanceChanged: Observer[(Address, Asset)],
      blockchainUpdateTriggers: BlockchainUpdateTriggers
  ): (BlockchainUpdaterImpl, LevelDBWriter) = {
    val levelDBWriter: LevelDBWriter = new LevelDBWriter(db, spendableBalanceChanged, settings.blockchainSettings, settings.dbSettings) {
      override val orderFilter: BloomFilter        = wrappedFilter()
      override val dataKeyFilter: BloomFilter      = wrappedFilter()
      override val wavesBalanceFilter: BloomFilter = wrappedFilter()
      override val assetBalanceFilter: BloomFilter = wrappedFilter()
    }
    (
      new BlockchainUpdaterImpl(levelDBWriter, spendableBalanceChanged, settings, time, blockchainUpdateTriggers, loadActiveLeases(db, _, _)),
      levelDBWriter
    )
  }
} 
Example 20
Source File: package.scala    From Waves   with MIT License
package com.wavesplatform.state

import com.wavesplatform.account.Address
import com.wavesplatform.api.common.AddressTransactions
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.database.{DBResource, LevelDBWriter, TestStorageFactory}
import com.wavesplatform.events.BlockchainUpdateTriggers
import com.wavesplatform.settings.TestSettings._
import com.wavesplatform.settings.{BlockchainSettings, FunctionalitySettings, GenesisSettings, RewardsSettings, TestSettings}
import com.wavesplatform.transaction.{Asset, Transaction}
import com.wavesplatform.utils.SystemTime
import monix.reactive.Observer
import org.iq80.leveldb.DB

package object utils {

  def addressTransactions(
      db: DB,
      diff: => Option[(Height, Diff)],
      address: Address,
      types: Set[Transaction.Type],
      fromId: Option[ByteStr]
  ): Seq[(Height, Transaction)] = {
    val resource = DBResource(db)
    try AddressTransactions.allAddressTransactions(resource, diff, address, None, types, fromId).map { case (h, tx, _) => h -> tx }.toSeq
    finally resource.close()
  }

  object TestLevelDB {
    def withFunctionalitySettings(
        writableDB: DB,
        spendableBalanceChanged: Observer[(Address, Asset)],
        fs: FunctionalitySettings
    ): LevelDBWriter =
      TestStorageFactory(
        TestSettings.Default.withFunctionalitySettings(fs),
        writableDB,
        SystemTime,
        spendableBalanceChanged,
        BlockchainUpdateTriggers.noop
      )._2

    def createTestBlockchainSettings(fs: FunctionalitySettings): BlockchainSettings =
      BlockchainSettings('T', fs, GenesisSettings.TESTNET, RewardsSettings.TESTNET)
  }
} 
Example 21
Source File: WithDB.scala    From Waves   with MIT License
package com.wavesplatform

import java.nio.file.Files

import com.wavesplatform.account.Address
import com.wavesplatform.database.LevelDBFactory
import com.wavesplatform.events.BlockchainUpdateTriggers
import com.wavesplatform.transaction.Asset
import monix.reactive.subjects.{PublishSubject, Subject}
import org.iq80.leveldb.{DB, Options}
import org.scalatest.{BeforeAndAfterEach, Suite}

trait WithDB extends BeforeAndAfterEach {
  this: Suite =>

  private val path                  = Files.createTempDirectory("lvl").toAbsolutePath
  private var currentDBInstance: DB = _

  protected val ignoreSpendableBalanceChanged: Subject[(Address, Asset), (Address, Asset)] = PublishSubject()

  protected val ignoreBlockchainUpdateTriggers: BlockchainUpdateTriggers = BlockchainUpdateTriggers.noop

  def db: DB = currentDBInstance

  override def beforeEach(): Unit = {
    currentDBInstance = LevelDBFactory.factory.open(path.toFile, new Options().createIfMissing(true))
    super.beforeEach()
  }

  override def afterEach(): Unit =
    try {
      super.afterEach()
      db.close()
    } finally {
      TestHelpers.deleteRecursively(path)
    }
} 
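A hypothetical spec using the WithDB fixture above — each test method sees a freshly opened database. This sketch assumes a recent ScalaTest with AnyFunSuite; only the raw byte-array API of org.iq80.leveldb.DB is used.

import com.wavesplatform.WithDB
import org.scalatest.funsuite.AnyFunSuite

class DbRoundTripSpec extends AnyFunSuite with WithDB {
  test("put and get round-trip") {
    db.put(Array[Byte](1), Array[Byte](42))
    assert(db.get(Array[Byte](1)).sameElements(Array[Byte](42)))
  }
}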
Example 22
Source File: RateCache.scala    From matcher   with MIT License
package com.wavesplatform.dex.caches

import java.util.concurrent.ConcurrentHashMap

import com.wavesplatform.dex.db.RateDB
import com.wavesplatform.dex.domain.asset.Asset
import com.wavesplatform.dex.domain.asset.Asset.{IssuedAsset, Waves}
import org.iq80.leveldb.DB

import scala.collection.JavaConverters._
import scala.collection.concurrent.TrieMap

trait RateCache {

  def upsertRate(asset: Asset, value: Double): Option[Double]

  def getRate(asset: Asset): Option[Double]

  def getAllRates: Map[Asset, Double]

  def deleteRate(asset: Asset): Option[Double]

}

object RateCache {

  private val WavesRate = Option(1d)

  def apply(db: DB): RateCache = new RateCache {

    private val rateDB  = RateDB(db)
    private val rateMap = new ConcurrentHashMap[IssuedAsset, Double](rateDB.getAllRates.asJava)

    def upsertRate(asset: Asset, value: Double): Option[Double] =
      asset.fold { WavesRate } { issuedAsset =>
        rateDB.upsertRate(issuedAsset, value)
        Option(rateMap.put(issuedAsset, value))
      }

    def getRate(asset: Asset): Option[Double] = asset.fold(WavesRate)(asset => Option(rateMap get asset))

    def getAllRates: Map[Asset, Double] = {
      rateMap.asScala.toMap.map { case (issuedAsset, value) => Asset.fromCompatId(issuedAsset.compatId) -> value } + (Waves -> 1d)
    }

    def deleteRate(asset: Asset): Option[Double] = asset.fold(WavesRate) { issuedAsset =>
      rateDB.deleteRate(issuedAsset)
      Option(rateMap.remove(issuedAsset))
    }
  }

  def inMem: RateCache = new RateCache {

    private val rates: TrieMap[Asset, Double] = TrieMap(Waves -> 1d)

    def upsertRate(asset: Asset, value: Double): Option[Double] = {
      asset.fold { WavesRate } { issuedAsset =>
        val previousValue = rates.get(issuedAsset)
        rates += (asset -> value)
        previousValue
      }
    }

    def getRate(asset: Asset): Option[Double] = rates.get(asset)
    def getAllRates: Map[Asset, Double]       = rates.toMap

    def deleteRate(asset: Asset): Option[Double] =
      asset.fold { Option(1d) } { issuedAsset =>
        val previousValue = rates.get(issuedAsset)
        rates -= issuedAsset
        previousValue
      }
  }
} 
Example 23
Source File: LevelDBDataSource.scala    From mantis   with Apache License 2.0
package io.iohk.ethereum.db.dataSource

import java.io.File

import org.iq80.leveldb.{DB, Options, WriteOptions}
import org.iq80.leveldb.impl.Iq80DBFactory


class LevelDBDataSource(
                         private var db: DB,
                         private val levelDbConfig: LevelDbConfig
                       )
  extends DataSource {

  
  override def destroy(): Unit = {
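    // Close this data source first, then delete the database files on disk.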
    try {
      close()
    } finally {
      Iq80DBFactory.factory.destroy(new File(levelDbConfig.path), null) // Options are not being used ¯\_(ツ)_/¯
    }
  }
}

trait LevelDbConfig {
  val createIfMissing: Boolean
  val paranoidChecks: Boolean
  val verifyChecksums: Boolean
  val path: String
}

object LevelDBDataSource {

  private def createDB(levelDbConfig: LevelDbConfig): DB = {
    import levelDbConfig._

    val options = new Options()
      .createIfMissing(createIfMissing)
      .paranoidChecks(paranoidChecks) // raise an error as soon as it detects an internal corruption
      .verifyChecksums(verifyChecksums) // force checksum verification of all data that is read from the file system on behalf of a particular read

    Iq80DBFactory.factory.open(new File(path), options)
  }

  def apply(levelDbConfig: LevelDbConfig): LevelDBDataSource = {
    new LevelDBDataSource(createDB(levelDbConfig), levelDbConfig)
  }
} 
Example 24
Source File: WriteExchangeTransactionActor.scala    From matcher   with MIT License
package com.wavesplatform.dex.actors.tx

import akka.actor.{Actor, Props}
import com.wavesplatform.dex.db.DbKeys
import com.wavesplatform.dex.db.leveldb.{DBExt, RW}
import com.wavesplatform.dex.domain.bytes.ByteStr
import com.wavesplatform.dex.domain.transaction.ExchangeTransaction
import com.wavesplatform.dex.domain.utils.ScorexLogging
import com.wavesplatform.dex.model.Events._
import org.iq80.leveldb.DB

class WriteExchangeTransactionActor(db: DB) extends Actor with ScorexLogging {

  import WriteExchangeTransactionActor._

  override def receive: Receive = {
    case ExchangeTransactionCreated(tx) => saveExchangeTx(tx)
  }

  private def saveExchangeTx(tx: ExchangeTransaction): Unit = db.readWrite { rw =>
    log.trace(s"Appending ${tx.id()} to orders [${tx.buyOrder.idStr()}, ${tx.sellOrder.idStr()}]")
    val txKey = DbKeys.exchangeTransaction(tx.id())
    if (!rw.has(txKey)) {
      rw.put(txKey, Some(tx))
      appendTxId(rw, tx.buyOrder.id(), tx.id())
      appendTxId(rw, tx.sellOrder.id(), tx.id())
    }
  }
}

object WriteExchangeTransactionActor {

  def name: String = "WriteExchangeTransactionActor"

  def props(db: DB): Props = Props(new WriteExchangeTransactionActor(db))

  def appendTxId(rw: RW, orderId: ByteStr, txId: ByteStr): Unit = {
    val key       = DbKeys.orderTxIdsSeqNr(orderId)
    val nextSeqNr = rw.get(key) + 1
    rw.put(key, nextSeqNr)
    rw.put(DbKeys.orderTxId(orderId, nextSeqNr), txId)
  }
} 
Example 25
Source File: RW.scala    From matcher   with MIT License
package com.wavesplatform.dex.db.leveldb

import com.wavesplatform.dex.metrics.LevelDBStats
import com.wavesplatform.dex.metrics.LevelDBStats.DbHistogramExt
import org.iq80.leveldb.{DB, ReadOptions, WriteBatch}

class RW(db: DB, readOptions: ReadOptions, batch: WriteBatch) extends ReadOnlyDB(db, readOptions) {

  def put[V](key: Key[V], value: V): Unit = {
    val bytes = key.encode(value)
    LevelDBStats.write.recordTagged(key, bytes)
    batch.put(key.keyBytes, bytes)
  }

  
  def inc(key: Key[Int]): Int = {
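    // Read the current Int value, add one, stage the write in the batch,
    // and return the new value.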
    val newValue = get(key) + 1
    put(key, newValue)
    newValue
  }

  def delete[V](key: Key[V]): Unit = batch.delete(key.keyBytes)
} 
Example 26
Source File: ReadOnlyDB.scala    From matcher   with MIT License
package com.wavesplatform.dex.db.leveldb

import com.wavesplatform.dex.metrics.LevelDBStats
import com.wavesplatform.dex.metrics.LevelDBStats.DbHistogramExt
import org.iq80.leveldb.{DB, DBIterator, ReadOptions}

import scala.annotation.tailrec

class ReadOnlyDB(db: DB, readOptions: ReadOptions) {

  def get[V](key: Key[V]): V = {
    val bytes = db.get(key.keyBytes, readOptions)
    LevelDBStats.read.recordTagged(key, bytes)
    key.parse(bytes)
  }

  def has[V](key: Key[V]): Boolean = {
    val bytes = db.get(key.keyBytes, readOptions)
    LevelDBStats.read.recordTagged(key, bytes)
    bytes != null
  }

  def iterator: DBIterator                                 = db.iterator(readOptions)
  def iterateOver(prefix: Short)(f: DBEntry => Unit): Unit = db.iterateOver(prefix)(f)

  def read[T](keyName: String, prefix: Array[Byte], seek: Array[Byte], n: Int)(deserialize: DBEntry => T): Vector[T] = {
    val iter = iterator
    @tailrec def loop(aux: Vector[T], restN: Int, totalBytesRead: Long): (Vector[T], Long) = {
      if (restN > 0 && iter.hasNext) {
        val elem = iter.next()
        if (elem.getKey.startsWith(prefix)) loop(aux :+ deserialize(elem), restN - 1, totalBytesRead + elem.getValue.length)
        else (aux, totalBytesRead)
      } else (aux, totalBytesRead)
    }

    try {
      iter.seek(seek)
      val (r, totalBytesRead) = loop(Vector.empty, n, 0)
      LevelDBStats.read.recordTagged(keyName, totalBytesRead)
      r
    } finally iter.close()
  }
} 
Example 27
Source File: package.scala    From matcher   with MIT License
package com.wavesplatform.dex.db

import java.io.File
import java.util.{Map => JMap}

import com.google.common.primitives.Shorts
import com.wavesplatform.dex.domain.utils.ScorexLogging
import org.iq80.leveldb.{DB, Options, ReadOptions}

package object leveldb extends ScorexLogging {

  def openDB(path: String, recreate: Boolean = false): DB = {

    log.debug(s"Open DB at $path")

    val file    = new File(path)
    val options = new Options().createIfMissing(true).paranoidChecks(true)

    if (recreate) {
      LevelDBFactory.factory.destroy(file, options)
    }

    file.getAbsoluteFile.getParentFile.mkdirs()
    LevelDBFactory.factory.open(file, options)
  }

  final type DBEntry = JMap.Entry[Array[Byte], Array[Byte]]

  implicit class DBExt(val db: DB) extends AnyVal {

    def readOnly[A](f: ReadOnlyDB => A): A = {
      val snapshot = db.getSnapshot
      try f(new ReadOnlyDB(db, new ReadOptions().snapshot(snapshot)))
      finally snapshot.close()
    }

    
    def readWrite[A](f: RW => A): A = {
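      // Reads are pinned to a snapshot; writes accumulate in a batch that is
      // written atomically once f succeeds.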
      val snapshot    = db.getSnapshot
      val readOptions = new ReadOptions().snapshot(snapshot)
      val batch       = db.createWriteBatch()
      val rw          = new RW(db, readOptions, batch)
      try {
        val r = f(rw)
        db.write(batch)
        r
      } finally {
        batch.close()
        snapshot.close()
      }
    }

    def get[A](key: Key[A]): A    = key.parse(db.get(key.keyBytes))
    def has(key: Key[_]): Boolean = db.get(key.keyBytes) != null

    def iterateOver(prefix: Short)(f: DBEntry => Unit): Unit = iterateOver(Shorts.toByteArray(prefix))(f)

    def iterateOver(prefix: Array[Byte])(f: DBEntry => Unit): Unit = {
      val iterator = db.iterator()
      try {
        iterator.seek(prefix)
        while (iterator.hasNext && iterator.peekNext().getKey.startsWith(prefix)) f(iterator.next())
      } finally iterator.close()
    }
  }
} 
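A sketch of the two helpers above in use; counterKey is a hypothetical Key[Int] (real keys live in the project's DbKeys object):

import org.iq80.leveldb.DB

// Increment a persistent counter via a batch, then read it back from a snapshot.
def bumpAndRead(db: DB, counterKey: Key[Int]): (Int, Int) = {
  val bumped = db.readWrite(_.inc(counterKey)) // staged in a batch, committed on exit
  val read   = db.readOnly(_.get(counterKey))  // snapshot-consistent read
  (bumped, read)
}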
Example 28
Source File: OrderDB.scala    From matcher   with MIT License
package com.wavesplatform.dex.db

import com.wavesplatform.dex.db.leveldb.DBExt
import com.wavesplatform.dex.domain.account.Address
import com.wavesplatform.dex.domain.asset.AssetPair
import com.wavesplatform.dex.domain.bytes.ByteStr
import com.wavesplatform.dex.domain.order.Order
import com.wavesplatform.dex.domain.order.Order.Id
import com.wavesplatform.dex.domain.transaction.ExchangeTransaction
import com.wavesplatform.dex.domain.utils.ScorexLogging
import com.wavesplatform.dex.model.OrderInfo.FinalOrderInfo
import com.wavesplatform.dex.model.{OrderInfo, OrderStatus}
import org.iq80.leveldb.DB


trait OrderDB {
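  // Persists order bodies, final order statuses, and per-owner / per-pair
  // indexes of finalized orders.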
  def containsInfo(id: Order.Id): Boolean
  def status(id: Order.Id): OrderStatus.Final
  def saveOrderInfo(id: Order.Id, sender: Address, oi: OrderInfo[OrderStatus.Final]): Unit
  def saveOrder(o: Order): Unit
  def get(id: Order.Id): Option[Order]
  def getFinalizedOrders(owner: Address, maybePair: Option[AssetPair]): Seq[(Order.Id, OrderInfo[OrderStatus])]
  def getOrderInfo(id: Order.Id): Option[FinalOrderInfo]
  def transactionsByOrder(orderId: ByteStr): Seq[ExchangeTransaction]
}

object OrderDB {
  case class Settings(maxOrders: Int)

  def apply(settings: Settings, db: DB): OrderDB = new OrderDB with ScorexLogging {
    override def containsInfo(id: Order.Id): Boolean = db.readOnly(_.has(DbKeys.orderInfo(id)))

    override def status(id: Order.Id): OrderStatus.Final = db.readOnly { ro =>
      ro.get(DbKeys.orderInfo(id)).fold[OrderStatus.Final](OrderStatus.NotFound)(_.status)
    }

    override def saveOrder(o: Order): Unit = db.readWrite { rw =>
      val k = DbKeys.order(o.id())
      if (!rw.has(k)) {
        rw.put(k, Some(o))
      }
    }

    override def get(id: Order.Id): Option[Order] = db.readOnly(_.get(DbKeys.order(id)))

    override def saveOrderInfo(id: Order.Id, sender: Address, oi: FinalOrderInfo): Unit = {
      val orderInfoKey = DbKeys.orderInfo(id)
      if (!db.has(orderInfoKey)) {
        db.readWrite { rw =>
          val newCommonSeqNr = rw.inc(DbKeys.finalizedCommonSeqNr(sender))
          rw.put(DbKeys.finalizedCommon(sender, newCommonSeqNr), Some(id))

          val newPairSeqNr = rw.inc(DbKeys.finalizedPairSeqNr(sender, oi.assetPair))
          rw.put(DbKeys.finalizedPair(sender, oi.assetPair, newPairSeqNr), Some(id))
          if (newPairSeqNr > settings.maxOrders) // Indexes start with 1, so if maxOrders=100 and newPairSeqNr=101, we delete 1 (the first)
            rw.get(DbKeys.finalizedPair(sender, oi.assetPair, newPairSeqNr - settings.maxOrders))
              .map(DbKeys.order)
              .foreach(x => rw.delete(x))

          rw.put(orderInfoKey, Some(oi))
        }
      }
    }

    override def getFinalizedOrders(owner: Address, maybePair: Option[AssetPair]): Seq[(Order.Id, OrderInfo[OrderStatus])] =
      db.readOnly { ro =>
        val (seqNr, key) = maybePair match {
          case Some(p) =>
            (ro.get(DbKeys.finalizedPairSeqNr(owner, p)), DbKeys.finalizedPair(owner, p, _: Int))
          case None =>
            (ro.get(DbKeys.finalizedCommonSeqNr(owner)), DbKeys.finalizedCommon(owner, _: Int))
        }

        (for {
          offset <- 0 until math.min(seqNr, settings.maxOrders)
          id     <- db.get(key(seqNr - offset))
          oi     <- db.get(DbKeys.orderInfo(id))
        } yield id -> oi).sorted
      }

    override def getOrderInfo(id: Id): Option[FinalOrderInfo] = db.readOnly(_.get(DbKeys.orderInfo(id)))

    override def transactionsByOrder(orderId: Id): Seq[ExchangeTransaction] = db.readOnly { ro =>
      for {
        seqNr <- 1 to ro.get(DbKeys.orderTxIdsSeqNr(orderId))
        txId = ro.get(DbKeys.orderTxId(orderId, seqNr))
        tx <- ro.get(DbKeys.exchangeTransaction(txId))
      } yield tx
    }
  }

  implicit def orderInfoOrdering[S <: OrderStatus]: Ordering[(ByteStr, OrderInfo[S])] = Ordering.by { case (id, oi) => (-oi.timestamp, id) }
} 
Example 29
Source File: LocalQueueStore.scala    From matcher   with MIT License
package com.wavesplatform.dex.db

import java.util.concurrent.ConcurrentLinkedQueue
import java.util.concurrent.atomic.AtomicLong

import com.google.common.primitives.{Longs, Shorts}
import com.wavesplatform.dex.db.DbKeys._
import com.wavesplatform.dex.db.leveldb.{DBExt, ReadOnlyDB}
import com.wavesplatform.dex.queue.{QueueEvent, QueueEventWithMeta}
import org.iq80.leveldb.{DB, ReadOptions}

class LocalQueueStore(db: DB) {

  private val newestIdx        = new AtomicLong(db.get(lqNewestIdx))
  private val inMemQueue       = new ConcurrentLinkedQueue[QueueEventWithMeta]
  private var startInMemOffset = Option.empty[QueueEventWithMeta.Offset]

  def enqueue(event: QueueEvent, timestamp: Long): QueueEventWithMeta.Offset = {
    val offset   = newestIdx.incrementAndGet()
    val eventKey = lpqElement(offset)

    val x = QueueEventWithMeta(offset, timestamp, event)
    db.readWrite { rw =>
      rw.put(eventKey, Some(x))
      rw.put(lqNewestIdx, offset)
    }

    inMemQueue.add(x)
    if (startInMemOffset.isEmpty) startInMemOffset = Some(offset)
    offset
  }

  def getFrom(offset: QueueEventWithMeta.Offset, maxElements: Int): Vector[QueueEventWithMeta] = {
    if (startInMemOffset.exists(_ <= offset)) {
      if (inMemQueue.isEmpty) Vector.empty
      else {
        val xs    = Vector.newBuilder[QueueEventWithMeta]
        var added = 0

        while (!inMemQueue.isEmpty && added < maxElements) Option(inMemQueue.poll()).foreach { x =>
          xs += x
          added += 1
        }

        xs.result()
      }
    } else
      new ReadOnlyDB(db, new ReadOptions())
        .read(LqElementKeyName, LqElementPrefixBytes, lpqElement(math.max(offset, 0)).keyBytes, Int.MaxValue) { e =>
          val offset = Longs.fromByteArray(e.getKey.slice(Shorts.BYTES, Shorts.BYTES + Longs.BYTES))
          lpqElement(offset).parse(e.getValue).getOrElse(throw new RuntimeException(s"Can't find a queue event at $offset"))
        }
  }

  def newestOffset: Option[QueueEventWithMeta.Offset] = {
    val idx      = newestIdx.get()
    val eventKey = lpqElement(idx)
    eventKey.parse(db.get(eventKey.keyBytes)).map(_.offset)
  }

  def dropUntil(offset: QueueEventWithMeta.Offset): Unit = db.readWrite { rw =>
    val oldestIdx = math.max(db.get(lqOldestIdx), 0)
    (oldestIdx until offset).foreach { offset =>
      rw.delete(lpqElement(offset))
    }
    rw.put(lqOldestIdx, offset)
  }

} 
Example 30
Source File: AssetsStorage.scala    From matcher   with MIT License
package com.wavesplatform.dex.db

import java.util.concurrent.ConcurrentHashMap

import com.wavesplatform.dex.db.leveldb.DBExt
import com.wavesplatform.dex.domain.asset.Asset
import com.wavesplatform.dex.domain.asset.Asset.IssuedAsset
import com.wavesplatform.dex.effect.{FutureResult, liftValueAsync}
import com.wavesplatform.dex.grpc.integration.dto.BriefAssetDescription
import org.iq80.leveldb.DB

trait AssetsStorage {
  def put(asset: IssuedAsset, item: BriefAssetDescription): Unit
  def get(asset: IssuedAsset): Option[BriefAssetDescription]
}

object AssetsStorage {

  val wavesLifted: FutureResult[BriefAssetDescription] = liftValueAsync { BriefAssetDescription.wavesDescription }

  def cache(inner: AssetsStorage): AssetsStorage = new AssetsStorage {

    private val assetsCache = new ConcurrentHashMap[Asset, BriefAssetDescription]

    def put(asset: Asset.IssuedAsset, item: BriefAssetDescription): Unit = {
      inner.put(asset, item)
      assetsCache.putIfAbsent(asset, item)
    }

    def get(asset: Asset.IssuedAsset): Option[BriefAssetDescription] = Option {
      assetsCache.computeIfAbsent(asset, inner.get(_).orNull)
    }
  }

  def levelDB(db: DB): AssetsStorage = new AssetsStorage {
    def put(asset: IssuedAsset, record: BriefAssetDescription): Unit = db.readWrite(_.put(DbKeys.asset(asset), Some(record)))
    def get(asset: IssuedAsset): Option[BriefAssetDescription]       = db.readOnly(_.get(DbKeys.asset(asset)))
  }

  def inMem: AssetsStorage = new AssetsStorage {
    private val assetsCache                                        = new ConcurrentHashMap[Asset, BriefAssetDescription]
    def put(asset: IssuedAsset, item: BriefAssetDescription): Unit = assetsCache.putIfAbsent(asset, item)
    def get(asset: IssuedAsset): Option[BriefAssetDescription]     = Option(assetsCache get asset)
  }

  final implicit class Ops(val self: AssetsStorage) extends AnyVal {

    def get(asset: Asset): Option[BriefAssetDescription] = asset.fold(BriefAssetDescription.someWavesDescription)(self.get)

    def unsafeGet(asset: Asset): BriefAssetDescription = {
      get(asset).getOrElse(throw new RuntimeException(s"Unknown asset: ${asset.toString}"))
    }

    def unsafeGetDecimals(asset: Asset): Int      = unsafeGet(asset).decimals
    def unsafeGetHasScript(asset: Asset): Boolean = unsafeGet(asset).hasScript
  }
} 
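
A minimal sketch of the intended composition, assuming `db` is open and `someIssuedAsset`/`someDescription` are placeholder values: the cache layer answers repeated lookups from memory while writes go through to LevelDB.

val assets: AssetsStorage = AssetsStorage.cache(AssetsStorage.levelDB(db))
assets.put(someIssuedAsset, someDescription)                  // written through and cached
val decimals: Int = assets.unsafeGetDecimals(someIssuedAsset) // via the implicit Ops; throws on unknown assets
val waves = assets.get(Asset.Waves)                           // Ops.get answers Waves without touching the store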
Example 31
Source File: OrderBookSnapshotDB.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.db

import java.util.concurrent.ConcurrentHashMap

import cats.instances.option.catsStdInstancesForOption
import cats.syntax.apply.catsSyntaxTuple2Semigroupal
import com.wavesplatform.dex.db.leveldb.{DBExt, Key}
import com.wavesplatform.dex.domain.asset.AssetPair
import com.wavesplatform.dex.model.OrderBookSnapshot
import com.wavesplatform.dex.queue.QueueEventWithMeta.Offset
import org.iq80.leveldb.DB

trait OrderBookSnapshotDB {
  def get(assetPair: AssetPair): Option[(Offset, OrderBookSnapshot)]
  def update(assetPair: AssetPair, offset: Offset, newSnapshot: Option[OrderBookSnapshot]): Unit
  def delete(assetPair: AssetPair): Unit
}

object OrderBookSnapshotDB {
  def apply(db: DB): OrderBookSnapshotDB = new OrderBookSnapshotDB {
    override def get(assetPair: AssetPair): Option[(Offset, OrderBookSnapshot)] = db.readOnly { ro =>
      val (obOffsetKey, obKey) = keys(assetPair)
      (ro.get(obOffsetKey), ro.get(obKey)).tupled
    }

    override def update(assetPair: AssetPair, offset: Offset, newSnapshot: Option[OrderBookSnapshot]): Unit = db.readWrite { rw =>
      val (obOffsetKey, obKey) = keys(assetPair)
      rw.put(obOffsetKey, Some(offset))
      newSnapshot.foreach(x => rw.put(obKey, Some(x)))
    }

    override def delete(assetPair: AssetPair): Unit = db.readWrite { rw =>
      val (obOffsetKey, obKey) = keys(assetPair)
      rw.delete(obOffsetKey)
      rw.delete(obKey)
    }

    private def keys(assetPair: AssetPair): (Key[Option[Offset]], Key[Option[OrderBookSnapshot]]) =
      (DbKeys.orderBookSnapshotOffset(assetPair), DbKeys.orderBookSnapshot(assetPair))
  }

  def inMem: OrderBookSnapshotDB = new OrderBookSnapshotDB {
    private val snapshots = new ConcurrentHashMap[AssetPair, (Offset, OrderBookSnapshot)]()

    override def get(assetPair: AssetPair): Option[(Offset, OrderBookSnapshot)] = Option(snapshots.get(assetPair))

    override def update(assetPair: AssetPair, offset: Offset, newSnapshot: Option[OrderBookSnapshot]): Unit =
      // Advances the offset and keeps the previous snapshot when newSnapshot is None;
      // assumes an entry already exists for the pair (otherwise `current` is null here).
      snapshots.compute(assetPair, (_, current) => (offset, newSnapshot.getOrElse(current._2)))

    override def delete(assetPair: AssetPair): Unit = snapshots.remove(assetPair)
  }
} 
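
A minimal usage sketch, with `someAssetPair` and `someSnapshot` assumed: the offset and the snapshot live under separate keys, so an update may advance the offset without rewriting the snapshot.

val snapshotsDb: OrderBookSnapshotDB = OrderBookSnapshotDB(db)
snapshotsDb.update(someAssetPair, offset = 42L, newSnapshot = Some(someSnapshot))
val restored: Option[(Offset, OrderBookSnapshot)] = snapshotsDb.get(someAssetPair)
snapshotsDb.delete(someAssetPair)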
Example 32
Source File: AssetPairsDB.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.db

import com.wavesplatform.dex.db.leveldb.DBExt
import com.wavesplatform.dex.domain.asset.AssetPair
import org.iq80.leveldb.DB

trait AssetPairsDB {
  def add(pair: AssetPair): Unit
  def remove(pair: AssetPair): Unit
  def all(): Set[AssetPair]
}

object AssetPairsDB {

  def apply(db: DB): AssetPairsDB = new AssetPairsDB {

    def add(pair: AssetPair): Unit    = db.readWrite(_.put(DbKeys.assetPair(pair), ()))
    def remove(pair: AssetPair): Unit = db.readWrite(_.delete(DbKeys.assetPair(pair)))

    def all(): Set[AssetPair] = db.readOnly { ro =>
      val r = Set.newBuilder[AssetPair]

      ro.iterateOver(DbKeys.AssetPairsPrefix) { pair =>
        r += AssetPair.fromBytes(pair.getKey.drop(2))
      }

      r.result()
    }
  }
} 
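
A minimal usage sketch, assuming `db` is open and `someAssetPair` is a placeholder: `all()` rebuilds the set by iterating every key under AssetPairsPrefix.

val pairsDb: AssetPairsDB = AssetPairsDB(db)
pairsDb.add(someAssetPair)
val known: Set[AssetPair] = pairsDb.all()
pairsDb.remove(someAssetPair)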
Example 33
Source File: ShuffleTestAccessor.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.network.shuffle

import java.io.File
import java.util.concurrent.ConcurrentMap

import org.apache.hadoop.yarn.api.records.ApplicationId
import org.fusesource.leveldbjni.JniDBFactory
import org.iq80.leveldb.{DB, Options}

import org.apache.spark.network.shuffle.ExternalShuffleBlockResolver.AppExecId
import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo


object ShuffleTestAccessor {

  def getBlockResolver(handler: ExternalShuffleBlockHandler): ExternalShuffleBlockResolver = {
    handler.blockManager
  }

  def getExecutorInfo(
      appId: ApplicationId,
      execId: String,
      resolver: ExternalShuffleBlockResolver
  ): Option[ExecutorShuffleInfo] = {
    val id = new AppExecId(appId.toString, execId)
    Option(resolver.executors.get(id))
  }

  def registeredExecutorFile(resolver: ExternalShuffleBlockResolver): File = {
    resolver.registeredExecutorFile
  }

  def shuffleServiceLevelDB(resolver: ExternalShuffleBlockResolver): DB = {
    resolver.db
  }

  def reloadRegisteredExecutors(
    file: File): ConcurrentMap[ExternalShuffleBlockResolver.AppExecId, ExecutorShuffleInfo] = {
    val options: Options = new Options
    options.createIfMissing(true)
    val factory = new JniDBFactory
    val db = factory.open(file, options)
    val result = ExternalShuffleBlockResolver.reloadRegisteredExecutors(db)
    db.close()
    result
  }

  def reloadRegisteredExecutors(
      db: DB): ConcurrentMap[ExternalShuffleBlockResolver.AppExecId, ExecutorShuffleInfo] = {
    ExternalShuffleBlockResolver.reloadRegisteredExecutors(db)
  }
} 
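
A minimal sketch of how a test might use the accessor, assuming `handler` is an ExternalShuffleBlockHandler provided by the test harness:

val resolver = ShuffleTestAccessor.getBlockResolver(handler)
val file     = ShuffleTestAccessor.registeredExecutorFile(resolver)
val reloaded = ShuffleTestAccessor.reloadRegisteredExecutors(file) // re-opens the LevelDB file and closes it again
assert(!reloaded.isEmpty)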
Example 34
Source File: WithDB.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.db

import java.nio.file.Files

import com.wavesplatform.dex.db.leveldb.LevelDBFactory
import com.wavesplatform.dex.domain.account.Address
import com.wavesplatform.dex.domain.asset.Asset
import com.wavesplatform.dex.util.Implicits._
import com.wavesplatform.dex.util.TestHelpers
import monix.reactive.subjects.Subject
import org.iq80.leveldb.{DB, Options}
import org.scalatest.{BeforeAndAfterEach, Suite}

trait WithDB extends BeforeAndAfterEach { this: Suite =>

  private val path                  = Files.createTempDirectory("lvl").toAbsolutePath
  private var currentDBInstance: DB = _

  def db: DB = currentDBInstance

  protected val ignoreSpendableBalanceChanged: Subject[(Address, Asset), (Address, Asset)] = Subject.empty

  override def beforeEach(): Unit = {
    currentDBInstance = LevelDBFactory.factory.open(path.toFile, new Options().createIfMissing(true))
    super.beforeEach()
  }

  override def afterEach(): Unit =
    try {
      super.afterEach()
      db.close()
    } finally {
      TestHelpers.deleteRecursively(path)
    }

  protected def tempDb(f: DB => Any): Any = {
    val path = Files.createTempDirectory("lvl-temp").toAbsolutePath
    val db   = LevelDBFactory.factory.open(path.toFile, new Options().createIfMissing(true))
    try {
      f(db)
    } finally {
      db.close()
      TestHelpers.deleteRecursively(path)
    }
  }
} 
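
A minimal sketch of a suite built on WithDB, assuming the same org.scalatest imports as the tests elsewhere on this page; `db` is re-opened in beforeEach and the directory is wiped after each test, so every case starts from an empty database.

class AssetPairsDBSpec extends WordSpecLike with Matchers with WithDB {
  "AssetPairsDB" should {
    "start empty on a fresh database" in {
      AssetPairsDB(db).all() shouldBe empty
    }
  }
}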
Example 35
Source File: VersionedLDBKVStore.scala    From Sidechains-SDK   with MIT License 5 votes vote down vote up
package com.horizen.storage.leveldb

import io.iohk.iodb.ByteArrayWrapper
import org.iq80.leveldb.{DB, ReadOptions}

import scala.collection.mutable
import scala.util.{Failure, Success, Try}

  def rollbackTo(versionId: VersionId): Try[Unit] = {
    val ro = new ReadOptions()
    ro.snapshot(db.getSnapshot)
    Option(db.get(VersionsKey)) match {
      case Some(bytes) =>
        val batch = db.createWriteBatch()
        try {
          val versionsToRollBack = bytes
            .grouped(Constants.HashLength)
            .takeWhile(ByteArrayWrapper(_) != ByteArrayWrapper(versionId))

          versionsToRollBack
            .foldLeft(Seq.empty[(Array[Byte], ChangeSet)]) { case (acc, verId) =>
              val changeSetOpt = Option(db.get(verId, ro)).flatMap { changeSetBytes =>
                ChangeSetSerializer.parseBytesTry(changeSetBytes.tail).toOption
              }
              require(changeSetOpt.isDefined, s"Inconsistent versioned storage state")
              acc ++ changeSetOpt.toSeq.map(verId -> _)
            }
            .foreach { case (verId, changeSet) => // revert all changes (from newest version to the targeted one)
              changeSet.insertedKeys.foreach(k => batch.delete(k))
              changeSet.removed.foreach { case (k, v) =>
                batch.put(k, v)
              }
              changeSet.altered.foreach { case (k, oldV) =>
                batch.put(k, oldV)
              }
              batch.delete(verId)
            }

          val wrappedVersionId = ByteArrayWrapper(versionId)
          val updatedVersions = bytes
            .grouped(Constants.HashLength)
            .map(ByteArrayWrapper.apply)
            .dropWhile(_ != wrappedVersionId)
            .foldLeft(Array.empty[Byte]) { case (acc, arr) =>
              acc ++ arr.data
            }

          versionsToRollBack.foreach(batch.delete) // eliminate rolled back versions
          batch.put(VersionsKey, updatedVersions)

          db.write(batch)
          Success(())
        } finally {
          batch.close()
          ro.snapshot().close()
        }
      case None =>
        Failure(new Exception(s"Version ${Algos.encode(versionId)} not found"))
    }
  }

  def versions: Seq[VersionId] = Option(db.get(VersionsKey))
    .toSeq
    .flatMap(_.grouped(Constants.HashLength))

  def versionIdExists(versionId: VersionId): Boolean =
    versions.exists(ByteArrayWrapper(_) == ByteArrayWrapper(versionId))

}

object VersionedLDBKVStore {
  type VersionId = Array[Byte]
} 
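
A minimal rollback sketch, assuming `store` is a VersionedLDBKVStore instance and `target` one of its previously recorded version ids:

if (store.versionIdExists(target))
  store.rollbackTo(target).get // reverts newer change sets: deletes their inserts, restores removed and altered values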
Example 36
Source File: KVStore.scala    From Sidechains-SDK   with MIT License 5 votes vote down vote up
package com.horizen.storage.leveldb

import org.iq80.leveldb.{DB, ReadOptions}

import scala.collection.mutable

trait KVStore extends AutoCloseable {

  type K = Array[Byte]
  type V = Array[Byte]

  protected val db: DB

  def get(key: K): Option[V] =
    Option(db.get(key))

  def getAll(cond: (K, V) => Boolean): Seq[(K, V)] = {
    val ro = new ReadOptions()
    ro.snapshot(db.getSnapshot)
    val iter = db.iterator(ro)
    try {
      iter.seekToFirst()
      val bf = mutable.ArrayBuffer.empty[(K, V)]
      while (iter.hasNext) {
        val next = iter.next()
        val key = next.getKey
        val value = next.getValue
        if (cond(key, value)) bf += (key -> value)
      }
      bf.toList
    } finally {
      iter.close()
      ro.snapshot().close()
    }
  }

  def getAll: Seq[(K, V)] = getAll((_, _) => true)

  def getOrElse(key: K, default: => V): V =
    get(key).getOrElse(default)

  def get(keys: Seq[K]): Seq[(K, Option[V])] = {
    val bf = mutable.ArrayBuffer.empty[(K, Option[V])]
    keys.foreach(k => bf += (k -> get(k)))
    bf
  }

  override def close(): Unit = db.close()

} 
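
The smallest concrete store is just the trait over an already-open handle; `openedDb` is assumed here. Note that `getAll` scans a snapshot, so concurrent writes do not affect the result.

val plain: KVStore = new KVStore {
  override protected val db: DB = openedDb
}
val flagged = plain.getAll((k, _) => k.nonEmpty && k.head == 0x01.toByte) // the filter runs per entry during the scan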
Example 37
Source File: SnapshotDownloadControllerStorageAPI.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.view.fast.sync

import com.typesafe.scalalogging.StrictLogging
import encry.settings.EncryAppSettings
import encry.view.fast.sync.SnapshotHolder.SnapshotManifest.ChunkId
import org.encryfoundation.common.utils.Algos
import org.iq80.leveldb.DB

trait SnapshotDownloadControllerStorageAPI extends DBTryCatchFinallyProvider with StrictLogging {

  val storage: DB

  val settings: EncryAppSettings

  def nextGroupKey(n: Int): Array[Byte] = Algos.hash(s"next_group_key_$n")

  def getNextForRequest(groupNumber: Int): Either[Throwable, List[ChunkId]] =
    readWrite(
      (batch, readOptions, _) => {
        logger.debug(s"Going to get next group for request with number $groupNumber.")
        val res = storage.get(nextGroupKey(groupNumber), readOptions)
        val buffer: List[ChunkId] =
          if (res != null) {
            val value = res.grouped(32).toList.map(ChunkId @@ _)
            logger.debug(s"The fetched group is non-empty; it contains ${value.size} elements.")
            logger.debug(s"First element of the group is: ${value.headOption.map(Algos.encode)}")
            batch.delete(nextGroupKey(groupNumber))
            value
          } else {
            logger.debug(s"Requested group is null")
            throw new Exception("Inconsistent snapshot download controller db state!")
          }
        buffer
      }
    )
} 
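
A minimal draining sketch, assuming `controller` mixes in the trait and `requestChunks` is a hypothetical sender: each successful call also deletes the stored group, so every group is handed out exactly once and the loop stops on the first Left.

var group = 0
var next  = controller.getNextForRequest(group)
while (next.isRight) {
  next.foreach(requestChunks)
  group += 1
  next = controller.getNextForRequest(group)
}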
Example 38
Source File: DBTryCatchFinallyProvider.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.view.fast.sync

import com.typesafe.scalalogging.StrictLogging
import org.iq80.leveldb.{ DB, DBIterator, ReadOptions, WriteBatch }
import cats.syntax.either._

trait DBTryCatchFinallyProvider extends StrictLogging {

  val storage: DB

  def readWrite[Output](
    f: (WriteBatch, ReadOptions, DBIterator) => Output
  ): Either[Throwable, Output] = {
    val snapshot             = storage.getSnapshot
    val readOptions          = new ReadOptions().snapshot(snapshot)
    val batch: WriteBatch    = storage.createWriteBatch()
    val iterator: DBIterator = storage.iterator(readOptions)
    try {
      val output = f(batch, readOptions, iterator)
      storage.write(batch)
      output.asRight[Throwable]
    } catch {
      case error: Throwable =>
        logger.info(s"Error has occurred $error")
        error.asLeft[Output]
    } finally {
      iterator.close()
      readOptions.snapshot().close()
      batch.close()
    }
  }
} 
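
A minimal sketch of readWrite from inside a class mixing in the trait: an atomic read-modify-write, where `counterKey` is an assumed Array[Byte] key and Longs is Guava's com.google.common.primitives.Longs.

val bumped: Either[Throwable, Long] = readWrite { (batch, readOptions, _) =>
  val current = Option(storage.get(counterKey, readOptions)).map(Longs.fromByteArray).getOrElse(0L)
  batch.put(counterKey, Longs.toByteArray(current + 1))
  current + 1 // returned on success, after the whole batch is written
}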
Example 39
Source File: AccStorage.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.api.http

import java.io.File
import cats.syntax.either._
import com.typesafe.scalalogging.StrictLogging
import encry.settings.EncryAppSettings
import encry.storage.VersionalStorage.StorageKey
import encry.storage.levelDb.versionalLevelDB.LevelDbFactory
import org.encryfoundation.common.utils.Algos
import org.iq80.leveldb.{DB, Options}
import scorex.utils.Random
import supertagged.TaggedType

trait AccStorage extends StrictLogging with AutoCloseable {

  val storage: DB

  val verifyPassword: String => Boolean = pass => {
    val salt = storage.get(AccStorage.SaltKey)
    val passHash = storage.get(AccStorage.PasswordHashKey)
    Algos.hash(pass.getBytes() ++ salt) sameElements passHash
  }

  def setPassword(pass: String): Either[Throwable, Unit] = {
    val batch = storage.createWriteBatch()
    val salt = Random.randomBytes()
    try {
      batch.put(AccStorage.PasswordHashKey, Algos.hash(pass.getBytes() ++ salt))
      batch.put(AccStorage.SaltKey, salt)
      storage.write(batch).asRight[Throwable]
    } catch {
      case err: Throwable => err.asLeft[Unit]
    }
    finally {
      batch.close()
    }
  }

  override def close(): Unit = storage.close()

}

object AccStorage extends StrictLogging {

  object PasswordHash extends TaggedType[Array[Byte]]
  object PasswordSalt extends TaggedType[Array[Byte]]

  type PasswordHash = PasswordHash.Type
  type PasswordSalt = PasswordSalt.Type

  val PasswordHashKey: StorageKey = StorageKey @@ Algos.hash("Password_Key")
  val SaltKey: StorageKey = StorageKey @@ Algos.hash("Salt_Key")

  def getDirStorage(settings: EncryAppSettings): File = new File(s"${settings.directory}/userKeys")

  def init(settings: EncryAppSettings): AccStorage = new AccStorage {
    override val storage: DB = LevelDbFactory.factory.open(getDirStorage(settings), new Options)
  }

} 
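
A minimal first-run sketch, assuming `settings` is a loaded EncryAppSettings: the salt is freshly random on every setPassword call, so verifyPassword must read both the salt and the hash back from storage, as it does above.

val accounts: AccStorage = AccStorage.init(settings)
accounts.setPassword("s3cret") match {
  case Right(_)  => require(accounts.verifyPassword("s3cret"))
  case Left(err) => throw err
}
accounts.close()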
Example 40
Source File: WalletVersionalLevelDB.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.storage.levelDb.versionalLevelDB

import cats.instances.all._
import cats.syntax.semigroup._
import com.google.common.primitives.Longs
import com.typesafe.scalalogging.StrictLogging
import encry.settings.LevelDBSettings
import encry.storage.levelDb.versionalLevelDB.VersionalLevelDBCompanion._
import encry.utils.{BalanceCalculator, ByteStr}
import org.encryfoundation.common.modifiers.state.StateModifierSerializer
import org.encryfoundation.common.modifiers.state.box.Box.Amount
import org.encryfoundation.common.modifiers.state.box.EncryBaseBox
import org.encryfoundation.common.modifiers.state.box.TokenIssuingBox.TokenId
import org.encryfoundation.common.utils.Algos
import org.encryfoundation.common.utils.TaggedTypes.{ADKey, ModifierId}
import org.iq80.leveldb.DB
import scorex.crypto.hash.Digest32
import scala.util.Success

case class WalletVersionalLevelDB(db: DB, settings: LevelDBSettings) extends StrictLogging with AutoCloseable {

  import WalletVersionalLevelDBCompanion._

  val levelDb: VersionalLevelDB = VersionalLevelDB(db, settings)

  //todo: optimize this
  def getAllBoxes(maxQty: Int = -1): Seq[EncryBaseBox] = levelDb.getAll(maxQty)
    .filterNot(_._1 sameElements BALANCE_KEY)
    .map { case (key, bytes) => StateModifierSerializer.parseBytes(bytes, key.head) }
    .collect { case Success(box) => box }

  def getBoxById(id: ADKey): Option[EncryBaseBox] = levelDb.get(VersionalLevelDbKey @@ id.untag(ADKey))
    .flatMap(wrappedBx => StateModifierSerializer.parseBytes(wrappedBx, id.head).toOption)

  def getTokenBalanceById(id: TokenId): Option[Amount] = getBalances
    .find(_._1._2 == Algos.encode(id))
    .map(_._2)

  def containsBox(id: ADKey): Boolean = getBoxById(id).isDefined

  def rollback(modId: ModifierId): Unit = levelDb.rollbackTo(LevelDBVersion @@ modId.untag(ModifierId))

  def updateWallet(modifierId: ModifierId, newBxs: Seq[EncryBaseBox], spentBxs: Seq[EncryBaseBox],
                   intrinsicTokenId: ADKey): Unit = {
    val bxsToInsert: Seq[EncryBaseBox] = newBxs.filter(bx => !spentBxs.contains(bx))
    val newBalances: Map[(String, String), Amount] = {
      val toRemoveFromBalance = BalanceCalculator.balanceSheet(spentBxs, intrinsicTokenId)
        .map { case ((hash, key), value) => (hash, ByteStr(key)) -> value * -1 }
      val toAddToBalance = BalanceCalculator.balanceSheet(newBxs, intrinsicTokenId)
        .map { case ((hash, key), value) => (hash, ByteStr(key)) -> value }
      val prevBalance = getBalances.map { case ((hash, id), value) => (hash, ByteStr(Algos.decode(id).get)) -> value }
      (toAddToBalance |+| toRemoveFromBalance |+| prevBalance).map { case ((hash, tokenId), value) => (hash, tokenId.toString) -> value }
    }
    val newBalanceKeyValue = BALANCE_KEY -> VersionalLevelDbValue @@
      newBalances.foldLeft(Array.emptyByteArray) { case (acc, ((hash, tokenId), balance)) =>
        acc ++ Algos.decode(hash).get ++ Algos.decode(tokenId).get ++ Longs.toByteArray(balance)
      }
    levelDb.insert(LevelDbDiff(LevelDBVersion @@ modifierId.untag(ModifierId),
      newBalanceKeyValue :: bxsToInsert.map(bx => (VersionalLevelDbKey @@ bx.id.untag(ADKey),
        VersionalLevelDbValue @@ bx.bytes)).toList,
      spentBxs.map(elem => VersionalLevelDbKey @@ elem.id.untag(ADKey)))
    )
  }

  def getBalances: Map[(String, String), Amount] =
    levelDb.get(BALANCE_KEY)
      .map(_.sliding(72, 72)
        .map(ch => (Algos.encode(ch.take(32)), Algos.encode(ch.slice(32, 64))) -> Longs.fromByteArray(ch.takeRight(8)))
        .toMap).getOrElse(Map.empty)

  override def close(): Unit = levelDb.close()
}

object WalletVersionalLevelDBCompanion extends StrictLogging {

  val BALANCE_KEY: VersionalLevelDbKey =
    VersionalLevelDbKey @@ Algos.hash("BALANCE_KEY").untag(Digest32)

  val INIT_MAP: Map[VersionalLevelDbKey, VersionalLevelDbValue] = Map(
    BALANCE_KEY -> VersionalLevelDbValue @@ Array.emptyByteArray
  )

  def apply(levelDb: DB, settings: LevelDBSettings): WalletVersionalLevelDB = {
    val db = WalletVersionalLevelDB(levelDb, settings)
    db.levelDb.recoverOrInit(INIT_MAP)
    db
  }
} 
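
A minimal update-and-rollback sketch, with `modId`, `newBoxes`, `spentBoxes`, `intrinsicTokenId` and `levelDBSettings` assumed: the companion's apply also seeds the empty BALANCE_KEY entry on first start.

val wallet = WalletVersionalLevelDBCompanion(db, levelDBSettings)
wallet.updateWallet(modId, newBoxes, spentBoxes, intrinsicTokenId)
val balances = wallet.getBalances // (contractHash, tokenId) -> amount, both keys hex-encoded
wallet.rollback(modId)            // undoes the whole update if the block is reverted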
Example 41
Source File: SnapshotDownloadControllerStorageAPITests.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.view.fast.sync

import encry.settings.EncryAppSettings
import encry.storage.levelDb.versionalLevelDB.LevelDbFactory
import encry.utils.FileHelper
import encry.view.fast.sync.SnapshotHolder.SnapshotManifest.ChunkId
import org.iq80.leveldb.{ DB, Options }
import org.scalatest.{ Matchers, WordSpecLike }
import scorex.utils.Random

class SnapshotDownloadControllerStorageAPITests extends WordSpecLike with Matchers {

  val settingsR: EncryAppSettings = EncryAppSettings.read()

  def init: SnapshotDownloadControllerStorageAPI = new SnapshotDownloadControllerStorageAPI {
    override val storage: DB                = LevelDbFactory.factory.open(FileHelper.getRandomTempDir, new Options)
    override val settings: EncryAppSettings = settingsR
  }

  "Inside SnapshotDownloadControllerStorageAPI class" should {
    "insert many should insert all ids correctly / split for groups with correct size" in {
      val api: SnapshotDownloadControllerStorageAPI = init
      val randomIds: List[ChunkId]                  = (1 to 20001).map(_ => Random.randomBytes()).toList.map(ChunkId @@ _)
      val groups                                    = randomIds.grouped(settingsR.snapshotSettings.chunksNumberPerRequestWhileFastSyncMod).toList
      val insertionsResult                          = api.insertMany(groups)
      insertionsResult.isRight shouldBe true
    }
    "get next for request should return batch if such exists / remove returned batch" in {
      val api: SnapshotDownloadControllerStorageAPI = init
      val randomIds: List[ChunkId]                  = (1 to 5000).map(_ => Random.randomBytes()).toList.map(ChunkId @@ _)
      val groups                                    = randomIds.grouped(settingsR.snapshotSettings.chunksNumberPerRequestWhileFastSyncMod).toList
      val _                                         = api.insertMany(groups)
      val groupsL                                   = randomIds.grouped(settingsR.snapshotSettings.chunksNumberPerRequestWhileFastSyncMod).toList
      (0 until groupsL.size).foreach { r =>
        val res = api.getNextForRequest(r)
        api.getNextForRequest(r).isLeft shouldBe true
        res.isRight shouldBe true
        res.right.get.nonEmpty shouldBe true
        res.right.get.head.sameElements(groupsL(r).head) shouldBe true
        res.right.get.forall(j => groupsL(r).exists(_.sameElements(j))) shouldBe true
        groupsL(r).forall(j => res.right.get.exists(_.sameElements(j))) shouldBe true
      }
    }
  }
} 
Example 42
Source File: RootNodesStorageTest.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.storage

import java.io.File

import encry.view.state.avlTree.utils.implicits.Instances._
import encry.modifiers.InstanceFactory
import encry.storage.VersionalStorage.{StorageKey, StorageValue, StorageVersion}
import encry.storage.levelDb.versionalLevelDB.{LevelDbFactory, VLDBWrapper, VersionalLevelDBCompanion}
import encry.utils.{EncryGenerator, FileHelper}
import encry.view.state.avlTree.AvlTree
import org.encryfoundation.common.utils.Algos
import org.encryfoundation.common.utils.TaggedTypes.Height
import org.iq80.leveldb.{DB, Options, ReadOptions}
import org.scalatest.{Matchers, PropSpec}
import scorex.utils.Random

import scala.util.{Random => SRandom}

class RootNodesStorageTest extends PropSpec with InstanceFactory with EncryGenerator with Matchers {

  def createAvl: AvlTree[StorageKey, StorageValue] = {
    val firstDir: File = FileHelper.getRandomTempDir
    val firstStorage: VLDBWrapper = {
      val levelDBInit = LevelDbFactory.factory.open(firstDir, new Options)
      VLDBWrapper(VersionalLevelDBCompanion(levelDBInit, settings.levelDB.copy(keySize = 33), keySize = 33))
    }
    AvlTree[StorageKey, StorageValue](firstStorage, RootNodesStorage.emptyRootStorage[StorageKey, StorageValue])
  }

  property("testRollback") {
    val avl: AvlTree[StorageKey, StorageValue] = createAvl
    val dir: File = FileHelper.getRandomTempDir
    val levelDb: DB = LevelDbFactory.factory.open(dir, new Options)
    val rootNodesStorage = RootNodesStorage[StorageKey, StorageValue](levelDb, 10, dir)
    val (_, avlAfterInsertions, insertList) =
      (0 to SRandom.nextInt(1000) + 10).foldLeft(rootNodesStorage, avl, List.empty[(Height, (List[(StorageKey, StorageValue)], List[StorageKey]))]) {
      case ((rootStorage, previousAvl, insertionList), height) =>
        val version = StorageVersion @@ Random.randomBytes()
        val toInsert = (0 to SRandom.nextInt(100)).foldLeft(List.empty[(StorageKey, StorageValue)]) {
          case (list, _) => (StorageKey @@ Random.randomBytes() -> StorageValue @@ Random.randomBytes()) :: list
        }
        val previousInsertions = insertionList.lastOption.map(_._2._1).getOrElse(List.empty[(StorageKey, StorageValue)])
        val deletions = previousInsertions.take(1).map(_._1)
        val newAvl = previousAvl.insertAndDeleteMany(
          version,
          toInsert,
          deletions
        )
        val newRootStorage = rootStorage.insert(
          version,
          newAvl.rootNode,
          Height @@ height
        )
        (newRootStorage, newAvl, insertionList :+ (Height @@ height -> (toInsert -> deletions)))
    }
    val (_, rootNodeRestored) = rootNodesStorage.rollbackToSafePoint(insertList.dropWhile(_._1 != rootNodesStorage.safePointHeight).drop(1))
    (avlAfterInsertions.rootNode.hash sameElements rootNodeRestored.hash) shouldBe true
  }
}