Java Code Examples for org.rocksdb.WriteOptions

The following examples show how to use org.rocksdb.WriteOptions. They are extracted from open source projects; where available, the source project, file, and license are noted above each example.
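Before working through the examples, here is a minimal sketch of the typical WriteOptions lifecycle: configure durability knobs such as setSync and setDisableWAL, pass the options to a write call, and close the handle when done (WriteOptions wraps a native C++ object, so close it explicitly or with try-with-resources). This sketch is illustrative and not taken from any project below; the class name, database path, and key/value payloads are placeholders.

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteOptions;

public class WriteOptionsQuickstart {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    // try-with-resources closes all three native handles in reverse order
    try (Options options = new Options().setCreateIfMissing(true);
         RocksDB db = RocksDB.open(options, "/tmp/writeoptions-demo"); // placeholder path
         WriteOptions writeOptions = new WriteOptions()
             .setSync(false)           // true = fsync the WAL on every write: durable but slower
             .setDisableWAL(false)) {  // true = skip the WAL entirely: fast but not crash-safe
      db.put(writeOptions, "key".getBytes(), "value".getBytes());
    }
  }
}

The recurring trade-off in the examples below is durability versus throughput: setSync(true) where writes must survive a process crash (as in the sofa-jraft log storage example), setDisableWAL(true) where state can be rebuilt from another source (as in the Flink and jelectrum examples).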
Example 1
Source Project: hadoop-ozone   Source File: DBStoreBuilder.java    License: Apache License 2.0
/**
 * Builds a DBStore instance and returns it.
 *
 * @return DBStore
 */
public DBStore build() throws IOException {
  if (StringUtil.isBlank(dbname) || (dbPath == null)) {
    LOG.error("Required parameter missing.");
    throw new IOException("Required parameter is missing. Please make sure "
        + "both the DB path and DB name are provided.");
  }
  processDBProfile();
  processTables();
  DBOptions options = getDbProfile();

  WriteOptions writeOptions = new WriteOptions();
  writeOptions.setSync(rocksDBConfiguration.getSyncOption());

  File dbFile = getDBFile();
  if (!dbFile.getParentFile().exists()) {
    throw new IOException("The DB destination directory should exist.");
  }
  return new RDBStore(dbFile, options, writeOptions, tables, registry);
}
 
Example 2
Source Project: Flink-CEPplus   Source File: RocksDBResource.java    License: Apache License 2.0
@Override
protected void before() throws Throwable {
	this.temporaryFolder = new TemporaryFolder();
	this.temporaryFolder.create();
	final File rocksFolder = temporaryFolder.newFolder();
	this.dbOptions = optionsFactory.createDBOptions(PredefinedOptions.DEFAULT.createDBOptions()).
		setCreateIfMissing(true);
	this.columnFamilyOptions = optionsFactory.createColumnOptions(PredefinedOptions.DEFAULT.createColumnOptions());
	this.writeOptions = new WriteOptions();
	this.writeOptions.disableWAL();
	this.readOptions = new ReadOptions();
	this.columnFamilyHandles = new ArrayList<>(1);
	this.rocksDB = RocksDB.open(
		dbOptions,
		rocksFolder.getAbsolutePath(),
		Collections.singletonList(new ColumnFamilyDescriptor("default".getBytes(), columnFamilyOptions)),
		columnFamilyHandles);
	this.batchWrapper = new RocksDBWriteBatchWrapper(rocksDB, writeOptions);
}
 
Example 3
Source Project: WeCross   Source File: RocksDBBlockHeaderStorage.java    License: Apache License 2.0
@Override
public void writeBlockHeader(long blockNumber, byte[] blockHeader) {
    if (dbClosed) {
        logger.warn("Write RocksDB error: RocksDB has been closed");
        return;
    }

    String key = blockKeyPrefix + String.valueOf(blockNumber);

    try (WriteBatch writeBatch = new WriteBatch();
            WriteOptions writeOptions = new WriteOptions()) {
        writeBatch.put(numberKey.getBytes(), String.valueOf(blockNumber).getBytes());
        writeBatch.put(key.getBytes(), blockHeader);

        // try-with-resources closes the native WriteBatch and WriteOptions handles
        rocksDB.write(writeOptions, writeBatch);
        onBlockHeader(blockNumber, blockHeader);
    } catch (RocksDBException e) {
        logger.error("RocksDB write error", e);
    }
}
 
Example 4
Source Project: geowave   Source File: RocksDBClient.java    License: Apache License 2.0
public synchronized RocksDBIndexTable getIndexTable(
    final String tableName,
    final short adapterId,
    final byte[] partition,
    final boolean requiresTimestamp) {
  if (indexWriteOptions == null) {
    RocksDB.loadLibrary();
    final int cores = Runtime.getRuntime().availableProcessors();
    indexWriteOptions =
        new Options().setCreateIfMissing(true).prepareForBulkLoad().setIncreaseParallelism(cores);
    indexReadOptions = new Options().setIncreaseParallelism(cores);
    batchWriteOptions =
        new WriteOptions().setDisableWAL(false).setNoSlowdown(false).setSync(false);
  }
  final String directory = subDirectory + "/" + tableName;
  return indexTableCache.get(
      (IndexCacheKey) keyCache.get(
          directory,
          d -> new IndexCacheKey(d, adapterId, partition, requiresTimestamp)));
}
 
Example 5
Source Project: geowave   Source File: RocksDBClient.java    License: Apache License 2.0
public synchronized RocksDBDataIndexTable getDataIndexTable(
    final String tableName,
    final short adapterId) {
  if (indexWriteOptions == null) {
    RocksDB.loadLibrary();
    final int cores = Runtime.getRuntime().availableProcessors();
    indexWriteOptions =
        new Options().setCreateIfMissing(true).prepareForBulkLoad().setIncreaseParallelism(cores);
    indexReadOptions = new Options().setIncreaseParallelism(cores);
    batchWriteOptions =
        new WriteOptions().setDisableWAL(false).setNoSlowdown(false).setSync(false);
  }
  final String directory = subDirectory + "/" + tableName;
  return dataIndexTableCache.get(
      (DataIndexCacheKey) keyCache.get(directory, d -> new DataIndexCacheKey(d, adapterId)));
}
 
Example 6
Source Project: flink   Source File: RocksDBPerformanceTest.java    License: Apache License 2.0
@Before
public void init() throws IOException {
	rocksDir = tmp.newFolder();

	// ensure the RocksDB library is loaded to a distinct location each retry
	NativeLibraryLoader.getInstance().loadLibrary(rocksDir.getAbsolutePath());

	options = new Options()
			.setCompactionStyle(CompactionStyle.LEVEL)
			.setLevelCompactionDynamicLevelBytes(true)
			.setIncreaseParallelism(4)
			.setUseFsync(false)
			.setMaxOpenFiles(-1)
			.setCreateIfMissing(true)
			.setMergeOperatorName(RocksDBKeyedStateBackend.MERGE_OPERATOR_NAME);

	writeOptions = new WriteOptions()
			.setSync(false)
			.setDisableWAL(true);
}
 
Example 7
Source Project: flink   Source File: RocksDBResource.java    License: Apache License 2.0
@Override
protected void before() throws Throwable {
	this.temporaryFolder = new TemporaryFolder();
	this.temporaryFolder.create();
	final File rocksFolder = temporaryFolder.newFolder();
	this.dbOptions = optionsFactory.createDBOptions(PredefinedOptions.DEFAULT.createDBOptions()).
		setCreateIfMissing(true);
	this.columnFamilyOptions = optionsFactory.createColumnOptions(PredefinedOptions.DEFAULT.createColumnOptions());
	this.writeOptions = new WriteOptions();
	this.writeOptions.disableWAL();
	this.readOptions = new ReadOptions();
	this.columnFamilyHandles = new ArrayList<>(1);
	this.rocksDB = RocksDB.open(
		dbOptions,
		rocksFolder.getAbsolutePath(),
		Collections.singletonList(new ColumnFamilyDescriptor("default".getBytes(), columnFamilyOptions)),
		columnFamilyHandles);
	this.batchWrapper = new RocksDBWriteBatchWrapper(rocksDB, writeOptions);
}
 
Example 8
private void updateByBatchInner(Map<byte[], byte[]> rows, WriteOptions options)
        throws Exception {
    if (quitIfNotAlive()) {
        return;
    }
    try (WriteBatch batch = new WriteBatch()) {
        for (Map.Entry<byte[], byte[]> entry : rows.entrySet()) {
            if (entry.getValue() == null) {
                batch.delete(entry.getKey());
            } else {
                batch.put(entry.getKey(), entry.getValue());
            }
        }
        // use the caller-supplied WriteOptions instead of leaking a new instance
        database.write(options, batch);
    }
}
 
Example 9
Source Project: beam   Source File: SamzaTimerInternalsFactoryTest.java    License: Apache License 2.0
private static KeyValueStore<ByteArray, byte[]> createStore(String name) {
  final Options options = new Options();
  options.setCreateIfMissing(true);

  RocksDbKeyValueStore rocksStore =
      new RocksDbKeyValueStore(
          new File(System.getProperty("java.io.tmpdir") + "/" + name),
          options,
          new MapConfig(),
          false,
          "beamStore",
          new WriteOptions(),
          new FlushOptions(),
          new KeyValueStoreMetrics("beamStore", new MetricsRegistryMap()));

  return new SerializedKeyValueStore<>(
      rocksStore,
      new ByteArraySerdeFactory.ByteArraySerde(),
      new ByteSerde(),
      new SerializedKeyValueStoreMetrics("beamStore", new MetricsRegistryMap()));
}
 
Example 10
Source Project: biomedicus   Source File: RocksDBSenseVectors.java    License: Apache License 2.0
@Override
public void removeWord(int index) {
  try (WriteBatch writeBatch = new WriteBatch()) {
    try (RocksIterator rocksIterator = rocksDB.newIterator()) {
      rocksIterator.seekToFirst();
      while (rocksIterator.isValid()) {
        SparseVector sparseVector = new SparseVector(rocksIterator.value());
        sparseVector.remove(index);
        writeBatch.put(rocksIterator.key(), sparseVector.toBytes());
        // advance the iterator; without next() this loop never terminates
        rocksIterator.next();
      }
    }
    // close the short-lived WriteOptions handle after the batch is written
    try (WriteOptions writeOptions = new WriteOptions()) {
      rocksDB.write(writeOptions, writeBatch);
    }
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  }
}
 
Example 11
Source Project: biomedicus   Source File: RocksDBSenseVectors.java    License: Apache License 2.0
@Override
public void removeWords(Collection<Integer> indexes) {
  try (WriteBatch writeBatch = new WriteBatch()) {
    try (RocksIterator rocksIterator = rocksDB.newIterator()) {
      rocksIterator.seekToFirst();
      while (rocksIterator.isValid()) {
        SparseVector sparseVector = new SparseVector(rocksIterator.value());
        sparseVector.removeAll(indexes);
        writeBatch.put(rocksIterator.key(), sparseVector.toBytes());
        // advance the iterator; without next() this loop never terminates
        rocksIterator.next();
      }
    }
    // close the short-lived WriteOptions handle after the batch is written
    try (WriteOptions writeOptions = new WriteOptions()) {
      rocksDB.write(writeOptions, writeBatch);
    }
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  }
}
 
Example 12
Source Project: geowave   Source File: RocksDBDataIndexTable.java    License: Apache License 2.0
public RocksDBDataIndexTable(
    final Options writeOptions,
    final Options readOptions,
    final WriteOptions batchWriteOptions,
    final String subDirectory,
    final short adapterId,
    final boolean visibilityEnabled,
    final boolean compactOnWrite,
    final int batchSize) {
  super(
      writeOptions,
      readOptions,
      batchWriteOptions,
      subDirectory,
      adapterId,
      visibilityEnabled,
      compactOnWrite,
      batchSize);
}
 
Example 13
Source Project: flink   Source File: RocksDBWriteBatchWrapperTest.java    License: Apache License 2.0
/**
 * Tests that {@link RocksDBWriteBatchWrapper} flushes after the memory consumed exceeds the preconfigured value.
 */
@Test
public void testWriteBatchWrapperFlushAfterMemorySizeExceed() throws Exception {
	try (RocksDB db = RocksDB.open(folder.newFolder().getAbsolutePath());
		WriteOptions options = new WriteOptions().setDisableWAL(true);
		ColumnFamilyHandle handle = db.createColumnFamily(new ColumnFamilyDescriptor("test".getBytes()));
		RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(db, options, 200, 50)) {

		long initBatchSize = writeBatchWrapper.getDataSize();
		byte[] dummy = new byte[6];
		ThreadLocalRandom.current().nextBytes(dummy);
		// will add 1 + 1 + 1 + 6 + 1 + 6 = 16 bytes for each KV
		// format is [handleType|kvType|keyLen|key|valueLen|value]
		// for more information, see write_batch.cc in RocksDB
		writeBatchWrapper.put(handle, dummy, dummy);
		assertEquals(initBatchSize + 16, writeBatchWrapper.getDataSize());
		writeBatchWrapper.put(handle, dummy, dummy);
		assertEquals(initBatchSize + 32, writeBatchWrapper.getDataSize());
		writeBatchWrapper.put(handle, dummy, dummy);
		// the third put exceeds the configured size limit, so the wrapper flushes and the batch is empty again
		assertEquals(initBatchSize, writeBatchWrapper.getDataSize());
	}
}
 
Example 14
Source Project: flink   Source File: RocksDBWriteBatchWrapperTest.java    License: Apache License 2.0
/**
 * Tests that {@link RocksDBWriteBatchWrapper} flushes after the kv count exceeds the preconfigured value.
 */
@Test
public void testWriteBatchWrapperFlushAfterCountExceed() throws Exception {
	try (RocksDB db = RocksDB.open(folder.newFolder().getAbsolutePath());
		WriteOptions options = new WriteOptions().setDisableWAL(true);
		ColumnFamilyHandle handle = db.createColumnFamily(new ColumnFamilyDescriptor("test".getBytes()));
		RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(db, options, 100, 50000)) {
		long initBatchSize = writeBatchWrapper.getDataSize();
		byte[] dummy = new byte[2];
		ThreadLocalRandom.current().nextBytes(dummy);
		for (int i = 1; i < 100; ++i) {
			writeBatchWrapper.put(handle, dummy, dummy);
			// each kv consumes 8 bytes
			assertEquals(initBatchSize + 8 * i, writeBatchWrapper.getDataSize());
		}
		writeBatchWrapper.put(handle, dummy, dummy);
		assertEquals(initBatchSize, writeBatchWrapper.getDataSize());
	}
}
 
Example 15
Source Project: geowave   Source File: RocksDBIndexTable.java    License: Apache License 2.0
public RocksDBIndexTable(
    final Options writeOptions,
    final Options readOptions,
    final WriteOptions batchWriteOptions,
    final String subDirectory,
    final short adapterId,
    final byte[] partition,
    final boolean requiresTimestamp,
    final boolean visibilityEnabled,
    final boolean compactOnWrite,
    final int batchSize) {
  super(
      writeOptions,
      readOptions,
      batchWriteOptions,
      subDirectory,
      adapterId,
      visibilityEnabled,
      compactOnWrite,
      batchSize);
  this.requiresTimestamp = requiresTimestamp;
  this.partition = partition;
}
 
Example 16
Source Project: geowave   Source File: AbstractRocksDBTable.java    License: Apache License 2.0
public AbstractRocksDBTable(
    final Options writeOptions,
    final Options readOptions,
    final WriteOptions batchWriteOptions,
    final String subDirectory,
    final short adapterId,
    final boolean visibilityEnabled,
    final boolean compactOnWrite,
    final int batchSize) {
  super();
  this.writeOptions = writeOptions;
  this.readOptions = readOptions;
  this.batchWriteOptions = batchWriteOptions;
  this.subDirectory = subDirectory;
  this.adapterId = adapterId;
  exists = new File(subDirectory).exists();
  this.visibilityEnabled = visibilityEnabled;
  this.compactOnWrite = compactOnWrite;
  this.batchSize = batchSize;
  batchWrite = batchSize > 1;
}
 
Example 17
Source Project: hadoop-ozone   Source File: RocksDBStore.java    License: Apache License 2.0
public RocksDBStore(File dbFile, Options options) throws IOException {
  Preconditions.checkNotNull(dbFile, "DB file location cannot be null");
  RocksDB.loadLibrary();
  dbOptions = options;
  dbLocation = dbFile;
  writeOptions = new WriteOptions();
  try {
    db = RocksDB.open(dbOptions, dbLocation.getAbsolutePath());
    if (dbOptions.statistics() != null) {
      Map<String, String> jmxProperties = new HashMap<String, String>();
      jmxProperties.put("dbName", dbFile.getName());
      statMBeanName = HddsUtils.registerWithJmxProperties(
          "Ozone", "RocksDbStore", jmxProperties,
          RocksDBStoreMBean.create(dbOptions.statistics(), dbFile.getName()));
      if (statMBeanName == null) {
        LOG.warn("jmx registration failed during RocksDB init, db path :{}",
            dbFile.getAbsolutePath());
      }
    }
  } catch (RocksDBException e) {
    String msg = "Failed init RocksDB, db path : " + dbFile.getAbsolutePath()
        + ", " + "exception :" + (e.getCause() == null ?
        e.getClass().getCanonicalName() + " " + e.getMessage() :
        e.getCause().getClass().getCanonicalName() + " " +
            e.getCause().getMessage());
    throw new IOException(msg, e);
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("RocksDB successfully opened.");
    LOG.debug("[Option] dbLocation= {}", dbLocation.getAbsolutePath());
    LOG.debug("[Option] createIfMissing = {}", options.createIfMissing());
    LOG.debug("[Option] compactionPriority= {}", options.compactionStyle());
    LOG.debug("[Option] compressionType= {}", options.compressionType());
    LOG.debug("[Option] maxOpenFiles= {}", options.maxOpenFiles());
    LOG.debug("[Option] writeBufferSize= {}", options.writeBufferSize());
  }
}
 
Example 18
Source Project: hadoop-ozone   Source File: RDBBatchOperation.java    License: Apache License 2.0
public void commit(RocksDB db, WriteOptions writeOptions) throws IOException {
  try {
    db.write(writeOptions, writeBatch);
  } catch (RocksDBException e) {
    throw new IOException("Unable to write the batch.", e);
  }
}
 
Example 19
Source Project: jelectrum   Source File: JRocksDB.java    License: MIT License
public JRocksDB(Config config, EventLog log)
  throws Exception
{
  super(config);

  this.log = log;

  config.require("rocksdb_path");

  String path = config.get("rocksdb_path");

  RocksDB.loadLibrary();
  Options options = new Options();

  options.setIncreaseParallelism(16);
  options.setCreateIfMissing(true);
  options.setAllowMmapReads(true);
  //options.setAllowMmapWrites(true);

  sharedWriteOptions = new WriteOptions();
  sharedWriteOptions.setDisableWAL(true);
  sharedWriteOptions.setSync(false);

  db = RocksDB.open(options, path);

  open();
}
 
Example 20
Source Project: hadoop-ozone   Source File: RDBTable.java    License: Apache License 2.0
/**
 * Constructs a TableStore.
 *
 * @param db - RocksDB instance that we are using.
 * @param handle - ColumnFamily Handle.
 * @param writeOptions - RocksDB write Options.
 * @param rdbMetrics - RDB metrics.
 */
RDBTable(RocksDB db, ColumnFamilyHandle handle,
    WriteOptions writeOptions, RDBMetrics rdbMetrics) {
  this.db = db;
  this.handle = handle;
  this.writeOptions = writeOptions;
  this.rdbMetrics = rdbMetrics;
}
 
Example 21
/**
 * Gets delta updates from OM through an RPC call, applies them to the local
 * OM DB, and accumulates them in a buffer.
 * @param fromSequenceNumber from sequence number to request from.
 * @param omdbUpdatesHandler OM DB updates handler to buffer updates.
 * @throws IOException when OM RPC request fails.
 * @throws RocksDBException when writing to RocksDB fails.
 */
@VisibleForTesting
void getAndApplyDeltaUpdatesFromOM(
    long fromSequenceNumber, OMDBUpdatesHandler omdbUpdatesHandler)
    throws IOException, RocksDBException {
  DBUpdatesRequest dbUpdatesRequest = DBUpdatesRequest.newBuilder()
      .setSequenceNumber(fromSequenceNumber).build();
  DBUpdates dbUpdates = ozoneManagerClient.getDBUpdates(dbUpdatesRequest);
  if (null != dbUpdates) {
    RDBStore rocksDBStore = (RDBStore) omMetadataManager.getStore();
    RocksDB rocksDB = rocksDBStore.getDb();
    int numUpdates = dbUpdates.getData().size();
    LOG.info("Number of updates received from OM : {}", numUpdates);
    if (numUpdates > 0) {
      metrics.incrNumUpdatesInDeltaTotal(numUpdates);
    }
    for (byte[] data : dbUpdates.getData()) {
      try (WriteBatch writeBatch = new WriteBatch(data)) {
        writeBatch.iterate(omdbUpdatesHandler);
        try (RDBBatchOperation rdbBatchOperation =
                 new RDBBatchOperation(writeBatch)) {
          try (WriteOptions wOpts = new WriteOptions()) {
            rdbBatchOperation.commit(rocksDB, wOpts);
          }
        }
      }
    }
  }
}
 
Example 22
Source Project: besu   Source File: RocksDBColumnarKeyValueStorage.java    License: Apache License 2.0
@Override
public Transaction<ColumnFamilyHandle> startTransaction() throws StorageException {
  throwIfClosed();
  final WriteOptions options = new WriteOptions();
  return new SegmentedKeyValueStorageTransactionTransitionValidatorDecorator<>(
      new RocksDbTransaction(db.beginTransaction(options), options));
}
 
Example 23
Source Project: besu   Source File: RocksDBKeyValueStorage.java    License: Apache License 2.0
@Override
public KeyValueStorageTransaction startTransaction() throws StorageException {
  throwIfClosed();
  final WriteOptions options = new WriteOptions();
  return new KeyValueStorageTransactionTransitionValidatorDecorator(
      new RocksDBTransaction(db.beginTransaction(options), options, rocksDBMetrics));
}
 
Example 24
Source Project: Flink-CEPplus   Source File: RocksDBWriteBatchWrapper.java    License: Apache License 2.0
public RocksDBWriteBatchWrapper(@Nonnull RocksDB rocksDB, @Nullable WriteOptions options, int capacity) {
	Preconditions.checkArgument(capacity >= MIN_CAPACITY && capacity <= MAX_CAPACITY,
		"capacity should be between " + MIN_CAPACITY + " and " + MAX_CAPACITY);

	this.db = rocksDB;
	this.options = options;
	this.capacity = capacity;
	this.batch = new WriteBatch(this.capacity * PER_RECORD_BYTES);
}
 
Example 25
Source Project: Flink-CEPplus   Source File: RocksDBWriteBatchWrapper.java    License: Apache License 2.0
public void flush() throws RocksDBException {
	if (options != null) {
		db.write(options, batch);
	} else {
		// use the default WriteOptions if none was provided.
		try (WriteOptions writeOptions = new WriteOptions()) {
			db.write(writeOptions, batch);
		}
	}
	batch.clear();
}
 
Example 26
Source Project: jstorm   Source File: RocksDbHdfsState.java    License: Apache License 2.0
@Override
public void putBatch(Map<K, V> batch) {
    // try-with-resources releases the native WriteBatch and WriteOptions handles
    try (WriteBatch writeBatch = new WriteBatch();
            WriteOptions writeOptions = new WriteOptions()) {
        for (Map.Entry<K, V> entry : batch.entrySet()) {
            writeBatch.put(serializer.serialize(entry.getKey()), serializer.serialize(entry.getValue()));
        }
        rocksDb.write(writeOptions, writeBatch);
    } catch (RocksDBException e) {
        LOG.error("Failed to put batch={}", batch);
        // preserve the original exception as the cause, not just its message
        throw new RuntimeException(e);
    }
}
 
Example 27
Source Project: sofa-jraft   Source File: RocksDBLogStorage.java    License: Apache License 2.0
@Override
public boolean init(final LogStorageOptions opts) {
    Requires.requireNonNull(opts.getConfigurationManager(), "Null conf manager");
    Requires.requireNonNull(opts.getLogEntryCodecFactory(), "Null log entry codec factory");
    this.writeLock.lock();
    try {
        if (this.db != null) {
            LOG.warn("RocksDBLogStorage init() already.");
            return true;
        }
        this.logEntryDecoder = opts.getLogEntryCodecFactory().decoder();
        this.logEntryEncoder = opts.getLogEntryCodecFactory().encoder();
        Requires.requireNonNull(this.logEntryDecoder, "Null log entry decoder");
        Requires.requireNonNull(this.logEntryEncoder, "Null log entry encoder");
        this.dbOptions = createDBOptions();
        if (this.openStatistics) {
            this.statistics = new DebugStatistics();
            this.dbOptions.setStatistics(this.statistics);
        }

        this.writeOptions = new WriteOptions();
        this.writeOptions.setSync(this.sync);
        this.totalOrderReadOptions = new ReadOptions();
        this.totalOrderReadOptions.setTotalOrderSeek(true);

        return initAndLoad(opts.getConfigurationManager());
    } catch (final RocksDBException e) {
        LOG.error("Fail to init RocksDBLogStorage, path={}.", this.path, e);
        return false;
    } finally {
        this.writeLock.unlock();
    }
}
 
Example 28
Source Project: flink   Source File: RocksDBWriteBatchWrapper.java    License: Apache License 2.0
public RocksDBWriteBatchWrapper(@Nonnull RocksDB rocksDB, @Nullable WriteOptions options, int capacity) {
	Preconditions.checkArgument(capacity >= MIN_CAPACITY && capacity <= MAX_CAPACITY,
		"capacity should be between " + MIN_CAPACITY + " and " + MAX_CAPACITY);

	this.db = rocksDB;
	this.options = options;
	this.capacity = capacity;
	this.batch = new WriteBatch(this.capacity * PER_RECORD_BYTES);
}
 
Example 29
Source Project: flink   Source File: RocksDBWriteBatchWrapper.java    License: Apache License 2.0
public void flush() throws RocksDBException {
	if (options != null) {
		db.write(options, batch);
	} else {
		// use the default WriteOptions if none was provided.
		try (WriteOptions writeOptions = new WriteOptions()) {
			db.write(writeOptions, batch);
		}
	}
	batch.clear();
}