org.rocksdb.Options Java Examples

The following examples show how to use org.rocksdb.Options. Each example is taken from an open-source project; its source file and license are noted above it.
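Most of the snippets below share one pattern: load the native library, configure an Options instance, and open the database. Options and RocksDB both wrap native handles, so they should be closed when no longer needed (try-with-resources, as several of the examples do). A minimal sketch for orientation; the path is illustrative:

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

RocksDB.loadLibrary();
try (final Options options = new Options().setCreateIfMissing(true);
     final RocksDB db = RocksDB.open(options, "/tmp/options-demo")) {
    db.put("key".getBytes(), "value".getBytes());
    byte[] value = db.get("key".getBytes()); // null if the key is absent
} catch (RocksDBException e) {
    throw new RuntimeException(e);
}
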
Example #1
Source File: RocksDBStdSessions.java    From hugegraph with Apache License 2.0
public RocksDBStdSessions(HugeConfig config, String database, String store,
                          String dataPath, String walPath)
                          throws RocksDBException {
    super(config, database, store);

    // Init options
    Options options = new Options();
    RocksDBStdSessions.initOptions(config, options, options,
                                   options, options);
    options.setWalDir(walPath);

    this.sstFileManager = new SstFileManager(Env.getDefault());
    options.setSstFileManager(this.sstFileManager);

    /*
     * Open RocksDB for the first time.
     * Don't merge old CFs; we expect a clean DB when using this constructor.
     */
    this.rocksdb = RocksDB.open(options, dataPath);

    this.cfs = new ConcurrentHashMap<>();
    this.refCount = new AtomicInteger(1);
}
 
Example #2
Source File: TestKeyValueContainer.java    From hadoop-ozone with Apache License 2.0
@Test
public void testRocksDBCreateUsesCachedOptions() throws Exception {
  int initialSize = MetadataStoreBuilder.CACHED_OPTS.size();

  // Create Container 1
  keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
  Assert.assertTrue("Rocks DB options should be cached.",
      MetadataStoreBuilder.CACHED_OPTS.containsKey(conf));

  Options opts = MetadataStoreBuilder.CACHED_OPTS.get(conf);

  // Create Container 2
  keyValueContainerData = new KeyValueContainerData(2L,
      layout,
      (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
      datanodeId.toString());

  keyValueContainer = new KeyValueContainer(keyValueContainerData, conf);
  keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);

  assertEquals(initialSize + 1, MetadataStoreBuilder.CACHED_OPTS.size());
  Options cachedOpts = MetadataStoreBuilder.CACHED_OPTS.get(conf);
  assertSame("Cache object should not be updated.", opts, cachedOpts);
}
 
Example #3
Source File: RocksDBPerformanceTest.java    From flink with Apache License 2.0
@Before
public void init() throws IOException {
	rocksDir = tmp.newFolder();

	// ensure the RocksDB library is loaded to a distinct location each retry
	NativeLibraryLoader.getInstance().loadLibrary(rocksDir.getAbsolutePath());

	options = new Options()
			.setCompactionStyle(CompactionStyle.LEVEL)
			.setLevelCompactionDynamicLevelBytes(true)
			.setIncreaseParallelism(4)
			.setUseFsync(false)
			.setMaxOpenFiles(-1)
			.setCreateIfMissing(true)
			.setMergeOperatorName(RocksDBKeyedStateBackend.MERGE_OPERATOR_NAME);

	writeOptions = new WriteOptions()
			.setSync(false)
			.setDisableWAL(true);
}
 
Example #4
Source File: SamzaTimerInternalsFactoryTest.java    From beam with Apache License 2.0
private static KeyValueStore<ByteArray, byte[]> createStore(String name) {
  final Options options = new Options();
  options.setCreateIfMissing(true);

  RocksDbKeyValueStore rocksStore =
      new RocksDbKeyValueStore(
          new File(System.getProperty("java.io.tmpdir") + "/" + name),
          options,
          new MapConfig(),
          false,
          "beamStore",
          new WriteOptions(),
          new FlushOptions(),
          new KeyValueStoreMetrics("beamStore", new MetricsRegistryMap()));

  return new SerializedKeyValueStore<>(
      rocksStore,
      new ByteArraySerdeFactory.ByteArraySerde(),
      new ByteSerde(),
      new SerializedKeyValueStoreMetrics("beamStore", new MetricsRegistryMap()));
}
 
Example #5
Source File: EzRocksDbTable.java    From ezdb with Apache License 2.0
public EzRocksDbTable(File path, EzRocksDbFactory factory,
		Serde<H> hashKeySerde, Serde<R> rangeKeySerde, Serde<V> valueSerde,
		Comparator<byte[]> hashKeyComparator,
		Comparator<byte[]> rangeKeyComparator) {
	this.hashKeySerde = hashKeySerde;
	this.rangeKeySerde = rangeKeySerde;
	this.valueSerde = valueSerde;
	this.hashKeyComparator = hashKeyComparator;
	this.rangeKeyComparator = rangeKeyComparator;

	this.options = new Options();

	options.setCreateIfMissing(true);
	options.setComparator(new EzRocksDbComparator(hashKeyComparator,
			rangeKeyComparator));
	
	try {
		this.db = factory.open(path, options);
	} catch (IOException e) {
		throw new DbException(e);
	}
}
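Here ordering is delegated to a custom EzRocksDbComparator. When plain byte order is enough, RocksDB's built-in comparators avoid the JNI round-trip of a Java-side comparator; a hedged alternative sketch:

Options options = new Options()
        .setCreateIfMissing(true)
        .setComparator(BuiltinComparator.BYTEWISE_COMPARATOR); // or REVERSE_BYTEWISE_COMPARATOR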
 
Example #6
Source File: RocksDBBlockHeaderStorageFactory.java    From WeCross with Apache License 2.0
@Override
public BlockHeaderStorage newBlockHeaderStorage(String path) {
    RocksDBBlockHeaderStorage rocksDBBlockHeaderStorage = new RocksDBBlockHeaderStorage();
    Options options = new Options();
    options.setCreateIfMissing(true);
    options.setCreateMissingColumnFamilies(true);

    String dbPath = basePath + "/" + path;
    try {
        File dir = new File(dbPath);
        if (!dir.exists()) {
            dir.mkdirs();
        } else {
            if (!dir.isDirectory()) {
                logger.error("File {} exists and isn't dir", dbPath);
            }
        }

        RocksDB rocksDB = RocksDB.open(options, dbPath);
        rocksDBBlockHeaderStorage.setRocksDB(rocksDB);
    } catch (RocksDBException e) {
        logger.error("RocksDB open failed", e);
    }
    return rocksDBBlockHeaderStorage;
}
 
Example #7
Source File: TestRocksDbKeyValueReader.java    From samza with Apache License 2.0
@BeforeClass
public static void createRocksDb() throws IOException, RocksDBException {
  if (Files.exists(dirPath)) {
    removeRecursiveDirectory(dirPath);
  }
  Files.createDirectories(dirPath);
  Options options = new Options().setCreateIfMissing(true);
  db = RocksDB.open(options, dirPath.toString());
  db.put("testString".getBytes(), "this is string".getBytes());
  db.put(ByteBuffer.allocate(4).putInt(123).array(), ByteBuffer.allocate(4).putInt(456).array());
}
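For reference, the values written above can be read back with db.get(...) using the same byte encodings; a short sketch (not part of the original test; db.get also throws RocksDBException):

byte[] str = db.get("testString".getBytes());                    // "this is string"
byte[] raw = db.get(ByteBuffer.allocate(4).putInt(123).array());
int num = ByteBuffer.wrap(raw).getInt();                         // 456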
 
Example #8
Source File: EzRocksDbJniFactory.java    From ezdb with Apache License 2.0
@Override
public RocksDB open(File path, Options options) throws IOException {
	try {
		return RocksDB.open(options, path.getAbsolutePath());
	} catch (RocksDBException e) {
		throw new IOException(e);
	}
}
 
Example #9
Source File: RocksDbDataStoreFactory.java    From biomedicus with Apache License 2.0
@Override
public KnownWordsDataStore createKnownWordsDataStore(int id) {
  RocksDB.loadLibrary();
  try (Options options = new Options().setCreateIfMissing(true).prepareForBulkLoad()) {
    Files.createDirectories(dbPath);
    RocksDB rocksDB = RocksDB.open(options, dbPath.resolve(getWordsName(id)).toString());
    rocksDBS.add(rocksDB);
    RocksDB candidatesDB = RocksDB.open(options, dbPath.resolve(getCandidatesName(id)).toString());
    rocksDBS.add(candidatesDB);

    return new RocksDbKnownWordsDataStore(rocksDB, candidatesDB);
  } catch (RocksDBException | IOException e) {
    throw new RuntimeException(e);
  }
}
 
Example #10
Source File: RocksDBSegmentStore.java    From kylin with Apache License 2.0
@Override
public void init() {
    Options options = getOptions().setCreateIfMissing(true);
    try {
        String dataPath = dataSegmentFolder.getAbsolutePath() + "/data";
        db = RocksDB.open(options, dataPath);
    } catch (RocksDBException e) {
        logger.error("init rocks db fail");
    }
}
 
Example #11
Source File: RocksDbCacheOperator.java    From jstorm with Apache License 2.0
public RocksDbCacheOperator(TopologyContext context, String cacheDir) {
    this.stormConf = context.getStormConf();

    this.maxFlushSize = ConfigExtension.getTransactionCacheBatchFlushSize(stormConf);

    Options rocksDbOpt = new Options();
    rocksDbOpt.setCreateMissingColumnFamilies(true).setCreateIfMissing(true);
    long bufferSize =
            ConfigExtension.getTransactionCacheBlockSize(stormConf) != null ? ConfigExtension.getTransactionCacheBlockSize(stormConf) : (1 * SizeUnit.GB);
    rocksDbOpt.setWriteBufferSize(bufferSize);
    int maxBufferNum = ConfigExtension.getTransactionMaxCacheBlockNum(stormConf) != null ? ConfigExtension.getTransactionMaxCacheBlockNum(stormConf) : 3;
    rocksDbOpt.setMaxWriteBufferNumber(maxBufferNum);

    // Config for log of RocksDb
    rocksDbOpt.setMaxLogFileSize(1073741824); // 1G
    rocksDbOpt.setKeepLogFileNum(1);
    rocksDbOpt.setInfoLogLevel(InfoLogLevel.WARN_LEVEL);
    
    try {
        Map<Object, Object> conf = new HashMap<Object, Object>();
        conf.put(ROCKSDB_ROOT_DIR, cacheDir);
        conf.put(ROCKSDB_RESET, true);
        initDir(conf);
        initDb(null, rocksDbOpt);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    kryo = new Kryo();
    output = new Output(200, 2000000000);
    input = new Input(1);

    LOG.info("Finished rocksDb cache init: maxFlushSize={}, bufferSize={}, maxBufferNum={}", maxFlushSize, bufferSize, maxBufferNum);
}
 
Example #12
Source File: BackupEngineTest.java    From DDMQ with Apache License 2.0
@Test
public void backupDb() throws RocksDBException {
    // String originPath = dbFolder.getRoot().getAbsolutePath();
    // String backupPath = backupFolder.getRoot().getAbsolutePath();
    String originPath = "/tmp/rocksdb";
    String backupPath = "/tmp/rocksdb_backup";
    System.out.println("originPath=" + originPath);
    System.out.println("backupPath=" + backupPath);
    // Open empty database.
    try (final Options opt = new Options().setCreateIfMissing(true);
         final RocksDB db = RocksDB.open(opt, originPath)) {

        // Fill database with some test values
        prepareDatabase(db);

        try (RocksIterator it = db.newIterator()) {
            for (it.seekToFirst(); it.isValid(); it.next()) {
                System.out.println(originPath + ":" + new String(it.key()) + ":" + new String(it.value()));
            }
        }

        // Create two backups
        try (final BackupableDBOptions bopt = new BackupableDBOptions(backupPath);
             final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
            be.createNewBackup(db, false);
            be.createNewBackup(db, true);
            verifyNumberOfValidBackups(be, 2);
        }
    }
}
 
Example #13
Source File: RocksDBStore.java    From hadoop-ozone with Apache License 2.0
public RocksDBStore(File dbFile, Options options) throws IOException {
  Preconditions.checkNotNull(dbFile, "DB file location cannot be null");
  RocksDB.loadLibrary();
  dbOptions = options;
  dbLocation = dbFile;
  writeOptions = new WriteOptions();
  try {
    db = RocksDB.open(dbOptions, dbLocation.getAbsolutePath());
    if (dbOptions.statistics() != null) {
      Map<String, String> jmxProperties = new HashMap<String, String>();
      jmxProperties.put("dbName", dbFile.getName());
      statMBeanName = HddsUtils.registerWithJmxProperties(
          "Ozone", "RocksDbStore", jmxProperties,
          RocksDBStoreMBean.create(dbOptions.statistics(), dbFile.getName()));
      if (statMBeanName == null) {
        LOG.warn("jmx registration failed during RocksDB init, db path :{}",
            dbFile.getAbsolutePath());
      }
    }
  } catch (RocksDBException e) {
    String msg = "Failed init RocksDB, db path : " + dbFile.getAbsolutePath()
        + ", " + "exception :" + (e.getCause() == null ?
        e.getClass().getCanonicalName() + " " + e.getMessage() :
        e.getCause().getClass().getCanonicalName() + " " +
            e.getCause().getMessage());
    throw new IOException(msg, e);
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("RocksDB successfully opened.");
    LOG.debug("[Option] dbLocation= {}", dbLocation.getAbsolutePath());
    LOG.debug("[Option] createIfMissing = {}", options.createIfMissing());
    LOG.debug("[Option] compactionPriority= {}", options.compactionStyle());
    LOG.debug("[Option] compressionType= {}", options.compressionType());
    LOG.debug("[Option] maxOpenFiles= {}", options.maxOpenFiles());
    LOG.debug("[Option] writeBufferSize= {}", options.writeBufferSize());
  }
}
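The statistics branch above only runs when the caller enabled statistics on the passed-in Options. A hedged sketch of such a caller (the path is illustrative, and how statistics are enabled depends on the rocksdbjni version):

Options options = new Options().setCreateIfMissing(true);
options.setStatistics(new Statistics()); // newer rocksdbjni; older versions call options.createStatistics()
RocksDBStore store = new RocksDBStore(new File("/tmp/ozone-meta"), options);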
 
Example #14
Source File: EzRocksDb.java    From ezdb with Apache License 2.0
@Override
public void deleteTable(String tableName) {
  try {
    synchronized (cache) {
      cache.remove(tableName);
      factory.destroy(getFile(tableName), new Options());
    }
  } catch (IOException e) {
    throw new DbException(e);
  }
}
 
Example #15
Source File: RocksDBClient.java    From geowave with Apache License 2.0
public synchronized RocksDBMetadataTable getMetadataTable(final MetadataType type) {
  if (metadataOptions == null) {
    RocksDB.loadLibrary();
    metadataOptions = new Options().setCreateIfMissing(true).optimizeForSmallDb();
  }
  final String directory = subDirectory + "/" + type.name();
  return metadataTableCache.get(
      keyCache.get(directory, d -> new CacheKey(d, type.equals(MetadataType.STATS))));
}
 
Example #16
Source File: DataStore.java    From outbackcdx with Apache License 2.0
private void configureColumnFamily(Options cfOptions) throws RocksDBException {
    BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
    tableConfig.setBlockSize(22 * 1024); // approximately compresses to < 8 kB

    cfOptions.setCompactionStyle(CompactionStyle.LEVEL);
    cfOptions.setWriteBufferSize(64 * 1024 * 1024);
    cfOptions.setTargetFileSizeBase(64 * 1024 * 1024);
    cfOptions.setMaxBytesForLevelBase(512 * 1024 * 1024);
    cfOptions.setTargetFileSizeMultiplier(2);
    cfOptions.setCompressionType(CompressionType.SNAPPY_COMPRESSION);
    cfOptions.setTableFormatConfig(tableConfig);
}
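In the RocksDB Java API, Options also implements the column-family options interface, which is why it can be configured this way here. When a store is opened with explicit column families, the same tuning goes on a ColumnFamilyOptions instead; a hedged sketch (the path is illustrative):

final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()
        .setCompactionStyle(CompactionStyle.LEVEL)
        .setWriteBufferSize(64 * 1024 * 1024)
        .setCompressionType(CompressionType.SNAPPY_COMPRESSION);
final List<ColumnFamilyDescriptor> descriptors = Arrays.asList(
        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOptions));
final List<ColumnFamilyHandle> handles = new ArrayList<>();
try (final DBOptions dbOptions = new DBOptions().setCreateIfMissing(true);
     final RocksDB db = RocksDB.open(dbOptions, "/tmp/cf-demo", descriptors, handles)) {
    // handles.get(0) addresses the default column family
} catch (RocksDBException e) {
    throw new RuntimeException(e);
}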
 
Example #17
Source File: RocksDbDataStoreFactory.java    From biomedicus with Apache License 2.0
@Override
public SuffixDataStore createSuffixDataStore(int id) {
  RocksDB.loadLibrary();
  try (Options options = new Options().setCreateIfMissing(true).prepareForBulkLoad()) {
    Files.createDirectories(dbPath);
    RocksDB rocksDB = RocksDB.open(options, dbPath.resolve(getSuffixesName(id)).toString());
    rocksDBS.add(rocksDB);

    return new RocksDbSuffixDataStore(rocksDB);
  } catch (RocksDBException | IOException e) {
    throw new RuntimeException(e);
  }
}
 
Example #18
Source File: JRocksDB.java    From jelectrum with MIT License
public JRocksDB(Config config, EventLog log)
  throws Exception
{
  super(config);

  this.log = log;

  config.require("rocksdb_path");

  String path = config.get("rocksdb_path");

  RocksDB.loadLibrary();
  Options options = new Options();

  options.setIncreaseParallelism(16);
  options.setCreateIfMissing(true);
  options.setAllowMmapReads(true);
  //options.setAllowMmapWrites(true);

  sharedWriteOptions = new WriteOptions();
  sharedWriteOptions.setDisableWAL(true);
  sharedWriteOptions.setSync(false);

  db = RocksDB.open(options, path);

  open();
}
 
Example #19
Source File: RocksDBNormalizerModel.java    From biomedicus with Apache License 2.0
RocksDBNormalizerModel(Path dbPath) {
  RocksDB.loadLibrary();

  try (Options options = new Options().setInfoLogLevel(InfoLogLevel.ERROR_LEVEL)) {
    db = RocksDB.openReadOnly(options, dbPath.toString());
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  }
}
 
Example #20
Source File: BackupEngineTest.java    From DDMQ with Apache License 2.0
@Test
public void deleteBackup() throws RocksDBException {
    // Open empty database.
    try (final Options opt = new Options().setCreateIfMissing(true);
         final RocksDB db = RocksDB.open(opt,
                 dbFolder.getRoot().getAbsolutePath())) {
        // Fill database with some test values
        prepareDatabase(db);
        // Create two backups
        try (final BackupableDBOptions bopt = new BackupableDBOptions(
                backupFolder.getRoot().getAbsolutePath());
             final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
            be.createNewBackup(db, false);
            be.createNewBackup(db, true);
            final List<BackupInfo> backupInfo =
                    verifyNumberOfValidBackups(be, 2);
            // Delete the first backup
            be.deleteBackup(backupInfo.get(0).backupId());
            final List<BackupInfo> newBackupInfo =
                    verifyNumberOfValidBackups(be, 1);

            // The second backup must remain.
            assertThat(newBackupInfo.get(0).backupId()).
                    isEqualTo(backupInfo.get(1).backupId());
        }
    }
}
 
Example #21
Source File: RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter.java    From kcache with Apache License 2.0
@Override
public Options prepareForBulkLoad() {
    /* From https://github.com/facebook/rocksdb/wiki/RocksDB-FAQ
     *
     * Q: What's the fastest way to load data into RocksDB?
     *
     * A: A fast way to direct insert data to the DB:
     *
     *  1. using single writer thread and insert in sorted order
     *  2. batch hundreds of keys into one write batch
     *  3. use vector memtable
     *  4. make sure options.max_background_flushes is at least 4
     *  5. before inserting the data,
     *       disable automatic compaction,
     *       set options.level0_file_num_compaction_trigger,
     *           options.level0_slowdown_writes_trigger
     *           and options.level0_stop_writes_trigger to very large.
     *     After inserting all the data, issue a manual compaction.
     *
     * 3-5 are done automatically when you call Options::PrepareForBulkLoad() on your Options
     */
    // (1) not in our control
    // (2) is done via bulk-loading API
    // (3) skipped because it is not done in the actual PrepareForBulkLoad() code in https://github.com/facebook/rocksdb/blob/master/options/options.cc
    //columnFamilyOptions.setMemTableConfig(new VectorMemTableConfig());
    // (4-5) below:
    dbOptions.setMaxBackgroundFlushes(4);
    columnFamilyOptions.setDisableAutoCompactions(true);
    columnFamilyOptions.setLevel0FileNumCompactionTrigger(1 << 30);
    columnFamilyOptions.setLevel0SlowdownWritesTrigger(1 << 30);
    columnFamilyOptions.setLevel0StopWritesTrigger(1 << 30);
    return this;
}
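On a plain Options (no DBOptions/ColumnFamilyOptions split), the stock helper covers the same ground in one call; a hedged sketch that also issues the manual compaction the FAQ recommends after the load (the path is illustrative):

try (final Options options = new Options().setCreateIfMissing(true).prepareForBulkLoad();
     final RocksDB db = RocksDB.open(options, "/tmp/bulk-load-demo")) {
    // ... sorted, batched inserts ...
    db.compactRange(); // manual compaction once loading is done
} catch (RocksDBException e) {
    throw new RuntimeException(e);
}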
 
Example #22
Source File: TestRocksDbKeyValueStoreJava.java    From samza with Apache License 2.0
@Test
public void testIterate() throws Exception {
  Config config = new MapConfig();
  Options options = new Options();
  options.setCreateIfMissing(true);

  File dbDir = new File(System.getProperty("java.io.tmpdir") + "/dbStore" + System.currentTimeMillis());
  RocksDbKeyValueStore store = new RocksDbKeyValueStore(dbDir, options, config, false, "dbStore",
      new WriteOptions(), new FlushOptions(), new KeyValueStoreMetrics("dbStore", new MetricsRegistryMap()));

  ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
  String prefix = "prefix";
  for (int i = 0; i < 100; i++) {
    store.put(genKey(outputStream, prefix, i), genValue());
  }

  byte[] firstKey = genKey(outputStream, prefix, 0);
  byte[] lastKey = genKey(outputStream, prefix, 1000);
  KeyValueSnapshot<byte[], byte[]> snapshot = store.snapshot(firstKey, lastKey);
  // Make sure the cached Iterable won't change when new elements are added
  store.put(genKey(outputStream, prefix, 200), genValue());
  KeyValueIterator<byte[], byte[]> iterator = snapshot.iterator();
  assertTrue(Iterators.size(iterator) == 100);
  iterator.close();
  List<Integer> keys = new ArrayList<>();
  KeyValueIterator<byte[], byte[]> iterator2 = snapshot.iterator();
  while (iterator2.hasNext()) {
    Entry<byte[], byte[]> entry = iterator2.next();
    int key = Ints.fromByteArray(Arrays.copyOfRange(entry.getKey(), prefix.getBytes().length, entry.getKey().length));
    keys.add(key);
  }
  assertEquals(keys, IntStream.rangeClosed(0, 99).boxed().collect(Collectors.toList()));
  iterator2.close();

  outputStream.close();
  snapshot.close();
  store.close();
}
 
Example #23
Source File: RocksDBStdSessions.java    From hugegraph with Apache License 2.0
public static Set<String> listCFs(String path) throws RocksDBException {
    Set<String> cfs = new HashSet<>();

    List<byte[]> oldCFs = RocksDB.listColumnFamilies(new Options(), path);
    if (oldCFs.isEmpty()) {
        cfs.add("default");
    } else {
        for (byte[] oldCF : oldCFs) {
            cfs.add(decode(oldCF));
        }
    }
    return cfs;
}
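The returned names are exactly what the column-family variant of RocksDB.open(...) expects; a hedged sketch reusing the method above (the path and name encoding are illustrative):

final List<ColumnFamilyDescriptor> descriptors = new ArrayList<>();
for (String cf : listCFs("/tmp/example-db")) {
    descriptors.add(new ColumnFamilyDescriptor(cf.getBytes()));
}
final List<ColumnFamilyHandle> handles = new ArrayList<>();
try (final DBOptions dbOptions = new DBOptions();
     final RocksDB db = RocksDB.open(dbOptions, "/tmp/example-db", descriptors, handles)) {
    // handles line up with descriptors, one per column family
} catch (RocksDBException e) {
    throw new RuntimeException(e);
}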
 
Example #24
Source File: RocksDBLookupTable.java    From kylin-on-parquet-v2 with Apache License 2.0
public RocksDBLookupTable(TableDesc tableDesc, String[] keyColumns, String dbPath) {
    this.options = new Options();
    this.rowEncoder = new RocksDBLookupRowEncoder(tableDesc, keyColumns);
    try {
        this.rocksDB = RocksDB.openReadOnly(options, dbPath);
    } catch (RocksDBException e) {
        throw new IllegalStateException("cannot open rocks db in path:" + dbPath, e);
    }
}
 
Example #25
Source File: RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter.java    From kcache with Apache License 2.0
@Override
public Options setEnableThreadTracking(final boolean enableThreadTracking) {
    dbOptions.setEnableThreadTracking(enableThreadTracking);
    return this;
}
 
Example #26
Source File: RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter.java    From kcache with Apache License 2.0
@Override
public Options setUseFsync(final boolean useFsync) {
    dbOptions.setUseFsync(useFsync);
    return this;
}
 
Example #27
Source File: RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter.java    From kcache with Apache License 2.0
@Override
public Options setRandomAccessMaxBufferSize(final long randomAccessMaxBufferSize) {
    dbOptions.setRandomAccessMaxBufferSize(randomAccessMaxBufferSize);
    return this;
}
 
Example #28
Source File: RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter.java    From kcache with Apache License 2.0
@Override
public Options setWriteBufferManager(final WriteBufferManager writeBufferManager) {
    dbOptions.setWriteBufferManager(writeBufferManager);
    return this;
}
 
Example #29
Source File: RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter.java    From kcache with Apache License 2.0
@Override
public Options setCompactionReadaheadSize(final long compactionReadaheadSize) {
    dbOptions.setCompactionReadaheadSize(compactionReadaheadSize);
    return this;
}