org.rocksdb.util.SizeUnit Java Examples

The following examples show how to use org.rocksdb.util.SizeUnit. Each example is drawn from an open-source project; the source file, project, and license are noted above each snippet.
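
For orientation: org.rocksdb.util.SizeUnit is a small holder class exposing long constants for common byte multiples (KB, MB, GB, TB, PB). A minimal sketch (not from any of the projects below) of the arithmetic the examples rely on:

import org.rocksdb.util.SizeUnit;

public class SizeUnitDemo {
    public static void main(String[] args) {
        System.out.println(SizeUnit.KB); // 1024
        System.out.println(SizeUnit.MB); // 1048576
        System.out.println(SizeUnit.GB); // 1073741824
        // Typical usage: scale a human-readable size into a byte count.
        long blockCacheBytes = 512 * SizeUnit.MB; // 536870912 bytes
        System.out.println(blockCacheBytes);
    }
}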
Example #1
Source File: StorageOptionsFactory.java    From sofa-jraft with Apache License 2.0
public static BlockBasedTableConfig getDefaultRocksDBTableConfig() {
    // See https://github.com/sofastack/sofa-jraft/pull/156
    return new BlockBasedTableConfig() //
        // Begin to use partitioned index filters
        // https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters#how-to-use-it
        .setIndexType(IndexType.kTwoLevelIndexSearch) //
        .setFilter(new BloomFilter(16, false)) //
        .setPartitionFilters(true) //
        .setMetadataBlockSize(8 * SizeUnit.KB) //
        .setCacheIndexAndFilterBlocks(false) //
        .setCacheIndexAndFilterBlocksWithHighPriority(true) //
        .setPinL0FilterAndIndexBlocksInCache(true) //
        // End of partitioned index filters settings.
        .setBlockSize(4 * SizeUnit.KB)//
        .setBlockCacheSize(512 * SizeUnit.MB) //
        .setCacheNumShardBits(8);
}
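
A minimal usage sketch (illustrative, not part of sofa-jraft): a table config like the one above is attached to a RocksDB Options instance via setTableFormatConfig. The database path here is hypothetical.

static void openWithDefaultTableConfig() throws RocksDBException {
    RocksDB.loadLibrary();
    try (Options options = new Options()
            .setCreateIfMissing(true)
            .setTableFormatConfig(getDefaultRocksDBTableConfig());
         RocksDB db = RocksDB.open(options, "/tmp/example-db")) { // hypothetical path
        db.put("key".getBytes(), "value".getBytes());
    }
}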
 
Example #2
Source File: RocksDBManager.java    From nuls-v2 with MIT License
/**
 * Gets the common database Options.
 *
 * @param createIfMissing whether to create the database if it does not exist
 * @return the common database Options
 */
private static synchronized Options getCommonOptions(final boolean createIfMissing) {
    Options options = new Options();

    options.setCreateIfMissing(createIfMissing);
    // Options to optimize read performance
    options.setAllowMmapReads(true);
    options.setCompressionType(CompressionType.NO_COMPRESSION);
    options.setMaxOpenFiles(-1);
    BlockBasedTableConfig tableOption = new BlockBasedTableConfig();
    tableOption.setNoBlockCache(true);
    tableOption.setBlockRestartInterval(4);
    tableOption.setFilterPolicy(new BloomFilter(10, true));
    options.setTableFormatConfig(tableOption);

    options.setMaxBackgroundCompactions(16);
    options.setNewTableReaderForCompactionInputs(true);
    // Enable RocksDB-level readahead for compaction inputs
    options.setCompactionReadaheadSize(128 * SizeUnit.KB);

    return options;
}
 
Example #3
Source File: TableConfig.java    From KitDB with Apache License 2.0
private static BlockBasedTableConfig createTableConfig() {
    return new BlockBasedTableConfig() //
            .setBlockSize(4 * SizeUnit.KB) //
            .setFilter(new BloomFilter(16, false)) //
            .setCacheIndexAndFilterBlocks(true) //
            .setBlockCacheSize(128 * SizeUnit.MB) //
            .setCacheNumShardBits(8);
}
 
Example #4
Source File: RocksDbCacheOperator.java    From jstorm with Apache License 2.0
public RocksDbCacheOperator(TopologyContext context, String cacheDir) {
    this.stormConf = context.getStormConf();

    this.maxFlushSize = ConfigExtension.getTransactionCacheBatchFlushSize(stormConf);

    Options rocksDbOpt = new Options();
    rocksDbOpt.setCreateMissingColumnFamilies(true).setCreateIfMissing(true);
    long bufferSize =
            ConfigExtension.getTransactionCacheBlockSize(stormConf) != null ? ConfigExtension.getTransactionCacheBlockSize(stormConf) : (1 * SizeUnit.GB);
    rocksDbOpt.setWriteBufferSize(bufferSize);
    int maxBufferNum = ConfigExtension.getTransactionMaxCacheBlockNum(stormConf) != null ? ConfigExtension.getTransactionMaxCacheBlockNum(stormConf) : 3;
    rocksDbOpt.setMaxWriteBufferNumber(maxBufferNum);

    // Configure RocksDB logging
    rocksDbOpt.setMaxLogFileSize(SizeUnit.GB); // 1 GB
    rocksDbOpt.setKeepLogFileNum(1);
    rocksDbOpt.setInfoLogLevel(InfoLogLevel.WARN_LEVEL);
    
    try {
        Map<Object, Object> conf = new HashMap<Object, Object>();
        conf.put(ROCKSDB_ROOT_DIR, cacheDir);
        conf.put(ROCKSDB_RESET, true);
        initDir(conf);
        initDb(null, rocksDbOpt);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    kryo = new Kryo();
    output = new Output(200, 2000000000);
    input = new Input(1);

    LOG.info("Finished rocksDb cache init: maxFlushSize={}, bufferSize={}, maxBufferNum={}", maxFlushSize, bufferSize, maxBufferNum);
}
 
Example #5
Source File: RocksDbOptionsFactory.java    From jstorm with Apache License 2.0
@Override
public ColumnFamilyOptions createColumnFamilyOptions(ColumnFamilyOptions currentOptions) {
    if (currentOptions == null)
        currentOptions = new ColumnFamilyOptions();
    currentOptions.setMergeOperator(new StringAppendOperator());

    BlockBasedTableConfig tableOptions = new BlockBasedTableConfig();
    tableOptions.setBlockSize(32 * SizeUnit.KB);

    // Set memory table size
    currentOptions.setMaxWriteBufferNumber(4);
    currentOptions.setWriteBufferSize(64 * SizeUnit.MB);

    // Set block cache size
    tableOptions.setBlockCacheSize(64 * SizeUnit.MB);
    tableOptions.setFilter(new BloomFilter(DEFAULT_BLOOM_FILTER_BITS, false));
    // Put all index into block cache
    tableOptions.setCacheIndexAndFilterBlocks(true);
    /*
    tableOptions.setIndexType(IndexType.kHashSearch);
    tableOptions.setWholeKeyFiltering(false);*/

    /*
    currentOptions.useFixedLengthPrefixExtractor(Integer.SIZE / Byte.SIZE * 2);
    currentOptions.setMemtablePrefixBloomBits(10000000);
    currentOptions.setMemtablePrefixBloomProbes(6);
    */

    currentOptions.setTableFormatConfig(tableOptions);

    currentOptions.setTargetFileSizeBase(64 * SizeUnit.MB);
    currentOptions.setCompactionStyle(CompactionStyle.LEVEL);
    currentOptions.setLevelZeroFileNumCompactionTrigger(4);
    currentOptions.setLevelZeroSlowdownWritesTrigger(20);
    currentOptions.setLevelZeroStopWritesTrigger(30);
    currentOptions.setNumLevels(4);
    currentOptions.setMaxBytesForLevelBase(64 * 4 * SizeUnit.MB);
    return currentOptions;
}
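
A side note on the sizing above (an inference, not stated in the source): maxBytesForLevelBase is 64 * 4 * SizeUnit.MB = 256 MB, which equals the configured memtable budget of maxWriteBufferNumber(4) × writeBufferSize(64 MB), so level 1 is presumably sized to absorb a full set of flushed write buffers.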
 
Example #6
Source File: RocksdbFailStore.java    From light-task-scheduler with Apache License 2.0
@Override
protected void init() throws FailStoreException {
    try {
        options = new Options();
        options.setCreateIfMissing(true)
                .setWriteBufferSize(8 * SizeUnit.KB)
                .setMaxWriteBufferNumber(3)
                .setMaxBackgroundCompactions(10)
                .setCompressionType(CompressionType.SNAPPY_COMPRESSION)
                .setCompactionStyle(CompactionStyle.UNIVERSAL);

        Filter bloomFilter = new BloomFilter(10);
        BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
        tableConfig.setBlockCacheSize(64 * SizeUnit.KB)
                .setFilter(bloomFilter)
                .setCacheNumShardBits(6)
                .setBlockSizeDeviation(5)
                .setBlockRestartInterval(10)
                .setCacheIndexAndFilterBlocks(true)
                .setHashIndexAllowCollision(false)
                .setBlockCacheCompressedSize(64 * SizeUnit.KB)
                .setBlockCacheCompressedNumShardBits(10);

        options.setTableFormatConfig(tableConfig);
    } catch (Exception e) {
        throw new FailStoreException(e);
    }
}
 
Example #7
Source File: RocksDBStateBackendConfigTest.java    From flink with Apache License 2.0
@Test
public void testSetConfigurableOptions() throws Exception  {
	DefaultConfigurableOptionsFactory customizedOptions = new DefaultConfigurableOptionsFactory()
		.setMaxBackgroundThreads(4)
		.setMaxOpenFiles(-1)
		.setCompactionStyle(CompactionStyle.LEVEL)
		.setUseDynamicLevelSize(true)
		.setTargetFileSizeBase("4MB")
		.setMaxSizeLevelBase("128 mb")
		.setWriteBufferSize("128 MB")
		.setMaxWriteBufferNumber(4)
		.setMinWriteBufferNumberToMerge(3)
		.setBlockSize("64KB")
		.setBlockCacheSize("512mb");

	try (RocksDBResourceContainer optionsContainer =
			new RocksDBResourceContainer(PredefinedOptions.DEFAULT, customizedOptions)) {

		DBOptions dbOptions = optionsContainer.getDbOptions();
		assertEquals(-1, dbOptions.maxOpenFiles());

		ColumnFamilyOptions columnOptions = optionsContainer.getColumnOptions();
		assertEquals(CompactionStyle.LEVEL, columnOptions.compactionStyle());
		assertTrue(columnOptions.levelCompactionDynamicLevelBytes());
		assertEquals(4 * SizeUnit.MB, columnOptions.targetFileSizeBase());
		assertEquals(128 * SizeUnit.MB, columnOptions.maxBytesForLevelBase());
		assertEquals(4, columnOptions.maxWriteBufferNumber());
		assertEquals(3, columnOptions.minWriteBufferNumberToMerge());

		BlockBasedTableConfig tableConfig = (BlockBasedTableConfig) columnOptions.tableFormatConfig();
		assertEquals(64 * SizeUnit.KB, tableConfig.blockSize());
		assertEquals(512 * SizeUnit.MB, tableConfig.blockCacheSize());
	}
}
 
Example #8
Source File: LocalDictionaryStore.java    From kylin with Apache License 2.0
public void init(String[] cfs) throws Exception {
    logger.debug("Checking streaming dict local store for {} at {}.", cubeName, String.join(", ", cfs));
    if (!dictPath.exists() && !dictPath.mkdirs()) {
        logger.warn("Create {} failed.", dictPath);
    }
    // These options may be naive; they should be improved in the future.
    try (DBOptions options = new DBOptions()
            .setCreateIfMissing(true)
            .setCreateMissingColumnFamilies(true)
            .setMaxBackgroundCompactions(5)
            .setWritableFileMaxBufferSize(400 * SizeUnit.KB)) {
        String dataPath = dictPath.getAbsolutePath() + "/data";
        List<ColumnFamilyDescriptor> columnFamilyDescriptorList = new ArrayList<>();
        List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>(); // to be filled in by RocksDB.open()
        for (String family : cfs) {
            ColumnFamilyDescriptor columnFamilyDescriptor = new ColumnFamilyDescriptor(
                    family.getBytes(StandardCharsets.UTF_8));
            columnFamilyDescriptorList.add(columnFamilyDescriptor);
        }
        logger.debug("Try to open rocksdb {}.", dataPath);
        db = RocksDB.open(options, dataPath, columnFamilyDescriptorList, columnFamilyHandleList);
        Preconditions.checkNotNull(db, "RocksDB could not be created.");
        for (int i = 0; i < columnFamilyHandleList.size(); i++) {
            columnFamilyHandleMap.put(new ByteArray(cfs[i].getBytes(StandardCharsets.UTF_8)),
                    columnFamilyHandleList.get(i));
        }
    } catch (Exception e) {
        logger.error("Init rocks db failed.", e);
        throw e;
    }
    logger.debug("Init local dict succeed.");
}
 
Example #9
Source File: RocksDBLookupBuilder.java    From kylin with Apache License 2.0
public RocksDBLookupBuilder(TableDesc tableDesc, String[] keyColumns, String dbPath) {
    this.tableDesc = tableDesc;
    this.encoder = new RocksDBLookupRowEncoder(tableDesc, keyColumns);
    this.dbPath = dbPath;
    this.writeBatchSize = 500;
    this.options = new Options();
    options.setCreateIfMissing(true).setWriteBufferSize(8 * SizeUnit.KB).setMaxWriteBufferNumber(3)
            .setMaxBackgroundCompactions(5).setCompressionType(CompressionType.SNAPPY_COMPRESSION)
            .setCompactionStyle(CompactionStyle.UNIVERSAL);

}
 
Example #10
Source File: RocksDBStateBackendConfigTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testSetConfigurableOptions() throws Exception  {
	String checkpointPath = tempFolder.newFolder().toURI().toString();
	RocksDBStateBackend rocksDbBackend = new RocksDBStateBackend(checkpointPath);

	assertNull(rocksDbBackend.getOptions());

	DefaultConfigurableOptionsFactory customizedOptions = new DefaultConfigurableOptionsFactory()
		.setMaxBackgroundThreads(4)
		.setMaxOpenFiles(-1)
		.setCompactionStyle(CompactionStyle.LEVEL)
		.setUseDynamicLevelSize(true)
		.setTargetFileSizeBase("4MB")
		.setMaxSizeLevelBase("128 mb")
		.setWriteBufferSize("128 MB")
		.setMaxWriteBufferNumber(4)
		.setMinWriteBufferNumberToMerge(3)
		.setBlockSize("64KB")
		.setBlockCacheSize("512mb");

	rocksDbBackend.setOptions(customizedOptions);

	try (DBOptions dbOptions = rocksDbBackend.getDbOptions()) {
		assertEquals(-1, dbOptions.maxOpenFiles());
	}

	try (ColumnFamilyOptions columnOptions = rocksDbBackend.getColumnOptions()) {
		assertEquals(CompactionStyle.LEVEL, columnOptions.compactionStyle());
		assertTrue(columnOptions.levelCompactionDynamicLevelBytes());
		assertEquals(4 * SizeUnit.MB, columnOptions.targetFileSizeBase());
		assertEquals(128 * SizeUnit.MB, columnOptions.maxBytesForLevelBase());
		assertEquals(4, columnOptions.maxWriteBufferNumber());
		assertEquals(3, columnOptions.minWriteBufferNumberToMerge());

		BlockBasedTableConfig tableConfig = (BlockBasedTableConfig) columnOptions.tableFormatConfig();
		assertEquals(64 * SizeUnit.KB, tableConfig.blockSize());
		assertEquals(512 * SizeUnit.MB, tableConfig.blockCacheSize());
	}
}
 
Example #11
Source File: LocalDictionaryStore.java    From kylin-on-parquet-v2 with Apache License 2.0
public void init(String[] cfs) throws Exception {
    logger.debug("Checking streaming dict local store for {} at {}.", cubeName, String.join(", ", cfs));
    if (!dictPath.exists() && !dictPath.mkdirs()) {
        logger.warn("Create {} failed.", dictPath);
    }
    // These options may be naive; they should be improved in the future.
    try (DBOptions options = new DBOptions()
            .setCreateIfMissing(true)
            .setCreateMissingColumnFamilies(true)
            .setMaxBackgroundCompactions(5)
            .setWritableFileMaxBufferSize(400 * SizeUnit.KB)) {
        String dataPath = dictPath.getAbsolutePath() + "/data";
        List<ColumnFamilyDescriptor> columnFamilyDescriptorList = new ArrayList<>();
        List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>(); // to be filled in by RocksDB.open()
        for (String family : cfs) {
            ColumnFamilyDescriptor columnFamilyDescriptor = new ColumnFamilyDescriptor(
                    family.getBytes(StandardCharsets.UTF_8));
            columnFamilyDescriptorList.add(columnFamilyDescriptor);
        }
        logger.debug("Try to open rocksdb {}.", dataPath);
        db = RocksDB.open(options, dataPath, columnFamilyDescriptorList, columnFamilyHandleList);
        Preconditions.checkNotNull(db, "RocksDB could not be created.");
        for (int i = 0; i < columnFamilyHandleList.size(); i++) {
            columnFamilyHandleMap.put(new ByteArray(cfs[i].getBytes(StandardCharsets.UTF_8)),
                    columnFamilyHandleList.get(i));
        }
    } catch (Exception e) {
        logger.error("Init rocks db failed.", e);
        throw e;
    }
    logger.debug("Init local dict succeed.");
}
 
Example #12
Source File: RocksDBLookupBuilder.java    From kylin-on-parquet-v2 with Apache License 2.0
public RocksDBLookupBuilder(TableDesc tableDesc, String[] keyColumns, String dbPath) {
    this.tableDesc = tableDesc;
    this.encoder = new RocksDBLookupRowEncoder(tableDesc, keyColumns);
    this.dbPath = dbPath;
    this.writeBatchSize = 500;
    this.options = new Options();
    options.setCreateIfMissing(true).setWriteBufferSize(8 * SizeUnit.KB).setMaxWriteBufferNumber(3)
            .setMaxBackgroundCompactions(5).setCompressionType(CompressionType.SNAPPY_COMPRESSION)
            .setCompactionStyle(CompactionStyle.UNIVERSAL);

}
 
Example #13
Source File: RocksDBStateBackendConfigTest.java    From flink with Apache License 2.0
@Test
public void testSetConfigurableOptions() throws Exception  {
	String checkpointPath = tempFolder.newFolder().toURI().toString();
	RocksDBStateBackend rocksDbBackend = new RocksDBStateBackend(checkpointPath);

	assertNull(rocksDbBackend.getOptions());

	DefaultConfigurableOptionsFactory customizedOptions = new DefaultConfigurableOptionsFactory()
		.setMaxBackgroundThreads(4)
		.setMaxOpenFiles(-1)
		.setCompactionStyle(CompactionStyle.LEVEL)
		.setUseDynamicLevelSize(true)
		.setTargetFileSizeBase("4MB")
		.setMaxSizeLevelBase("128 mb")
		.setWriteBufferSize("128 MB")
		.setMaxWriteBufferNumber(4)
		.setMinWriteBufferNumberToMerge(3)
		.setBlockSize("64KB")
		.setBlockCacheSize("512mb");

	rocksDbBackend.setOptions(customizedOptions);

	try (DBOptions dbOptions = rocksDbBackend.getDbOptions()) {
		assertEquals(-1, dbOptions.maxOpenFiles());
	}

	try (ColumnFamilyOptions columnOptions = rocksDbBackend.getColumnOptions()) {
		assertEquals(CompactionStyle.LEVEL, columnOptions.compactionStyle());
		assertTrue(columnOptions.levelCompactionDynamicLevelBytes());
		assertEquals(4 * SizeUnit.MB, columnOptions.targetFileSizeBase());
		assertEquals(128 * SizeUnit.MB, columnOptions.maxBytesForLevelBase());
		assertEquals(4, columnOptions.maxWriteBufferNumber());
		assertEquals(3, columnOptions.minWriteBufferNumberToMerge());

		BlockBasedTableConfig tableConfig = (BlockBasedTableConfig) columnOptions.tableFormatConfig();
		assertEquals(64 * SizeUnit.KB, tableConfig.blockSize());
		assertEquals(512 * SizeUnit.MB, tableConfig.blockCacheSize());
	}
}
 
Example #14
Source File: NodeTest.java    From sofa-jraft with Apache License 2.0
@BeforeClass
public static void setupNodeTest() {
    StorageOptionsFactory.registerRocksDBTableFormatConfig(RocksDBLogStorage.class, StorageOptionsFactory
        .getDefaultRocksDBTableConfig().setBlockCacheSize(256 * SizeUnit.MB));
    dumpThread = new DumpThread();
    dumpThread.setName("NodeTest-DumpThread");
    dumpThread.setDaemon(true);
    dumpThread.start();
}
 
Example #15
Source File: RocksdbStoreTest.java    From jmqtt with Apache License 2.0
@Before
public void before(){
    RocksDB.loadLibrary();
    Options options = new Options();
    options.setCreateIfMissing(true)
            .setWriteBufferSize(64 * SizeUnit.KB)
            .setMaxWriteBufferNumber(3)
            .setMaxBackgroundCompactions(10)
            .setCompressionType(CompressionType.NO_COMPRESSION)
            .setCompactionStyle(CompactionStyle.UNIVERSAL);
    Filter bloomFilter = new BloomFilter(100);
    ReadOptions readOptions = new ReadOptions().setFillCache(false);
    RateLimiter rateLimiter = new RateLimiter(10000000, 10000, 10);

    // Each setMemTableConfig() call replaces the previous one, so of the four
    // configs below only the last (SkipListMemTableConfig) takes effect.
    options.setMemTableConfig(
            new HashSkipListMemTableConfig()
                    .setHeight(4)
                    .setBranchingFactor(4)
                    .setBucketCount(2000000));

    options.setMemTableConfig(
            new HashLinkedListMemTableConfig()
                    .setBucketCount(100000));
    options.setMemTableConfig(
            new VectorMemTableConfig().setReservedSize(10000));

    options.setMemTableConfig(new SkipListMemTableConfig());

    // Note: this PlainTableConfig is itself overridden by the
    // BlockBasedTableConfig set at the end of this method.
    options.setTableFormatConfig(new PlainTableConfig());
    // Plain-Table requires mmap reads
    options.setAllowMmapReads(true);

    options.setRateLimiter(rateLimiter);
    final BlockBasedTableConfig table_options = new BlockBasedTableConfig();
    table_options.setBlockCacheSize(64 * SizeUnit.KB)
            .setFilter(bloomFilter)
            .setCacheNumShardBits(6)
            .setBlockSizeDeviation(5)
            .setBlockRestartInterval(10)
            .setCacheIndexAndFilterBlocks(true)
            .setHashIndexAllowCollision(false)
            .setBlockCacheCompressedSize(64 * SizeUnit.KB)
            .setBlockCacheCompressedNumShardBits(10);

    options.setTableFormatConfig(table_options);
    try {
        rocksDB = RocksDB.open(options,"db");
    } catch (RocksDBException e) {
        e.printStackTrace();
    }
}
 
Example #16
Source File: TableConfig.java    From KitDB with Apache License 2.0
private static BlockBasedTableConfig createDefTableConfig() {
    return new BlockBasedTableConfig() //
            .setBlockSize(4 * SizeUnit.KB) //
            .setFilter(new BloomFilter(16, false));//
}
 
Example #17
Source File: TableConfig.java    From KitDB with Apache License 2.0
public static ColumnFamilyOptions getDefaultRocksDBColumnFamilyOptions() {
    ColumnFamilyOptions opts = new ColumnFamilyOptions();

    // Flushing options:
    // write_buffer_size sets the size of a single mem_table. Once mem_table exceeds
    // this size, it is marked immutable and a new one is created.
    opts.setWriteBufferSize(64 * SizeUnit.MB);

    // Flushing options:
    // max_write_buffer_number sets the maximum number of mem_tables, both active
    // and immutable.  If the active mem_table fills up and the total number of
    // mem_tables is larger than max_write_buffer_number we stall further writes.
    // This may happen if the flush process is slower than the write rate.
    opts.setMaxWriteBufferNumber(3);

    // Flushing options:
    // min_write_buffer_number_to_merge is the minimum number of mem_tables to be
    // merged before flushing to storage. For example, if this option is set to 2,
    // immutable mem_tables are only flushed when there are two of them - a single
    // immutable mem_table will never be flushed.  If multiple mem_tables are merged
    // together, less data may be written to storage since two updates are merged to
    // a single key. However, every Get() must traverse all immutable mem_tables
    // linearly to check if the key is there. Setting this option too high may hurt
    // read performance.
    opts.setMinWriteBufferNumberToMerge(1);

    // Level Style Compaction:
    // level0_file_num_compaction_trigger -- Once level 0 reaches this number of
    // files, L0->L1 compaction is triggered. We can therefore estimate level 0
    // size in stable state as
    // write_buffer_size * min_write_buffer_number_to_merge * level0_file_num_compaction_trigger.
    opts.setLevel0FileNumCompactionTrigger(10);

    // Soft limit on number of level-0 files. We start slowing down writes at this
    // point. A value 0 means that no writing slow down will be triggered by number
    // of files in level-0.
    opts.setLevel0SlowdownWritesTrigger(20);

    // Maximum number of level-0 files.  We stop writes at this point.
    opts.setLevel0StopWritesTrigger(40);

    // Level Style Compaction:
    // max_bytes_for_level_base and max_bytes_for_level_multiplier
    //  -- max_bytes_for_level_base is total size of level 1. As mentioned, we
    // recommend that this be around the size of level 0. Each subsequent level
    // is max_bytes_for_level_multiplier larger than previous one. The default
    // is 10 and we do not recommend changing that.
    opts.setMaxBytesForLevelBase(512 * SizeUnit.MB);

    // Level Style Compaction:
    // target_file_size_base and target_file_size_multiplier
    //  -- Files in level 1 will have target_file_size_base bytes. Each next
    // level's file size will be target_file_size_multiplier bigger than previous
    // one. However, by default target_file_size_multiplier is 1, so files in all
    // L1..LMax levels are equal. Increasing target_file_size_base will reduce total
    // number of database files, which is generally a good thing. We recommend setting
    // target_file_size_base to be max_bytes_for_level_base / 10, so that there are
    // 10 files in level 1.
    opts.setTargetFileSizeBase(64 * SizeUnit.MB);

    // If prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
    // create prefix bloom for memtable with the size of
    // write_buffer_size * memtable_prefix_bloom_size_ratio.
    // If it is larger than 0.25, it is sanitized to 0.25.
    opts.setMemtablePrefixBloomSizeRatio(0.125);

    // Seems like the RocksDB JNI for Windows doesn't come linked with any of the
    // compression types
    //if (!Platform.isWindows()) {
    opts.setCompressionType(CompressionType.LZ4_COMPRESSION) //
            .setCompactionStyle(CompactionStyle.LEVEL) //
            .optimizeLevelStyleCompaction();
    //}
    return opts;
}
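
To make the level-0 estimate from the comments concrete, here is the arithmetic with the values this method sets (a sketch, not part of KitDB):

// Stable-state level-0 size, per the level0_file_num_compaction_trigger comment:
long stableL0Size = (64 * SizeUnit.MB) // write_buffer_size
        * 1                            // min_write_buffer_number_to_merge
        * 10;                          // level0_file_num_compaction_trigger
// => 640 MB, the same order of magnitude as the 512 MB
// max_bytes_for_level_base set above, as the comments recommend.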
 
Example #18
Source File: RocksDBStateBackendConfigTest.java    From flink with Apache License 2.0
@Test
public void testConfigurableOptionsFromConfig() throws IOException {
	Configuration configuration = new Configuration();
	DefaultConfigurableOptionsFactory defaultOptionsFactory = new DefaultConfigurableOptionsFactory();
	assertTrue(defaultOptionsFactory.configure(configuration).getConfiguredOptions().isEmpty());

	// verify illegal configuration
	{
		verifyIllegalArgument(RocksDBConfigurableOptions.MAX_BACKGROUND_THREADS, "-1");
		verifyIllegalArgument(RocksDBConfigurableOptions.MAX_WRITE_BUFFER_NUMBER, "-1");
		verifyIllegalArgument(RocksDBConfigurableOptions.MIN_WRITE_BUFFER_NUMBER_TO_MERGE, "-1");

		verifyIllegalArgument(RocksDBConfigurableOptions.TARGET_FILE_SIZE_BASE, "0KB");
		verifyIllegalArgument(RocksDBConfigurableOptions.MAX_SIZE_LEVEL_BASE, "1BB");
		verifyIllegalArgument(RocksDBConfigurableOptions.WRITE_BUFFER_SIZE, "-1KB");
		verifyIllegalArgument(RocksDBConfigurableOptions.BLOCK_SIZE, "0MB");
		verifyIllegalArgument(RocksDBConfigurableOptions.BLOCK_CACHE_SIZE, "0");

		verifyIllegalArgument(RocksDBConfigurableOptions.USE_DYNAMIC_LEVEL_SIZE, "1");

		verifyIllegalArgument(RocksDBConfigurableOptions.COMPACTION_STYLE, "LEV");
	}

	// verify legal configuration
	{
		configuration.setString(RocksDBConfigurableOptions.COMPACTION_STYLE, "level");
		configuration.setString(RocksDBConfigurableOptions.USE_DYNAMIC_LEVEL_SIZE, "TRUE");
		configuration.setString(RocksDBConfigurableOptions.TARGET_FILE_SIZE_BASE, "8 mb");
		configuration.setString(RocksDBConfigurableOptions.MAX_SIZE_LEVEL_BASE, "128MB");
		configuration.setString(RocksDBConfigurableOptions.MAX_BACKGROUND_THREADS, "4");
		configuration.setString(RocksDBConfigurableOptions.MAX_WRITE_BUFFER_NUMBER, "4");
		configuration.setString(RocksDBConfigurableOptions.MIN_WRITE_BUFFER_NUMBER_TO_MERGE, "2");
		configuration.setString(RocksDBConfigurableOptions.WRITE_BUFFER_SIZE, "64 MB");
		configuration.setString(RocksDBConfigurableOptions.BLOCK_SIZE, "4 kb");
		configuration.setString(RocksDBConfigurableOptions.BLOCK_CACHE_SIZE, "512 mb");

		DefaultConfigurableOptionsFactory optionsFactory = new DefaultConfigurableOptionsFactory();
		optionsFactory.configure(configuration);
		String checkpointPath = tempFolder.newFolder().toURI().toString();
		RocksDBStateBackend rocksDbBackend = new RocksDBStateBackend(checkpointPath);
		rocksDbBackend.setOptions(optionsFactory);

		try (DBOptions dbOptions = rocksDbBackend.getDbOptions()) {
			assertEquals(-1, dbOptions.maxOpenFiles());
		}

		try (ColumnFamilyOptions columnOptions = rocksDbBackend.getColumnOptions()) {
			assertEquals(CompactionStyle.LEVEL, columnOptions.compactionStyle());
			assertTrue(columnOptions.levelCompactionDynamicLevelBytes());
			assertEquals(8 * SizeUnit.MB, columnOptions.targetFileSizeBase());
			assertEquals(128 * SizeUnit.MB, columnOptions.maxBytesForLevelBase());
			assertEquals(4, columnOptions.maxWriteBufferNumber());
			assertEquals(2, columnOptions.minWriteBufferNumberToMerge());
			assertEquals(64 * SizeUnit.MB, columnOptions.writeBufferSize());

			BlockBasedTableConfig tableConfig = (BlockBasedTableConfig) columnOptions.tableFormatConfig();
			assertEquals(4 * SizeUnit.KB, tableConfig.blockSize());
			assertEquals(512 * SizeUnit.MB, tableConfig.blockCacheSize());
		}
	}
}
 
Example #19
Source File: RaftServer.java    From sofa-registry with Apache License 2.0
private NodeOptions initNodeOptions(RaftServerConfig raftServerConfig) {

        NodeOptions nodeOptions = new NodeOptions();

        nodeOptions.setElectionTimeoutMs(raftServerConfig.getElectionTimeoutMs());

        nodeOptions.setDisableCli(false);

        nodeOptions.setSnapshotIntervalSecs(raftServerConfig.getSnapshotIntervalSecs());

        nodeOptions.setInitialConf(initConf);

        nodeOptions.setFsm(this.fsm);

        nodeOptions.setLogUri(dataPath + File.separator + "log");
        nodeOptions.setRaftMetaUri(dataPath + File.separator + "raft_meta");
        nodeOptions.setSnapshotUri(dataPath + File.separator + "snapshot");

        if (raftServerConfig.isEnableMetrics()) {
            nodeOptions.setEnableMetrics(raftServerConfig.isEnableMetrics());
        }

        // See https://github.com/sofastack/sofa-jraft/pull/156
        final BlockBasedTableConfig conf = new BlockBasedTableConfig() //
            // Begin to use partitioned index filters
            // https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters#how-to-use-it
            .setIndexType(IndexType.kTwoLevelIndexSearch) //
            .setFilter(new BloomFilter(16, false)) //
            .setPartitionFilters(true) //
            .setMetadataBlockSize(8 * SizeUnit.KB) //
            .setCacheIndexAndFilterBlocks(false) //
            .setCacheIndexAndFilterBlocksWithHighPriority(true) //
            .setPinL0FilterAndIndexBlocksInCache(true) //
            // End of partitioned index filters settings.
            .setBlockSize(4 * SizeUnit.KB)//
            .setBlockCacheSize(raftServerConfig.getRockDBCacheSize() * SizeUnit.MB) //
            .setCacheNumShardBits(8);

        StorageOptionsFactory.registerRocksDBTableFormatConfig(RocksDBLogStorage.class, conf);

        return nodeOptions;
    }
 
Example #20
Source File: RocksDBStateBackendConfigTest.java    From flink with Apache License 2.0 4 votes vote down vote up
@Test
public void testConfigurableOptionsFromConfig() throws Exception {
	Configuration configuration = new Configuration();
	DefaultConfigurableOptionsFactory defaultOptionsFactory = new DefaultConfigurableOptionsFactory();
	assertTrue(defaultOptionsFactory.configure(configuration).getConfiguredOptions().isEmpty());

	// verify illegal configuration
	{
		verifyIllegalArgument(RocksDBConfigurableOptions.MAX_BACKGROUND_THREADS, "-1");
		verifyIllegalArgument(RocksDBConfigurableOptions.MAX_WRITE_BUFFER_NUMBER, "-1");
		verifyIllegalArgument(RocksDBConfigurableOptions.MIN_WRITE_BUFFER_NUMBER_TO_MERGE, "-1");

		verifyIllegalArgument(RocksDBConfigurableOptions.TARGET_FILE_SIZE_BASE, "0KB");
		verifyIllegalArgument(RocksDBConfigurableOptions.MAX_SIZE_LEVEL_BASE, "1BB");
		verifyIllegalArgument(RocksDBConfigurableOptions.WRITE_BUFFER_SIZE, "-1KB");
		verifyIllegalArgument(RocksDBConfigurableOptions.BLOCK_SIZE, "0MB");
		verifyIllegalArgument(RocksDBConfigurableOptions.BLOCK_CACHE_SIZE, "0");

		verifyIllegalArgument(RocksDBConfigurableOptions.USE_DYNAMIC_LEVEL_SIZE, "1");

		verifyIllegalArgument(RocksDBConfigurableOptions.COMPACTION_STYLE, "LEV");
	}

	// verify legal configuration
	{
		configuration.setString(RocksDBConfigurableOptions.COMPACTION_STYLE.key(), "level");
		configuration.setString(RocksDBConfigurableOptions.USE_DYNAMIC_LEVEL_SIZE.key(), "TRUE");
		configuration.setString(RocksDBConfigurableOptions.TARGET_FILE_SIZE_BASE.key(), "8 mb");
		configuration.setString(RocksDBConfigurableOptions.MAX_SIZE_LEVEL_BASE.key(), "128MB");
		configuration.setString(RocksDBConfigurableOptions.MAX_BACKGROUND_THREADS.key(), "4");
		configuration.setString(RocksDBConfigurableOptions.MAX_WRITE_BUFFER_NUMBER.key(), "4");
		configuration.setString(RocksDBConfigurableOptions.MIN_WRITE_BUFFER_NUMBER_TO_MERGE.key(), "2");
		configuration.setString(RocksDBConfigurableOptions.WRITE_BUFFER_SIZE.key(), "64 MB");
		configuration.setString(RocksDBConfigurableOptions.BLOCK_SIZE.key(), "4 kb");
		configuration.setString(RocksDBConfigurableOptions.BLOCK_CACHE_SIZE.key(), "512 mb");

		DefaultConfigurableOptionsFactory optionsFactory = new DefaultConfigurableOptionsFactory();
		optionsFactory.configure(configuration);

		try (RocksDBResourceContainer optionsContainer =
				new RocksDBResourceContainer(PredefinedOptions.DEFAULT, optionsFactory)) {

			DBOptions dbOptions = optionsContainer.getDbOptions();
			assertEquals(-1, dbOptions.maxOpenFiles());

			ColumnFamilyOptions columnOptions = optionsContainer.getColumnOptions();
			assertEquals(CompactionStyle.LEVEL, columnOptions.compactionStyle());
			assertTrue(columnOptions.levelCompactionDynamicLevelBytes());
			assertEquals(8 * SizeUnit.MB, columnOptions.targetFileSizeBase());
			assertEquals(128 * SizeUnit.MB, columnOptions.maxBytesForLevelBase());
			assertEquals(4, columnOptions.maxWriteBufferNumber());
			assertEquals(2, columnOptions.minWriteBufferNumberToMerge());
			assertEquals(64 * SizeUnit.MB, columnOptions.writeBufferSize());

			BlockBasedTableConfig tableConfig = (BlockBasedTableConfig) columnOptions.tableFormatConfig();
			assertEquals(4 * SizeUnit.KB, tableConfig.blockSize());
			assertEquals(512 * SizeUnit.MB, tableConfig.blockCacheSize());
		}
	}
}
 
Example #21
Source File: RocksDbUnitTest.java    From jstorm with Apache License 2.0
public static void main(String[] args) {
    Map conf = JStormHelper.LoadConf(args[0]);
    putNum = JStormUtils.parseInt(conf.get("put.number"), 100);
    isFlush = JStormUtils.parseBoolean(conf.get("is.flush"), true);
    isCheckpoint = JStormUtils.parseBoolean(conf.get("is.checkpoint"), true);
    sleepTime = JStormUtils.parseInt(conf.get("sleep.time"), 5000);
    compactionInterval = JStormUtils.parseInt(conf.get("compaction.interval"), 30000);
    flushInterval = JStormUtils.parseInt(conf.get("flush.interval"), 3000);
    isCompaction = JStormUtils.parseBoolean(conf.get("is.compaction"), true);
    fileSizeBase = JStormUtils.parseLong(conf.get("file.size.base"), 10 * SizeUnit.KB);
    levelNum = JStormUtils.parseInt(conf.get("db.level.num"), 1);
    compactionTriggerNum = JStormUtils.parseInt(conf.get("db.compaction.trigger.num"), 4);
    LOG.info("Conf={}", conf);
    
    RocksDB db;
    File file = new File(cpPath);
    file.mkdirs();

    List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
    try {
        Options options = new Options();
        options.setCreateMissingColumnFamilies(true);
        options.setCreateIfMissing(true);
        options.setTargetFileSizeBase(fileSizeBase);
        options.setMaxBackgroundFlushes(2);
        options.setMaxBackgroundCompactions(2);
        options.setCompactionStyle(CompactionStyle.LEVEL);
        options.setNumLevels(levelNum);
        options.setLevelZeroFileNumCompactionTrigger(compactionTriggerNum);

        DBOptions dbOptions = new DBOptions();
        dbOptions.setCreateMissingColumnFamilies(true);
        dbOptions.setCreateIfMissing(true);
        dbOptions.setMaxBackgroundFlushes(2);
        dbOptions.setMaxBackgroundCompactions(2);
        ColumnFamilyOptions familyOptions = new ColumnFamilyOptions();
        familyOptions.setTargetFileSizeBase(fileSizeBase);
        familyOptions.setCompactionStyle(CompactionStyle.LEVEL);
        familyOptions.setNumLevels(levelNum);
        familyOptions.setLevelZeroFileNumCompactionTrigger(compactionTriggerNum);
        List<byte[]> families = RocksDB.listColumnFamilies(options, dbPath);
        List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
        if (families != null) {
            for (byte[] bytes : families) {
                columnFamilyDescriptors.add(new ColumnFamilyDescriptor(bytes, familyOptions));
                LOG.info("Load colum family of {}", new String(bytes));
            }
        }
        
        if (columnFamilyDescriptors.size() > 0) {
            db = RocksDB.open(dbOptions, dbPath, columnFamilyDescriptors, columnFamilyHandles);
        } else {
            db = RocksDB.open(options, dbPath);
        }
    } catch (RocksDBException e) {
        LOG.error("Failed to open db", e);
        return;
    }

    rocksDbTest(db, columnFamilyHandles);
    
    db.close();
}
 
Example #22
Source File: RocksDbOptionsFactory.java    From jstorm with Apache License 2.0
@Override
public Options createOptions(Options currentOptions) {
    if (currentOptions == null)
        currentOptions = new Options();
    currentOptions.setCreateIfMissing(true);
    currentOptions.setCreateMissingColumnFamilies(true);
    currentOptions.setMergeOperator(new StringAppendOperator());

    BlockBasedTableConfig tableOptions = new BlockBasedTableConfig();
    tableOptions.setBlockSize(32 * SizeUnit.KB);

    // Set memory table size
    currentOptions.setMaxWriteBufferNumber(4);
    currentOptions.setWriteBufferSize(64 * SizeUnit.MB);

    // Set block cache size
    tableOptions.setBlockCacheSize(64 * SizeUnit.MB);
    tableOptions.setFilter(new BloomFilter(DEFAULT_BLOOM_FILTER_BITS, false));
    // Put all index into block cache
    tableOptions.setCacheIndexAndFilterBlocks(true);

    /*
    tableOptions.setIndexType(IndexType.kHashSearch);
    tableOptions.setWholeKeyFiltering(false);
    currentOptions.setMemTableConfig(new HashLinkedListMemTableConfig());
    currentOptions.useFixedLengthPrefixExtractor(Integer.SIZE / Byte.SIZE * 2);
    currentOptions.setMemtablePrefixBloomBits(10000000);
    currentOptions.setMemtablePrefixBloomProbes(6);
    */

    currentOptions.setTableFormatConfig(tableOptions);

    //currentOptions.setStatsDumpPeriodSec(300);
    //currentOptions.createStatistics();

    currentOptions.setTargetFileSizeBase(64 * SizeUnit.MB);
    currentOptions.setAllowOsBuffer(true);
    currentOptions.setMaxOpenFiles(-1);
    currentOptions.setMaxBackgroundFlushes(2);
    currentOptions.setMaxBackgroundCompactions(2);
    currentOptions.setCompactionStyle(CompactionStyle.LEVEL);
    currentOptions.setLevelZeroFileNumCompactionTrigger(4);
    currentOptions.setLevelZeroSlowdownWritesTrigger(20);
    currentOptions.setLevelZeroStopWritesTrigger(30);
    currentOptions.setNumLevels(4);
    currentOptions.setMaxBytesForLevelBase(64 * 4 * SizeUnit.MB);
    currentOptions.setAllowOsBuffer(false); // overrides the earlier setAllowOsBuffer(true)
    return currentOptions;
}
 
Example #23
Source File: StorageOptionsFactory.java    From sofa-jraft with Apache License 2.0
public static ColumnFamilyOptions getDefaultRocksDBColumnFamilyOptions() {
    final ColumnFamilyOptions opts = new ColumnFamilyOptions();

    // Flushing options:
    // write_buffer_size sets the size of a single mem_table. Once mem_table exceeds
    // this size, it is marked immutable and a new one is created.
    opts.setWriteBufferSize(64 * SizeUnit.MB);

    // Flushing options:
    // max_write_buffer_number sets the maximum number of mem_tables, both active
    // and immutable.  If the active mem_table fills up and the total number of
    // mem_tables is larger than max_write_buffer_number we stall further writes.
    // This may happen if the flush process is slower than the write rate.
    opts.setMaxWriteBufferNumber(3);

    // Flushing options:
    // min_write_buffer_number_to_merge is the minimum number of mem_tables to be
    // merged before flushing to storage. For example, if this option is set to 2,
    // immutable mem_tables are only flushed when there are two of them - a single
    // immutable mem_table will never be flushed.  If multiple mem_tables are merged
    // together, less data may be written to storage since two updates are merged to
    // a single key. However, every Get() must traverse all immutable mem_tables
    // linearly to check if the key is there. Setting this option too high may hurt
    // read performance.
    opts.setMinWriteBufferNumberToMerge(1);

    // Level Style Compaction:
    // level0_file_num_compaction_trigger -- Once level 0 reaches this number of
    // files, L0->L1 compaction is triggered. We can therefore estimate level 0
    // size in stable state as
    // write_buffer_size * min_write_buffer_number_to_merge * level0_file_num_compaction_trigger.
    opts.setLevel0FileNumCompactionTrigger(10);

    // Soft limit on number of level-0 files. We start slowing down writes at this
    // point. A value 0 means that no writing slow down will be triggered by number
    // of files in level-0.
    opts.setLevel0SlowdownWritesTrigger(20);

    // Maximum number of level-0 files.  We stop writes at this point.
    opts.setLevel0StopWritesTrigger(40);

    // Level Style Compaction:
    // max_bytes_for_level_base and max_bytes_for_level_multiplier
    //  -- max_bytes_for_level_base is total size of level 1. As mentioned, we
    // recommend that this be around the size of level 0. Each subsequent level
    // is max_bytes_for_level_multiplier larger than previous one. The default
    // is 10 and we do not recommend changing that.
    opts.setMaxBytesForLevelBase(512 * SizeUnit.MB);

    // Level Style Compaction:
    // target_file_size_base and target_file_size_multiplier
    //  -- Files in level 1 will have target_file_size_base bytes. Each next
    // level's file size will be target_file_size_multiplier bigger than previous
    // one. However, by default target_file_size_multiplier is 1, so files in all
    // L1..LMax levels are equal. Increasing target_file_size_base will reduce total
    // number of database files, which is generally a good thing. We recommend setting
    // target_file_size_base to be max_bytes_for_level_base / 10, so that there are
    // 10 files in level 1.
    opts.setTargetFileSizeBase(64 * SizeUnit.MB);

    // If prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
    // create prefix bloom for memtable with the size of
    // write_buffer_size * memtable_prefix_bloom_size_ratio.
    // If it is larger than 0.25, it is sanitized to 0.25.
    opts.setMemtablePrefixBloomSizeRatio(0.125);

    // Seems like the RocksDB JNI for Windows doesn't come linked with any of the
    // compression types
    if (!Platform.isWindows()) {
        opts.setCompressionType(CompressionType.LZ4_COMPRESSION) //
            .setCompactionStyle(CompactionStyle.LEVEL) //
            .optimizeLevelStyleCompaction();
    }

    // https://github.com/facebook/rocksdb/pull/5744
    opts.setForceConsistencyChecks(true);

    return opts;
}
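
Similarly, the memtable prefix bloom comment above implies a concrete size. A small sketch of the arithmetic (illustrative only; it takes effect only if a prefix_extractor is configured, which this method does not do):

long prefixBloomBytes = (long) ((64 * SizeUnit.MB) * 0.125); // write_buffer_size * ratio
// => 8 MB per memtable; RocksDB sanitizes ratios above 0.25 down to 0.25.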
 
Example #24
Source File: RocksDBStateBackendConfigTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testConfigurableOptionsFromConfig() throws IOException {
	Configuration configuration = new Configuration();
	DefaultConfigurableOptionsFactory defaultOptionsFactory = new DefaultConfigurableOptionsFactory();
	assertTrue(defaultOptionsFactory.configure(configuration).getConfiguredOptions().isEmpty());

	// verify illegal configuration
	{
		verifyIllegalArgument(RocksDBConfigurableOptions.MAX_BACKGROUND_THREADS, "-1");
		verifyIllegalArgument(RocksDBConfigurableOptions.MAX_WRITE_BUFFER_NUMBER, "-1");
		verifyIllegalArgument(RocksDBConfigurableOptions.MIN_WRITE_BUFFER_NUMBER_TO_MERGE, "-1");

		verifyIllegalArgument(RocksDBConfigurableOptions.TARGET_FILE_SIZE_BASE, "0KB");
		verifyIllegalArgument(RocksDBConfigurableOptions.MAX_SIZE_LEVEL_BASE, "1BB");
		verifyIllegalArgument(RocksDBConfigurableOptions.WRITE_BUFFER_SIZE, "-1KB");
		verifyIllegalArgument(RocksDBConfigurableOptions.BLOCK_SIZE, "0MB");
		verifyIllegalArgument(RocksDBConfigurableOptions.BLOCK_CACHE_SIZE, "0");

		verifyIllegalArgument(RocksDBConfigurableOptions.USE_DYNAMIC_LEVEL_SIZE, "1");

		verifyIllegalArgument(RocksDBConfigurableOptions.COMPACTION_STYLE, "LEV");
	}

	// verify legal configuration
	{
		configuration.setString(RocksDBConfigurableOptions.COMPACTION_STYLE, "level");
		configuration.setString(RocksDBConfigurableOptions.USE_DYNAMIC_LEVEL_SIZE, "TRUE");
		configuration.setString(RocksDBConfigurableOptions.TARGET_FILE_SIZE_BASE, "8 mb");
		configuration.setString(RocksDBConfigurableOptions.MAX_SIZE_LEVEL_BASE, "128MB");
		configuration.setString(RocksDBConfigurableOptions.MAX_BACKGROUND_THREADS, "4");
		configuration.setString(RocksDBConfigurableOptions.MAX_WRITE_BUFFER_NUMBER, "4");
		configuration.setString(RocksDBConfigurableOptions.MIN_WRITE_BUFFER_NUMBER_TO_MERGE, "2");
		configuration.setString(RocksDBConfigurableOptions.WRITE_BUFFER_SIZE, "64 MB");
		configuration.setString(RocksDBConfigurableOptions.BLOCK_SIZE, "4 kb");
		configuration.setString(RocksDBConfigurableOptions.BLOCK_CACHE_SIZE, "512 mb");

		DefaultConfigurableOptionsFactory optionsFactory = new DefaultConfigurableOptionsFactory();
		optionsFactory.configure(configuration);
		String checkpointPath = tempFolder.newFolder().toURI().toString();
		RocksDBStateBackend rocksDbBackend = new RocksDBStateBackend(checkpointPath);
		rocksDbBackend.setOptions(optionsFactory);

		try (DBOptions dbOptions = rocksDbBackend.getDbOptions()) {
			assertEquals(-1, dbOptions.maxOpenFiles());
		}

		try (ColumnFamilyOptions columnOptions = rocksDbBackend.getColumnOptions()) {
			assertEquals(CompactionStyle.LEVEL, columnOptions.compactionStyle());
			assertTrue(columnOptions.levelCompactionDynamicLevelBytes());
			assertEquals(8 * SizeUnit.MB, columnOptions.targetFileSizeBase());
			assertEquals(128 * SizeUnit.MB, columnOptions.maxBytesForLevelBase());
			assertEquals(4, columnOptions.maxWriteBufferNumber());
			assertEquals(2, columnOptions.minWriteBufferNumberToMerge());
			assertEquals(64 * SizeUnit.MB, columnOptions.writeBufferSize());

			BlockBasedTableConfig tableConfig = (BlockBasedTableConfig) columnOptions.tableFormatConfig();
			assertEquals(4 * SizeUnit.KB, tableConfig.blockSize());
			assertEquals(512 * SizeUnit.MB, tableConfig.blockCacheSize());
		}
	}
}