org.rocksdb.BlockBasedTableConfig Java Examples

The following examples show how to use org.rocksdb.BlockBasedTableConfig. Each example is taken from an open-source project; the source file, project, and license are noted above the code.
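Before the project examples, here is a minimal, self-contained sketch of the common usage pattern: build a BlockBasedTableConfig, attach it to Options via setTableFormatConfig, and open the database. The class name, block size, cache size, and database path are illustrative placeholders, not values recommended by any of the projects below.

import org.rocksdb.BlockBasedTableConfig;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.util.SizeUnit;

public class BlockBasedTableConfigQuickStart {

    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        // Configure the block-based table format: 16 KB data blocks, 128 MB block cache.
        BlockBasedTableConfig tableConfig = new BlockBasedTableConfig()
            .setBlockSize(16 * SizeUnit.KB)
            .setBlockCacheSize(128 * SizeUnit.MB);
        try (Options options = new Options()
                 .setCreateIfMissing(true)
                 .setTableFormatConfig(tableConfig);
             RocksDB db = RocksDB.open(options, "/tmp/rocksdb-table-config-demo")) {
            db.put("key".getBytes(), "value".getBytes());
        }
    }
}
 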
Example #1
Source File: StorageOptionsFactory.java    From sofa-jraft with Apache License 2.0
public static BlockBasedTableConfig getDefaultRocksDBTableConfig() {
    // See https://github.com/sofastack/sofa-jraft/pull/156
    return new BlockBasedTableConfig() //
        // Begin to use partitioned index filters
        // https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters#how-to-use-it
        .setIndexType(IndexType.kTwoLevelIndexSearch) //
        .setFilter(new BloomFilter(16, false)) //
        .setPartitionFilters(true) //
        .setMetadataBlockSize(8 * SizeUnit.KB) //
        .setCacheIndexAndFilterBlocks(false) //
        .setCacheIndexAndFilterBlocksWithHighPriority(true) //
        .setPinL0FilterAndIndexBlocksInCache(true) //
        // End of partitioned index filters settings.
        .setBlockSize(4 * SizeUnit.KB) //
        .setBlockCacheSize(512 * SizeUnit.MB) //
        .setCacheNumShardBits(8);
}
 
Example #2
Source File: StorageOptionsFactory.java    From sofa-jraft with Apache License 2.0
private static BlockBasedTableConfig copyTableFormatConfig(final BlockBasedTableConfig cfg) {
    return new BlockBasedTableConfig() //
        .setNoBlockCache(cfg.noBlockCache()) //
        .setBlockCacheSize(cfg.blockCacheSize()) //
        .setCacheNumShardBits(cfg.cacheNumShardBits()) //
        .setBlockSize(cfg.blockSize()) //
        .setBlockSizeDeviation(cfg.blockSizeDeviation()) //
        .setBlockRestartInterval(cfg.blockRestartInterval()) //
        .setWholeKeyFiltering(cfg.wholeKeyFiltering()) //
        .setCacheIndexAndFilterBlocks(cfg.cacheIndexAndFilterBlocks()) //
        .setCacheIndexAndFilterBlocksWithHighPriority(cfg.cacheIndexAndFilterBlocksWithHighPriority()) //
        .setPinL0FilterAndIndexBlocksInCache(cfg.pinL0FilterAndIndexBlocksInCache()) //
        .setPartitionFilters(cfg.partitionFilters()) //
        .setMetadataBlockSize(cfg.metadataBlockSize()) //
        .setPinTopLevelIndexAndFilter(cfg.pinTopLevelIndexAndFilter()) //
        .setHashIndexAllowCollision(cfg.hashIndexAllowCollision()) //
        .setBlockCacheCompressedSize(cfg.blockCacheCompressedSize()) //
        .setBlockCacheCompressedNumShardBits(cfg.blockCacheCompressedNumShardBits()) //
        .setChecksumType(cfg.checksumType()) //
        .setIndexType(cfg.indexType()) //
        .setFormatVersion(cfg.formatVersion());
}
 
Example #3
Source File: RocksDBStateBackendConfigTest.java    From flink with Apache License 2.0
@Test
public void testSetConfigurableOptions() throws Exception  {
	DefaultConfigurableOptionsFactory customizedOptions = new DefaultConfigurableOptionsFactory()
		.setMaxBackgroundThreads(4)
		.setMaxOpenFiles(-1)
		.setCompactionStyle(CompactionStyle.LEVEL)
		.setUseDynamicLevelSize(true)
		.setTargetFileSizeBase("4MB")
		.setMaxSizeLevelBase("128 mb")
		.setWriteBufferSize("128 MB")
		.setMaxWriteBufferNumber(4)
		.setMinWriteBufferNumberToMerge(3)
		.setBlockSize("64KB")
		.setBlockCacheSize("512mb");

	try (RocksDBResourceContainer optionsContainer =
			new RocksDBResourceContainer(PredefinedOptions.DEFAULT, customizedOptions)) {

		DBOptions dbOptions = optionsContainer.getDbOptions();
		assertEquals(-1, dbOptions.maxOpenFiles());

		ColumnFamilyOptions columnOptions = optionsContainer.getColumnOptions();
		assertEquals(CompactionStyle.LEVEL, columnOptions.compactionStyle());
		assertTrue(columnOptions.levelCompactionDynamicLevelBytes());
		assertEquals(4 * SizeUnit.MB, columnOptions.targetFileSizeBase());
		assertEquals(128 * SizeUnit.MB, columnOptions.maxBytesForLevelBase());
		assertEquals(4, columnOptions.maxWriteBufferNumber());
		assertEquals(3, columnOptions.minWriteBufferNumberToMerge());

		BlockBasedTableConfig tableConfig = (BlockBasedTableConfig) columnOptions.tableFormatConfig();
		assertEquals(64 * SizeUnit.KB, tableConfig.blockSize());
		assertEquals(512 * SizeUnit.MB, tableConfig.blockCacheSize());
	}
}
 
Example #4
Source File: RocksdbMap.java    From Lealone-Plugins with Apache License 2.0
public RocksdbMap(String name, StorageDataType keyType, StorageDataType valueType, RocksdbStorage storage) {
    super(name, keyType, valueType, storage);

    Options options = new Options();
    options.setCreateIfMissing(true);
    BlockBasedTableConfig config = new BlockBasedTableConfig();
    options.setTableFormatConfig(config);
    dbPath = storage.getStoragePath() + File.separator + name;
    try {
        db = RocksDB.open(options, dbPath);
    } catch (RocksDBException e) {
        throw ioe(e, "Failed to open " + dbPath);
    }
    setMaxKey(lastKey());
}
 
Example #5
Source File: RocksDBStateBackendConfigTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testSetConfigurableOptions() throws Exception  {
	String checkpointPath = tempFolder.newFolder().toURI().toString();
	RocksDBStateBackend rocksDbBackend = new RocksDBStateBackend(checkpointPath);

	assertNull(rocksDbBackend.getOptions());

	DefaultConfigurableOptionsFactory customizedOptions = new DefaultConfigurableOptionsFactory()
		.setMaxBackgroundThreads(4)
		.setMaxOpenFiles(-1)
		.setCompactionStyle(CompactionStyle.LEVEL)
		.setUseDynamicLevelSize(true)
		.setTargetFileSizeBase("4MB")
		.setMaxSizeLevelBase("128 mb")
		.setWriteBufferSize("128 MB")
		.setMaxWriteBufferNumber(4)
		.setMinWriteBufferNumberToMerge(3)
		.setBlockSize("64KB")
		.setBlockCacheSize("512mb");

	rocksDbBackend.setOptions(customizedOptions);

	try (DBOptions dbOptions = rocksDbBackend.getDbOptions()) {
		assertEquals(-1, dbOptions.maxOpenFiles());
	}

	try (ColumnFamilyOptions columnOptions = rocksDbBackend.getColumnOptions()) {
		assertEquals(CompactionStyle.LEVEL, columnOptions.compactionStyle());
		assertTrue(columnOptions.levelCompactionDynamicLevelBytes());
		assertEquals(4 * SizeUnit.MB, columnOptions.targetFileSizeBase());
		assertEquals(128 * SizeUnit.MB, columnOptions.maxBytesForLevelBase());
		assertEquals(4, columnOptions.maxWriteBufferNumber());
		assertEquals(3, columnOptions.minWriteBufferNumberToMerge());

		BlockBasedTableConfig tableConfig = (BlockBasedTableConfig) columnOptions.tableFormatConfig();
		assertEquals(64 * SizeUnit.KB, tableConfig.blockSize());
		assertEquals(512 * SizeUnit.MB, tableConfig.blockCacheSize());
	}
}
 
Example #6
Source File: StorageOptionsFactory.java    From sofa-jraft with Apache License 2.0
/**
 * Users can register a custom RocksDB BlockBasedTableConfig; the related
 * classes will then look up their options using their own class name as the
 * key. If a user does not register a config, a default config will be provided.
 *
 * @param cls the key of BlockBasedTableConfig
 * @param cfg the BlockBasedTableConfig
 */
public static void registerRocksDBTableFormatConfig(final Class<?> cls, final BlockBasedTableConfig cfg) {
    Requires.requireNonNull(cls, "cls");
    Requires.requireNonNull(cfg, "cfg");
    if (tableFormatConfigTable.putIfAbsent(cls.getName(), cfg) != null) {
        throw new IllegalStateException("TableFormatConfig with class key [" + cls.getName()
                                        + "] has already been registered");
    }
}
 
Example #7
Source File: StorageOptionsFactory.java    From sofa-jraft with Apache License 2.0
/**
 * Get a new default TableFormatConfig or a copy of the existing TableFormatConfig.
 *
 * @param cls the key of TableFormatConfig
 * @return a new default TableFormatConfig or a copy of the existing TableFormatConfig
 */
public static BlockBasedTableConfig getRocksDBTableFormatConfig(final Class<?> cls) {
    Requires.requireNonNull(cls, "cls");
    BlockBasedTableConfig cfg = tableFormatConfigTable.get(cls.getName());
    if (cfg == null) {
        final BlockBasedTableConfig newCfg = getDefaultRocksDBTableConfig();
        cfg = tableFormatConfigTable.putIfAbsent(cls.getName(), newCfg);
        if (cfg == null) {
            cfg = newCfg;
        }
    }
    return copyTableFormatConfig(cfg);
}
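As a usage note, a service typically registers its custom table config once at startup, and each storage class then fetches its own copy. The sketch below follows the register-then-get pattern of the sofa-jraft and sofa-registry examples elsewhere on this page; the 256 MB cache override is an arbitrary illustration, and the snippet assumes the same imports as those examples.

// At startup: derive a custom config from the defaults and register it under the
// storage class that should use it (registering the same class twice throws).
BlockBasedTableConfig custom = StorageOptionsFactory.getDefaultRocksDBTableConfig()
    .setBlockCacheSize(256 * SizeUnit.MB);
StorageOptionsFactory.registerRocksDBTableFormatConfig(RocksDBLogStorage.class, custom);

// Inside the storage class: the factory always returns a copy, so mutating the
// returned config does not affect other callers sharing the registered instance.
BlockBasedTableConfig cfg = StorageOptionsFactory.getRocksDBTableFormatConfig(RocksDBLogStorage.class);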
 
Example #8
Source File: RocksDBLogStorage.java    From sofa-jraft with Apache License 2.0
public static ColumnFamilyOptions createColumnFamilyOptions() {
    final BlockBasedTableConfig tConfig = StorageOptionsFactory
            .getRocksDBTableFormatConfig(RocksDBLogStorage.class);
    return StorageOptionsFactory.getRocksDBColumnFamilyOptions(RocksDBLogStorage.class) //
            .useFixedLengthPrefixExtractor(8) //
            .setTableFormatConfig(tConfig) //
            .setMergeOperator(new StringAppendOperator());
}
 
Example #9
Source File: RocksDBStateBackendConfigTest.java    From flink with Apache License 2.0
@Test
public void testSetConfigurableOptions() throws Exception  {
	String checkpointPath = tempFolder.newFolder().toURI().toString();
	RocksDBStateBackend rocksDbBackend = new RocksDBStateBackend(checkpointPath);

	assertNull(rocksDbBackend.getOptions());

	DefaultConfigurableOptionsFactory customizedOptions = new DefaultConfigurableOptionsFactory()
		.setMaxBackgroundThreads(4)
		.setMaxOpenFiles(-1)
		.setCompactionStyle(CompactionStyle.LEVEL)
		.setUseDynamicLevelSize(true)
		.setTargetFileSizeBase("4MB")
		.setMaxSizeLevelBase("128 mb")
		.setWriteBufferSize("128 MB")
		.setMaxWriteBufferNumber(4)
		.setMinWriteBufferNumberToMerge(3)
		.setBlockSize("64KB")
		.setBlockCacheSize("512mb");

	rocksDbBackend.setOptions(customizedOptions);

	try (DBOptions dbOptions = rocksDbBackend.getDbOptions()) {
		assertEquals(-1, dbOptions.maxOpenFiles());
	}

	try (ColumnFamilyOptions columnOptions = rocksDbBackend.getColumnOptions()) {
		assertEquals(CompactionStyle.LEVEL, columnOptions.compactionStyle());
		assertTrue(columnOptions.levelCompactionDynamicLevelBytes());
		assertEquals(4 * SizeUnit.MB, columnOptions.targetFileSizeBase());
		assertEquals(128 * SizeUnit.MB, columnOptions.maxBytesForLevelBase());
		assertEquals(4, columnOptions.maxWriteBufferNumber());
		assertEquals(3, columnOptions.minWriteBufferNumberToMerge());

		BlockBasedTableConfig tableConfig = (BlockBasedTableConfig) columnOptions.tableFormatConfig();
		assertEquals(64 * SizeUnit.KB, tableConfig.blockSize());
		assertEquals(512 * SizeUnit.MB, tableConfig.blockCacheSize());
	}
}
 
Example #10
Source File: RocksDBWrapper.java    From aion with MIT License
private BlockBasedTableConfig setupBlockBasedTableConfig() {
    BlockBasedTableConfig bbtc = new BlockBasedTableConfig();
    bbtc.setBlockSize(BLOCK_SIZE);
    bbtc.setCacheIndexAndFilterBlocks(true);
    bbtc.setPinL0FilterAndIndexBlocksInCache(true);
    bbtc.setFilterPolicy(new BloomFilter(BLOOMFILTER_BITS_PER_KEY, false));
    return bbtc;
}
 
Example #11
Source File: RocksDBStateBackendConfigTest.java    From flink with Apache License 2.0
@Test
public void testConfigurableOptionsFromConfig() throws Exception {
	Configuration configuration = new Configuration();
	DefaultConfigurableOptionsFactory defaultOptionsFactory = new DefaultConfigurableOptionsFactory();
	assertTrue(defaultOptionsFactory.configure(configuration).getConfiguredOptions().isEmpty());

	// verify illegal configuration
	{
		verifyIllegalArgument(RocksDBConfigurableOptions.MAX_BACKGROUND_THREADS, "-1");
		verifyIllegalArgument(RocksDBConfigurableOptions.MAX_WRITE_BUFFER_NUMBER, "-1");
		verifyIllegalArgument(RocksDBConfigurableOptions.MIN_WRITE_BUFFER_NUMBER_TO_MERGE, "-1");

		verifyIllegalArgument(RocksDBConfigurableOptions.TARGET_FILE_SIZE_BASE, "0KB");
		verifyIllegalArgument(RocksDBConfigurableOptions.MAX_SIZE_LEVEL_BASE, "1BB");
		verifyIllegalArgument(RocksDBConfigurableOptions.WRITE_BUFFER_SIZE, "-1KB");
		verifyIllegalArgument(RocksDBConfigurableOptions.BLOCK_SIZE, "0MB");
		verifyIllegalArgument(RocksDBConfigurableOptions.BLOCK_CACHE_SIZE, "0");

		verifyIllegalArgument(RocksDBConfigurableOptions.USE_DYNAMIC_LEVEL_SIZE, "1");

		verifyIllegalArgument(RocksDBConfigurableOptions.COMPACTION_STYLE, "LEV");
	}

	// verify legal configuration
	{
		configuration.setString(RocksDBConfigurableOptions.COMPACTION_STYLE.key(), "level");
		configuration.setString(RocksDBConfigurableOptions.USE_DYNAMIC_LEVEL_SIZE.key(), "TRUE");
		configuration.setString(RocksDBConfigurableOptions.TARGET_FILE_SIZE_BASE.key(), "8 mb");
		configuration.setString(RocksDBConfigurableOptions.MAX_SIZE_LEVEL_BASE.key(), "128MB");
		configuration.setString(RocksDBConfigurableOptions.MAX_BACKGROUND_THREADS.key(), "4");
		configuration.setString(RocksDBConfigurableOptions.MAX_WRITE_BUFFER_NUMBER.key(), "4");
		configuration.setString(RocksDBConfigurableOptions.MIN_WRITE_BUFFER_NUMBER_TO_MERGE.key(), "2");
		configuration.setString(RocksDBConfigurableOptions.WRITE_BUFFER_SIZE.key(), "64 MB");
		configuration.setString(RocksDBConfigurableOptions.BLOCK_SIZE.key(), "4 kb");
		configuration.setString(RocksDBConfigurableOptions.BLOCK_CACHE_SIZE.key(), "512 mb");

		DefaultConfigurableOptionsFactory optionsFactory = new DefaultConfigurableOptionsFactory();
		optionsFactory.configure(configuration);

		try (RocksDBResourceContainer optionsContainer =
				new RocksDBResourceContainer(PredefinedOptions.DEFAULT, optionsFactory)) {

			DBOptions dbOptions = optionsContainer.getDbOptions();
			assertEquals(-1, dbOptions.maxOpenFiles());

			ColumnFamilyOptions columnOptions = optionsContainer.getColumnOptions();
			assertEquals(CompactionStyle.LEVEL, columnOptions.compactionStyle());
			assertTrue(columnOptions.levelCompactionDynamicLevelBytes());
			assertEquals(8 * SizeUnit.MB, columnOptions.targetFileSizeBase());
			assertEquals(128 * SizeUnit.MB, columnOptions.maxBytesForLevelBase());
			assertEquals(4, columnOptions.maxWriteBufferNumber());
			assertEquals(2, columnOptions.minWriteBufferNumberToMerge());
			assertEquals(64 * SizeUnit.MB, columnOptions.writeBufferSize());

			BlockBasedTableConfig tableConfig = (BlockBasedTableConfig) columnOptions.tableFormatConfig();
			assertEquals(4 * SizeUnit.KB, tableConfig.blockSize());
			assertEquals(512 * SizeUnit.MB, tableConfig.blockCacheSize());
		}
	}
}
 
Example #12
Source File: RocksDBConfigParser.java    From journalkeeper with Apache License 2.0
public static Options parse(Properties properties) {
    Options options = new Options();
    options.setCompressionType(CompressionType.LZ4_COMPRESSION)
            .setCompactionStyle(CompactionStyle.LEVEL);

    BlockBasedTableConfig tableOptions = new BlockBasedTableConfig();
    options.setTableFormatConfig(tableOptions);

    for (String key : properties.stringPropertyNames()) {
        String prefix = null;
        Object configInstance = null;

        if (key.startsWith(RocksDBConfigs.OPTIONS_PREFIX)) {
            prefix = RocksDBConfigs.OPTIONS_PREFIX;
            configInstance = options;
        } else if (key.startsWith(RocksDBConfigs.TABLE_OPTIONS_PREFIX)) {
            prefix = RocksDBConfigs.TABLE_OPTIONS_PREFIX;
            configInstance = tableOptions;
        } else {
            continue;
        }

        String fieldKey = key.substring(prefix.length(), key.length());
        String value = properties.getProperty(key);

        try {
            Method setterMethod = findSetterMethod(configInstance.getClass(), fieldKey);
            if (setterMethod == null) {
                logger.warn("parse config error, method not found, key: {}, value: {}", key, value);
                continue;
            }
            setterMethod.invoke(configInstance, PropertyUtils.convert(value, setterMethod.getParameters()[0].getType()));
        } catch (Exception e) {
            logger.error("parse config error, key: {}, value: {}", key, value, e);
        }
    }

    if (properties.containsKey(RocksDBConfigs.FILTER_BITSPER_KEY)) {
        tableOptions.setFilterPolicy(new BloomFilter(
                PropertyUtils.convertInt(properties.getProperty(RocksDBConfigs.FILTER_BITSPER_KEY), 0)));
    }

    return options;
}
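A hypothetical caller might drive this parser with a Properties object as sketched below. The actual prefix strings live in RocksDBConfigs and are not shown in this excerpt, so the keys are written against those constants, and the option names are only guesses at setters the reflective lookup could resolve; treat this as an illustration of the key format rather than a tested configuration (the usual java.util.Properties import is assumed).

Properties props = new Properties();
// Keys under OPTIONS_PREFIX are routed to org.rocksdb.Options setters by reflection.
props.setProperty(RocksDBConfigs.OPTIONS_PREFIX + "maxOpenFiles", "1024");
// Keys under TABLE_OPTIONS_PREFIX are routed to the BlockBasedTableConfig setters.
props.setProperty(RocksDBConfigs.TABLE_OPTIONS_PREFIX + "blockSize", String.valueOf(16 * 1024));
// The Bloom filter bits-per-key is handled explicitly by the parser.
props.setProperty(RocksDBConfigs.FILTER_BITSPER_KEY, "10");
Options options = RocksDBConfigParser.parse(props);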
 
Example #13
Source File: RocksDBCache.java    From kcache with Apache License 2.0
private void openDB() {
    // initialize the default rocksdb options

    final DBOptions dbOptions = new DBOptions();
    final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
    userSpecifiedOptions = new RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter(dbOptions, columnFamilyOptions);
    userSpecifiedOptions.setComparator(new RocksDBKeySliceComparator<>(keySerde, comparator));

    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
    cache = new LRUCache(BLOCK_CACHE_SIZE);
    tableConfig.setBlockCache(cache);
    tableConfig.setBlockSize(BLOCK_SIZE);

    filter = new BloomFilter();
    tableConfig.setFilterPolicy(filter);

    userSpecifiedOptions.optimizeFiltersForHits();
    userSpecifiedOptions.setTableFormatConfig(tableConfig);
    userSpecifiedOptions.setWriteBufferSize(WRITE_BUFFER_SIZE);
    userSpecifiedOptions.setCompressionType(COMPRESSION_TYPE);
    userSpecifiedOptions.setCompactionStyle(COMPACTION_STYLE);
    userSpecifiedOptions.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
    userSpecifiedOptions.setCreateIfMissing(true);
    userSpecifiedOptions.setErrorIfExists(false);
    userSpecifiedOptions.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL);
    // this is the recommended way to increase parallelism in RocksDb
    // note that the current implementation of setIncreaseParallelism affects the number
    // of compaction threads but not flush threads (the latter remains one). Also
    // the parallelism value needs to be at least two because the code at
    // https://github.com/facebook/rocksdb/blob/62ad0a9b19f0be4cefa70b6b32876e764b7f3c11/util/options.cc#L580
    // subtracts one from the value passed in to determine the number of compaction threads
    // (this could be a bug in the RocksDB code and their devs have been contacted).
    userSpecifiedOptions.setIncreaseParallelism(Math.max(Runtime.getRuntime().availableProcessors(), 2));

    wOptions = new WriteOptions();
    wOptions.setDisableWAL(true);

    fOptions = new FlushOptions();
    fOptions.setWaitForFlush(true);

    dbDir = new File(new File(rootDir, parentDir), name);

    try {
        Files.createDirectories(dbDir.getParentFile().toPath());
        Files.createDirectories(dbDir.getAbsoluteFile().toPath());
    } catch (final IOException fatal) {
        throw new CacheInitializationException("Could not create directories", fatal);
    }

    openRocksDB(dbOptions, columnFamilyOptions);
    open = true;
}
 
Example #14
Source File: RocksDbDataSourceImpl.java    From gsc-core with GNU Lesser General Public License v3.0
public void initDB(RocksDbSettings settings) {
    resetDbLock.writeLock().lock();
    try {
        if (isAlive()) {
            return;
        }

        Preconditions.checkNotNull(dataBaseName, "no name set to the dbStore");

        try (Options options = new Options()) {

            // most of these options are suggested by https://github.com/facebook/rocksdb/wiki/Set-Up-Options

            // general options
            if (settings.isEnableStatistics()) {
                options.setStatistics(new Statistics());
                options.setStatsDumpPeriodSec(60);
            }
            options.setCreateIfMissing(true);
            options.setIncreaseParallelism(1);
            options.setLevelCompactionDynamicLevelBytes(true);
            options.setMaxOpenFiles(settings.getMaxOpenFiles());

            // general options supported user config
            options.setNumLevels(settings.getLevelNumber());
            options.setMaxBytesForLevelMultiplier(settings.getMaxBytesForLevelMultiplier());
            options.setMaxBytesForLevelBase(settings.getMaxBytesForLevelBase());
            options.setMaxBackgroundCompactions(settings.getCompactThreads());
            options.setLevel0FileNumCompactionTrigger(settings.getLevel0FileNumCompactionTrigger());
            options.setTargetFileSizeMultiplier(settings.getTargetFileSizeMultiplier());
            options.setTargetFileSizeBase(settings.getTargetFileSizeBase());

            // table options
            final BlockBasedTableConfig tableCfg;
            options.setTableFormatConfig(tableCfg = new BlockBasedTableConfig());
            tableCfg.setBlockSize(settings.getBlockSize());
            tableCfg.setBlockCacheSize(32 * 1024 * 1024);
            tableCfg.setCacheIndexAndFilterBlocks(true);
            tableCfg.setPinL0FilterAndIndexBlocksInCache(true);
            tableCfg.setFilter(new BloomFilter(10, false));

            // read options
            readOpts = new ReadOptions();
            readOpts = readOpts.setPrefixSameAsStart(true)
                    .setVerifyChecksums(false);

            try {
                logger.debug("Opening database");
                final Path dbPath = getDbPath();
                if (!Files.isSymbolicLink(dbPath.getParent())) {
                    Files.createDirectories(dbPath.getParent());
                }

                try {
                    database = RocksDB.open(options, dbPath.toString());
                } catch (RocksDBException e) {
                    logger.error(e.getMessage(), e);
                    throw new RuntimeException("Failed to initialize database", e);
                }

                alive = true;

            } catch (IOException ioe) {
                logger.error(ioe.getMessage(), ioe);
                throw new RuntimeException("Failed to initialize database", ioe);
            }

            logger.debug("<~ RocksDbDataSource.initDB(): " + dataBaseName);
        }
    } finally {
        resetDbLock.writeLock().unlock();
    }
}
 
Example #15
Source File: RocksDbInstanceFactory.java    From teku with Apache License 2.0
private static BlockBasedTableConfig createBlockBasedTableConfig(
    final RocksDbConfiguration config) {
  final LRUCache cache = new LRUCache(config.getCacheCapacity());
  return new BlockBasedTableConfig().setBlockCache(cache);
}
 
Example #16
Source File: RocksDBStateBackendConfigTest.java    From flink with Apache License 2.0
@Test
public void testConfigurableOptionsFromConfig() throws IOException {
	Configuration configuration = new Configuration();
	DefaultConfigurableOptionsFactory defaultOptionsFactory = new DefaultConfigurableOptionsFactory();
	assertTrue(defaultOptionsFactory.configure(configuration).getConfiguredOptions().isEmpty());

	// verify illegal configuration
	{
		verifyIllegalArgument(RocksDBConfigurableOptions.MAX_BACKGROUND_THREADS, "-1");
		verifyIllegalArgument(RocksDBConfigurableOptions.MAX_WRITE_BUFFER_NUMBER, "-1");
		verifyIllegalArgument(RocksDBConfigurableOptions.MIN_WRITE_BUFFER_NUMBER_TO_MERGE, "-1");

		verifyIllegalArgument(RocksDBConfigurableOptions.TARGET_FILE_SIZE_BASE, "0KB");
		verifyIllegalArgument(RocksDBConfigurableOptions.MAX_SIZE_LEVEL_BASE, "1BB");
		verifyIllegalArgument(RocksDBConfigurableOptions.WRITE_BUFFER_SIZE, "-1KB");
		verifyIllegalArgument(RocksDBConfigurableOptions.BLOCK_SIZE, "0MB");
		verifyIllegalArgument(RocksDBConfigurableOptions.BLOCK_CACHE_SIZE, "0");

		verifyIllegalArgument(RocksDBConfigurableOptions.USE_DYNAMIC_LEVEL_SIZE, "1");

		verifyIllegalArgument(RocksDBConfigurableOptions.COMPACTION_STYLE, "LEV");
	}

	// verify legal configuration
	{
		configuration.setString(RocksDBConfigurableOptions.COMPACTION_STYLE, "level");
		configuration.setString(RocksDBConfigurableOptions.USE_DYNAMIC_LEVEL_SIZE, "TRUE");
		configuration.setString(RocksDBConfigurableOptions.TARGET_FILE_SIZE_BASE, "8 mb");
		configuration.setString(RocksDBConfigurableOptions.MAX_SIZE_LEVEL_BASE, "128MB");
		configuration.setString(RocksDBConfigurableOptions.MAX_BACKGROUND_THREADS, "4");
		configuration.setString(RocksDBConfigurableOptions.MAX_WRITE_BUFFER_NUMBER, "4");
		configuration.setString(RocksDBConfigurableOptions.MIN_WRITE_BUFFER_NUMBER_TO_MERGE, "2");
		configuration.setString(RocksDBConfigurableOptions.WRITE_BUFFER_SIZE, "64 MB");
		configuration.setString(RocksDBConfigurableOptions.BLOCK_SIZE, "4 kb");
		configuration.setString(RocksDBConfigurableOptions.BLOCK_CACHE_SIZE, "512 mb");

		DefaultConfigurableOptionsFactory optionsFactory = new DefaultConfigurableOptionsFactory();
		optionsFactory.configure(configuration);
		String checkpointPath = tempFolder.newFolder().toURI().toString();
		RocksDBStateBackend rocksDbBackend = new RocksDBStateBackend(checkpointPath);
		rocksDbBackend.setOptions(optionsFactory);

		try (DBOptions dbOptions = rocksDbBackend.getDbOptions()) {
			assertEquals(-1, dbOptions.maxOpenFiles());
		}

		try (ColumnFamilyOptions columnOptions = rocksDbBackend.getColumnOptions()) {
			assertEquals(CompactionStyle.LEVEL, columnOptions.compactionStyle());
			assertTrue(columnOptions.levelCompactionDynamicLevelBytes());
			assertEquals(8 * SizeUnit.MB, columnOptions.targetFileSizeBase());
			assertEquals(128 * SizeUnit.MB, columnOptions.maxBytesForLevelBase());
			assertEquals(4, columnOptions.maxWriteBufferNumber());
			assertEquals(2, columnOptions.minWriteBufferNumberToMerge());
			assertEquals(64 * SizeUnit.MB, columnOptions.writeBufferSize());

			BlockBasedTableConfig tableConfig = (BlockBasedTableConfig) columnOptions.tableFormatConfig();
			assertEquals(4 * SizeUnit.KB, tableConfig.blockSize());
			assertEquals(512 * SizeUnit.MB, tableConfig.blockCacheSize());
		}
	}
}
 
Example #17
Source File: RaftServer.java    From sofa-registry with Apache License 2.0
private NodeOptions initNodeOptions(RaftServerConfig raftServerConfig) {

        NodeOptions nodeOptions = new NodeOptions();

        nodeOptions.setElectionTimeoutMs(raftServerConfig.getElectionTimeoutMs());

        nodeOptions.setDisableCli(false);

        nodeOptions.setSnapshotIntervalSecs(raftServerConfig.getSnapshotIntervalSecs());

        nodeOptions.setInitialConf(initConf);

        nodeOptions.setFsm(this.fsm);

        nodeOptions.setLogUri(dataPath + File.separator + "log");
        nodeOptions.setRaftMetaUri(dataPath + File.separator + "raft_meta");
        nodeOptions.setSnapshotUri(dataPath + File.separator + "snapshot");

        if (raftServerConfig.isEnableMetrics()) {
            nodeOptions.setEnableMetrics(raftServerConfig.isEnableMetrics());
        }

        // See https://github.com/sofastack/sofa-jraft/pull/156
        final BlockBasedTableConfig conf = new BlockBasedTableConfig() //
            // Begin to use partitioned index filters
            // https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters#how-to-use-it
            .setIndexType(IndexType.kTwoLevelIndexSearch) //
            .setFilter(new BloomFilter(16, false)) //
            .setPartitionFilters(true) //
            .setMetadataBlockSize(8 * SizeUnit.KB) //
            .setCacheIndexAndFilterBlocks(false) //
            .setCacheIndexAndFilterBlocksWithHighPriority(true) //
            .setPinL0FilterAndIndexBlocksInCache(true) //
            // End of partitioned index filters settings.
            .setBlockSize(4 * SizeUnit.KB) //
            .setBlockCacheSize(raftServerConfig.getRockDBCacheSize() * SizeUnit.MB) //
            .setCacheNumShardBits(8);

        StorageOptionsFactory.registerRocksDBTableFormatConfig(RocksDBLogStorage.class, conf);

        return nodeOptions;
    }
 
Example #18
Source File: RocksRawKVStore.java    From sofa-jraft with Apache License 2.0
private static ColumnFamilyOptions createColumnFamilyOptions() {
    final BlockBasedTableConfig tConfig = StorageOptionsFactory.getRocksDBTableFormatConfig(RocksRawKVStore.class);
    return StorageOptionsFactory.getRocksDBColumnFamilyOptions(RocksRawKVStore.class) //
        .setTableFormatConfig(tConfig) //
        .setMergeOperator(new StringAppendOperator());
}
 
Example #19
Source File: RocksDBStateBackendConfigTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testConfigurableOptionsFromConfig() throws IOException {
	Configuration configuration = new Configuration();
	DefaultConfigurableOptionsFactory defaultOptionsFactory = new DefaultConfigurableOptionsFactory();
	assertTrue(defaultOptionsFactory.configure(configuration).getConfiguredOptions().isEmpty());

	// verify illegal configuration
	{
		verifyIllegalArgument(RocksDBConfigurableOptions.MAX_BACKGROUND_THREADS, "-1");
		verifyIllegalArgument(RocksDBConfigurableOptions.MAX_WRITE_BUFFER_NUMBER, "-1");
		verifyIllegalArgument(RocksDBConfigurableOptions.MIN_WRITE_BUFFER_NUMBER_TO_MERGE, "-1");

		verifyIllegalArgument(RocksDBConfigurableOptions.TARGET_FILE_SIZE_BASE, "0KB");
		verifyIllegalArgument(RocksDBConfigurableOptions.MAX_SIZE_LEVEL_BASE, "1BB");
		verifyIllegalArgument(RocksDBConfigurableOptions.WRITE_BUFFER_SIZE, "-1KB");
		verifyIllegalArgument(RocksDBConfigurableOptions.BLOCK_SIZE, "0MB");
		verifyIllegalArgument(RocksDBConfigurableOptions.BLOCK_CACHE_SIZE, "0");

		verifyIllegalArgument(RocksDBConfigurableOptions.USE_DYNAMIC_LEVEL_SIZE, "1");

		verifyIllegalArgument(RocksDBConfigurableOptions.COMPACTION_STYLE, "LEV");
	}

	// verify legal configuration
	{
		configuration.setString(RocksDBConfigurableOptions.COMPACTION_STYLE, "level");
		configuration.setString(RocksDBConfigurableOptions.USE_DYNAMIC_LEVEL_SIZE, "TRUE");
		configuration.setString(RocksDBConfigurableOptions.TARGET_FILE_SIZE_BASE, "8 mb");
		configuration.setString(RocksDBConfigurableOptions.MAX_SIZE_LEVEL_BASE, "128MB");
		configuration.setString(RocksDBConfigurableOptions.MAX_BACKGROUND_THREADS, "4");
		configuration.setString(RocksDBConfigurableOptions.MAX_WRITE_BUFFER_NUMBER, "4");
		configuration.setString(RocksDBConfigurableOptions.MIN_WRITE_BUFFER_NUMBER_TO_MERGE, "2");
		configuration.setString(RocksDBConfigurableOptions.WRITE_BUFFER_SIZE, "64 MB");
		configuration.setString(RocksDBConfigurableOptions.BLOCK_SIZE, "4 kb");
		configuration.setString(RocksDBConfigurableOptions.BLOCK_CACHE_SIZE, "512 mb");

		DefaultConfigurableOptionsFactory optionsFactory = new DefaultConfigurableOptionsFactory();
		optionsFactory.configure(configuration);
		String checkpointPath = tempFolder.newFolder().toURI().toString();
		RocksDBStateBackend rocksDbBackend = new RocksDBStateBackend(checkpointPath);
		rocksDbBackend.setOptions(optionsFactory);

		try (DBOptions dbOptions = rocksDbBackend.getDbOptions()) {
			assertEquals(-1, dbOptions.maxOpenFiles());
		}

		try (ColumnFamilyOptions columnOptions = rocksDbBackend.getColumnOptions()) {
			assertEquals(CompactionStyle.LEVEL, columnOptions.compactionStyle());
			assertTrue(columnOptions.levelCompactionDynamicLevelBytes());
			assertEquals(8 * SizeUnit.MB, columnOptions.targetFileSizeBase());
			assertEquals(128 * SizeUnit.MB, columnOptions.maxBytesForLevelBase());
			assertEquals(4, columnOptions.maxWriteBufferNumber());
			assertEquals(2, columnOptions.minWriteBufferNumberToMerge());
			assertEquals(64 * SizeUnit.MB, columnOptions.writeBufferSize());

			BlockBasedTableConfig tableConfig = (BlockBasedTableConfig) columnOptions.tableFormatConfig();
			assertEquals(4 * SizeUnit.KB, tableConfig.blockSize());
			assertEquals(512 * SizeUnit.MB, tableConfig.blockCacheSize());
		}
	}
}
 
Example #20
Source File: RocksDBKeyValueStorage.java    From besu with Apache License 2.0
private BlockBasedTableConfig createBlockBasedTableConfig(final RocksDBConfiguration config) {
  final LRUCache cache = new LRUCache(config.getCacheCapacity());
  return new BlockBasedTableConfig().setBlockCache(cache);
}
 
Example #21
Source File: RocksDBColumnarKeyValueStorage.java    From besu with Apache License 2.0
private BlockBasedTableConfig createBlockBasedTableConfig(final RocksDBConfiguration config) {
  final LRUCache cache = new LRUCache(config.getCacheCapacity());
  return new BlockBasedTableConfig().setBlockCache(cache);
}
 
Example #22
Source File: MetadataStoreBuilder.java    From hadoop-ozone with Apache License 2.0
public MetadataStore build() throws IOException {
  if (dbFile == null) {
    throw new IllegalArgumentException("Failed to build metadata store, "
        + "dbFile is required but not found");
  }

  // Build db store based on configuration
  final ConfigurationSource conf = optionalConf.orElse(DEFAULT_CONF);

  if (dbType == null) {
    LOG.debug("dbType is null, using ");
    dbType = conf.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
            OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT);
    LOG.debug("dbType is null, using dbType {} from ozone configuration",
        dbType);
  } else {
    LOG.debug("Using dbType {} for metastore", dbType);
  }
  if (OZONE_METADATA_STORE_IMPL_LEVELDB.equals(dbType)) {
    Options options = new Options();
    options.createIfMissing(createIfMissing);
    if (cacheSize > 0) {
      options.cacheSize(cacheSize);
    }
    return new LevelDBStore(dbFile, options);
  } else if (OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(dbType)) {
    org.rocksdb.Options opts;
    // Used cached options if config object passed down is the same
    if (CACHED_OPTS.containsKey(conf)) {
      opts = CACHED_OPTS.get(conf);
    } else {
      opts = new org.rocksdb.Options();
      if (cacheSize > 0) {
        BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
        tableConfig.setBlockCacheSize(cacheSize);
        opts.setTableFormatConfig(tableConfig);
      }

      String rocksDbStat = conf.getTrimmed(
          OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
          OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT);

      if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
        Statistics statistics = new Statistics();
        statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
        opts = opts.setStatistics(statistics);
      }
    }
    opts.setCreateIfMissing(createIfMissing);
    CACHED_OPTS.put(conf, opts);
    return new RocksDBStore(dbFile, opts);
  }
  
  throw new IllegalArgumentException("Invalid argument for "
      + OzoneConfigKeys.OZONE_METADATA_STORE_IMPL
      + ". Expecting " + OZONE_METADATA_STORE_IMPL_LEVELDB
      + " or " + OZONE_METADATA_STORE_IMPL_ROCKSDB
      + ", but met " + dbType);
}