org.rocksdb.Env Java Examples

The following examples show how to use org.rocksdb.Env. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example #1
Source File: RocksDBStdSessions.java    From hugegraph with Apache License 2.0 6 votes vote down vote up
/**
 * Opens a brand-new RocksDB instance for the given store.
 *
 * @param config   graph configuration used to initialize the RocksDB options
 * @param database logical database name, forwarded to the parent sessions
 * @param store    store name, forwarded to the parent sessions
 * @param dataPath directory that will hold the SST data files
 * @param walPath  directory that will hold the write-ahead log
 * @throws RocksDBException if RocksDB fails to open at {@code dataPath}
 */
public RocksDBStdSessions(HugeConfig config, String database, String store,
                          String dataPath, String walPath)
                          throws RocksDBException {
    super(config, database, store);

    // Init options
    Options options = new Options();
    // NOTE(review): the same Options instance fills all four option slots —
    // presumably initOptions takes db/cf option interface views that Options
    // implements simultaneously; confirm against initOptions' signature.
    RocksDBStdSessions.initOptions(config, options, options,
                                   options, options);
    options.setWalDir(walPath);

    // Track SST file usage via the default Env so disk consumption is observable.
    this.sstFileManager = new SstFileManager(Env.getDefault());
    options.setSstFileManager(this.sstFileManager);

    /*
     * Open RocksDB at the first time
     * Don't merge old CFs, we expect a clear DB when using this one
     */
    this.rocksdb = RocksDB.open(options, dataPath);

    // Column-family registry starts empty; refCount starts at 1 for this opener.
    this.cfs = new ConcurrentHashMap<>();
    this.refCount = new AtomicInteger(1);
}
 
Example #2
Source File: RocksDbInstanceFactory.java    From teku with Apache License 2.0 5 votes vote down vote up
/**
 * Builds the database-wide RocksDB options for this instance.
 *
 * @param configuration tunables supplied by the application (buffer sizes,
 *                      open-file and background-thread limits)
 * @param stats         statistics sink to attach to the database
 * @return a fully configured {@link DBOptions}
 */
private static DBOptions createDBOptions(
    final RocksDbConfiguration configuration, final Statistics stats) {
  // 1 MiB incremental-sync threshold for both SST and WAL writes.
  final long oneMebibyte = 1024L * 1024L;

  final DBOptions dbOptions = new DBOptions();
  dbOptions.setCreateIfMissing(true);
  dbOptions.setBytesPerSync(oneMebibyte);
  dbOptions.setWalBytesPerSync(oneMebibyte);
  dbOptions.setMaxBackgroundFlushes(2);
  dbOptions.setDbWriteBufferSize(configuration.getWriteBufferCapacity());
  dbOptions.setMaxOpenFiles(configuration.getMaxOpenFiles());
  dbOptions.setMaxBackgroundCompactions(configuration.getMaxBackgroundCompactions());
  dbOptions.setCreateMissingColumnFamilies(true);
  // Size the shared background thread pool from configuration.
  dbOptions.setEnv(
      Env.getDefault().setBackgroundThreads(configuration.getBackgroundThreadCount()));
  dbOptions.setStatistics(stats);
  return dbOptions;
}
 
Example #3
Source File: RocksDBColumnarKeyValueStorage.java    From besu with Apache License 2.0 4 votes vote down vote up
/**
 * Opens a transactional RocksDB database with one column family per segment,
 * plus the default column family, and wires up metrics.
 *
 * @param configuration         database location and tuning parameters
 * @param segments              logical segments, each mapped to a column family
 * @param metricsSystem         metrics registry to report into
 * @param rocksDBMetricsFactory factory that binds RocksDB stats to the metrics system
 * @throws StorageException if RocksDB fails to open (wraps {@link RocksDBException})
 */
public RocksDBColumnarKeyValueStorage(
    final RocksDBConfiguration configuration,
    final List<SegmentIdentifier> segments,
    final MetricsSystem metricsSystem,
    final RocksDBMetricsFactory rocksDBMetricsFactory)
    throws StorageException {

  try {
    // One descriptor per segment, keyed by the segment's raw id bytes.
    final List<ColumnFamilyDescriptor> columnDescriptors =
        segments.stream()
            .map(segment -> new ColumnFamilyDescriptor(segment.getId()))
            .collect(Collectors.toList());
    // RocksDB requires the default column family to be listed explicitly.
    columnDescriptors.add(
        new ColumnFamilyDescriptor(
            DEFAULT_COLUMN.getBytes(StandardCharsets.UTF_8),
            new ColumnFamilyOptions()
                .setTableFormatConfig(createBlockBasedTableConfig(configuration))));

    final Statistics stats = new Statistics();
    options =
        new DBOptions()
            .setCreateIfMissing(true)
            .setMaxOpenFiles(configuration.getMaxOpenFiles())
            .setMaxBackgroundCompactions(configuration.getMaxBackgroundCompactions())
            .setStatistics(stats)
            .setCreateMissingColumnFamilies(true)
            .setEnv(
                Env.getDefault().setBackgroundThreads(configuration.getBackgroundThreadCount()));

    txOptions = new TransactionDBOptions();
    // open() populates columnHandles positionally, matching columnDescriptors.
    final List<ColumnFamilyHandle> columnHandles = new ArrayList<>(columnDescriptors.size());
    db =
        TransactionDB.open(
            options,
            txOptions,
            configuration.getDatabaseDir().toString(),
            columnDescriptors,
            columnHandles);
    metrics = rocksDBMetricsFactory.create(metricsSystem, configuration, db, stats);
    // Reverse index: segment id bytes -> human-readable segment name.
    final Map<Bytes, String> segmentsById =
        segments.stream()
            .collect(
                Collectors.toMap(
                    segment -> Bytes.wrap(segment.getId()), SegmentIdentifier::getName));

    final ImmutableMap.Builder<String, ColumnFamilyHandle> builder = ImmutableMap.builder();

    // Handles whose id has no segment mapping belong to the default column family.
    for (ColumnFamilyHandle columnHandle : columnHandles) {
      final String segmentName =
          requireNonNullElse(
              segmentsById.get(Bytes.wrap(columnHandle.getName())), DEFAULT_COLUMN);
      builder.put(segmentName, columnHandle);
    }
    columnHandlesByName = builder.build();
  } catch (final RocksDBException e) {
    // NOTE(review): options/stats created above are not closed on this failure
    // path — confirm whether the caller disposes them, else they leak natively.
    throw new StorageException(e);
  }
}
 
Example #4
Source File: RocksRawKVStore.java    From sofa-jraft with Apache License 2.0 4 votes vote down vote up
/**
 * Builds the {@link DBOptions} for this KV store, starting from the shared
 * factory defaults and attaching the default RocksDB environment.
 *
 * @return configured database options
 */
private static DBOptions createDBOptions() {
    final DBOptions dbOptions = StorageOptionsFactory.getRocksDBOptions(RocksRawKVStore.class);
    return dbOptions.setEnv(Env.getDefault());
}
 
Example #5
Source File: BackupDB.java    From DDMQ with Apache License 2.0 4 votes vote down vote up
/**
 * Creates a new backup of the live database under {@code DB_PATH_BACKUP}.
 *
 * <p>Re-entrant calls are rejected while a backup is in flight. Note - the
 * underlying {@code BackupEngine.createNewBackup} is not thread safe.
 *
 * @return {@code SUCCESS} on completion, {@code BEING_BACKUP} if a backup is
 *         already running, {@code FAIL} if RocksDB reports an error
 */
public static BackupState backup() {
    // NOTE(review): check-then-set on a plain boolean is racy under concurrent
    // callers; consider AtomicBoolean.compareAndSet — confirm threading model.
    if (backuping) {
        LOGGER.info("is backuping, return");
        return BackupState.BEING_BACKUP;
    }

    LOGGER.info("start backup");
    backuping = true;
    try (final BackupableDBOptions bopt = new BackupableDBOptions(DB_PATH_BACKUP);
         final BackupEngine be = BackupEngine.open(Env.getDefault(), bopt)) {

        /*
         * flushBeforeBackup == false: skip the memtable flush, so the backup
         * also copies WAL files for live memtables. Either way the backup is
         * consistent with the current state of the database.
         */
        boolean flushBeforeBackup = false;
        be.createNewBackup(RDB.DB, flushBeforeBackup);

        // Log a summary of every backup the engine now knows about.
        // Fixed malformed placeholder "backupInfo[{}}" -> "backupInfo[{}]" and
        // hoisted the repeated backupInfos.get(i) lookup.
        List<BackupInfo> backupInfos = be.getBackupInfo();
        for (int i = 0; i < backupInfos.size(); i++) {
            BackupInfo info = backupInfos.get(i);
            LOGGER.info("backupInfo[{}], backupId:{}, timestamp:{}, size:{}, numberFiles:{}",
                    i, info.backupId(), info.timestamp(), info.size(), info.numberFiles());
        }

        return BackupState.SUCCESS;
    } catch (RocksDBException e) {
        LOGGER.error("error while backup, path:{}, err:{}", DB_PATH_BACKUP, e.getMessage(), e);
        return BackupState.FAIL;
    } finally {
        backuping = false;
        LOGGER.info("end backup");
    }
}
 
Example #6
Source File: BackupDB.java    From DDMQ with Apache License 2.0 4 votes vote down vote up
/**
 * Restores the database from the latest backup into {@code DB_PATH_RESTORE},
 * then re-opens it and logs up to the first 10 key/value pairs as a sanity check.
 *
 * <p>Re-entrant calls are rejected while a restore is in flight.
 *
 * @return {@code SUCCESS} on completion, {@code BEING_RESTORE} if a restore is
 *         already running, {@code FAIL} if RocksDB reports an error
 * @throws RocksDBException declared but handled internally for the restore path
 */
public static RestoreState restore() throws RocksDBException {
    if (restoring) {
        LOGGER.info("is restoring, return");
        return RestoreState.BEING_RESTORE;
    }

    LOGGER.info("start restore");
    restoring = true;
    RocksDB restoreDB = null;
    try (final BackupableDBOptions bopt = new BackupableDBOptions(DB_PATH_BACKUP);
         final BackupEngine be = BackupEngine.open(Env.getDefault(), bopt)) {
        // restore db from first backup

        /**
         * @param keepLogFiles If true, restore won't overwrite the existing log files
         *   in wal_dir. It will also move all log files from archive directory to
         *   wal_dir. Use this option in combination with
         *   BackupableDBOptions::backup_log_files = false for persisting in-memory
         *   databases.
         *   Default: false
         */
        boolean keepLogFiles = false;
        // Data dir and WAL dir both point at DB_PATH_RESTORE.
        be.restoreDbFromLatestBackup(DB_PATH_RESTORE, DB_PATH_RESTORE, new RestoreOptions(keepLogFiles));
        // open database again.
        restoreDB = RocksDB.open(OptionsConfig.DB_OPTIONS, DB_PATH_RESTORE, CFManager.CF_DESCRIPTORS, CFManager.CF_HANDLES);

        // Sanity check: dump at most the first 10 entries of the restored DB.
        int i = 0;
        try (RocksIterator it = restoreDB.newIterator()) {
            for (it.seekToFirst(); it.isValid(); it.next()) {
                // NOTE(review): new String(byte[]) uses the platform default
                // charset — consider StandardCharsets.UTF_8; confirm key encoding.
                LOGGER.info("i:{}, key:{}, value:{}", i++, new String(it.key()), new String(it.value()));
                if (i == 10) {
                    break;
                }
            }
        }

        return RestoreState.SUCCESS;
    } catch (RocksDBException e) {
        LOGGER.error("error while restore, path:{}, err:{}", DB_PATH_RESTORE, e.getMessage(), e);
        return RestoreState.FAIL;
    } finally {
        // Close the verification handle regardless of outcome.
        if (restoreDB != null) {
            restoreDB.close();
        }

        restoring = false;
        LOGGER.info("end restore");
    }
}
 
Example #7
Source File: RocksDBStorageEngine.java    From HaloDB with Apache License 2.0 4 votes vote down vote up
/**
 * Opens the RocksDB instance used by this benchmark storage engine with a
 * hand-tuned leveled-compaction configuration, printing the effective option
 * values for reference.
 *
 * <p>Fix: replaced the swallowed {@code RocksDBException} (printStackTrace)
 * with a rethrow preserving the cause — previously a failed open left
 * {@code db} null and every later operation would NPE. Also normalized the
 * lowercase {@code 128l} literal to {@code 128L}.
 */
@Override
public void open() {
    options = new Options().setCreateIfMissing(true);
    options.setStatsDumpPeriodSec(1000000);

    // 128 MiB write buffer, up to 3 memtables, 20 concurrent compactions.
    options.setWriteBufferSize(128L * 1024 * 1024);
    options.setMaxWriteBufferNumber(3);
    options.setMaxBackgroundCompactions(20);

    // Size the compaction thread pool to match maxBackgroundCompactions.
    Env env = Env.getDefault();
    env.setBackgroundThreads(20, Env.COMPACTION_POOL);
    options.setEnv(env);

    // max size of L1 10 MB.
    options.setMaxBytesForLevelBase(10485760);
    options.setTargetFileSizeBase(67108864);

    options.setLevel0FileNumCompactionTrigger(4);
    options.setLevel0SlowdownWritesTrigger(6);
    options.setLevel0StopWritesTrigger(12);
    options.setNumLevels(6);
    options.setDeleteObsoleteFilesPeriodMicros(300000000);

    options.setAllowMmapReads(false);
    options.setCompressionType(CompressionType.SNAPPY_COMPRESSION);

    // Echo the effective settings so benchmark runs are self-describing.
    System.out.printf("maxBackgroundCompactions %d \n", options.maxBackgroundCompactions());
    System.out.printf("minWriteBufferNumberToMerge %d \n", options.minWriteBufferNumberToMerge());
    System.out.printf("maxWriteBufferNumberToMaintain %d \n", options.maxWriteBufferNumberToMaintain());

    System.out.printf("level0FileNumCompactionTrigger %d \n", options.level0FileNumCompactionTrigger());
    System.out.printf("maxBytesForLevelBase %d \n", options.maxBytesForLevelBase());
    System.out.printf("maxBytesForLevelMultiplier %f \n", options.maxBytesForLevelMultiplier());
    System.out.printf("targetFileSizeBase %d \n", options.targetFileSizeBase());
    System.out.printf("targetFileSizeMultiplier %d \n", options.targetFileSizeMultiplier());

    // L0/L1 uncompressed; deeper levels Snappy-compressed.
    List<CompressionType> compressionLevels =
        Arrays.asList(
            CompressionType.NO_COMPRESSION,
            CompressionType.NO_COMPRESSION,
            CompressionType.SNAPPY_COMPRESSION,
            CompressionType.SNAPPY_COMPRESSION,
            CompressionType.SNAPPY_COMPRESSION,
            CompressionType.SNAPPY_COMPRESSION
        );

    options.setCompressionPerLevel(compressionLevels);

    System.out.printf("compressionPerLevel %s \n", options.compressionPerLevel());
    System.out.printf("numLevels %s \n", options.numLevels());

    // WAL disabled: this is a throwaway benchmark store, durability not needed.
    writeOptions = new WriteOptions();
    writeOptions.setDisableWAL(true);

    System.out.printf("WAL is disabled - %s \n", writeOptions.disableWAL());

    try {
        db = RocksDB.open(options, dbDirectory.getPath());
    } catch (RocksDBException e) {
        // Fail fast with the cause attached instead of swallowing the error.
        throw new RuntimeException("Failed to open RocksDB at " + dbDirectory.getPath(), e);
    }
}
 
Example #8
Source File: RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter.java    From kcache with Apache License 2.0 4 votes vote down vote up
/**
 * Forwards the environment setting to the wrapped {@link DBOptions}.
 *
 * <p>{@code Env} is a database-wide concern, so it belongs to the DB options
 * half of this adapter rather than the column-family options.
 *
 * @param env environment to install
 * @return this adapter, for call chaining
 */
@Override
public Options setEnv(final Env env) {
    this.dbOptions.setEnv(env);
    return this;
}
 
Example #9
Source File: RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter.java    From kcache with Apache License 2.0 4 votes vote down vote up
/**
 * Reads the environment from the wrapped {@link DBOptions}.
 *
 * @return the currently configured {@link Env}
 */
@Override
public Env getEnv() {
    return this.dbOptions.getEnv();
}
 
Example #10
Source File: BackupDB.java    From DDMQ with Apache License 2.0 4 votes vote down vote up
/**
 * Creates a new backup of the live database under {@code DB_PATH_BACKUP}.
 *
 * <p>Re-entrant calls are rejected while a backup is in flight. Note - the
 * underlying {@code BackupEngine.createNewBackup} is not thread safe.
 *
 * @return {@code SUCCESS} on completion, {@code BEING_BACKUP} if a backup is
 *         already running, {@code FAIL} if RocksDB reports an error
 */
public static BackupState backup() {
    // NOTE(review): check-then-set on a plain boolean is racy under concurrent
    // callers; consider AtomicBoolean.compareAndSet — confirm threading model.
    if (backuping) {
        LOGGER.info("is backuping, return");
        return BackupState.BEING_BACKUP;
    }

    LOGGER.info("start backup");
    backuping = true;
    try (final BackupableDBOptions bopt = new BackupableDBOptions(DB_PATH_BACKUP);
         final BackupEngine be = BackupEngine.open(Env.getDefault(), bopt)) {

        /*
         * flushBeforeBackup == false: skip the memtable flush, so the backup
         * also copies WAL files for live memtables. Either way the backup is
         * consistent with the current state of the database.
         */
        boolean flushBeforeBackup = false;
        be.createNewBackup(RDB.DB, flushBeforeBackup);

        // Log a summary of every backup the engine now knows about.
        // Fixed malformed placeholder "backupInfo[{}}" -> "backupInfo[{}]" and
        // hoisted the repeated backupInfos.get(i) lookup.
        List<BackupInfo> backupInfos = be.getBackupInfo();
        for (int i = 0; i < backupInfos.size(); i++) {
            BackupInfo info = backupInfos.get(i);
            LOGGER.info("backupInfo[{}], backupId:{}, timestamp:{}, size:{}, numberFiles:{}",
                    i, info.backupId(), info.timestamp(), info.size(), info.numberFiles());
        }

        return BackupState.SUCCESS;
    } catch (RocksDBException e) {
        LOGGER.error("error while backup, path:{}, err:{}", DB_PATH_BACKUP, e.getMessage(), e);
        return BackupState.FAIL;
    } finally {
        backuping = false;
        LOGGER.info("end backup");
    }
}
 
Example #11
Source File: BackupDB.java    From DDMQ with Apache License 2.0 4 votes vote down vote up
/**
 * Restores the database from the latest backup into {@code DB_PATH_RESTORE},
 * then re-opens it and logs up to the first 10 key/value pairs as a sanity check.
 *
 * <p>Re-entrant calls are rejected while a restore is in flight.
 *
 * @return {@code SUCCESS} on completion, {@code BEING_RESTORE} if a restore is
 *         already running, {@code FAIL} if RocksDB reports an error
 * @throws RocksDBException declared but handled internally for the restore path
 */
public static RestoreState restore() throws RocksDBException {
    if (restoring) {
        LOGGER.info("is restoring, return");
        return RestoreState.BEING_RESTORE;
    }

    LOGGER.info("start restore");
    restoring = true;
    RocksDB restoreDB = null;
    try (final BackupableDBOptions bopt = new BackupableDBOptions(DB_PATH_BACKUP);
         final BackupEngine be = BackupEngine.open(Env.getDefault(), bopt)) {
        // restore db from first backup

        /**
         * @param keepLogFiles If true, restore won't overwrite the existing log files
         *   in wal_dir. It will also move all log files from archive directory to
         *   wal_dir. Use this option in combination with
         *   BackupableDBOptions::backup_log_files = false for persisting in-memory
         *   databases.
         *   Default: false
         */
        boolean keepLogFiles = false;
        // Data dir and WAL dir both point at DB_PATH_RESTORE.
        be.restoreDbFromLatestBackup(DB_PATH_RESTORE, DB_PATH_RESTORE, new RestoreOptions(keepLogFiles));
        // open database again.
        restoreDB = RocksDB.open(OptionsConfig.DB_OPTIONS, DB_PATH_RESTORE, CFManager.CF_DESCRIPTORS, CFManager.CF_HANDLES);

        // Sanity check: dump at most the first 10 entries of the restored DB.
        int i = 0;
        try (RocksIterator it = restoreDB.newIterator()) {
            for (it.seekToFirst(); it.isValid(); it.next()) {
                // NOTE(review): new String(byte[]) uses the platform default
                // charset — consider StandardCharsets.UTF_8; confirm key encoding.
                LOGGER.info("i:{}, key:{}, value:{}", i++, new String(it.key()), new String(it.value()));
                if (i == 10) {
                    break;
                }
            }
        }

        return RestoreState.SUCCESS;
    } catch (RocksDBException e) {
        LOGGER.error("error while restore, path:{}, err:{}", DB_PATH_RESTORE, e.getMessage(), e);
        return RestoreState.FAIL;
    } finally {
        // Close the verification handle regardless of outcome.
        if (restoreDB != null) {
            restoreDB.close();
        }

        restoring = false;
        LOGGER.info("end restore");
    }
}