org.rocksdb.ColumnFamilyHandle Java Examples

The following examples show how to use org.rocksdb.ColumnFamilyHandle. Each example is taken from an open-source project; the source file, project, and license are noted above the code.
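
A ColumnFamilyHandle is obtained either from RocksDB.createColumnFamily or from the handle list that RocksDB.open fills when a database is opened with column family descriptors; the handle is then passed to get/put/delete calls to address that family. Before the examples, here is a minimal, self-contained sketch of that lifecycle using only the standard RocksJava API; the path and family name are placeholders, not taken from any of the projects below.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class ColumnFamilyHandleBasics {

    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();

        // The "default" column family must always be listed when opening with descriptors.
        List<ColumnFamilyDescriptor> descriptors = Arrays.asList(
            new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
            new ColumnFamilyDescriptor("example-cf".getBytes()));

        // RocksDB.open fills this list with one handle per descriptor, in the same order.
        List<ColumnFamilyHandle> handles = new ArrayList<>();

        try (DBOptions options = new DBOptions()
                 .setCreateIfMissing(true)
                 .setCreateMissingColumnFamilies(true);
             RocksDB db = RocksDB.open(options, "/tmp/cf-example", descriptors, handles)) {

            ColumnFamilyHandle exampleCf = handles.get(1);

            // Reads and writes are addressed to a column family through its handle.
            db.put(exampleCf, "key".getBytes(), "value".getBytes());
            System.out.println(new String(db.get(exampleCf, "key".getBytes())));

            // Close handles before the database itself is closed.
            for (ColumnFamilyHandle handle : handles) {
                handle.close();
            }
        }
    }
}
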
Example #1
Source File: RocksDBCachingPriorityQueueSet.java    From Flink-CEPplus with Apache License 2.0
RocksDBCachingPriorityQueueSet(
	@Nonnegative int keyGroupId,
	@Nonnegative int keyGroupPrefixBytes,
	@Nonnull RocksDB db,
	@Nonnull ColumnFamilyHandle columnFamilyHandle,
	@Nonnull TypeSerializer<E> byteOrderProducingSerializer,
	@Nonnull DataOutputSerializer outputStream,
	@Nonnull DataInputDeserializer inputStream,
	@Nonnull RocksDBWriteBatchWrapper batchWrapper,
	@Nonnull OrderedByteArraySetCache orderedByteArraySetCache) {
	this.db = db;
	this.columnFamilyHandle = columnFamilyHandle;
	this.byteOrderProducingSerializer = byteOrderProducingSerializer;
	this.batchWrapper = batchWrapper;
	this.outputView = outputStream;
	this.inputView = inputStream;
	this.orderedCache = orderedByteArraySetCache;
	this.allElementsInCache = false;
	this.groupPrefixBytes = createKeyGroupBytes(keyGroupId, keyGroupPrefixBytes);
	this.seekHint = groupPrefixBytes;
	this.internalIndex = HeapPriorityQueueElement.NOT_CONTAINED;
}
 
Example #2
Source File: RocksDBNativeMetricMonitor.java    From flink with Apache License 2.0
/**
 * Updates the value of metricView if the reference is still valid.
 */
private void setProperty(ColumnFamilyHandle handle, String property, RocksDBNativeMetricView metricView) {
	if (metricView.isClosed()) {
		return;
	}
	try {
		synchronized (lock) {
			if (rocksDB != null) {
				long value = rocksDB.getLongProperty(handle, property);
				metricView.setValue(value);
			}
		}
	} catch (RocksDBException e) {
		metricView.close();
		LOG.warn("Failed to read native metric %s from RocksDB", property, e);
	}
}
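
The setProperty method above reads a per-column-family counter through RocksDB.getLongProperty. The same call can be used directly; below is a minimal sketch, assuming a db and columnFamilyHandle opened as in the sketch at the top. The property names are standard RocksDB native properties.

// requires: import org.rocksdb.*;
long estimatedKeys = db.getLongProperty(columnFamilyHandle, "rocksdb.estimate-num-keys");
long memTableBytes = db.getLongProperty(columnFamilyHandle, "rocksdb.size-all-mem-tables");
System.out.println("keys=" + estimatedKeys + ", memtable bytes=" + memTableBytes);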
 
Example #3
Source File: RocksDBWriteBatchWrapperTest.java    From flink with Apache License 2.0
/**
 * Tests that {@link RocksDBWriteBatchWrapper} flushes after the memory consumed exceeds the preconfigured value.
 */
@Test
public void testWriteBatchWrapperFlushAfterMemorySizeExceed() throws Exception {
	try (RocksDB db = RocksDB.open(folder.newFolder().getAbsolutePath());
		WriteOptions options = new WriteOptions().setDisableWAL(true);
		ColumnFamilyHandle handle = db.createColumnFamily(new ColumnFamilyDescriptor("test".getBytes()));
		RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(db, options, 200, 50)) {

		long initBatchSize = writeBatchWrapper.getDataSize();
		byte[] dummy = new byte[6];
		ThreadLocalRandom.current().nextBytes(dummy);
		// each KV adds 1 + 1 + 1 + 6 + 1 + 6 = 16 bytes
		// the record format is [handleType|kvType|keyLen|key|valueLen|value]
		// for more details, see write_batch.cc in RocksDB
		writeBatchWrapper.put(handle, dummy, dummy);
		assertEquals(initBatchSize + 16, writeBatchWrapper.getDataSize());
		writeBatchWrapper.put(handle, dummy, dummy);
		assertEquals(initBatchSize + 32, writeBatchWrapper.getDataSize());
		writeBatchWrapper.put(handle, dummy, dummy);
		// will flush all, then an empty write batch
		assertEquals(initBatchSize, writeBatchWrapper.getDataSize());
	}
}
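
The byte accounting in this test reflects WriteBatch's internal record encoding, which plain RocksJava exposes through WriteBatch.getDataSize(). Below is a minimal sketch, independent of Flink's wrapper, that shows the same 16-byte growth for a 6-byte key and value in a non-default column family; the path is a placeholder.

// requires: import org.rocksdb.*;
static void writeBatchSizes() throws RocksDBException {
    try (RocksDB db = RocksDB.open("/tmp/wb-example");
         ColumnFamilyHandle cf = db.createColumnFamily(new ColumnFamilyDescriptor("test".getBytes()));
         WriteBatch batch = new WriteBatch();
         WriteOptions writeOptions = new WriteOptions()) {

        long emptySize = batch.getDataSize(); // size of the empty batch header

        byte[] key = new byte[6];
        byte[] value = new byte[6];
        batch.put(cf, key, value);

        // 1 (type) + 1 (cf id) + 1 (key len) + 6 (key) + 1 (value len) + 6 (value) = 16
        System.out.println(batch.getDataSize() - emptySize);

        db.write(writeOptions, batch); // atomically applies all records in the batch
    }
}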
 
Example #4
Source File: RocksDBIngester.java    From hugegraph with Apache License 2.0
public List<String> ingest(Path path, ColumnFamilyHandle cf)
                           throws RocksDBException {
    SuffixFileVisitor visitor = new SuffixFileVisitor(".sst");
    try {
        Files.walkFileTree(path, visitor);
    } catch (IOException e) {
        throw new BackendException("Failed to walk path '%s'", e, path);
    }

    List<Path> files = visitor.files();
    List<String> ssts = new ArrayList<>(files.size());
    for (Path file : files) {
        File sst = file.toFile();
        if (sst.exists() && sst.length() > 0) {
            ssts.add(sst.getPath());
        }
    }
    this.ingest(cf, ssts);

    return ssts;
}
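
Ingestion presumes the .sst files were produced by RocksDB's SstFileWriter. Below is a minimal sketch of writing one such file and ingesting it into a column family; the path is a placeholder, and db/cf are assumed to be already open.

// requires: import org.rocksdb.*; import java.util.Collections;
static void writeAndIngestSst(RocksDB db, ColumnFamilyHandle cf) throws RocksDBException {
    try (Options options = new Options();
         EnvOptions envOptions = new EnvOptions();
         SstFileWriter writer = new SstFileWriter(envOptions, options)) {
        writer.open("/tmp/ingest/example.sst");
        // keys must be added in ascending comparator order
        writer.put("key-1".getBytes(), "value-1".getBytes());
        writer.put("key-2".getBytes(), "value-2".getBytes());
        writer.finish();
    }
    try (IngestExternalFileOptions ingestOptions = new IngestExternalFileOptions()) {
        db.ingestExternalFile(cf, Collections.singletonList("/tmp/ingest/example.sst"), ingestOptions);
    }
}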
 
Example #5
Source File: RocksDBWriteBatchWrapperTest.java    From flink with Apache License 2.0
/**
 * Tests that {@link RocksDBWriteBatchWrapper} flushes after the kv count exceeds the preconfigured value.
 */
@Test
public void testWriteBatchWrapperFlushAfterCountExceed() throws Exception {
	try (RocksDB db = RocksDB.open(folder.newFolder().getAbsolutePath());
		WriteOptions options = new WriteOptions().setDisableWAL(true);
		ColumnFamilyHandle handle = db.createColumnFamily(new ColumnFamilyDescriptor("test".getBytes()));
		RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(db, options, 100, 50000)) {
		long initBatchSize = writeBatchWrapper.getDataSize();
		byte[] dummy = new byte[2];
		ThreadLocalRandom.current().nextBytes(dummy);
		for (int i = 1; i < 100; ++i) {
			writeBatchWrapper.put(handle, dummy, dummy);
			// each KV adds 1 + 1 + 1 + 2 + 1 + 2 = 8 bytes (same record format as above)
			assertEquals(initBatchSize + 8 * i, writeBatchWrapper.getDataSize());
		}
		writeBatchWrapper.put(handle, dummy, dummy);
		assertEquals(initBatchSize, writeBatchWrapper.getDataSize());
	}
}
 
Example #6
Source File: PubchemTTLMerger.java    From act with GNU General Public License v3.0
public static AbstractRDFHandler makeHandlerForDataFile(
    Pair<RocksDB, Map<COLUMN_FAMILIES, ColumnFamilyHandle>> dbAndHandles, File file) {
  PC_RDF_DATA_FILE_CONFIG config = getDataTypeForFile(file);
  if (config == null) {
    LOGGER.info("No handler config found for file %s", file.getAbsolutePath());
    return null;
  }
  LOGGER.info("Selected handler type %s for file %s", config.name(), file.getName());

  return new PCRDFHandler(
      dbAndHandles,
      config.columnFamily,
      config.keyType,
      config.valType,
      config.reverseSubjectAndObject,
      config.valueTransformer
  );
}
 
Example #7
Source File: RocksDBColumnarKeyValueStorageTest.java    From besu with Apache License 2.0
@Test
public void twoSegmentsAreIndependent() throws Exception {
  final SegmentedKeyValueStorage<ColumnFamilyHandle> store = createSegmentedStore();

  final Transaction<ColumnFamilyHandle> tx = store.startTransaction();
  tx.put(
      store.getSegmentIdentifierByName(TestSegment.BAR),
      bytesFromHexString("0001"),
      bytesFromHexString("0FFF"));
  tx.commit();

  final Optional<byte[]> result =
      store.get(store.getSegmentIdentifierByName(TestSegment.FOO), bytesFromHexString("0001"));

  assertThat(result).isEmpty();

  store.close();
}
 
Example #8
Source File: RocksRawKVStore.java    From sofa-jraft with Apache License 2.0
void ingestSstFiles(final EnumMap<SstColumnFamily, File> sstFileTable) {
    final Timer.Context timeCtx = getTimeContext("INGEST_SST_FILE");
    final Lock readLock = this.readWriteLock.readLock();
    readLock.lock();
    try {
        for (final Map.Entry<SstColumnFamily, File> entry : sstFileTable.entrySet()) {
            final SstColumnFamily sstColumnFamily = entry.getKey();
            final File sstFile = entry.getValue();
            final ColumnFamilyHandle columnFamilyHandle = findColumnFamilyHandle(sstColumnFamily);
            try (final IngestExternalFileOptions ingestOptions = new IngestExternalFileOptions()) {
                if (FileUtils.sizeOf(sstFile) == 0L) {
                    // skip empty sst files rather than aborting ingestion of the remaining ones
                    continue;
                }
                final String filePath = sstFile.getAbsolutePath();
                LOG.info("Start ingest sst file {}.", filePath);
                this.db.ingestExternalFile(columnFamilyHandle, Collections.singletonList(filePath), ingestOptions);
            } catch (final RocksDBException e) {
                throw new StorageException("Fail to ingest sst file at path: " + sstFile, e);
            }
        }
    } finally {
        readLock.unlock();
        timeCtx.stop();
    }
}
 
Example #9
Source File: SQLConnection.java    From act with GNU General Public License v3.0
public Pair<RocksDB, Map<String, ColumnFamilyHandle>> openSupportingIndex(File supportingIndex)
    throws RocksDBException {
  List<FromBrendaDB> instances = BrendaSupportingEntries.allFromBrendaDBInstances();
  List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>(instances.size() + 1);
  columnFamilyDescriptors.add(new ColumnFamilyDescriptor("default".getBytes()));
  for (FromBrendaDB instance : instances) {
    columnFamilyDescriptors.add(new ColumnFamilyDescriptor(instance.getColumnFamilyName().getBytes()));
  }
  List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>(columnFamilyDescriptors.size());

  DBOptions dbOptions = new DBOptions();
  dbOptions.setCreateIfMissing(false);
  RocksDB rocksDB = RocksDB.open(dbOptions, supportingIndex.getAbsolutePath(),
      columnFamilyDescriptors, columnFamilyHandles);
  Map<String, ColumnFamilyHandle> columnFamilyHandleMap = new HashMap<>(columnFamilyHandles.size());
  // TODO: can we zip these together more easily w/ Java 8?

  for (int i = 0; i < columnFamilyDescriptors.size(); i++) {
    ColumnFamilyDescriptor cfd = columnFamilyDescriptors.get(i);
    ColumnFamilyHandle cfh = columnFamilyHandles.get(i);
    columnFamilyHandleMap.put(new String(cfd.columnFamilyName(), BrendaSupportingEntries.UTF8), cfh);
  }

  return Pair.of(rocksDB, columnFamilyHandleMap);
}
 
Example #10
Source File: RocksDBOperationUtils.java    From flink with Apache License 2.0
private static ColumnFamilyHandle createColumnFamily(ColumnFamilyDescriptor columnDescriptor, RocksDB db) {
	try {
		return db.createColumnFamily(columnDescriptor);
	} catch (RocksDBException e) {
		IOUtils.closeQuietly(columnDescriptor.getOptions());
		throw new FlinkRuntimeException("Error creating ColumnFamilyHandle.", e);
	}
}
 
Example #11
Source File: RocksDBRestoreResult.java    From flink with Apache License 2.0
public RocksDBRestoreResult(
	RocksDB db,
	ColumnFamilyHandle defaultColumnFamilyHandle,
	RocksDBNativeMetricMonitor nativeMetricMonitor,
	long lastCompletedCheckpointId,
	UUID backendUID,
	SortedMap<Long, Set<StateHandleID>> restoredSstFiles) {
	this.db = db;
	this.defaultColumnFamilyHandle = defaultColumnFamilyHandle;
	this.nativeMetricMonitor = nativeMetricMonitor;
	this.lastCompletedCheckpointId = lastCompletedCheckpointId;
	this.backendUID = backendUID;
	this.restoredSstFiles = restoredSstFiles;
}
 
Example #12
Source File: RocksDBReducingState.java    From Flink-CEPplus with Apache License 2.0
@SuppressWarnings("unchecked")
static <K, N, SV, S extends State, IS extends S> IS create(
	StateDescriptor<S, SV> stateDesc,
	Tuple2<ColumnFamilyHandle, RegisteredKeyValueStateBackendMetaInfo<N, SV>> registerResult,
	RocksDBKeyedStateBackend<K> backend) {
	return (IS) new RocksDBReducingState<>(
		registerResult.f0,
		registerResult.f1.getNamespaceSerializer(),
		registerResult.f1.getStateSerializer(),
		stateDesc.getDefaultValue(),
		((ReducingStateDescriptor<SV>) stateDesc).getReduceFunction(),
		backend);
}
 
Example #13
Source File: RocksDBFoldingState.java    From Flink-CEPplus with Apache License 2.0
/**
 * Creates a new {@code RocksDBFoldingState}.
 *
 * @param columnFamily The RocksDB column family that this state is associated with.
 * @param namespaceSerializer The serializer for the namespace.
 * @param valueSerializer The serializer for the state.
 * @param defaultValue The default value for the state.
 * @param foldFunction The fold function used for folding state.
 * @param backend The backend that this state is bound to.
 */
private RocksDBFoldingState(
	ColumnFamilyHandle columnFamily,
	TypeSerializer<N> namespaceSerializer,
	TypeSerializer<ACC> valueSerializer,
	ACC defaultValue,
	FoldFunction<T, ACC> foldFunction,
	RocksDBKeyedStateBackend<K> backend) {

	super(columnFamily, namespaceSerializer, valueSerializer, defaultValue, backend);

	this.foldFunction = foldFunction;
}
 
Example #14
Source File: RocksDBOperationUtils.java    From flink with Apache License 2.0
public static void addColumnFamilyOptionsToCloseLater(
	List<ColumnFamilyOptions> columnFamilyOptions, ColumnFamilyHandle columnFamilyHandle) {
	try {
		if (columnFamilyHandle != null && columnFamilyHandle.getDescriptor() != null) {
			columnFamilyOptions.add(columnFamilyHandle.getDescriptor().getOptions());
		}
	} catch (RocksDBException e) {
		// ignore
	}
}
 
Example #15
Source File: RocksDBWriteBatchWrapper.java    From Flink-CEPplus with Apache License 2.0
public void put(
	@Nonnull ColumnFamilyHandle handle,
	@Nonnull byte[] key,
	@Nonnull byte[] value) throws RocksDBException {

	batch.put(handle, key, value);

	if (batch.count() == capacity) {
		flush();
	}
}
 
Example #16
Source File: RocksDBMetronome.java    From nifi with Apache License 2.0
/**
 * Puts the key/value pair into the database in the specified column family.
 *
 * @param columnFamilyHandle the column family into which to put the value
 * @param writeOptions       specification of options for write operations
 * @param key                the key to be inserted
 * @param value              the value to be associated with the specified key
 * @throws RocksDBException thrown if there is an error in the underlying library.
 */
public void put(final ColumnFamilyHandle columnFamilyHandle, WriteOptions writeOptions, final byte[] key, final byte[] value) throws RocksDBException {
    dbReadLock.lock();
    try {
        checkDbState();
        rocksDB.put(columnFamilyHandle, writeOptions, key, value);
    } finally {
        dbReadLock.unlock();
    }
}
 
Example #17
Source File: RocksDBWriteBatchWrapper.java    From flink with Apache License 2.0
public void put(
	@Nonnull ColumnFamilyHandle handle,
	@Nonnull byte[] key,
	@Nonnull byte[] value) throws RocksDBException {

	batch.put(handle, key, value);

	flushIfNeeded();
}
 
Example #18
Source File: RocksDBNativeMetricMonitorTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testReturnsUnsigned() throws Throwable {
	RocksDBResource localRocksDBResource = new RocksDBResource();
	localRocksDBResource.before();

	SimpleMetricRegistry registry = new SimpleMetricRegistry();
	GenericMetricGroup group = new GenericMetricGroup(
		registry,
		UnregisteredMetricGroups.createUnregisteredTaskMetricGroup(),
		OPERATOR_NAME
	);

	RocksDBNativeMetricOptions options = new RocksDBNativeMetricOptions();
	options.enableSizeAllMemTables();

	RocksDBNativeMetricMonitor monitor = new RocksDBNativeMetricMonitor(
		options,
		group,
		localRocksDBResource.getRocksDB()
	);

	ColumnFamilyHandle handle = localRocksDBResource.createNewColumnFamily(COLUMN_FAMILY_NAME);
	monitor.registerColumnFamily(COLUMN_FAMILY_NAME, handle);
	RocksDBNativeMetricMonitor.RocksDBNativeMetricView view = registry.metrics.get(0);

	view.setValue(-1);
	BigInteger result = view.getValue();

	localRocksDBResource.after();

	Assert.assertEquals("Failed to interpret RocksDB result as an unsigned long", 1, result.signum());
}
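
The unsigned interpretation under test matters because RocksDB reports native properties as unsigned 64-bit values, while Java's long is signed. Below is a worked sketch of the usual conversion; it mirrors what the metric view is expected to do, not Flink's exact implementation.

// requires: import java.math.BigInteger; import java.nio.ByteBuffer;
long raw = -1L; // bit pattern 0xFFFFFFFFFFFFFFFF, i.e. 2^64 - 1 when unsigned

byte[] bytes = ByteBuffer.allocate(Long.BYTES).putLong(raw).array();
BigInteger unsigned = new BigInteger(1, bytes); // signum 1: treat the bytes as a magnitude

System.out.println(unsigned);                   // 18446744073709551615
System.out.println(Long.toUnsignedString(raw)); // same value (Java 8+)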
 
Example #19
Source File: RocksDBNativeMetricMonitorTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testClosedGaugesDontRead() {
	SimpleMetricRegistry registry = new SimpleMetricRegistry();
	GenericMetricGroup group = new GenericMetricGroup(
		registry,
		UnregisteredMetricGroups.createUnregisteredTaskMetricGroup(),
		OPERATOR_NAME
	);

	RocksDBNativeMetricOptions options = new RocksDBNativeMetricOptions();
	options.enableSizeAllMemTables();

	RocksDBNativeMetricMonitor monitor = new RocksDBNativeMetricMonitor(
		options,
		group,
		rocksDBResource.getRocksDB()
	);

	ColumnFamilyHandle handle = rocksDBResource.createNewColumnFamily(COLUMN_FAMILY_NAME);
	monitor.registerColumnFamily(COLUMN_FAMILY_NAME, handle);

	RocksDBNativeMetricMonitor.RocksDBNativeMetricView view = registry.metrics.get(0);

	view.close();
	view.update();

	Assert.assertEquals("Closed gauge still queried RocksDB", BigInteger.ZERO, view.getValue());
}
 
Example #20
Source File: RocksDBIncrementalRestoreOperation.java    From Flink-CEPplus with Apache License 2.0
private RestoredDBInstance restoreDBInstanceFromStateHandle(
	IncrementalRemoteKeyedStateHandle restoreStateHandle,
	Path temporaryRestoreInstancePath) throws Exception {

	try (RocksDBStateDownloader rocksDBStateDownloader =
			new RocksDBStateDownloader(numberOfTransferringThreads)) {
		rocksDBStateDownloader.transferAllStateDataToDirectory(
			restoreStateHandle,
			temporaryRestoreInstancePath,
			cancelStreamRegistry);
	}

	// read meta data
	KeyedBackendSerializationProxy<K> serializationProxy = readMetaData(restoreStateHandle.getMetaStateHandle());
	List<StateMetaInfoSnapshot> stateMetaInfoSnapshots = serializationProxy.getStateMetaInfoSnapshots();

	List<ColumnFamilyDescriptor> columnFamilyDescriptors =
		createAndRegisterColumnFamilyDescriptors(stateMetaInfoSnapshots, false);

	List<ColumnFamilyHandle> columnFamilyHandles =
		new ArrayList<>(stateMetaInfoSnapshots.size() + 1);

	RocksDB restoreDb = RocksDBOperationUtils.openDB(
		temporaryRestoreInstancePath.getPath(),
		columnFamilyDescriptors,
		columnFamilyHandles,
		RocksDBOperationUtils.createColumnFamilyOptions(columnFamilyOptionsFactory, "default"),
		dbOptions);

	return new RestoredDBInstance(restoreDb, columnFamilyHandles, columnFamilyDescriptors, stateMetaInfoSnapshots);
}
 
Example #21
Source File: WindowedRocksDbHdfsState.java    From jstorm with Apache License 2.0
@Override
public void putBatch(TimeWindow window, Map<K, V> batch) {
    // close the native WriteBatch and WriteOptions to avoid leaking off-heap memory
    try (WriteBatch writeBatch = new WriteBatch();
         WriteOptions writeOptions = new WriteOptions()) {
        ColumnFamilyHandle handler = getColumnFamilyHandle(window);
        for (Map.Entry<K, V> entry : batch.entrySet()) {
            writeBatch.put(handler, serializer.serialize(entry.getKey()), serializer.serialize(entry.getValue()));
        }
        rocksDb.write(writeOptions, writeBatch);
    } catch (RocksDBException e) {
        LOG.error("Failed to put batch={} for window={}", batch, window, e);
        // preserve the original exception as the cause instead of rethrowing only its message
        throw new RuntimeException(e);
    }
}
 
Example #22
Source File: TestRocksDBStore.java    From dremio-oss with Apache License 2.0
@Before
public void setUpStore() {
  ColumnFamilyHandle handle = rocksDBResource.get().getDefaultColumnFamily();
  final RocksMetaManager blobManager = new RocksMetaManager(rocksDBResource.dbPath, "test", BLOB_FILTER_SIZE);
  store = new RocksDBStore("test", new ColumnFamilyDescriptor("test".getBytes(UTF_8)), handle, rocksDBResource.get(), 4, blobManager);

  // Making sure test is repeatable
  Random random = new Random(42);
  for(int i = 0; i < 1 << 16; i++ ) {
    store.put(newRandomValue(random), newRandomValue(random));
  }
  store.put(specialKey, newRandomValue(random));
}
 
Example #23
Source File: RocksDBNativeMetricMonitor.java    From flink with Apache License 2.0
private RocksDBNativeMetricView(
	ColumnFamilyHandle handle,
	@Nonnull String property
) {
	this.handle = handle;
	this.property = property;
	this.bigInteger = BigInteger.ZERO;
	this.closed = false;
}
 
Example #24
Source File: RocksDBValueState.java    From flink with Apache License 2.0
@SuppressWarnings("unchecked")
static <K, N, SV, S extends State, IS extends S> IS create(
	StateDescriptor<S, SV> stateDesc,
	Tuple2<ColumnFamilyHandle, RegisteredKeyValueStateBackendMetaInfo<N, SV>> registerResult,
	RocksDBKeyedStateBackend<K> backend) {
	return (IS) new RocksDBValueState<>(
		registerResult.f0,
		registerResult.f1.getNamespaceSerializer(),
		registerResult.f1.getStateSerializer(),
		stateDesc.getDefaultValue(),
		backend);
}
 
Example #25
Source File: Index.java    From outbackcdx with Apache License 2.0
public Index(String name, RocksDB db, ColumnFamilyHandle defaultCF, ColumnFamilyHandle aliasCF, AccessControl accessControl, long scanCap, UrlCanonicalizer canonicalizer) {
    this.name = name;
    this.db = db;
    this.defaultCF = defaultCF;
    this.aliasCF = aliasCF;
    this.accessControl = accessControl;
    this.scanCap = scanCap;
    this.canonicalizer = canonicalizer;
}
 
Example #26
Source File: RocksDBListState.java    From flink with Apache License 2.0
@SuppressWarnings("unchecked")
static <E, K, N, SV, S extends State, IS extends S> IS create(
	StateDescriptor<S, SV> stateDesc,
	Tuple2<ColumnFamilyHandle, RegisteredKeyValueStateBackendMetaInfo<N, SV>> registerResult,
	RocksDBKeyedStateBackend<K> backend) {
	return (IS) new RocksDBListState<>(
		registerResult.f0,
		registerResult.f1.getNamespaceSerializer(),
		(TypeSerializer<List<E>>) registerResult.f1.getStateSerializer(),
		(List<E>) stateDesc.getDefaultValue(),
		backend);
}
 
Example #27
Source File: RocksDBValueState.java    From Flink-CEPplus with Apache License 2.0
@SuppressWarnings("unchecked")
static <K, N, SV, S extends State, IS extends S> IS create(
	StateDescriptor<S, SV> stateDesc,
	Tuple2<ColumnFamilyHandle, RegisteredKeyValueStateBackendMetaInfo<N, SV>> registerResult,
	RocksDBKeyedStateBackend<K> backend) {
	return (IS) new RocksDBValueState<>(
		registerResult.f0,
		registerResult.f1.getNamespaceSerializer(),
		registerResult.f1.getStateSerializer(),
		stateDesc.getDefaultValue(),
		backend);
}
 
Example #28
Source File: RocksDBLogStorage.java    From sofa-jraft with Apache License 2.0
private void openDB(final List<ColumnFamilyDescriptor> columnFamilyDescriptors) throws RocksDBException {
    final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();

    final File dir = new File(this.path);
    if (dir.exists() && !dir.isDirectory()) {
        throw new IllegalStateException("Invalid log path, it's a regular file: " + this.path);
    }
    this.db = RocksDB.open(this.dbOptions, this.path, columnFamilyDescriptors, columnFamilyHandles);
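    // columnFamilyHandles is filled by RocksDB.open in the same order as the descriptors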

    assert (columnFamilyHandles.size() == 2);
    this.confHandle = columnFamilyHandles.get(0);
    this.defaultHandle = columnFamilyHandles.get(1);
}
 
Example #29
Source File: RocksDBIncrementalRestoreOperation.java    From flink with Apache License 2.0
private RestoredDBInstance(
	@Nonnull RocksDB db,
	@Nonnull List<ColumnFamilyHandle> columnFamilyHandles,
	@Nonnull List<ColumnFamilyDescriptor> columnFamilyDescriptors,
	@Nonnull List<StateMetaInfoSnapshot> stateMetaInfoSnapshots) {
	this.db = db;
	this.defaultColumnFamilyHandle = columnFamilyHandles.remove(0);
	this.columnFamilyHandles = columnFamilyHandles;
	this.columnFamilyDescriptors = columnFamilyDescriptors;
	this.stateMetaInfoSnapshots = stateMetaInfoSnapshots;
	this.readOptions = RocksDBOperationUtils.createTotalOrderSeekReadOptions();
}