org.rocksdb.ColumnFamilyHandle Java Examples

The following examples show how to use org.rocksdb.ColumnFamilyHandle, drawn from a range of open-source projects. The project, author, source file, and license are noted above each example.
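Before the examples, here is a minimal, self-contained sketch of the pattern they all build on: open a database with a list of ColumnFamilyDescriptors and receive a matching list of ColumnFamilyHandles. This sketch is not taken from any of the projects below; the path /tmp/cfh-demo and the column family name "example" are purely illustrative.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class ColumnFamilyHandleBasics {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        // The default column family must always be listed, in first position.
        List<ColumnFamilyDescriptor> descriptors = Arrays.asList(
                new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
                new ColumnFamilyDescriptor("example".getBytes()));
        List<ColumnFamilyHandle> handles = new ArrayList<>();
        try (DBOptions options = new DBOptions()
                     .setCreateIfMissing(true)
                     .setCreateMissingColumnFamilies(true);
             RocksDB db = RocksDB.open(options, "/tmp/cfh-demo", descriptors, handles)) {
            // Handles are returned in the same order as the descriptors.
            ColumnFamilyHandle example = handles.get(1);
            db.put(example, "key".getBytes(), "value".getBytes());
            System.out.println(new String(db.get(example, "key".getBytes())));
            for (ColumnFamilyHandle handle : handles) {
                handle.close(); // close handles before the database itself
            }
        }
    }
}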
Example #1
Source Project: act   Author: 20n   File: SQLConnection.java    License: GNU General Public License v3.0
public Pair<RocksDB, Map<String, ColumnFamilyHandle>> openSupportingIndex(File supportingIndex)
    throws RocksDBException {
  List<FromBrendaDB> instances = BrendaSupportingEntries.allFromBrendaDBInstances();
  List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>(instances.size() + 1);
  columnFamilyDescriptors.add(new ColumnFamilyDescriptor("default".getBytes()));
  for (FromBrendaDB instance : instances) {
    columnFamilyDescriptors.add(new ColumnFamilyDescriptor(instance.getColumnFamilyName().getBytes()));
  }
  List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>(columnFamilyDescriptors.size());

  DBOptions dbOptions = new DBOptions();
  dbOptions.setCreateIfMissing(false);
  RocksDB rocksDB = RocksDB.open(dbOptions, supportingIndex.getAbsolutePath(),
      columnFamilyDescriptors, columnFamilyHandles);
  Map<String, ColumnFamilyHandle> columnFamilyHandleMap = new HashMap<>(columnFamilyHandles.size());
  // TODO: can we zip these together more easily w/ Java 8?

  for (int i = 0; i < columnFamilyDescriptors.size(); i++) {
    ColumnFamilyDescriptor cfd = columnFamilyDescriptors.get(i);
    ColumnFamilyHandle cfh = columnFamilyHandles.get(i);
    columnFamilyHandleMap.put(new String(cfd.columnFamilyName(), BrendaSupportingEntries.UTF8), cfh);
  }

  return Pair.of(rocksDB, columnFamilyHandleMap);
}
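As for the TODO above: one way to zip the descriptor and handle lists with Java 8 streams is sketched below. This is not code from the act project, only a possible answer to the question; it would slot into the method above and requires imports for java.util.stream.IntStream and java.util.stream.Collectors.

// Sketch: zip descriptors and handles into a name-to-handle map with Java 8.
Map<String, ColumnFamilyHandle> columnFamilyHandleMap = IntStream
    .range(0, columnFamilyDescriptors.size())
    .boxed()
    .collect(Collectors.toMap(
        i -> new String(columnFamilyDescriptors.get(i).columnFamilyName(), BrendaSupportingEntries.UTF8),
        columnFamilyHandles::get));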
 
Example #2
Source Project: flink   Author: apache   File: RocksDBWriteBatchWrapperTest.java    License: Apache License 2.0
/**
 * Tests that {@link RocksDBWriteBatchWrapper} flushes after the kv count exceeds the preconfigured value.
 */
@Test
public void testWriteBatchWrapperFlushAfterCountExceed() throws Exception {
	try (RocksDB db = RocksDB.open(folder.newFolder().getAbsolutePath());
		WriteOptions options = new WriteOptions().setDisableWAL(true);
		ColumnFamilyHandle handle = db.createColumnFamily(new ColumnFamilyDescriptor("test".getBytes()));
		RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(db, options, 100, 50000)) {
		long initBatchSize = writeBatchWrapper.getDataSize();
		byte[] dummy = new byte[2];
		ThreadLocalRandom.current().nextBytes(dummy);
		for (int i = 1; i < 100; ++i) {
			writeBatchWrapper.put(handle, dummy, dummy);
			// each kv consumes 8 bytes
			assertEquals(initBatchSize + 8 * i, writeBatchWrapper.getDataSize());
		}
		writeBatchWrapper.put(handle, dummy, dummy);
		assertEquals(initBatchSize, writeBatchWrapper.getDataSize());
	}
}
 
Example #3
Source Project: besu   Author: hyperledger   File: RocksDBColumnarKeyValueStorageTest.java    License: Apache License 2.0
@Test
public void twoSegmentsAreIndependent() throws Exception {
  final SegmentedKeyValueStorage<ColumnFamilyHandle> store = createSegmentedStore();

  final Transaction<ColumnFamilyHandle> tx = store.startTransaction();
  tx.put(
      store.getSegmentIdentifierByName(TestSegment.BAR),
      bytesFromHexString("0001"),
      bytesFromHexString("0FFF"));
  tx.commit();

  final Optional<byte[]> result =
      store.get(store.getSegmentIdentifierByName(TestSegment.FOO), bytesFromHexString("0001"));

  assertThat(result).isEmpty();

  store.close();
}
 
Example #4
Source Project: hugegraph   Author: hugegraph   File: RocksDBIngester.java    License: Apache License 2.0
public List<String> ingest(Path path, ColumnFamilyHandle cf)
                           throws RocksDBException {
    SuffixFileVisitor visitor = new SuffixFileVisitor(".sst");
    try {
        Files.walkFileTree(path, visitor);
    } catch (IOException e) {
        throw new BackendException("Failed to walk path '%s'", e, path);
    }

    List<Path> files = visitor.files();
    List<String> ssts = new ArrayList<>(files.size());
    for (Path file : files) {
        File sst = file.toFile();
        if (sst.exists() && sst.length() > 0) {
            ssts.add(sst.getPath());
        }
    }
    this.ingest(cf, ssts);

    return ssts;
}
 
Example #5
Source Project: flink   Author: apache   File: RocksDBWriteBatchWrapperTest.java    License: Apache License 2.0
/**
 * Tests that {@link RocksDBWriteBatchWrapper} flushes once the consumed memory exceeds the preconfigured value.
 */
@Test
public void testWriteBatchWrapperFlushAfterMemorySizeExceed() throws Exception {
	try (RocksDB db = RocksDB.open(folder.newFolder().getAbsolutePath());
		WriteOptions options = new WriteOptions().setDisableWAL(true);
		ColumnFamilyHandle handle = db.createColumnFamily(new ColumnFamilyDescriptor("test".getBytes()));
		RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(db, options, 200, 50)) {

		long initBatchSize = writeBatchWrapper.getDataSize();
		byte[] dummy = new byte[6];
		ThreadLocalRandom.current().nextBytes(dummy);
		// will add 1 + 1 + 1 + 6 + 1 + 6 = 16 bytes for each KV
		// format is [handleType|kvType|keyLen|key|valueLen|value]
	// for more information, see write_batch.cc in RocksDB
		writeBatchWrapper.put(handle, dummy, dummy);
		assertEquals(initBatchSize + 16, writeBatchWrapper.getDataSize());
		writeBatchWrapper.put(handle, dummy, dummy);
		assertEquals(initBatchSize + 32, writeBatchWrapper.getDataSize());
		writeBatchWrapper.put(handle, dummy, dummy);
		// will flush all, then an empty write batch
		assertEquals(initBatchSize, writeBatchWrapper.getDataSize());
	}
}
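The size arithmetic asserted in the two write-batch tests above can be captured in a small helper. The following is a sketch, not Flink code; it simply mirrors the record layout from the comments and assumes the column-family id and both varint32 lengths each fit in a single byte (i.e. are below 128).

// Expected WriteBatch growth for one put(), per the layout
// [handleType|kvType|keyLen|key|valueLen|value].
static long expectedPutRecordSize(int keyLen, int valueLen) {
    return 1          // handleType (column family id)
         + 1          // kvType
         + 1          // keyLen as a single-byte varint32
         + keyLen
         + 1          // valueLen as a single-byte varint32
         + valueLen;
}
// expectedPutRecordSize(2, 2) == 8  matches Example #2's per-KV size;
// expectedPutRecordSize(6, 6) == 16 matches the assertions above.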
 
Example #6
Source Project: flink   Author: flink-tpc-ds   File: RocksDBNativeMetricMonitor.java    License: Apache License 2.0
/**
 * Updates the value of metricView if the reference is still valid.
 */
private void setProperty(ColumnFamilyHandle handle, String property, RocksDBNativeMetricView metricView) {
	if (metricView.isClosed()) {
		return;
	}
	try {
		synchronized (lock) {
			if (rocksDB != null) {
				long value = rocksDB.getLongProperty(handle, property);
				metricView.setValue(value);
			}
		}
	} catch (RocksDBException e) {
		metricView.close();
		LOG.warn("Failed to read native metric %s from RocksDB", property, e);
	}
}
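For reference, getLongProperty is the RocksJava call this monitor is built around. A minimal sketch of calling it directly, assuming rocksDB and handle are an open database and a valid column family handle ("rocksdb.estimate-num-keys" is a standard RocksDB property name):

// Sketch: read one native property for a single column family.
long estimatedKeys = rocksDB.getLongProperty(handle, "rocksdb.estimate-num-keys");
LOG.info("Column family holds approximately {} keys.", estimatedKeys);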
 
Example #7
Source Project: Flink-CEPplus   Author: ljygz   File: RocksDBCachingPriorityQueueSet.java    License: Apache License 2.0
RocksDBCachingPriorityQueueSet(
	@Nonnegative int keyGroupId,
	@Nonnegative int keyGroupPrefixBytes,
	@Nonnull RocksDB db,
	@Nonnull ColumnFamilyHandle columnFamilyHandle,
	@Nonnull TypeSerializer<E> byteOrderProducingSerializer,
	@Nonnull DataOutputSerializer outputStream,
	@Nonnull DataInputDeserializer inputStream,
	@Nonnull RocksDBWriteBatchWrapper batchWrapper,
	@Nonnull OrderedByteArraySetCache orderedByteArraySetCache) {
	this.db = db;
	this.columnFamilyHandle = columnFamilyHandle;
	this.byteOrderProducingSerializer = byteOrderProducingSerializer;
	this.batchWrapper = batchWrapper;
	this.outputView = outputStream;
	this.inputView = inputStream;
	this.orderedCache = orderedByteArraySetCache;
	this.allElementsInCache = false;
	this.groupPrefixBytes = createKeyGroupBytes(keyGroupId, keyGroupPrefixBytes);
	this.seekHint = groupPrefixBytes;
	this.internalIndex = HeapPriorityQueueElement.NOT_CONTAINED;
}
 
Example #8
Source Project: act   Author: 20n   File: PubchemTTLMerger.java    License: GNU General Public License v3.0
public static AbstractRDFHandler makeHandlerForDataFile(
    Pair<RocksDB, Map<COLUMN_FAMILIES, ColumnFamilyHandle>> dbAndHandles, File file) {
  PC_RDF_DATA_FILE_CONFIG config = getDataTypeForFile(file);
  if (config == null) {
    LOGGER.info("No handler config found for file %s", file.getAbsolutePath());
    return null;
  }
  LOGGER.info("Selected handler type %s for file %s", config.name(), file.getName());

  return new PCRDFHandler(
      dbAndHandles,
      config.columnFamily,
      config.keyType,
      config.valType,
      config.reverseSubjectAndObject,
      config.valueTransformer
  );
}
 
Example #9
Source Project: sofa-jraft   Author: sofastack   File: RocksRawKVStore.java    License: Apache License 2.0
void ingestSstFiles(final EnumMap<SstColumnFamily, File> sstFileTable) {
    final Timer.Context timeCtx = getTimeContext("INGEST_SST_FILE");
    final Lock readLock = this.readWriteLock.readLock();
    readLock.lock();
    try {
        for (final Map.Entry<SstColumnFamily, File> entry : sstFileTable.entrySet()) {
            final SstColumnFamily sstColumnFamily = entry.getKey();
            final File sstFile = entry.getValue();
            final ColumnFamilyHandle columnFamilyHandle = findColumnFamilyHandle(sstColumnFamily);
            try (final IngestExternalFileOptions ingestOptions = new IngestExternalFileOptions()) {
                if (FileUtils.sizeOf(sstFile) == 0L) {
                    // skip an empty sst file rather than abort the remaining ingests
                    continue;
                }
                final String filePath = sstFile.getAbsolutePath();
                LOG.info("Start ingest sst file {}.", filePath);
                this.db.ingestExternalFile(columnFamilyHandle, Collections.singletonList(filePath), ingestOptions);
            } catch (final RocksDBException e) {
                throw new StorageException("Fail to ingest sst file at path: " + sstFile, e);
            }
        }
    } finally {
        readLock.unlock();
        timeCtx.stop();
    }
}
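Examples #4 and #9 both ingest pre-built SST files. For context, here is a hedged sketch (not from either project) of producing such a file with RocksJava's SstFileWriter and ingesting it into a column family; db, columnFamilyHandle, and the /tmp path are assumed, and the enclosing method is assumed to declare RocksDBException.

// Sketch: build an external SST file, then ingest it into one column family.
try (EnvOptions envOptions = new EnvOptions();
     Options options = new Options();
     SstFileWriter writer = new SstFileWriter(envOptions, options)) {
    writer.open("/tmp/example.sst");
    writer.put("a".getBytes(), "1".getBytes()); // keys must be added in ascending order
    writer.put("b".getBytes(), "2".getBytes());
    writer.finish();
}
try (IngestExternalFileOptions ingestOptions = new IngestExternalFileOptions()) {
    db.ingestExternalFile(columnFamilyHandle,
            Collections.singletonList("/tmp/example.sst"), ingestOptions);
}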
 
Example #10
Source Project: outbackcdx   Author: nla   File: Index.java    License: Apache License 2.0
public Index(String name, RocksDB db, ColumnFamilyHandle defaultCF, ColumnFamilyHandle aliasCF, AccessControl accessControl, long scanCap, UrlCanonicalizer canonicalizer) {
    this.name = name;
    this.db = db;
    this.defaultCF = defaultCF;
    this.aliasCF = aliasCF;
    this.accessControl = accessControl;
    this.scanCap = scanCap;
    this.canonicalizer = canonicalizer;
}
 
Example #11
Source Project: flink   Author: apache   File: RocksDBReducingState.java    License: Apache License 2.0
@SuppressWarnings("unchecked")
static <K, N, SV, S extends State, IS extends S> IS create(
	StateDescriptor<S, SV> stateDesc,
	Tuple2<ColumnFamilyHandle, RegisteredKeyValueStateBackendMetaInfo<N, SV>> registerResult,
	RocksDBKeyedStateBackend<K> backend) {
	return (IS) new RocksDBReducingState<>(
		registerResult.f0,
		registerResult.f1.getNamespaceSerializer(),
		registerResult.f1.getStateSerializer(),
		stateDesc.getDefaultValue(),
		((ReducingStateDescriptor<SV>) stateDesc).getReduceFunction(),
		backend);
}
 
Example #12
Source Project: hadoop-ozone   Author: apache   File: RDBStore.java    License: Apache License 2.0
@Override
public ArrayList<Table> listTables() {
  ArrayList<Table> returnList = new ArrayList<>();
  for (ColumnFamilyHandle handle : handleTable.values()) {
    returnList.add(new RDBTable(db, handle, writeOptions, rdbMetrics));
  }
  return returnList;
}
 
Example #13
Source Project: flink   Author: apache   File: RocksDBReducingState.java    License: Apache License 2.0
/**
 * Creates a new {@code RocksDBReducingState}.
 *
 * @param columnFamily The RocksDB column family that this state is associated with.
 * @param namespaceSerializer The serializer for the namespace.
 * @param valueSerializer The serializer for the state.
 * @param defaultValue The default value for the state.
 * @param reduceFunction The reduce function used for reducing state.
 * @param backend The backend that this state belongs to.
 */
private RocksDBReducingState(ColumnFamilyHandle columnFamily,
		TypeSerializer<N> namespaceSerializer,
		TypeSerializer<V> valueSerializer,
		V defaultValue,
		ReduceFunction<V> reduceFunction,
		RocksDBKeyedStateBackend<K> backend) {

	super(columnFamily, namespaceSerializer, valueSerializer, defaultValue, backend);
	this.reduceFunction = reduceFunction;
}
 
Example #14
Source Project: iot-mqtt   Author: ShiCloud   File: RDB.java    License: Apache License 2.0
private void cacheCFHandles(List<ColumnFamilyHandle> cfHandles) throws RocksDBException {
    if (cfHandles == null || cfHandles.isEmpty()) {
        log.error("[RocksDB] -> init columnFamilyHandle failure.");
        throw new RocksDBException("init columnFamilyHandle failure");
    }
    for (ColumnFamilyHandle cfHandle : cfHandles) {
        this.CF_HANDLES.put(new String(cfHandle.getName()), cfHandle);
    }
}
 
Example #15
Source Project: DDMQ   Author: didi   File: RDB.java    License: Apache License 2.0
public static boolean deleteFilesInRange(final ColumnFamilyHandle cfh, final byte[] beginKey,
                                         final byte[] endKey) {
    try {
        DB.deleteRange(cfh, beginKey, endKey);
        LOGGER.debug("succ delete range, columnFamilyHandle:{}, beginKey:{}, endKey:{}",
                cfh.toString(), new String(beginKey), new String(endKey));
    } catch (RocksDBException e) {
        LOGGER.error("error while delete range, columnFamilyHandle:{}, beginKey:{}, endKey:{}, err:{}",
                cfh.toString(), new String(beginKey), new String(endKey), e.getMessage(), e);
        return false;
    }
    return true;
}
 
Example #16
Source Project: flink   Author: apache   File: RocksDBStateBackendTest.java    License: Apache License 2.0
public void prepareRocksDB() throws Exception {
	String dbPath = new File(tempFolder.newFolder(), DB_INSTANCE_DIR_STRING).getAbsolutePath();
	ColumnFamilyOptions columnOptions = optionsContainer.getColumnOptions();

	ArrayList<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>(1);
	db = RocksDBOperationUtils.openDB(dbPath, Collections.emptyList(),
		columnFamilyHandles, columnOptions, optionsContainer.getDbOptions());
	defaultCFHandle = columnFamilyHandles.remove(0);
}
 
Example #17
Source Project: nifi   Author: apache   File: RocksDBMetronome.java    License: Apache License 2.0
/**
 * Get the value for the provided key in the specified column family
 *
 * @param columnFamilyHandle the column family from which to get the value
 * @param key                the key of the value to retrieve
 * @return the value for the specified key
 * @throws RocksDBException thrown if there is an error in the underlying library.
 */
public byte[] get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key) throws RocksDBException {
    dbReadLock.lock();
    try {
        checkDbState();
        return rocksDB.get(columnFamilyHandle, key);
    } finally {
        dbReadLock.unlock();
    }
}
 
Example #18
Source Project: besu   Author: hyperledger   File: RocksDBColumnarKeyValueStorageTest.java    License: Apache License 2.0
@Test
public void canGetThroughSegmentIteration() throws Exception {
  final SegmentedKeyValueStorage<ColumnFamilyHandle> store = createSegmentedStore();
  final ColumnFamilyHandle fooSegment = store.getSegmentIdentifierByName(TestSegment.FOO);
  final ColumnFamilyHandle barSegment = store.getSegmentIdentifierByName(TestSegment.BAR);

  final Transaction<ColumnFamilyHandle> tx = store.startTransaction();
  tx.put(fooSegment, bytesOf(1), bytesOf(1));
  tx.put(fooSegment, bytesOf(2), bytesOf(2));
  tx.put(fooSegment, bytesOf(3), bytesOf(3));
  tx.put(barSegment, bytesOf(4), bytesOf(4));
  tx.put(barSegment, bytesOf(5), bytesOf(5));
  tx.put(barSegment, bytesOf(6), bytesOf(6));
  tx.commit();

  final Set<byte[]> gotFromFoo =
      store.getAllKeysThat(fooSegment, x -> Arrays.equals(x, bytesOf(3)));
  final Set<byte[]> gotFromBar =
      store.getAllKeysThat(
          barSegment, x -> Arrays.equals(x, bytesOf(4)) || Arrays.equals(x, bytesOf(5)));
  final Set<byte[]> gotEmpty =
      store.getAllKeysThat(fooSegment, x -> Arrays.equals(x, bytesOf(0)));

  assertThat(gotFromFoo.size()).isEqualTo(1);
  assertThat(gotFromBar.size()).isEqualTo(2);
  assertThat(gotEmpty).isEmpty();

  assertThat(gotFromFoo).containsExactlyInAnyOrder(bytesOf(3));
  assertThat(gotFromBar).containsExactlyInAnyOrder(bytesOf(4), bytesOf(5));

  store.close();
}
 
Example #19
Source Project: iot-mqtt   Author: ShiCloud   File: RDB.java    License: Apache License 2.0
public byte[] get(final ColumnFamilyHandle cfh, final byte[] key) {
    try {
        return DB.get(cfh, key);
    } catch (RocksDBException e) {
        log.error("[RocksDB] -> error while get, columnFamilyHandle:{}, key:{}, err:{}",
                cfh.toString(), new String(key), e.getMessage(), e);
        return null;
    }
}
 
Example #20
Source Project: flink   Author: flink-tpc-ds   File: RocksDBAggregatingState.java    License: Apache License 2.0
@SuppressWarnings("unchecked")
static <K, N, SV, S extends State, IS extends S> IS create(
	StateDescriptor<S, SV> stateDesc,
	Tuple2<ColumnFamilyHandle, RegisteredKeyValueStateBackendMetaInfo<N, SV>> registerResult,
	RocksDBKeyedStateBackend<K> backend) {
	return (IS) new RocksDBAggregatingState<>(
		registerResult.f0,
		registerResult.f1.getNamespaceSerializer(),
		registerResult.f1.getStateSerializer(),
		stateDesc.getDefaultValue(),
		((AggregatingStateDescriptor<?, SV, ?>) stateDesc).getAggregateFunction(),
		backend);
}
 
Example #21
Source Project: iot-mqtt   Author: ShiCloud   File: RDB.java    License: Apache License 2.0
public boolean deleteRange(final ColumnFamilyHandle cfh, final byte[] beginKey, final byte[] endKey) {
    try {
        DB.deleteRange(cfh, beginKey, endKey);
        log.debug("[RocksDB] -> succ delete range, columnFamilyHandle:{}, beginKey:{}, endKey:{}",
                cfh.toString(), new String(beginKey), new String(endKey));
    } catch (RocksDBException e) {
        log.error("[RocksDB] ->  error while delete range, columnFamilyHandle:{}, beginKey:{}, endKey:{}, err:{}",
                cfh.toString(), new String(beginKey), new String(endKey), e.getMessage(), e);
        return false;
    }
    return true;
}
 
Example #22
Source Project: flink   Author: apache   File: RocksDBTestUtils.java    License: Apache License 2.0
public static <K> RocksDBKeyedStateBackendBuilder<K> builderForTestDB(
		File instanceBasePath,
		TypeSerializer<K> keySerializer,
		RocksDB db,
		ColumnFamilyHandle defaultCFHandle,
		ColumnFamilyOptions columnFamilyOptions) {

	final RocksDBResourceContainer optionsContainer = new RocksDBResourceContainer();

	return new RocksDBKeyedStateBackendBuilder<>(
			"no-op",
			ClassLoader.getSystemClassLoader(),
			instanceBasePath,
			optionsContainer,
			stateName -> columnFamilyOptions,
			new KvStateRegistry().createTaskRegistry(new JobID(), new JobVertexID()),
			keySerializer,
			2,
			new KeyGroupRange(0, 1),
			new ExecutionConfig(),
			TestLocalRecoveryConfig.disabled(),
			RocksDBStateBackend.PriorityQueueStateType.HEAP,
			TtlTimeProvider.DEFAULT,
			new UnregisteredMetricsGroup(),
			Collections.emptyList(),
			UncompressedStreamCompressionDecorator.INSTANCE,
			db,
			defaultCFHandle,
			new CloseableRegistry());
}
 
Example #23
Source Project: Flink-CEPplus   Author: ljygz   File: RocksDBOperationUtils.java    License: Apache License 2.0
public static RocksDB openDB(
	String path,
	List<ColumnFamilyDescriptor> stateColumnFamilyDescriptors,
	List<ColumnFamilyHandle> stateColumnFamilyHandles,
	ColumnFamilyOptions columnFamilyOptions,
	DBOptions dbOptions) throws IOException {
	List<ColumnFamilyDescriptor> columnFamilyDescriptors =
		new ArrayList<>(1 + stateColumnFamilyDescriptors.size());

	// we add the required descriptor for the default CF in FIRST position, see
	// https://github.com/facebook/rocksdb/wiki/RocksJava-Basics#opening-a-database-with-column-families
	columnFamilyDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, columnFamilyOptions));
	columnFamilyDescriptors.addAll(stateColumnFamilyDescriptors);

	RocksDB dbRef;

	try {
		dbRef = RocksDB.open(
			Preconditions.checkNotNull(dbOptions),
			Preconditions.checkNotNull(path),
			columnFamilyDescriptors,
			stateColumnFamilyHandles);
	} catch (RocksDBException e) {
		IOUtils.closeQuietly(columnFamilyOptions);
		columnFamilyDescriptors.forEach((cfd) -> IOUtils.closeQuietly(cfd.getOptions()));
		throw new IOException("Error while opening RocksDB instance.", e);
	}

	// requested + default CF
	Preconditions.checkState(1 + stateColumnFamilyDescriptors.size() == stateColumnFamilyHandles.size(),
		"Not all requested column family handles have been created");
	return dbRef;
}
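Callers of openDB must supply descriptors for every column family already present in the database, except the default one, which openDB prepends itself. A sketch of building that list with RocksDB.listColumnFamilies, assuming path points at an existing database and columnFamilyOptions is available as in the method above:

// Sketch: discover existing column families and build the descriptor list,
// filtering out the default CF because openDB adds it in first position itself.
List<ColumnFamilyDescriptor> stateColumnFamilyDescriptors = new ArrayList<>();
try (Options options = new Options()) {
    for (byte[] name : RocksDB.listColumnFamilies(options, path)) {
        if (!Arrays.equals(name, RocksDB.DEFAULT_COLUMN_FAMILY)) {
            stateColumnFamilyDescriptors.add(new ColumnFamilyDescriptor(name, columnFamilyOptions));
        }
    }
}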
 
Example #24
Source Project: flink   Author: apache   File: RocksDBOperationUtils.java    License: Apache License 2.0
public static RocksDB openDB(
	String path,
	List<ColumnFamilyDescriptor> stateColumnFamilyDescriptors,
	List<ColumnFamilyHandle> stateColumnFamilyHandles,
	ColumnFamilyOptions columnFamilyOptions,
	DBOptions dbOptions) throws IOException {
	List<ColumnFamilyDescriptor> columnFamilyDescriptors =
		new ArrayList<>(1 + stateColumnFamilyDescriptors.size());

	// we add the required descriptor for the default CF in FIRST position, see
	// https://github.com/facebook/rocksdb/wiki/RocksJava-Basics#opening-a-database-with-column-families
	columnFamilyDescriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, columnFamilyOptions));
	columnFamilyDescriptors.addAll(stateColumnFamilyDescriptors);

	RocksDB dbRef;

	try {
		dbRef = RocksDB.open(
			Preconditions.checkNotNull(dbOptions),
			Preconditions.checkNotNull(path),
			columnFamilyDescriptors,
			stateColumnFamilyHandles);
	} catch (RocksDBException e) {
		IOUtils.closeQuietly(columnFamilyOptions);
		columnFamilyDescriptors.forEach((cfd) -> IOUtils.closeQuietly(cfd.getOptions()));

		// improve error reporting on Windows
		throwExceptionIfPathLengthExceededOnWindows(path, e);

		throw new IOException("Error while opening RocksDB instance.", e);
	}

	// requested + default CF
	Preconditions.checkState(1 + stateColumnFamilyDescriptors.size() == stateColumnFamilyHandles.size(),
		"Not all requested column family handles have been created");
	return dbRef;
}
 
Example #25
Source Project: flink   Author: apache   File: RocksDBPriorityQueueSetFactory.java    License: Apache License 2.0
@Nonnull
@Override
public <T extends HeapPriorityQueueElement & PriorityComparable & Keyed> KeyGroupedInternalPriorityQueue<T>
create(@Nonnull String stateName, @Nonnull TypeSerializer<T> byteOrderedElementSerializer) {

	final RocksDBKeyedStateBackend.RocksDbKvStateInfo stateCFHandle =
		tryRegisterPriorityQueueMetaInfo(stateName, byteOrderedElementSerializer);

	final ColumnFamilyHandle columnFamilyHandle = stateCFHandle.columnFamilyHandle;

	return new KeyGroupPartitionedPriorityQueue<>(
		KeyExtractorFunction.forKeyedObjects(),
		PriorityComparator.forPriorityComparableObjects(),
		new KeyGroupPartitionedPriorityQueue.PartitionQueueSetFactory<T, RocksDBCachingPriorityQueueSet<T>>() {
			@Nonnull
			@Override
			public RocksDBCachingPriorityQueueSet<T> create(
				int keyGroupId,
				int numKeyGroups,
				@Nonnull KeyExtractorFunction<T> keyExtractor,
				@Nonnull PriorityComparator<T> elementPriorityComparator) {
				TreeOrderedSetCache orderedSetCache = new TreeOrderedSetCache(DEFAULT_CACHES_SIZE);
				return new RocksDBCachingPriorityQueueSet<>(
					keyGroupId,
					keyGroupPrefixBytes,
					db,
					readOptions,
					columnFamilyHandle,
					byteOrderedElementSerializer,
					sharedElementOutView,
					sharedElementInView,
					writeBatchWrapper,
					orderedSetCache
				);
			}
		},
		keyGroupRange,
		numberOfKeyGroups);
}
 
Example #26
Source Project: Flink-CEPplus   Author: ljygz   File: RocksDBListState.java    License: Apache License 2.0
@SuppressWarnings("unchecked")
static <E, K, N, SV, S extends State, IS extends S> IS create(
	StateDescriptor<S, SV> stateDesc,
	Tuple2<ColumnFamilyHandle, RegisteredKeyValueStateBackendMetaInfo<N, SV>> registerResult,
	RocksDBKeyedStateBackend<K> backend) {
	return (IS) new RocksDBListState<>(
		registerResult.f0,
		registerResult.f1.getNamespaceSerializer(),
		(TypeSerializer<List<E>>) registerResult.f1.getStateSerializer(),
		(List<E>) stateDesc.getDefaultValue(),
		backend);
}
 
Example #27
Source Project: flink   Author: flink-tpc-ds   File: RocksDBRestoreResult.java    License: Apache License 2.0
public RocksDBRestoreResult(
	RocksDB db,
	ColumnFamilyHandle defaultColumnFamilyHandle,
	RocksDBNativeMetricMonitor nativeMetricMonitor,
	long lastCompletedCheckpointId,
	UUID backendUID,
	SortedMap<Long, Set<StateHandleID>> restoredSstFiles) {
	this.db = db;
	this.defaultColumnFamilyHandle = defaultColumnFamilyHandle;
	this.nativeMetricMonitor = nativeMetricMonitor;
	this.lastCompletedCheckpointId = lastCompletedCheckpointId;
	this.backendUID = backendUID;
	this.restoredSstFiles = restoredSstFiles;
}
 
Example #28
Source Project: Flink-CEPplus   Author: ljygz   File: RocksDBValueState.java    License: Apache License 2.0
@SuppressWarnings("unchecked")
static <K, N, SV, S extends State, IS extends S> IS create(
	StateDescriptor<S, SV> stateDesc,
	Tuple2<ColumnFamilyHandle, RegisteredKeyValueStateBackendMetaInfo<N, SV>> registerResult,
	RocksDBKeyedStateBackend<K> backend) {
	return (IS) new RocksDBValueState<>(
		registerResult.f0,
		registerResult.f1.getNamespaceSerializer(),
		registerResult.f1.getStateSerializer(),
		stateDesc.getDefaultValue(),
		backend);
}
 
Example #29
Source Project: Flink-CEPplus   Author: ljygz   File: RocksDBRestoreResult.java    License: Apache License 2.0
public RocksDBRestoreResult(
	RocksDB db,
	ColumnFamilyHandle defaultColumnFamilyHandle,
	RocksDBNativeMetricMonitor nativeMetricMonitor,
	long lastCompletedCheckpointId,
	UUID backendUID,
	SortedMap<Long, Set<StateHandleID>> restoredSstFiles) {
	this.db = db;
	this.defaultColumnFamilyHandle = defaultColumnFamilyHandle;
	this.nativeMetricMonitor = nativeMetricMonitor;
	this.lastCompletedCheckpointId = lastCompletedCheckpointId;
	this.backendUID = backendUID;
	this.restoredSstFiles = restoredSstFiles;
}
 
Example #30
Source Project: flink   Author: apache   File: RocksDBNativeMetricMonitor.java    License: Apache License 2.0
private RocksDBNativeMetricView(
	ColumnFamilyHandle handle,
	@Nonnull String property
) {
	this.handle = handle;
	this.property = property;
	this.bigInteger = BigInteger.ZERO;
	this.closed = false;
}