org.apache.flink.contrib.streaming.state.RocksDBKeySerializationUtils Java Examples

The following examples show how to use org.apache.flink.contrib.streaming.state.RocksDBKeySerializationUtils. Each example is taken from an open-source project; the source file and originating project are noted above it.
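A quick note on what the utility encodes: every RocksDB key written by Flink's keyed state backend starts with a fixed-width, big-endian key-group prefix, produced by serializeKeyGroup, followed by the serialized key (and namespace). The sketch below mirrors that prefix encoding; it is an illustration of the behavior, not a copy of Flink's source.

// Sketch of the key-group prefix encoding: the key-group id is written
// big-endian across the first prefixBytes.length bytes of the RocksDB key.
static void serializeKeyGroupSketch(int keyGroup, byte[] prefixBytes) {
	for (int i = 0; i < prefixBytes.length; i++) {
		prefixBytes[i] = (byte) (keyGroup >>> ((prefixBytes.length - i - 1) * Byte.SIZE));
	}
}

With a two-byte prefix, key group 300 encodes as 0x01 0x2C, so keys stored in RocksDB sort by key group first; the examples below rely on exactly this ordering.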
Example #1
Source File: RocksStateKeysIterator.java    From Flink-CEPplus with Apache License 2.0
private K deserializeKey(byte[] keyBytes, DataInputDeserializer readView) throws IOException {
	// Position the view past the fixed-width key-group prefix; the rest is the serialized key.
	readView.setBuffer(keyBytes, keyGroupPrefixBytes, keyBytes.length - keyGroupPrefixBytes);
	// keyGroupPrefixBytes, keySerializer and ambiguousKeyPossible are fields of the enclosing iterator.
	return RocksDBKeySerializationUtils.readKey(
		keySerializer,
		readView,
		ambiguousKeyPossible);
}
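To make the byte layout concrete, here is a self-contained sketch that builds raw key bytes by hand and reads the key back the same way deserializeKey does. The two-byte prefix, the String key, and ambiguousKeyPossible = false are illustrative assumptions, not values taken from the example above.

import java.io.IOException;

import org.apache.flink.api.common.typeutils.base.StringSerializer;
import org.apache.flink.contrib.streaming.state.RocksDBKeySerializationUtils;
import org.apache.flink.core.memory.DataInputDeserializer;
import org.apache.flink.core.memory.DataOutputSerializer;

public class ReadKeySketch {
	public static void main(String[] args) throws IOException {
		// Build raw key bytes: [2-byte key-group prefix | serialized key].
		DataOutputSerializer out = new DataOutputSerializer(32);
		out.writeByte(0x01); // key group 300, big-endian over an assumed 2-byte prefix
		out.writeByte(0x2C);
		StringSerializer.INSTANCE.serialize("user-42", out);
		byte[] keyBytes = out.getCopyOfBuffer();

		// Same steps as deserializeKey: skip the prefix, then read the key.
		DataInputDeserializer readView = new DataInputDeserializer();
		readView.setBuffer(keyBytes, 2, keyBytes.length - 2);
		String key = RocksDBKeySerializationUtils.readKey(
			StringSerializer.INSTANCE, readView, false);
		System.out.println(key); // prints user-42
	}
}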
 
Example #2
Source File: RocksStateKeysIterator.java    From flink with Apache License 2.0
private K deserializeKey(byte[] keyBytes, DataInputDeserializer readView) throws IOException {
	// Position the view past the fixed-width key-group prefix; the rest is the serialized key.
	readView.setBuffer(keyBytes, keyGroupPrefixBytes, keyBytes.length - keyGroupPrefixBytes);
	// keyGroupPrefixBytes, keySerializer and ambiguousKeyPossible are fields of the enclosing iterator.
	return RocksDBKeySerializationUtils.readKey(
		keySerializer,
		readView,
		ambiguousKeyPossible);
}
 
Example #3
Source File: RocksDBIncrementalRestoreOperation.java    From Flink-CEPplus with Apache License 2.0
/**
 * Recovers from multiple incremental state handles with rescaling. For rescaling, this method creates
 * a temporary RocksDB instance per key-group shard. All contents from the temporary instance are
 * copied into the real restore instance, and the temporary instance is then discarded.
 */
private void restoreWithRescaling(Collection<KeyedStateHandle> restoreStateHandles) throws Exception {

	// Prepare for restore with rescaling
	KeyedStateHandle initialHandle = RocksDBIncrementalCheckpointUtils.chooseTheBestStateHandleForInitial(
		restoreStateHandles, keyGroupRange);

	// Init base DB instance
	if (initialHandle != null) {
		restoreStateHandles.remove(initialHandle);
		initDBWithRescaling(initialHandle);
	} else {
		openDB();
	}

	// Transfer remaining key-groups from temporary instance into base DB
	byte[] startKeyGroupPrefixBytes = new byte[keyGroupPrefixBytes];
	RocksDBKeySerializationUtils.serializeKeyGroup(keyGroupRange.getStartKeyGroup(), startKeyGroupPrefixBytes);

	byte[] stopKeyGroupPrefixBytes = new byte[keyGroupPrefixBytes];
	RocksDBKeySerializationUtils.serializeKeyGroup(keyGroupRange.getEndKeyGroup() + 1, stopKeyGroupPrefixBytes);

	for (KeyedStateHandle rawStateHandle : restoreStateHandles) {

		if (!(rawStateHandle instanceof IncrementalRemoteKeyedStateHandle)) {
			throw new IllegalStateException("Unexpected state handle type, " +
				"expected " + IncrementalRemoteKeyedStateHandle.class +
				", but found " + rawStateHandle.getClass());
		}

	// Note: the UUID is appended directly to the absolute path with no separator, so the
	// temporary directory is a sibling of the base path; the newer variant in Example #5
	// resolves it as a child path instead.
	Path temporaryRestoreInstancePath = new Path(instanceBasePath.getAbsolutePath() + UUID.randomUUID().toString());
		try (RestoredDBInstance tmpRestoreDBInfo = restoreDBInstanceFromStateHandle(
			(IncrementalRemoteKeyedStateHandle) rawStateHandle,
			temporaryRestoreInstancePath);
			RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(this.db)) {

			List<ColumnFamilyDescriptor> tmpColumnFamilyDescriptors = tmpRestoreDBInfo.columnFamilyDescriptors;
			List<ColumnFamilyHandle> tmpColumnFamilyHandles = tmpRestoreDBInfo.columnFamilyHandles;

			// iterating only the requested descriptors automatically skips the default column family handle
			for (int i = 0; i < tmpColumnFamilyDescriptors.size(); ++i) {
				ColumnFamilyHandle tmpColumnFamilyHandle = tmpColumnFamilyHandles.get(i);

				ColumnFamilyHandle targetColumnFamilyHandle = getOrRegisterStateColumnFamilyHandle(
					null, tmpRestoreDBInfo.stateMetaInfoSnapshots.get(i))
					.columnFamilyHandle;

				try (RocksIteratorWrapper iterator = RocksDBOperationUtils.getRocksIterator(tmpRestoreDBInfo.db, tmpColumnFamilyHandle)) {

					iterator.seek(startKeyGroupPrefixBytes);

					while (iterator.isValid()) {

						if (RocksDBIncrementalCheckpointUtils.beforeThePrefixBytes(iterator.key(), stopKeyGroupPrefixBytes)) {
							writeBatchWrapper.put(targetColumnFamilyHandle, iterator.key(), iterator.value());
						} else {
							// The iterator returns keys in sorted order, so once a key is no
							// longer before the stop prefix we can stop copying.
							break;
						}

						iterator.next();
					}
				} // releases native iterator resources
			}
		} finally {
			cleanUpPathQuietly(temporaryRestoreInstancePath);
		}
	}
}
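The copy loop above leans on RocksDBIncrementalCheckpointUtils.beforeThePrefixBytes for its termination check: keep copying while the current key is still lexicographically below the exclusive stop prefix. Below is a minimal sketch of that comparison's semantics (unsigned, byte-wise); it illustrates the behavior rather than reproducing Flink's exact source.

// Returns true if `key` sorts strictly before `stopPrefix` under unsigned
// lexicographic byte order, comparing only the first stopPrefix.length bytes.
// Assumes key.length >= stopPrefix.length, which holds here because every
// stored key carries a full key-group prefix.
static boolean beforePrefixSketch(byte[] key, byte[] stopPrefix) {
	for (int i = 0; i < stopPrefix.length; i++) {
		int diff = (key[i] & 0xFF) - (stopPrefix[i] & 0xFF);
		if (diff != 0) {
			return diff < 0;
		}
	}
	return false; // key begins with the stop prefix itself, so it is not before it
}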
 
Example #4
Source File: RocksDBIncrementalRestoreOperation.java    From flink with Apache License 2.0
/**
 * Recovers from multiple incremental state handles with rescaling. For rescaling, this method creates
 * a temporary RocksDB instance per key-group shard. All contents from the temporary instance are
 * copied into the real restore instance, and the temporary instance is then discarded.
 */
private void restoreWithRescaling(Collection<KeyedStateHandle> restoreStateHandles) throws Exception {

	// Prepare for restore with rescaling
	KeyedStateHandle initialHandle = RocksDBIncrementalCheckpointUtils.chooseTheBestStateHandleForInitial(
		restoreStateHandles, keyGroupRange);

	// Init base DB instance
	if (initialHandle != null) {
		restoreStateHandles.remove(initialHandle);
		initDBWithRescaling(initialHandle);
	} else {
		openDB();
	}

	// Transfer remaining key-groups from temporary instance into base DB
	byte[] startKeyGroupPrefixBytes = new byte[keyGroupPrefixBytes];
	RocksDBKeySerializationUtils.serializeKeyGroup(keyGroupRange.getStartKeyGroup(), startKeyGroupPrefixBytes);

	byte[] stopKeyGroupPrefixBytes = new byte[keyGroupPrefixBytes];
	RocksDBKeySerializationUtils.serializeKeyGroup(keyGroupRange.getEndKeyGroup() + 1, stopKeyGroupPrefixBytes);

	for (KeyedStateHandle rawStateHandle : restoreStateHandles) {

		if (!(rawStateHandle instanceof IncrementalRemoteKeyedStateHandle)) {
			throw new IllegalStateException("Unexpected state handle type, " +
				"expected " + IncrementalRemoteKeyedStateHandle.class +
				", but found " + rawStateHandle.getClass());
		}

		Path temporaryRestoreInstancePath = new Path(instanceBasePath.getAbsolutePath() + UUID.randomUUID().toString());
		try (RestoredDBInstance tmpRestoreDBInfo = restoreDBInstanceFromStateHandle(
			(IncrementalRemoteKeyedStateHandle) rawStateHandle,
			temporaryRestoreInstancePath);
			RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(this.db)) {

			List<ColumnFamilyDescriptor> tmpColumnFamilyDescriptors = tmpRestoreDBInfo.columnFamilyDescriptors;
			List<ColumnFamilyHandle> tmpColumnFamilyHandles = tmpRestoreDBInfo.columnFamilyHandles;

			// iterating only the requested descriptors automatically skips the default column family handle
			for (int i = 0; i < tmpColumnFamilyDescriptors.size(); ++i) {
				ColumnFamilyHandle tmpColumnFamilyHandle = tmpColumnFamilyHandles.get(i);

				ColumnFamilyHandle targetColumnFamilyHandle = getOrRegisterStateColumnFamilyHandle(
					null, tmpRestoreDBInfo.stateMetaInfoSnapshots.get(i))
					.columnFamilyHandle;

				try (RocksIteratorWrapper iterator = RocksDBOperationUtils.getRocksIterator(tmpRestoreDBInfo.db, tmpColumnFamilyHandle)) {

					iterator.seek(startKeyGroupPrefixBytes);

					while (iterator.isValid()) {

						if (RocksDBIncrementalCheckpointUtils.beforeThePrefixBytes(iterator.key(), stopKeyGroupPrefixBytes)) {
							writeBatchWrapper.put(targetColumnFamilyHandle, iterator.key(), iterator.value());
						} else {
							// The iterator returns keys in sorted order, so once a key is no
							// longer before the stop prefix we can stop copying.
							break;
						}

						iterator.next();
					}
				} // releases native iterator resources
			}
		} finally {
			cleanUpPathQuietly(temporaryRestoreInstancePath);
		}
	}
}
 
Example #5
Source File: RocksDBIncrementalRestoreOperation.java    From flink with Apache License 2.0
/**
 * Recovers from multiple incremental state handles with rescaling. For rescaling, this method creates
 * a temporary RocksDB instance per key-group shard. All contents from the temporary instance are
 * copied into the real restore instance, and the temporary instance is then discarded.
 */
private void restoreWithRescaling(Collection<KeyedStateHandle> restoreStateHandles) throws Exception {

	// Prepare for restore with rescaling
	KeyedStateHandle initialHandle = RocksDBIncrementalCheckpointUtils.chooseTheBestStateHandleForInitial(
		restoreStateHandles, keyGroupRange);

	// Init base DB instance
	if (initialHandle != null) {
		restoreStateHandles.remove(initialHandle);
		initDBWithRescaling(initialHandle);
	} else {
		openDB();
	}

	// Transfer remaining key-groups from temporary instance into base DB
	byte[] startKeyGroupPrefixBytes = new byte[keyGroupPrefixBytes];
	RocksDBKeySerializationUtils.serializeKeyGroup(keyGroupRange.getStartKeyGroup(), startKeyGroupPrefixBytes);

	byte[] stopKeyGroupPrefixBytes = new byte[keyGroupPrefixBytes];
	RocksDBKeySerializationUtils.serializeKeyGroup(keyGroupRange.getEndKeyGroup() + 1, stopKeyGroupPrefixBytes);

	for (KeyedStateHandle rawStateHandle : restoreStateHandles) {

		if (!(rawStateHandle instanceof IncrementalRemoteKeyedStateHandle)) {
			throw new IllegalStateException("Unexpected state handle type, " +
				"expected " + IncrementalRemoteKeyedStateHandle.class +
				", but found " + rawStateHandle.getClass());
		}

		Path temporaryRestoreInstancePath = instanceBasePath.getAbsoluteFile().toPath().resolve(UUID.randomUUID().toString());
		try (RestoredDBInstance tmpRestoreDBInfo = restoreDBInstanceFromStateHandle(
			(IncrementalRemoteKeyedStateHandle) rawStateHandle,
			temporaryRestoreInstancePath);
			RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(this.db, writeBatchSize)) {

			List<ColumnFamilyDescriptor> tmpColumnFamilyDescriptors = tmpRestoreDBInfo.columnFamilyDescriptors;
			List<ColumnFamilyHandle> tmpColumnFamilyHandles = tmpRestoreDBInfo.columnFamilyHandles;

			// iterating only the requested descriptors automatically skips the default column family handle
			for (int i = 0; i < tmpColumnFamilyDescriptors.size(); ++i) {
				ColumnFamilyHandle tmpColumnFamilyHandle = tmpColumnFamilyHandles.get(i);

				ColumnFamilyHandle targetColumnFamilyHandle = getOrRegisterStateColumnFamilyHandle(
					null, tmpRestoreDBInfo.stateMetaInfoSnapshots.get(i))
					.columnFamilyHandle;

				try (RocksIteratorWrapper iterator = RocksDBOperationUtils.getRocksIterator(tmpRestoreDBInfo.db, tmpColumnFamilyHandle, tmpRestoreDBInfo.readOptions)) {

					iterator.seek(startKeyGroupPrefixBytes);

					while (iterator.isValid()) {

						if (RocksDBIncrementalCheckpointUtils.beforeThePrefixBytes(iterator.key(), stopKeyGroupPrefixBytes)) {
							writeBatchWrapper.put(targetColumnFamilyHandle, iterator.key(), iterator.value());
						} else {
							// The iterator returns keys in sorted order, so once a key is no
							// longer before the stop prefix we can stop copying.
							break;
						}

						iterator.next();
					}
				} // releases native iterator resources
			}
		} finally {
			cleanUpPathQuietly(temporaryRestoreInstancePath);
		}
	}
}
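In all three restore examples, the start and stop prefixes bracket the subtask's key-group range with an exclusive upper bound (getEndKeyGroup() + 1), so the seek-and-scan copies exactly the owned key groups and nothing else. Here is a small, self-contained illustration; the range 32..63 and the two-byte prefix width are assumptions chosen for the example.

import org.apache.flink.contrib.streaming.state.RocksDBKeySerializationUtils;

public class PrefixRangeSketch {
	public static void main(String[] args) {
		int keyGroupPrefixBytes = 2; // assumed prefix width
		byte[] start = new byte[keyGroupPrefixBytes];
		byte[] stop = new byte[keyGroupPrefixBytes];

		// Suppose this subtask owns key groups 32..63 (illustrative values).
		RocksDBKeySerializationUtils.serializeKeyGroup(32, start);
		RocksDBKeySerializationUtils.serializeKeyGroup(63 + 1, stop); // exclusive bound

		System.out.printf("start=%02x%02x stop=%02x%02x%n",
			start[0], start[1], stop[0], stop[1]);
		// Prints start=0020 stop=0040: seeking to `start` and scanning until a key
		// is no longer before `stop` visits exactly key groups 32..63.
	}
}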