org.apache.flink.runtime.state.OperatorStateHandle Java Examples

The following examples show how to use org.apache.flink.runtime.state.OperatorStateHandle. Each example is taken from an open source project; the source file and license are noted in the header above it.
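Before the individual examples, here is a minimal, self-contained sketch of the construction pattern most of them repeat: describe each named state's partition offsets with OperatorStateHandle.StateMetaInfo, back the serialized bytes with a ByteStreamStateHandle, and wrap both in an OperatorStreamStateHandle. This sketch is not taken from any of the listed projects; the class name, state name, and payload are illustrative only.

import java.util.HashMap;
import java.util.Map;

import org.apache.flink.runtime.state.OperatorStateHandle;
import org.apache.flink.runtime.state.OperatorStreamStateHandle;
import org.apache.flink.runtime.state.memory.ByteStreamStateHandle;

public class OperatorStateHandleSketch {

	public static OperatorStateHandle buildExampleHandle() {
		// map each named state to the offsets of its partitions inside the delegate stream
		Map<String, OperatorStateHandle.StateMetaInfo> metaInfo = new HashMap<>();
		metaInfo.put("example-state",
			new OperatorStateHandle.StateMetaInfo(new long[]{0L, 16L}, OperatorStateHandle.Mode.SPLIT_DISTRIBUTE));

		// the delegate stream state handle carries the serialized bytes (name and payload are placeholders)
		ByteStreamStateHandle delegate = new ByteStreamStateHandle("example-bytes", new byte[32]);

		// OperatorStreamStateHandle is the concrete OperatorStateHandle implementation used in the examples below
		return new OperatorStreamStateHandle(metaInfo, delegate);
	}
}
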
Example #1
Source File: TaskStateSnapshotTest.java    From flink with Apache License 2.0
@Test
public void hasState() {
	Random random = new Random(0x42);
	TaskStateSnapshot taskStateSnapshot = new TaskStateSnapshot();
	Assert.assertFalse(taskStateSnapshot.hasState());

	OperatorSubtaskState emptyOperatorSubtaskState = new OperatorSubtaskState();
	Assert.assertFalse(emptyOperatorSubtaskState.hasState());
	taskStateSnapshot.putSubtaskStateByOperatorID(new OperatorID(), emptyOperatorSubtaskState);
	Assert.assertFalse(taskStateSnapshot.hasState());

	OperatorStateHandle stateHandle = StateHandleDummyUtil.createNewOperatorStateHandle(2, random);
	OperatorSubtaskState nonEmptyOperatorSubtaskState = new OperatorSubtaskState(
		stateHandle,
		null,
		null,
		null,
		null,
		null
	);

	Assert.assertTrue(nonEmptyOperatorSubtaskState.hasState());
	taskStateSnapshot.putSubtaskStateByOperatorID(new OperatorID(), nonEmptyOperatorSubtaskState);
	Assert.assertTrue(taskStateSnapshot.hasState());
}
 
Example #2
Source File: StateAssignmentOperationTest.java    From flink with Apache License 2.0
@Test
public void testRepartitionUnionState() {
	OperatorID operatorID = new OperatorID();
	OperatorState operatorState = new OperatorState(operatorID, 2, 4);

	Map<String, OperatorStateHandle.StateMetaInfo> metaInfoMap1 = new HashMap<>(2);
	metaInfoMap1.put("t-3", new OperatorStateHandle.StateMetaInfo(new long[]{0}, OperatorStateHandle.Mode.UNION));
	metaInfoMap1.put("t-4", new OperatorStateHandle.StateMetaInfo(new long[]{22, 44}, OperatorStateHandle.Mode.UNION));
	OperatorStateHandle osh1 = new OperatorStreamStateHandle(metaInfoMap1, new ByteStreamStateHandle("test1", new byte[50]));
	operatorState.putState(0, new OperatorSubtaskState(osh1, null, null, null, null, null));

	Map<String, OperatorStateHandle.StateMetaInfo> metaInfoMap2 = new HashMap<>(1);
	metaInfoMap2.put("t-3", new OperatorStateHandle.StateMetaInfo(new long[]{0}, OperatorStateHandle.Mode.UNION));
	OperatorStateHandle osh2 = new OperatorStreamStateHandle(metaInfoMap2, new ByteStreamStateHandle("test2", new byte[20]));
	operatorState.putState(1, new OperatorSubtaskState(osh2, null, null, null, null, null));

	verifyOneKindPartitionableStateRescale(operatorState, operatorID);
}
 
Example #3
Source File: MetadataV2V3SerializerBase.java    From flink with Apache License 2.0
void serializeOperatorStateHandle(OperatorStateHandle stateHandle, DataOutputStream dos) throws IOException {
	if (stateHandle != null) {
		dos.writeByte(PARTITIONABLE_OPERATOR_STATE_HANDLE);
		Map<String, OperatorStateHandle.StateMetaInfo> partitionOffsetsMap =
				stateHandle.getStateNameToPartitionOffsets();
		dos.writeInt(partitionOffsetsMap.size());
		for (Map.Entry<String, OperatorStateHandle.StateMetaInfo> entry : partitionOffsetsMap.entrySet()) {
			dos.writeUTF(entry.getKey());

			OperatorStateHandle.StateMetaInfo stateMetaInfo = entry.getValue();

			int mode = stateMetaInfo.getDistributionMode().ordinal();
			dos.writeByte(mode);

			long[] offsets = stateMetaInfo.getOffsets();
			dos.writeInt(offsets.length);
			for (long offset : offsets) {
				dos.writeLong(offset);
			}
		}
		serializeStreamStateHandle(stateHandle.getDelegateStateHandle(), dos);
	} else {
		dos.writeByte(NULL_HANDLE);
	}
}
 
Example #4
Source File: RocksDBStateBackend.java    From Flink-CEPplus with Apache License 2.0
@Override
public OperatorStateBackend createOperatorStateBackend(
	Environment env,
	String operatorIdentifier,
	@Nonnull Collection<OperatorStateHandle> stateHandles,
	CloseableRegistry cancelStreamRegistry) throws Exception {

	// the default for RocksDB; eventually there can be an operator state backend based on RocksDB, too.
	final boolean asyncSnapshots = true;
	return new DefaultOperatorStateBackendBuilder(
		env.getUserClassLoader(),
		env.getExecutionConfig(),
		asyncSnapshots,
		stateHandles,
		cancelStreamRegistry).build();
}
 
Example #5
Source File: OperatorSubtaskState.java    From flink with Apache License 2.0
public OperatorSubtaskState(
	@Nonnull StateObjectCollection<OperatorStateHandle> managedOperatorState,
	@Nonnull StateObjectCollection<OperatorStateHandle> rawOperatorState,
	@Nonnull StateObjectCollection<KeyedStateHandle> managedKeyedState,
	@Nonnull StateObjectCollection<KeyedStateHandle> rawKeyedState) {

	this.managedOperatorState = Preconditions.checkNotNull(managedOperatorState);
	this.rawOperatorState = Preconditions.checkNotNull(rawOperatorState);
	this.managedKeyedState = Preconditions.checkNotNull(managedKeyedState);
	this.rawKeyedState = Preconditions.checkNotNull(rawKeyedState);

	long calculateStateSize = managedOperatorState.getStateSize();
	calculateStateSize += rawOperatorState.getStateSize();
	calculateStateSize += managedKeyedState.getStateSize();
	calculateStateSize += rawKeyedState.getStateSize();
	stateSize = calculateStateSize;
}
 
Example #6
Source File: OperatorStateInputFormat.java    From flink with Apache License 2.0
@Override
public void open(OperatorStateInputSplit split) throws IOException {
	registry = new CloseableRegistry();

	final BackendRestorerProcedure<OperatorStateBackend, OperatorStateHandle> backendRestorer =
		new BackendRestorerProcedure<>(
			(handles) -> createOperatorStateBackend(getRuntimeContext(), handles, registry),
			registry,
			operatorState.getOperatorID().toString()
		);

	try {
		restoredBackend = backendRestorer.createAndRestore(split.getPrioritizedManagedOperatorState());
	} catch (Exception exception) {
		throw new IOException("Failed to restore state backend", exception);
	}

	try {
		elements = getElements(restoredBackend).iterator();
	} catch (Exception e) {
		throw new IOException("Failed to read operator state from restored state backend", e);
	}
}
 
Example #7
Source File: CheckpointCoordinatorTestingUtils.java    From flink with Apache License 2.0
public static OperatorStateHandle generatePartitionableStateHandle(
	JobVertexID jobVertexID,
	int index,
	int namedStates,
	int partitionsPerState,
	boolean rawState) throws IOException {

	Map<String, List<? extends Serializable>> statesListsMap = new HashMap<>(namedStates);

	for (int i = 0; i < namedStates; ++i) {
		List<Integer> testStatesLists = new ArrayList<>(partitionsPerState);
		// generate state
		int seed = jobVertexID.hashCode() * index + i * namedStates;
		if (rawState) {
			seed = (seed + 1) * 31;
		}
		Random random = new Random(seed);
		for (int j = 0; j < partitionsPerState; ++j) {
			int simulatedStateValue = random.nextInt();
			testStatesLists.add(simulatedStateValue);
		}
		statesListsMap.put("state-" + i, testStatesLists);
	}

	return generatePartitionableStateHandle(statesListsMap);
}
 
Example #8
Source File: OperatorStateInputFormat.java    From flink with Apache License 2.0
private static OperatorStateBackend createOperatorStateBackend(
	RuntimeContext runtimeContext,
	Collection<OperatorStateHandle> stateHandles,
	CloseableRegistry cancelStreamRegistry) {

	try {
		return new DefaultOperatorStateBackendBuilder(
			runtimeContext.getUserCodeClassLoader(),
			runtimeContext.getExecutionConfig(),
			false,
			stateHandles,
			cancelStreamRegistry
		).build();
	} catch (BackendBuildingException e) {
		throw new RuntimeException(e);
	}
}
 
Example #9
Source File: StateAssignmentOperation.java    From Flink-CEPplus with Apache License 2.0
public static Map<OperatorInstanceID, List<OperatorStateHandle>> applyRepartitioner(
	OperatorID operatorID,
	OperatorStateRepartitioner opStateRepartitioner,
	List<List<OperatorStateHandle>> chainOpParallelStates,
	int oldParallelism,
	int newParallelism) {

	List<List<OperatorStateHandle>> states = applyRepartitioner(
		opStateRepartitioner,
		chainOpParallelStates,
		oldParallelism,
		newParallelism);

	Map<OperatorInstanceID, List<OperatorStateHandle>> result = new HashMap<>(states.size());

	for (int subtaskIndex = 0; subtaskIndex < states.size(); subtaskIndex++) {
		checkNotNull(states.get(subtaskIndex), "states.get(subtaskIndex) is null");
		result.put(OperatorInstanceID.of(subtaskIndex, operatorID), states.get(subtaskIndex));
	}

	return result;
}
 
Example #10
Source File: StateAssignmentOperation.java    From Flink-CEPplus with Apache License 2.0
/**
 * Repartitions the given operator state using the given {@link OperatorStateRepartitioner} with respect to the new
 * parallelism.
 *
 * @param opStateRepartitioner  partitioner to use
 * @param chainOpParallelStates state to repartition
 * @param oldParallelism        parallelism with which the state is currently partitioned
 * @param newParallelism        parallelism with which the state should be partitioned
 * @return repartitioned state
 */
// TODO rewrite based on operator id
public static List<List<OperatorStateHandle>> applyRepartitioner(
	OperatorStateRepartitioner opStateRepartitioner,
	List<List<OperatorStateHandle>> chainOpParallelStates,
	int oldParallelism,
	int newParallelism) {

	if (chainOpParallelStates == null) {
		return Collections.emptyList();
	}

	return opStateRepartitioner.repartitionState(
		chainOpParallelStates,
		oldParallelism,
		newParallelism);
}
 
Example #11
Source File: StateHandleDummyUtil.java    From Flink-CEPplus with Apache License 2.0
/**
 * Creates a new test {@link OperatorStreamStateHandle} with a given number of randomly created named states.
 */
public static OperatorStateHandle createNewOperatorStateHandle(int numNamedStates, Random random) {
	Map<String, OperatorStateHandle.StateMetaInfo> operatorStateMetaData = new HashMap<>(numNamedStates);
	byte[] streamData = new byte[numNamedStates * 4];
	random.nextBytes(streamData);
	long off = 0;
	for (int i = 0; i < numNamedStates; ++i) {
		long[] offsets = new long[4];
		for (int o = 0; o < offsets.length; ++o) {
			offsets[o] = off++;
		}
		OperatorStateHandle.StateMetaInfo metaInfo =
			new OperatorStateHandle.StateMetaInfo(offsets, OperatorStateHandle.Mode.SPLIT_DISTRIBUTE);
		operatorStateMetaData.put(String.valueOf(UUID.randomUUID()), metaInfo);
	}
	ByteStreamStateHandle byteStreamStateHandle =
		new ByteStreamStateHandle(String.valueOf(UUID.randomUUID()), streamData);
	return new OperatorStreamStateHandle(operatorStateMetaData, byteStreamStateHandle);
}
 
Example #12
Source File: StateHandleDummyUtil.java    From flink with Apache License 2.0
/**
 * Creates a deep copy of the given {@link OperatorStreamStateHandle}.
 */
public static OperatorStateHandle deepDummyCopy(OperatorStateHandle original) {

	if (original == null) {
		return null;
	}

	ByteStreamStateHandle stateHandleCopy = cloneByteStreamStateHandle((ByteStreamStateHandle) original.getDelegateStateHandle());
	Map<String, OperatorStateHandle.StateMetaInfo> offsets = original.getStateNameToPartitionOffsets();
	Map<String, OperatorStateHandle.StateMetaInfo> offsetsCopy = new HashMap<>(offsets.size());

	for (Map.Entry<String, OperatorStateHandle.StateMetaInfo> entry : offsets.entrySet()) {
		OperatorStateHandle.StateMetaInfo metaInfo = entry.getValue();
		OperatorStateHandle.StateMetaInfo metaInfoCopy =
			new OperatorStateHandle.StateMetaInfo(metaInfo.getOffsets(), metaInfo.getDistributionMode());
		offsetsCopy.put(String.valueOf(entry.getKey()), metaInfoCopy);
	}
	return new OperatorStreamStateHandle(offsetsCopy, stateHandleCopy);
}
 
Example #13
Source File: CheckpointCoordinatorTest.java    From flink with Apache License 2.0
public static ChainedStateHandle<OperatorStateHandle> generateChainedPartitionableStateHandle(
		JobVertexID jobVertexID,
		int index,
		int namedStates,
		int partitionsPerState,
		boolean rawState) throws IOException {

	Map<String, List<? extends Serializable>> statesListsMap = new HashMap<>(namedStates);

	for (int i = 0; i < namedStates; ++i) {
		List<Integer> testStatesLists = new ArrayList<>(partitionsPerState);
		// generate state
		int seed = jobVertexID.hashCode() * index + i * namedStates;
		if (rawState) {
			seed = (seed + 1) * 31;
		}
		Random random = new Random(seed);
		for (int j = 0; j < partitionsPerState; ++j) {
			int simulatedStateValue = random.nextInt();
			testStatesLists.add(simulatedStateValue);
		}
		statesListsMap.put("state-" + i, testStatesLists);
	}

	return ChainedStateHandle.wrapSingleHandle(generatePartitionableStateHandle(statesListsMap));
}
 
Example #14
Source File: StateAssignmentOperationTest.java    From flink with Apache License 2.0
@Test
public void testRepartitionSplitDistributeStates() {
	OperatorID operatorID = new OperatorID();
	OperatorState operatorState = new OperatorState(operatorID, 2, 4);

	Map<String, OperatorStateHandle.StateMetaInfo> metaInfoMap1 = new HashMap<>(1);
	metaInfoMap1.put("t-1", new OperatorStateHandle.StateMetaInfo(new long[]{0, 10}, OperatorStateHandle.Mode.SPLIT_DISTRIBUTE));
	OperatorStateHandle osh1 = new OperatorStreamStateHandle(metaInfoMap1, new ByteStreamStateHandle("test1", new byte[30]));
	operatorState.putState(0, new OperatorSubtaskState(osh1, null, null, null, null, null));

	Map<String, OperatorStateHandle.StateMetaInfo> metaInfoMap2 = new HashMap<>(1);
	metaInfoMap2.put("t-2", new OperatorStateHandle.StateMetaInfo(new long[]{0, 15}, OperatorStateHandle.Mode.SPLIT_DISTRIBUTE));
	OperatorStateHandle osh2 = new OperatorStreamStateHandle(metaInfoMap2, new ByteStreamStateHandle("test2", new byte[40]));
	operatorState.putState(1, new OperatorSubtaskState(osh2, null, null, null, null, null));

	verifyOneKindPartitionableStateRescale(operatorState, operatorID);
}
 
Example #15
Source File: MetadataV2V3SerializerBase.java    From flink with Apache License 2.0
protected OperatorSubtaskState deserializeSubtaskState(
		DataInputStream dis,
		@Nullable DeserializationContext context) throws IOException {

	final boolean hasManagedOperatorState = dis.readInt() != 0;
	final OperatorStateHandle managedOperatorState = hasManagedOperatorState ? deserializeOperatorStateHandle(dis, context) : null;

	final boolean hasRawOperatorState = dis.readInt() != 0;
	final OperatorStateHandle rawOperatorState = hasRawOperatorState ? deserializeOperatorStateHandle(dis, context) : null;

	final KeyedStateHandle managedKeyedState = deserializeKeyedStateHandle(dis, context);
	final KeyedStateHandle rawKeyedState = deserializeKeyedStateHandle(dis, context);

	StateObjectCollection<InputChannelStateHandle> inputChannelState = deserializeInputChannelStateHandle(dis, context);

	StateObjectCollection<ResultSubpartitionStateHandle> resultSubpartitionState = deserializeResultSubpartitionStateHandle(dis, context);

	return new OperatorSubtaskState(
		managedOperatorState,
		rawOperatorState,
		managedKeyedState,
		rawKeyedState,
		inputChannelState,
		resultSubpartitionState);
}
 
Example #16
Source File: StateAssignmentOperationTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testRepartitionBroadcastState() {
	OperatorID operatorID = new OperatorID();
	OperatorState operatorState = new OperatorState(operatorID, 2, 4);

	Map<String, OperatorStateHandle.StateMetaInfo> metaInfoMap1 = new HashMap<>(2);
	metaInfoMap1.put("t-5", new OperatorStateHandle.StateMetaInfo(new long[]{0, 10, 20}, OperatorStateHandle.Mode.BROADCAST));
	metaInfoMap1.put("t-6", new OperatorStateHandle.StateMetaInfo(new long[]{30, 40, 50}, OperatorStateHandle.Mode.BROADCAST));
	OperatorStateHandle osh1 = new OperatorStreamStateHandle(metaInfoMap1, new ByteStreamStateHandle("test1", new byte[60]));
	operatorState.putState(0, new OperatorSubtaskState(osh1, null, null, null));

	Map<String, OperatorStateHandle.StateMetaInfo> metaInfoMap2 = new HashMap<>(2);
	metaInfoMap2.put("t-5", new OperatorStateHandle.StateMetaInfo(new long[]{0, 10, 20}, OperatorStateHandle.Mode.BROADCAST));
	metaInfoMap2.put("t-6", new OperatorStateHandle.StateMetaInfo(new long[]{30, 40, 50}, OperatorStateHandle.Mode.BROADCAST));
	OperatorStateHandle osh2 = new OperatorStreamStateHandle(metaInfoMap2, new ByteStreamStateHandle("test2", new byte[60]));
	operatorState.putState(1, new OperatorSubtaskState(osh2, null, null, null));

	verifyOneKindPartitionableStateRescale(operatorState, operatorID);
}
 
Example #17
Source File: TaskStateSnapshotTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void hasState() {
	Random random = new Random(0x42);
	TaskStateSnapshot taskStateSnapshot = new TaskStateSnapshot();
	Assert.assertFalse(taskStateSnapshot.hasState());

	OperatorSubtaskState emptyOperatorSubtaskState = new OperatorSubtaskState();
	Assert.assertFalse(emptyOperatorSubtaskState.hasState());
	taskStateSnapshot.putSubtaskStateByOperatorID(new OperatorID(), emptyOperatorSubtaskState);
	Assert.assertFalse(taskStateSnapshot.hasState());

	OperatorStateHandle stateHandle = StateHandleDummyUtil.createNewOperatorStateHandle(2, random);
	OperatorSubtaskState nonEmptyOperatorSubtaskState = new OperatorSubtaskState(
		stateHandle,
		null,
		null,
		null
	);

	Assert.assertTrue(nonEmptyOperatorSubtaskState.hasState());
	taskStateSnapshot.putSubtaskStateByOperatorID(new OperatorID(), nonEmptyOperatorSubtaskState);
	Assert.assertTrue(taskStateSnapshot.hasState());
}
 
Example #18
Source File: MemoryStateBackend.java    From flink with Apache License 2.0
@Override
public OperatorStateBackend createOperatorStateBackend(
	Environment env,
	String operatorIdentifier,
	@Nonnull Collection<OperatorStateHandle> stateHandles,
	CloseableRegistry cancelStreamRegistry) throws Exception {

	return new DefaultOperatorStateBackendBuilder(
		env.getUserClassLoader(),
		env.getExecutionConfig(),
		isUsingAsynchronousSnapshots(),
		stateHandles,
		cancelStreamRegistry).build();
}
 
Example #19
Source File: MockStateBackend.java    From Flink-CEPplus with Apache License 2.0
@Override
public OperatorStateBackend createOperatorStateBackend(
	Environment env,
	String operatorIdentifier,
	@Nonnull Collection<OperatorStateHandle> stateHandles,
	CloseableRegistry cancelStreamRegistry) {
	throw new UnsupportedOperationException();
}
 
Example #20
Source File: CheckpointCoordinatorTest.java    From Flink-CEPplus with Apache License 2.0
private static void collectResult(int opIdx, OperatorStateHandle operatorStateHandle, List<String> resultCollector) throws Exception {
	try (FSDataInputStream in = operatorStateHandle.openInputStream()) {
		for (Map.Entry<String, OperatorStateHandle.StateMetaInfo> entry : operatorStateHandle.getStateNameToPartitionOffsets().entrySet()) {
			for (long offset : entry.getValue().getOffsets()) {
				in.seek(offset);
				Integer state = InstantiationUtil.
						deserializeObject(in, Thread.currentThread().getContextClassLoader());
				resultCollector.add(opIdx + " : " + entry.getKey() + " : " + state);
			}
		}
	}
}
 
Example #21
Source File: BackendRestorerProcedureTest.java    From flink with Apache License 2.0
/**
 * Tests if there is an exception if all restore attempts are exhausted and failed.
 */
@Test
public void testExceptionThrownIfAllRestoresFailed() throws Exception {

	CloseableRegistry closeableRegistry = new CloseableRegistry();

	OperatorStateHandle firstFailHandle = mock(OperatorStateHandle.class);
	OperatorStateHandle secondFailHandle = mock(OperatorStateHandle.class);
	OperatorStateHandle thirdFailHandle = mock(OperatorStateHandle.class);

	List<StateObjectCollection<OperatorStateHandle>> sortedRestoreOptions = Arrays.asList(
		new StateObjectCollection<>(Collections.singletonList(firstFailHandle)),
		new StateObjectCollection<>(Collections.singletonList(secondFailHandle)),
		new StateObjectCollection<>(Collections.singletonList(thirdFailHandle)));

	BackendRestorerProcedure<OperatorStateBackend, OperatorStateHandle> restorerProcedure =
		new BackendRestorerProcedure<>(backendSupplier, closeableRegistry, "test op state backend");

	try {
		restorerProcedure.createAndRestore(sortedRestoreOptions);
		Assert.fail();
	} catch (Exception ignore) {
	}

	verify(firstFailHandle).openInputStream();
	verify(secondFailHandle).openInputStream();
	verify(thirdFailHandle).openInputStream();
}
 
Example #22
Source File: StreamTaskStateInitializerImpl.java    From flink with Apache License 2.0
protected OperatorStateBackend operatorStateBackend(
	String operatorIdentifierText,
	PrioritizedOperatorSubtaskState prioritizedOperatorSubtaskStates,
	CloseableRegistry backendCloseableRegistry) throws Exception {

	String logDescription = "operator state backend for " + operatorIdentifierText;

	// Restore processing is now part of building/constructing the backend, so we need to make sure that
	// each stream constructed during restore can also be closed in case the task is cancelled, for example
	// the data input stream opened for serialization/deserialization during restore.
	CloseableRegistry cancelStreamRegistryForRestore = new CloseableRegistry();
	backendCloseableRegistry.registerCloseable(cancelStreamRegistryForRestore);
	BackendRestorerProcedure<OperatorStateBackend, OperatorStateHandle> backendRestorer =
		new BackendRestorerProcedure<>(
			(stateHandles) -> stateBackend.createOperatorStateBackend(
				environment,
				operatorIdentifierText,
				stateHandles,
				cancelStreamRegistryForRestore),
			backendCloseableRegistry,
			logDescription);

	try {
		return backendRestorer.createAndRestore(
			prioritizedOperatorSubtaskStates.getPrioritizedManagedOperatorState());
	} finally {
		if (backendCloseableRegistry.unregisterCloseable(cancelStreamRegistryForRestore)) {
			IOUtils.closeQuietly(cancelStreamRegistryForRestore);
		}
	}
}
 
Example #23
Source File: RoundRobinOperatorStateRepartitioner.java    From flink with Apache License 2.0
/**
 * Repartition all named states.
 */
private List<Map<StreamStateHandle, OperatorStateHandle>> repartition(
		GroupByStateNameResults nameToStateByMode,
		int newParallelism) {

	// We will use this to merge w.r.t. StreamStateHandles for each parallel subtask inside the maps
	List<Map<StreamStateHandle, OperatorStateHandle>> mergeMapList = new ArrayList<>(newParallelism);

	// Initialize
	for (int i = 0; i < newParallelism; ++i) {
		mergeMapList.add(new HashMap<>());
	}

	// Start with the state handles we distribute round robin by splitting by offsets
	Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> nameToDistributeState =
			nameToStateByMode.getByMode(OperatorStateHandle.Mode.SPLIT_DISTRIBUTE);

	repartitionSplitState(nameToDistributeState, newParallelism, mergeMapList);

	// Now we also add the state handles marked for union to all parallel instances
	Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> nameToUnionState =
			nameToStateByMode.getByMode(OperatorStateHandle.Mode.UNION);

	repartitionUnionState(nameToUnionState, mergeMapList);

	// Now we also add the state handles marked for uniform broadcast to all parallel instances
	Map<String, List<Tuple2<StreamStateHandle, OperatorStateHandle.StateMetaInfo>>> nameToBroadcastState =
			nameToStateByMode.getByMode(OperatorStateHandle.Mode.BROADCAST);

	repartitionBroadcastState(nameToBroadcastState, mergeMapList);

	return mergeMapList;
}
 
Example #24
Source File: MetadataV2V3SerializerBase.java    From flink with Apache License 2.0
OperatorStateHandle deserializeOperatorStateHandle(
		DataInputStream dis,
		@Nullable DeserializationContext context) throws IOException {

	final int type = dis.readByte();
	if (NULL_HANDLE == type) {
		return null;
	} else if (PARTITIONABLE_OPERATOR_STATE_HANDLE == type) {
		int mapSize = dis.readInt();
		Map<String, OperatorStateHandle.StateMetaInfo> offsetsMap = new HashMap<>(mapSize);
		for (int i = 0; i < mapSize; ++i) {
			String key = dis.readUTF();

			int modeOrdinal = dis.readByte();
			OperatorStateHandle.Mode mode = OperatorStateHandle.Mode.values()[modeOrdinal];

			long[] offsets = new long[dis.readInt()];
			for (int j = 0; j < offsets.length; ++j) {
				offsets[j] = dis.readLong();
			}

			OperatorStateHandle.StateMetaInfo metaInfo =
					new OperatorStateHandle.StateMetaInfo(offsets, mode);
			offsetsMap.put(key, metaInfo);
		}
		StreamStateHandle stateHandle = deserializeStreamStateHandle(dis, context);
		return new OperatorStreamStateHandle(offsetsMap, stateHandle);
	} else {
		throw new IllegalStateException("Reading invalid OperatorStateHandle, type: " + type);
	}
}
 
Example #25
Source File: TaskCheckpointingBehaviourTest.java    From Flink-CEPplus with Apache License 2.0
@Override
public OperatorStateBackend createOperatorStateBackend(
	Environment env,
	String operatorIdentifier,
	@Nonnull Collection<OperatorStateHandle> stateHandles,
	CloseableRegistry cancelStreamRegistry) throws Exception {
	return new DefaultOperatorStateBackendBuilder(
		env.getUserClassLoader(),
		env.getExecutionConfig(),
		true,
		stateHandles,
		cancelStreamRegistry) {
		@Override
		@SuppressWarnings("unchecked")
		public DefaultOperatorStateBackend build() {
			return new DefaultOperatorStateBackend(
				executionConfig,
				cancelStreamRegistry,
				new HashMap<>(),
				new HashMap<>(),
				new HashMap<>(),
				new HashMap<>(),
				mock(AbstractSnapshotStrategy.class)
			) {
				@Nonnull
				@Override
				public RunnableFuture<SnapshotResult<OperatorStateHandle>> snapshot(
					long checkpointId,
					long timestamp,
					@Nonnull CheckpointStreamFactory streamFactory,
					@Nonnull CheckpointOptions checkpointOptions) throws Exception {

					throw new Exception("Sync part snapshot exception.");
				}
			};
		}
	}.build();
}
 
Example #26
Source File: StubStateBackend.java    From flink with Apache License 2.0
@Override
public OperatorStateBackend createOperatorStateBackend(
	Environment env,
	String operatorIdentifier,
	@Nonnull Collection<OperatorStateHandle> stateHandles,
	CloseableRegistry cancelStreamRegistry) throws Exception {
	return backend.createOperatorStateBackend(env, operatorIdentifier, stateHandles, cancelStreamRegistry);
}
 
Example #27
Source File: FsStateBackend.java    From Flink-CEPplus with Apache License 2.0
@Override
public OperatorStateBackend createOperatorStateBackend(
	Environment env,
	String operatorIdentifier,
	@Nonnull Collection<OperatorStateHandle> stateHandles,
	CloseableRegistry cancelStreamRegistry) throws BackendBuildingException {

	return new DefaultOperatorStateBackendBuilder(
		env.getUserClassLoader(),
		env.getExecutionConfig(),
		isUsingAsynchronousSnapshots(),
		stateHandles,
		cancelStreamRegistry).build();
}
 
Example #28
Source File: OperatorSnapshotFutures.java    From flink with Apache License 2.0
public OperatorSnapshotFutures(
	@Nonnull RunnableFuture<SnapshotResult<KeyedStateHandle>> keyedStateManagedFuture,
	@Nonnull RunnableFuture<SnapshotResult<KeyedStateHandle>> keyedStateRawFuture,
	@Nonnull RunnableFuture<SnapshotResult<OperatorStateHandle>> operatorStateManagedFuture,
	@Nonnull RunnableFuture<SnapshotResult<OperatorStateHandle>> operatorStateRawFuture) {
	this.keyedStateManagedFuture = keyedStateManagedFuture;
	this.keyedStateRawFuture = keyedStateRawFuture;
	this.operatorStateManagedFuture = operatorStateManagedFuture;
	this.operatorStateRawFuture = operatorStateRawFuture;
}
 
Example #29
Source File: StateBackendITCase.java    From flink with Apache License 2.0
@Override
public OperatorStateBackend createOperatorStateBackend(
	Environment env,
	String operatorIdentifier,
	@Nonnull Collection<OperatorStateHandle> stateHandles,
	CloseableRegistry cancelStreamRegistry) throws Exception {

	throw new SuccessException();
}