Java Code Examples for org.apache.flink.runtime.io.network.partition.ResultPartitionType#PIPELINED_BOUNDED

The following examples show how to use org.apache.flink.runtime.io.network.partition.ResultPartitionType#PIPELINED_BOUNDED. Each example is taken from an open-source project; the source file, the project it comes from, and its license are listed above the snippet.
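Before the project examples, here is a minimal, self-contained sketch of the constant itself. It is not taken from any of the projects below; the class name and the streaming/batch helper are illustrative only, and it assumes flink-runtime is on the classpath.

import org.apache.flink.runtime.io.network.partition.ResultPartitionType;

public class PipelinedBoundedSketch {

	// Illustrative helper (not Flink API): pick PIPELINED_BOUNDED for streaming
	// exchanges that should use a bounded number of network buffers, and BLOCKING
	// for batch-style exchanges that are fully produced before being consumed.
	static ResultPartitionType choosePartitionType(boolean streamingExchange) {
		return streamingExchange
			? ResultPartitionType.PIPELINED_BOUNDED
			: ResultPartitionType.BLOCKING;
	}

	public static void main(String[] args) {
		// Prints "PIPELINED_BOUNDED"
		System.out.println(choosePartitionType(true));
	}
}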
Example 1
Source File: StreamNetworkBenchmarkEnvironment.java    From Flink-CEPplus with Apache License 2.0
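// Builds the sender-side ResultPartition for the benchmark: one PIPELINED_BOUNDED
// subpartition per channel, registered with the NetworkEnvironment before use.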
protected ResultPartitionWriter createResultPartition(
		JobID jobId,
		ResultPartitionID partitionId,
		NetworkEnvironment environment,
		int channels) throws Exception {

	ResultPartition resultPartition = new ResultPartition(
		"sender task",
		new NoOpTaskActions(),
		jobId,
		partitionId,
		ResultPartitionType.PIPELINED_BOUNDED,
		channels,
		1,
		environment.getResultPartitionManager(),
		new NoOpResultPartitionConsumableNotifier(),
		ioManager,
		false);

	environment.setupPartition(resultPartition);

	return resultPartition;
}
 
Example 2
Source File: StreamNetworkBenchmarkEnvironment.java    From flink with Apache License 2.0
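// Receiver-side descriptor for the benchmark: one shuffle descriptor per sender
// partition, all consuming the given subpartition of a PIPELINED_BOUNDED result.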
private InputGateDeploymentDescriptor createInputGateDeploymentDescriptor(
		TaskManagerLocation senderLocation,
		int consumedSubpartitionIndex,
		ResourceID localLocation) {

	final ShuffleDescriptor[] channelDescriptors = Arrays.stream(partitionIds)
		.map(partitionId ->
			createShuffleDescriptor(localMode, partitionId, localLocation, senderLocation, consumedSubpartitionIndex))
		.toArray(ShuffleDescriptor[]::new);

	return new InputGateDeploymentDescriptor(
		dataSetID,
		ResultPartitionType.PIPELINED_BOUNDED,
		consumedSubpartitionIndex,
		channelDescriptors);
}
 
Example 3
Source File: StreamingJobGraphGenerator.java    From flink with Apache License 2.0
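// Maps the stream graph's GlobalDataExchangeMode to a ResultPartitionType: edges
// that the mode allows to be pipelined become PIPELINED_BOUNDED, the rest BLOCKING.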
private ResultPartitionType determineResultPartitionType(StreamPartitioner<?> partitioner) {
	switch (streamGraph.getGlobalDataExchangeMode()) {
		case ALL_EDGES_BLOCKING:
			return ResultPartitionType.BLOCKING;
		case FORWARD_EDGES_PIPELINED:
			if (partitioner instanceof ForwardPartitioner) {
				return ResultPartitionType.PIPELINED_BOUNDED;
			} else {
				return ResultPartitionType.BLOCKING;
			}
		case POINTWISE_EDGES_PIPELINED:
			if (isPointwisePartitioner(partitioner)) {
				return ResultPartitionType.PIPELINED_BOUNDED;
			} else {
				return ResultPartitionType.BLOCKING;
			}
		case ALL_EDGES_PIPELINED:
			return ResultPartitionType.PIPELINED_BOUNDED;
		default:
			throw new RuntimeException("Unrecognized global data exchange mode " + streamGraph.getGlobalDataExchangeMode());
	}
}
 
Example 4
Source File: StreamNetworkBenchmarkEnvironment.java    From flink with Apache License 2.0
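// Builds the descriptor for one input gate: one shuffle descriptor per channel,
// all backed by PIPELINED_BOUNDED partitions of the same intermediate data set.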
private InputGateDeploymentDescriptor createInputGateDeploymentDescriptor(
		TaskManagerLocation senderLocation,
		int gateIndex,
		ResourceID localLocation) {

	final ShuffleDescriptor[] channelDescriptors = new ShuffleDescriptor[channels];
	for (int channelIndex = 0; channelIndex < channels; ++channelIndex) {
		channelDescriptors[channelIndex] = createShuffleDescriptor(
			localMode, partitionIds[gateIndex], localLocation, senderLocation, channelIndex);
	}

	return new InputGateDeploymentDescriptor(
		dataSetID,
		ResultPartitionType.PIPELINED_BOUNDED,
		// 0 is used because TestRemoteInputChannel and TestLocalInputChannel will
		// ignore this and use channelIndex instead when requesting a subpartition
		0,
		channelDescriptors);
}
 
Example 5
Source File: StreamNetworkBenchmarkEnvironment.java    From Flink-CEPplus with Apache License 2.0
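// Creates one SingleInputGate per receiver channel, each consuming PIPELINED_BOUNDED
// partitions, and unions the gates when more than one channel is requested.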
private InputGate createInputGate(
		JobID jobId,
		IntermediateDataSetID dataSetID,
		ExecutionAttemptID executionAttemptID,
		final TaskManagerLocation senderLocation,
		NetworkEnvironment environment,
		final int channels) throws IOException {

	InputGate[] gates = new InputGate[channels];
	for (int channel = 0; channel < channels; ++channel) {
		int finalChannel = channel;
		InputChannelDeploymentDescriptor[] channelDescriptors = Arrays.stream(partitionIds)
			.map(partitionId -> new InputChannelDeploymentDescriptor(
				partitionId,
				localMode ? ResultPartitionLocation.createLocal() : ResultPartitionLocation.createRemote(new ConnectionID(senderLocation, finalChannel))))
			.toArray(InputChannelDeploymentDescriptor[]::new);

		final InputGateDeploymentDescriptor gateDescriptor = new InputGateDeploymentDescriptor(
			dataSetID,
			ResultPartitionType.PIPELINED_BOUNDED,
			channel,
			channelDescriptors);

		SingleInputGate gate = SingleInputGate.create(
			"receiving task[" + channel + "]",
			jobId,
			executionAttemptID,
			gateDescriptor,
			environment,
			new NoOpTaskActions(),
			UnregisteredMetricGroups.createUnregisteredTaskMetricGroup().getIOMetricGroup());

		environment.setupInputGate(gate);
		gates[channel] = gate;
	}

	if (channels > 1) {
		return new UnionInputGate(gates);
	} else {
		return gates[0];
	}
}
 
Example 6
Source File: StreamingJobGraphGenerator.java    From flink with Apache License 2.0
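// Connects a chained head vertex to its downstream vertex; the edge's ShuffleMode
// decides whether the exchange uses PIPELINED_BOUNDED or BLOCKING partitions.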
private void connect(Integer headOfChain, StreamEdge edge) {

		physicalEdgesInOrder.add(edge);

		Integer downStreamVertexID = edge.getTargetId();

		JobVertex headVertex = jobVertices.get(headOfChain);
		JobVertex downStreamVertex = jobVertices.get(downStreamVertexID);

		StreamConfig downStreamConfig = new StreamConfig(downStreamVertex.getConfiguration());

		downStreamConfig.setNumberOfInputs(downStreamConfig.getNumberOfInputs() + 1);

		StreamPartitioner<?> partitioner = edge.getPartitioner();

		ResultPartitionType resultPartitionType;
		switch (edge.getShuffleMode()) {
			case PIPELINED:
				resultPartitionType = ResultPartitionType.PIPELINED_BOUNDED;
				break;
			case BATCH:
				resultPartitionType = ResultPartitionType.BLOCKING;
				break;
			case UNDEFINED:
				resultPartitionType = streamGraph.isBlockingConnectionsBetweenChains() ?
						ResultPartitionType.BLOCKING : ResultPartitionType.PIPELINED_BOUNDED;
				break;
			default:
				throw new UnsupportedOperationException("Data exchange mode " +
					edge.getShuffleMode() + " is not supported yet.");
		}

		JobEdge jobEdge;
		if (partitioner instanceof ForwardPartitioner || partitioner instanceof RescalePartitioner) {
			jobEdge = downStreamVertex.connectNewDataSetAsInput(
				headVertex,
				DistributionPattern.POINTWISE,
				resultPartitionType);
		} else {
			jobEdge = downStreamVertex.connectNewDataSetAsInput(
					headVertex,
					DistributionPattern.ALL_TO_ALL,
					resultPartitionType);
		}
		// set strategy name so that web interface can show it.
		jobEdge.setShipStrategyName(partitioner.toString());

		if (LOG.isDebugEnabled()) {
			LOG.debug("CONNECTED: {} - {} -> {}", partitioner.getClass().getSimpleName(),
					headOfChain, downStreamVertexID);
		}
	}
 
Example 7
Source File: StreamingJobGraphGenerator.java    From flink with Apache License 2.0
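// Same connection logic as the previous example, but an UNDEFINED ShuffleMode is
// resolved via determineResultPartitionType(partitioner) (shown in Example 3).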
private void connect(Integer headOfChain, StreamEdge edge) {

		physicalEdgesInOrder.add(edge);

		Integer downStreamVertexID = edge.getTargetId();

		JobVertex headVertex = jobVertices.get(headOfChain);
		JobVertex downStreamVertex = jobVertices.get(downStreamVertexID);

		StreamConfig downStreamConfig = new StreamConfig(downStreamVertex.getConfiguration());

		downStreamConfig.setNumberOfInputs(downStreamConfig.getNumberOfInputs() + 1);

		StreamPartitioner<?> partitioner = edge.getPartitioner();

		ResultPartitionType resultPartitionType;
		switch (edge.getShuffleMode()) {
			case PIPELINED:
				resultPartitionType = ResultPartitionType.PIPELINED_BOUNDED;
				break;
			case BATCH:
				resultPartitionType = ResultPartitionType.BLOCKING;
				break;
			case UNDEFINED:
				resultPartitionType = determineResultPartitionType(partitioner);
				break;
			default:
				throw new UnsupportedOperationException("Data exchange mode " +
					edge.getShuffleMode() + " is not supported yet.");
		}

		JobEdge jobEdge;
		if (isPointwisePartitioner(partitioner)) {
			jobEdge = downStreamVertex.connectNewDataSetAsInput(
				headVertex,
				DistributionPattern.POINTWISE,
				resultPartitionType);
		} else {
			jobEdge = downStreamVertex.connectNewDataSetAsInput(
					headVertex,
					DistributionPattern.ALL_TO_ALL,
					resultPartitionType);
		}
		// set strategy name so that web interface can show it.
		jobEdge.setShipStrategyName(partitioner.toString());

		if (LOG.isDebugEnabled()) {
			LOG.debug("CONNECTED: {} - {} -> {}", partitioner.getClass().getSimpleName(),
					headOfChain, downStreamVertexID);
		}
	}