org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter Java Examples

The following examples show how to use org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter. Each example is taken from an open-source project, noted in the source-file line above the snippet; refer to the original file for full context.
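
Before the individual examples, a minimal sketch of the typical write path may help: a task obtains a ResultPartitionWriter from its Environment and wraps it in a RecordWriter, which serializes records into network buffers and routes them to subpartitions. The task class and payload below (ExampleProducerTask, LongValue) are illustrative rather than taken from the Flink sources; the builder calls match the API used in the examples on this page (in some older versions shown here, RecordWriterBuilder is a raw type).

import org.apache.flink.runtime.execution.Environment;
import org.apache.flink.runtime.io.network.api.writer.RecordWriter;
import org.apache.flink.runtime.io.network.api.writer.RecordWriterBuilder;
import org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter;
import org.apache.flink.runtime.jobgraph.tasks.AbstractInvokable;
import org.apache.flink.types.LongValue;

/** Illustrative producer task; not part of the Flink code base. */
public class ExampleProducerTask extends AbstractInvokable {

	public ExampleProducerTask(Environment environment) {
		super(environment);
	}

	@Override
	public void invoke() throws Exception {
		// The environment exposes one ResultPartitionWriter per produced result partition.
		ResultPartitionWriter partitionWriter = getEnvironment().getWriter(0);

		// The RecordWriter serializes records into network buffers owned by the partition.
		RecordWriter<LongValue> writer = new RecordWriterBuilder<LongValue>()
			.setTimeout(100) // flush partially filled buffers every 100 ms
			.build(partitionWriter);

		writer.emit(new LongValue(42L)); // routed by the configured channel selector
		writer.flushAll();               // push out any remaining buffered data
	}
}
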
Example #1
Source File: SubtaskCheckpointCoordinatorImpl.java    From flink with Apache License 2.0
private void prepareInflightDataSnapshot(long checkpointId) throws IOException {
	ResultPartitionWriter[] writers = env.getAllWriters();
	for (ResultPartitionWriter writer : writers) {
		for (int i = 0; i < writer.getNumberOfSubpartitions(); i++) {
			ResultSubpartition subpartition = writer.getSubpartition(i);
			channelStateWriter.addOutputData(
				checkpointId,
				subpartition.getSubpartitionInfo(),
				ChannelStateWriter.SEQUENCE_NUMBER_UNKNOWN,
				subpartition.requestInflightBufferSnapshot().toArray(new Buffer[0]));
		}
	}
	channelStateWriter.finishOutput(checkpointId);
	prepareInputSnapshot.apply(channelStateWriter, checkpointId)
		.whenComplete((unused, ex) -> {
			if (ex != null) {
				channelStateWriter.abort(checkpointId, ex, false /* result is needed and cleaned by getWriteResult */);
			} else {
				channelStateWriter.finishInput(checkpointId);
			}
		});
}
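
Note the ordering in this routine: the in-flight output buffers of every subpartition are snapshotted synchronously and finishOutput(checkpointId) is called before the asynchronous input-side snapshot completes; if the input side fails, the whole checkpoint is aborted via channelStateWriter.abort(...).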
 
Example #2
Source File: ResultPartitionTest.java    From flink with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer(BufferConsumer, int)} on a working partition.
 *
 * @param partitionType the result partition type to set up
 */
private void testAddOnPartition(final ResultPartitionType partitionType) throws Exception {
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	JobID jobId = new JobID();
	TaskActions taskActions = new NoOpTaskActions();
	ResultPartitionWriter consumableNotifyingPartitionWriter = createConsumableNotifyingResultPartitionWriter(
		partitionType,
		taskActions,
		jobId,
		notifier);
	BufferConsumer bufferConsumer = createFilledBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	try {
		// partition.add() adds the bufferConsumer without recycling it (if not spilling)
		consumableNotifyingPartitionWriter.addBufferConsumer(bufferConsumer, 0);
		assertFalse("bufferConsumer should not be recycled (still in the queue)", bufferConsumer.isRecycled());
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
		}
		// should have been notified for pipelined partitions
		if (partitionType.isPipelined()) {
			verify(notifier, times(1))
				.notifyPartitionConsumable(eq(jobId), eq(consumableNotifyingPartitionWriter.getPartitionId()), eq(taskActions));
		}
	}
}
 
Example #3
Source File: StreamTask.java    From Flink-CEPplus with Apache License 2.0
private static <OUT> RecordWriter<SerializationDelegate<StreamRecord<OUT>>> createRecordWriter(
		StreamEdge edge,
		int outputIndex,
		Environment environment,
		String taskName,
		long bufferTimeout) {
	@SuppressWarnings("unchecked")
	StreamPartitioner<OUT> outputPartitioner = (StreamPartitioner<OUT>) edge.getPartitioner();

	LOG.debug("Using partitioner {} for output {} of task {}", outputPartitioner, outputIndex, taskName);

	ResultPartitionWriter bufferWriter = environment.getWriter(outputIndex);

	// we initialize the partitioner here with the number of key groups (aka max. parallelism)
	if (outputPartitioner instanceof ConfigurableStreamPartitioner) {
		int numKeyGroups = bufferWriter.getNumTargetKeyGroups();
		if (0 < numKeyGroups) {
			((ConfigurableStreamPartitioner) outputPartitioner).configure(numKeyGroups);
		}
	}

	RecordWriter<SerializationDelegate<StreamRecord<OUT>>> output =
		RecordWriter.createRecordWriter(bufferWriter, outputPartitioner, bufferTimeout, taskName);
	output.setMetricGroup(environment.getMetricGroup().getIOMetricGroup());
	return output;
}
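
Only key-group-aware partitioners implement ConfigurableStreamPartitioner (in Flink this is notably the KeyGroupStreamPartitioner used for keyed streams); for every other partitioner the value returned by getNumTargetKeyGroups() is simply ignored.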
 
Example #4
Source File: ConsumableNotifyingResultPartitionWriterDecorator.java    From flink with Apache License 2.0
public static ResultPartitionWriter[] decorate(
		Collection<ResultPartitionDeploymentDescriptor> descs,
		ResultPartitionWriter[] partitionWriters,
		TaskActions taskActions,
		JobID jobId,
		ResultPartitionConsumableNotifier notifier) {

	ResultPartitionWriter[] consumableNotifyingPartitionWriters = new ResultPartitionWriter[partitionWriters.length];
	int counter = 0;
	for (ResultPartitionDeploymentDescriptor desc : descs) {
		if (desc.sendScheduleOrUpdateConsumersMessage() && desc.getPartitionType().isPipelined()) {
			consumableNotifyingPartitionWriters[counter] = new ConsumableNotifyingResultPartitionWriterDecorator(
				taskActions,
				jobId,
				partitionWriters[counter],
				notifier);
		} else {
			consumableNotifyingPartitionWriters[counter] = partitionWriters[counter];
		}
		counter++;
	}
	return consumableNotifyingPartitionWriters;
}
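
Only pipelined partitions that are flagged to send a scheduleOrUpdateConsumers message are wrapped by the decorator; blocking partitions are returned unchanged, since their consumers are deployed only after the producing task has finished.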
 
Example #5
Source File: StreamNetworkBenchmarkEnvironment.java    From flink with Apache License 2.0
protected ResultPartitionWriter createResultPartition(
		JobID jobId,
		ResultPartitionID partitionId,
		NettyShuffleEnvironment environment,
		int channels) throws Exception {

	ResultPartitionWriter resultPartitionWriter = new ResultPartitionBuilder()
		.setResultPartitionId(partitionId)
		.setResultPartitionType(ResultPartitionType.PIPELINED_BOUNDED)
		.setNumberOfSubpartitions(channels)
		.setResultPartitionManager(environment.getResultPartitionManager())
		.setupBufferPoolFactoryFromNettyShuffleEnvironment(environment)
		.build();

	ResultPartitionWriter consumableNotifyingPartitionWriter = new ConsumableNotifyingResultPartitionWriterDecorator(
		new NoOpTaskActions(),
		jobId,
		resultPartitionWriter,
		new NoOpResultPartitionConsumableNotifier());

	consumableNotifyingPartitionWriter.setup();

	return consumableNotifyingPartitionWriter;
}
 
Example #6
Source File: ShuffleCompressionITCase.java    From flink with Apache License 2.0
@Override
public void invoke() throws Exception {
	ResultPartitionWriter resultPartitionWriter = getEnvironment().getWriter(0);
	RecordWriterBuilder<LongValue> recordWriterBuilder = new RecordWriterBuilder<>();
	if (getEnvironment().getExecutionConfig().getExecutionMode() == ExecutionMode.PIPELINED) {
		// enable periodic output flushing for pipelined execution mode
		recordWriterBuilder.setTimeout(100);
	}
	if (useBroadcastPartitioner) {
		recordWriterBuilder.setChannelSelector(new BroadcastPartitioner());
	}
	RecordWriter<LongValue> writer = recordWriterBuilder.build(resultPartitionWriter);

	for (int i = 0; i < NUM_RECORDS_TO_SEND; ++i) {
		writer.broadcastEmit(RECORD_TO_SEND);
	}
	writer.flushAll();
	writer.clearBuffers();
}
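
With a broadcast channel selector configured, the builder produces a broadcast-optimized RecordWriter, so broadcastEmit(...) serializes each record only once for all subpartitions. The 100 ms timeout enables the periodic output flusher that pipelined execution relies on; without it, buffers are only handed downstream once they are full or flushAll() is called.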
 
Example #7
Source File: Task.java    From flink with Apache License 2.0
/**
 * Releases resources before the task exits. If the task has failed, is canceled, or is being
 * canceled, the partitions are failed as well so that their resources are released.
 */
private void releaseResources() {
	LOG.debug("Release task {} network resources (state: {}).", taskNameWithSubtask, getExecutionState());

	for (ResultPartitionWriter partitionWriter : consumableNotifyingPartitionWriters) {
		taskEventDispatcher.unregisterPartition(partitionWriter.getPartitionId());
		if (isCanceledOrFailed()) {
			partitionWriter.fail(getFailureCause());
		}
	}

	closeNetworkResources();
	try {
		taskStateManager.close();
	} catch (Exception e) {
		LOG.error("Failed to close task state manager for task {}.", taskNameWithSubtask, e);
	}
}
 
Example #8
Source File: StreamNetworkBenchmarkEnvironment.java    From flink with Apache License 2.0
public ResultPartitionWriter createResultPartitionWriter(int partitionIndex) throws Exception {
	ResultPartitionWriter resultPartitionWriter = new ResultPartitionBuilder()
		.setResultPartitionId(partitionIds[partitionIndex])
		.setResultPartitionType(ResultPartitionType.PIPELINED_BOUNDED)
		.setNumberOfSubpartitions(channels)
		.setResultPartitionManager(senderEnv.getResultPartitionManager())
		.setupBufferPoolFactoryFromNettyShuffleEnvironment(senderEnv)
		.build();

	ResultPartitionWriter consumableNotifyingPartitionWriter = new ConsumableNotifyingResultPartitionWriterDecorator(
		new NoOpTaskActions(),
		jobId,
		resultPartitionWriter,
		new NoOpResultPartitionConsumableNotifier());

	consumableNotifyingPartitionWriter.setup();

	return consumableNotifyingPartitionWriter;
}
 
Example #9
Source File: Task.java    From flink with Apache License 2.0
/**
 * Releases network resources before the task exits. If the task has failed, is canceled, or is
 * being canceled, the partitions are failed as well so that their resources are released.
 */
private void releaseNetworkResources() {
	LOG.debug("Release task {} network resources (state: {}).", taskNameWithSubtask, getExecutionState());

	for (ResultPartitionWriter partitionWriter : consumableNotifyingPartitionWriters) {
		taskEventDispatcher.unregisterPartition(partitionWriter.getPartitionId());
		if (isCanceledOrFailed()) {
			partitionWriter.fail(getFailureCause());
		}
	}

	closeNetworkResources();
}
 
Example #10
Source File: NettyShuffleEnvironment.java    From flink with Apache License 2.0
/**
 * Registers the legacy network metric groups that existed before the shuffle service refactoring.
 *
 * <p>These metric groups are only registered if the shuffle service implementation is the
 * original default one.
 *
 * @deprecated should be removed in the future
 */
@SuppressWarnings("DeprecatedIsStillUsed")
@Deprecated
public void registerLegacyNetworkMetrics(
		MetricGroup metricGroup,
		ResultPartitionWriter[] producedPartitions,
		InputGate[] inputGates) {
	NettyShuffleMetricFactory.registerLegacyNetworkMetrics(
		config.isNetworkDetailedMetrics(),
		config.isCreditBased(),
		metricGroup,
		producedPartitions,
		inputGates);
}
 
Example #11
Source File: NettyShuffleMetricFactory.java    From flink with Apache License 2.0
/**
 * Registers the legacy network metric groups that existed before the shuffle service refactoring.
 *
 * <p>These metric groups are only registered if the shuffle service implementation is the
 * original default one.
 *
 * @deprecated should be removed in the future
 */
@SuppressWarnings("DeprecatedIsStillUsed")
@Deprecated
public static void registerLegacyNetworkMetrics(
		boolean isDetailedMetrics,
		boolean isCreditBased,
		MetricGroup metricGroup,
		ResultPartitionWriter[] producedPartitions,
		InputGate[] inputGates) {
	checkNotNull(metricGroup);
	checkNotNull(producedPartitions);
	checkNotNull(inputGates);

	// add metrics for buffers
	final MetricGroup buffersGroup = metricGroup.addGroup(METRIC_GROUP_BUFFERS_DEPRECATED);

	// similar to MetricUtils.instantiateNetworkMetrics() but inside this IOMetricGroup (metricGroup)
	final MetricGroup networkGroup = metricGroup.addGroup(METRIC_GROUP_NETWORK_DEPRECATED);
	final MetricGroup outputGroup = networkGroup.addGroup(METRIC_GROUP_OUTPUT);
	final MetricGroup inputGroup = networkGroup.addGroup(METRIC_GROUP_INPUT);

	ResultPartition[] resultPartitions = Arrays.copyOf(producedPartitions, producedPartitions.length, ResultPartition[].class);
	registerOutputMetrics(isDetailedMetrics, outputGroup, buffersGroup, resultPartitions);

	SingleInputGate[] singleInputGates = Arrays.copyOf(inputGates, inputGates.length, SingleInputGate[].class);
	registerInputMetrics(isDetailedMetrics, isCreditBased, inputGroup, buffersGroup, singleInputGates);
}
 
Example #12
Source File: StreamNetworkPointToPointBenchmark.java    From flink with Apache License 2.0
/**
 * Initializes the throughput benchmark with the given parameters.
 *
 * @param flushTimeout
 * 		output flushing interval of the
 * 		{@link org.apache.flink.runtime.io.network.api.writer.RecordWriter}'s output flusher thread
 */
public void setUp(long flushTimeout, Configuration config) throws Exception {
	environment = new StreamNetworkBenchmarkEnvironment<>();
	environment.setUp(1, 1, false, -1, -1, config);

	ResultPartitionWriter resultPartitionWriter = environment.createResultPartitionWriter(0);

	recordWriter = new RecordWriterBuilder().setTimeout(flushTimeout).build(resultPartitionWriter);
	receiver = environment.createReceiver();
}
 
Example #13
Source File: ResultPartitionTest.java    From flink with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer} on a partition which has already finished.
 *
 * @param partitionType the result partition type to set up
 */
private void testAddOnFinishedPartition(final ResultPartitionType partitionType) throws Exception {
	BufferConsumer bufferConsumer = createFilledBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	JobID jobId = new JobID();
	TaskActions taskActions = new NoOpTaskActions();
	ResultPartitionWriter consumableNotifyingPartitionWriter = createConsumableNotifyingResultPartitionWriter(
		partitionType,
		taskActions,
		jobId,
		notifier);
	try {
		consumableNotifyingPartitionWriter.finish();
		reset(notifier);
		// partition.add() should fail
		consumableNotifyingPartitionWriter.addBufferConsumer(bufferConsumer, 0);
		Assert.fail("exception expected");
	} catch (IllegalStateException e) {
		// expected => ignored
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
			Assert.fail("bufferConsumer not recycled");
		}
		// should not have notified either
		verify(notifier, never()).notifyPartitionConsumable(
			eq(jobId),
			eq(consumableNotifyingPartitionWriter.getPartitionId()),
			eq(taskActions));
	}
}
 
Example #14
Source File: ResultPartitionTest.java    From flink with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer} on a partition which has already been released.
 *
 * @param partitionType the result partition type to set up
 */
private void testAddOnReleasedPartition(final ResultPartitionType partitionType) throws Exception {
	BufferConsumer bufferConsumer = createFilledBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	JobID jobId = new JobID();
	TaskActions taskActions = new NoOpTaskActions();
	ResultPartition partition = partitionType == ResultPartitionType.BLOCKING ?
		createPartition(partitionType, fileChannelManager) : createPartition(partitionType);
	ResultPartitionWriter consumableNotifyingPartitionWriter = ConsumableNotifyingResultPartitionWriterDecorator.decorate(
		Collections.singleton(PartitionTestUtils.createPartitionDeploymentDescriptor(partitionType)),
		new ResultPartitionWriter[] {partition},
		taskActions,
		jobId,
		notifier)[0];
	try {
		partition.release();
		// partition.add() silently drops the bufferConsumer but recycles it
		consumableNotifyingPartitionWriter.addBufferConsumer(bufferConsumer, 0);
		assertTrue(partition.isReleased());
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
			Assert.fail("bufferConsumer not recycled");
		}
		// should not have notified either
		verify(notifier, never()).notifyPartitionConsumable(eq(jobId), eq(partition.getPartitionId()), eq(taskActions));
	}
}
 
Example #15
Source File: ResultPartitionTest.java    From flink with Apache License 2.0
private ResultPartitionWriter createConsumableNotifyingResultPartitionWriter(
		ResultPartitionType partitionType,
		TaskActions taskActions,
		JobID jobId,
		ResultPartitionConsumableNotifier notifier) {
	ResultPartition partition = partitionType == ResultPartitionType.BLOCKING ?
		createPartition(partitionType, fileChannelManager) : createPartition(partitionType);
	return ConsumableNotifyingResultPartitionWriterDecorator.decorate(
		Collections.singleton(PartitionTestUtils.createPartitionDeploymentDescriptor(partitionType)),
		new ResultPartitionWriter[] {partition},
		taskActions,
		jobId,
		notifier)[0];
}
 
Example #16
Source File: PartialConsumePipelinedResultTest.java    From flink with Apache License 2.0
@Override
public void invoke() throws Exception {
	final ResultPartitionWriter writer = getEnvironment().getWriter(0);

	for (int i = 0; i < 8; i++) {
		final BufferBuilder bufferBuilder = writer.getBufferBuilder();
		writer.addBufferConsumer(bufferBuilder.createBufferConsumer(), 0);
		Thread.sleep(50);
		bufferBuilder.finish();
	}
}
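
This bypasses the RecordWriter entirely: the task requests a BufferBuilder from the partition writer, immediately registers a matching BufferConsumer with subpartition 0, and finishes the builder after a short delay, so a downstream consumer can read the buffer while it is still being filled.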
 
Example #17
Source File: StreamTask.java    From flink with Apache License 2.0
private static <OUT> RecordWriter<SerializationDelegate<StreamRecord<OUT>>> createRecordWriter(
		StreamEdge edge,
		int outputIndex,
		Environment environment,
		String taskName,
		long bufferTimeout) {
	@SuppressWarnings("unchecked")
	StreamPartitioner<OUT> outputPartitioner = (StreamPartitioner<OUT>) edge.getPartitioner();

	LOG.debug("Using partitioner {} for output {} of task {}", outputPartitioner, outputIndex, taskName);

	ResultPartitionWriter bufferWriter = environment.getWriter(outputIndex);

	// we initialize the partitioner here with the number of key groups (aka max. parallelism)
	if (outputPartitioner instanceof ConfigurableStreamPartitioner) {
		int numKeyGroups = bufferWriter.getNumTargetKeyGroups();
		if (0 < numKeyGroups) {
			((ConfigurableStreamPartitioner) outputPartitioner).configure(numKeyGroups);
		}
	}

	RecordWriter<SerializationDelegate<StreamRecord<OUT>>> output = new RecordWriterBuilder()
		.setChannelSelector(outputPartitioner)
		.setTimeout(bufferTimeout)
		.setTaskName(taskName)
		.build(bufferWriter);
	output.setMetricGroup(environment.getMetricGroup().getIOMetricGroup());
	return output;
}
 
Example #18
Source File: ConsumableNotifyingResultPartitionWriterDecorator.java    From flink with Apache License 2.0
public ConsumableNotifyingResultPartitionWriterDecorator(
		TaskActions taskActions,
		JobID jobId,
		ResultPartitionWriter partitionWriter,
		ResultPartitionConsumableNotifier partitionConsumableNotifier) {
	this.taskActions = checkNotNull(taskActions);
	this.jobId = checkNotNull(jobId);
	this.partitionWriter = checkNotNull(partitionWriter);
	this.partitionConsumableNotifier = checkNotNull(partitionConsumableNotifier);
}
 
Example #19
Source File: Task.java    From flink with Apache License 2.0
@VisibleForTesting
public static void setupPartitionsAndGates(
	ResultPartitionWriter[] producedPartitions, InputGate[] inputGates) throws IOException {

	for (ResultPartitionWriter partition : producedPartitions) {
		partition.setup();
	}

	// InputGates must be initialized after the partitions, since during InputGate#setup
	// we are requesting partitions
	for (InputGate gate : inputGates) {
		gate.setup();
	}
}
 
Example #20
Source File: NettyShuffleEnvironment.java    From flink with Apache License 2.0
/**
 * Registers the legacy network metric groups that existed before the shuffle service refactoring.
 *
 * <p>These metric groups are only registered if the shuffle service implementation is the
 * original default one.
 *
 * @deprecated should be removed in the future
 */
@SuppressWarnings("DeprecatedIsStillUsed")
@Deprecated
public void registerLegacyNetworkMetrics(
		MetricGroup metricGroup,
		ResultPartitionWriter[] producedPartitions,
		InputGate[] inputGates) {
	NettyShuffleMetricFactory.registerLegacyNetworkMetrics(
		config.isNetworkDetailedMetrics(),
		metricGroup,
		producedPartitions,
		inputGates);
}
 
Example #21
Source File: OutputBlockedInvokable.java    From flink with Apache License 2.0
@Override
public void invoke() throws Exception {
	final IntValue value = new IntValue(1234);
	final ResultPartitionWriter resultPartitionWriter = getEnvironment().getWriter(0);
	final RecordWriter<IntValue> writer = new RecordWriterBuilder<IntValue>().build(resultPartitionWriter);

	while (true) {
		writer.emit(value);
	}
}
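
Because nothing ever consumes the partition, writer.emit(...) eventually blocks once the partition's local buffer pool is exhausted; that is precisely the back-pressure condition this invokable is meant to provoke, as its name suggests.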
 
Example #22
Source File: ResultPartitionTest.java    From flink with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer} on a partition which has already finished.
 *
 * @param partitionType the result partition type to set up
 */
private void testAddOnFinishedPartition(final ResultPartitionType partitionType) throws Exception {
	BufferConsumer bufferConsumer = createFilledFinishedBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	JobID jobId = new JobID();
	TaskActions taskActions = new NoOpTaskActions();
	ResultPartitionWriter consumableNotifyingPartitionWriter = createConsumableNotifyingResultPartitionWriter(
		partitionType,
		taskActions,
		jobId,
		notifier);
	try {
		consumableNotifyingPartitionWriter.finish();
		reset(notifier);
		// partition.add() should fail
		consumableNotifyingPartitionWriter.addBufferConsumer(bufferConsumer, 0);
		Assert.fail("exception expected");
	} catch (IllegalStateException e) {
		// expected => ignored
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
			Assert.fail("bufferConsumer not recycled");
		}
		// should not have notified either
		verify(notifier, never()).notifyPartitionConsumable(
			eq(jobId),
			eq(consumableNotifyingPartitionWriter.getPartitionId()),
			eq(taskActions));
	}
}
 
Example #23
Source File: ResultPartitionTest.java    From flink with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer} on a partition which has already been released.
 *
 * @param partitionType the result partition type to set up
 */
private void testAddOnReleasedPartition(final ResultPartitionType partitionType) throws Exception {
	BufferConsumer bufferConsumer = createFilledFinishedBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	JobID jobId = new JobID();
	TaskActions taskActions = new NoOpTaskActions();
	ResultPartition partition = partitionType == ResultPartitionType.BLOCKING ?
		createPartition(partitionType, fileChannelManager) : createPartition(partitionType);
	ResultPartitionWriter consumableNotifyingPartitionWriter = ConsumableNotifyingResultPartitionWriterDecorator.decorate(
		Collections.singleton(PartitionTestUtils.createPartitionDeploymentDescriptor(partitionType)),
		new ResultPartitionWriter[] {partition},
		taskActions,
		jobId,
		notifier)[0];
	try {
		partition.release();
		// partition.add() silently drops the bufferConsumer but recycles it
		consumableNotifyingPartitionWriter.addBufferConsumer(bufferConsumer, 0);
		assertTrue(partition.isReleased());
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
			Assert.fail("bufferConsumer not recycled");
		}
		// should not have notified either
		verify(notifier, never()).notifyPartitionConsumable(eq(jobId), eq(partition.getPartitionId()), eq(taskActions));
	}
}
 
Example #24
Source File: PartialConsumePipelinedResultTest.java    From flink with Apache License 2.0
@Override
public void invoke() throws Exception {
	final ResultPartitionWriter writer = getEnvironment().getWriter(0);

	for (int i = 0; i < 8; i++) {
		final BufferBuilder bufferBuilder = writer.getBufferBuilder(0);
		writer.addBufferConsumer(bufferBuilder.createBufferConsumer(), 0);
		Thread.sleep(50);
		bufferBuilder.finish();
	}
}
 
Example #25
Source File: StreamTask.java    From flink with Apache License 2.0
private void readRecoveredChannelState() throws IOException, InterruptedException {
	ChannelStateReader reader = getEnvironment().getTaskStateManager().getChannelStateReader();
	if (!reader.hasChannelStates()) {
		requestPartitions();
		return;
	}

	ResultPartitionWriter[] writers = getEnvironment().getAllWriters();
	if (writers != null) {
		for (ResultPartitionWriter writer : writers) {
			writer.readRecoveredState(reader);
		}
	}

	// Recovering the input side after the output side is beneficial: it guarantees that the
	// output side can request floating buffers from the global pool first.
	InputGate[] inputGates = getEnvironment().getAllInputGates();
	if (inputGates != null && inputGates.length > 0) {
		CompletableFuture[] futures = new CompletableFuture[inputGates.length];
		for (int i = 0; i < inputGates.length; i++) {
			futures[i] = inputGates[i].readRecoveredState(channelIOExecutor, reader);
		}

		// Note that we must request partitions only after all single input gates have finished recovery.
		CompletableFuture.allOf(futures).thenRun(() -> mainMailboxExecutor.execute(
			this::requestPartitions, "Input gates request partitions"));
	}
}
 
Example #26
Source File: StreamTask.java    From flink with Apache License 2.0
private static <OUT> RecordWriter<SerializationDelegate<StreamRecord<OUT>>> createRecordWriter(
		StreamEdge edge,
		int outputIndex,
		Environment environment,
		String taskName,
		long bufferTimeout) {
	@SuppressWarnings("unchecked")
	StreamPartitioner<OUT> outputPartitioner = (StreamPartitioner<OUT>) edge.getPartitioner();

	LOG.debug("Using partitioner {} for output {} of task {}", outputPartitioner, outputIndex, taskName);

	ResultPartitionWriter bufferWriter = environment.getWriter(outputIndex);

	// we initialize the partitioner here with the number of key groups (aka max. parallelism)
	if (outputPartitioner instanceof ConfigurableStreamPartitioner) {
		int numKeyGroups = bufferWriter.getNumTargetKeyGroups();
		if (0 < numKeyGroups) {
			((ConfigurableStreamPartitioner) outputPartitioner).configure(numKeyGroups);
		}
	}

	RecordWriter<SerializationDelegate<StreamRecord<OUT>>> output = new RecordWriterBuilder<SerializationDelegate<StreamRecord<OUT>>>()
		.setChannelSelector(outputPartitioner)
		.setTimeout(bufferTimeout)
		.setTaskName(taskName)
		.build(bufferWriter);
	output.setMetricGroup(environment.getMetricGroup().getIOMetricGroup());
	return output;
}
 
Example #27
Source File: StreamNetworkThroughputBenchmark.java    From flink with Apache License 2.0
/**
 * Initializes the throughput benchmark with the given parameters.
 *
 * @param recordWriters
 * 		number of senders, i.e.
 * 		{@link org.apache.flink.runtime.io.network.api.writer.RecordWriter} instances
 * @param channels
 * 		number of outgoing channels / receivers
 */
public void setUp(
		int recordWriters,
		int channels,
		int flushTimeout,
		boolean broadcastMode,
		boolean localMode,
		int senderBufferPoolSize,
		int receiverBufferPoolSize,
		Configuration config) throws Exception {
	environment = new StreamNetworkBenchmarkEnvironment<>();
	environment.setUp(
		recordWriters,
		channels,
		localMode,
		senderBufferPoolSize,
		receiverBufferPoolSize,
		config);
	writerThreads = new LongRecordWriterThread[recordWriters];
	for (int writer = 0; writer < recordWriters; writer++) {
		ResultPartitionWriter resultPartitionWriter = environment.createResultPartitionWriter(writer);
		RecordWriterBuilder recordWriterBuilder = new RecordWriterBuilder().setTimeout(flushTimeout);
		setChannelSelector(recordWriterBuilder, broadcastMode);
		writerThreads[writer] = new LongRecordWriterThread(
			recordWriterBuilder.build(resultPartitionWriter),
			broadcastMode);
		writerThreads[writer].start();
	}
	receiver = environment.createReceiver();
}
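
The private setChannelSelector(recordWriterBuilder, broadcastMode) helper is not shown on this page. A plausible reconstruction, assuming it merely installs a broadcast partitioner when broadcast mode is requested (an assumption, not the actual benchmark source):

private static void setChannelSelector(RecordWriterBuilder recordWriterBuilder, boolean broadcastMode) {
	if (broadcastMode) {
		// Assumed body: route every record to all channels in broadcast mode.
		recordWriterBuilder.setChannelSelector(new BroadcastPartitioner());
	}
}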
 
Example #28
Source File: StreamMockEnvironment.java    From flink with Apache License 2.0
public StreamMockEnvironment(
	JobID jobID,
	ExecutionAttemptID executionAttemptID,
	Configuration jobConfig,
	Configuration taskConfig,
	ExecutionConfig executionConfig,
	long memorySize,
	MockInputSplitProvider inputSplitProvider,
	int bufferSize,
	TaskStateManager taskStateManager) {

	this.jobID = jobID;
	this.executionAttemptID = executionAttemptID;

	int subtaskIndex = 0;
	this.taskInfo = new TaskInfo(
		"", /* task name */
		1, /* num key groups / max parallelism */
		subtaskIndex, /* index of this subtask */
		1, /* num subtasks */
		0 /* attempt number */);
	this.jobConfiguration = jobConfig;
	this.taskConfiguration = taskConfig;
	this.inputs = new LinkedList<InputGate>();
	this.outputs = new LinkedList<ResultPartitionWriter>();
	this.memManager = new MemoryManager(memorySize, 1);
	this.ioManager = new IOManagerAsync();
	this.taskStateManager = Preconditions.checkNotNull(taskStateManager);
	this.aggregateManager = new TestGlobalAggregateManager();
	this.inputSplitProvider = inputSplitProvider;
	this.bufferSize = bufferSize;

	this.executionConfig = executionConfig;
	this.accumulatorRegistry = new AccumulatorRegistry(jobID, getExecutionId());

	KvStateRegistry registry = new KvStateRegistry();
	this.kvStateRegistry = registry.createTaskRegistry(jobID, getJobVertexId());
}