org.apache.flink.runtime.taskmanager.TaskActions Java Examples

The following examples show how to use org.apache.flink.runtime.taskmanager.TaskActions. Each example is taken from an open-source Flink project; the source file and license are listed above the code.
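In these snippets, tests obtain a TaskActions instance either as a Mockito mock or as the do-nothing NoOpTaskActions test class, and the only interaction exercised on it is failExternally(Throwable) (see the RpcResultPartitionConsumableNotifier examples further down). The fragment below is a minimal sketch of those two patterns; it assumes Mockito on the classpath and the usual Flink package layout, and the class name TaskActionsUsageSketch is illustrative only, not part of Flink.

import org.apache.flink.runtime.taskmanager.NoOpTaskActions;
import org.apache.flink.runtime.taskmanager.TaskActions;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

public class TaskActionsUsageSketch {

	public static void main(String[] args) {
		// Do-nothing implementation: sufficient when the test never asserts on task actions.
		TaskActions noOp = new NoOpTaskActions();
		noOp.failExternally(new RuntimeException("swallowed by the no-op implementation"));

		// Mockito mock: used when the test verifies that the task was failed externally,
		// e.g. after a failed scheduleOrUpdateConsumers notification.
		TaskActions mockedActions = mock(TaskActions.class);
		RuntimeException cause = new RuntimeException("could not notify JobManager");
		mockedActions.failExternally(cause);
		verify(mockedActions).failExternally(cause);
	}
}
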
Example #1
Source File: ResultPartitionTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer} on a partition which has already finished.
 *
 * @param pipelined the result partition type to set up
 */
protected void testAddOnFinishedPartition(final ResultPartitionType pipelined)
	throws Exception {
	BufferConsumer bufferConsumer = createFilledBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	try {
		ResultPartition partition = createPartition(notifier, pipelined, true);
		partition.finish();
		reset(notifier);
		// partition.add() should fail
		partition.addBufferConsumer(bufferConsumer, 0);
		Assert.fail("exception expected");
	} catch (IllegalStateException e) {
		// expected => ignored
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
			Assert.fail("bufferConsumer not recycled");
		}
		// should not have notified either
		verify(notifier, never()).notifyPartitionConsumable(any(JobID.class), any(ResultPartitionID.class), any(TaskActions.class));
	}
}
 
Example #2
Source File: ResultPartitionTest.java    From flink with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer(BufferConsumer, int)} on a working partition.
 *
 * @param partitionType the result partition type to set up
 */
private void testAddOnPartition(final ResultPartitionType partitionType) throws Exception {
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	JobID jobId = new JobID();
	TaskActions taskActions = new NoOpTaskActions();
	ResultPartitionWriter consumableNotifyingPartitionWriter = createConsumableNotifyingResultPartitionWriter(
		partitionType,
		taskActions,
		jobId,
		notifier);
	BufferConsumer bufferConsumer = createFilledFinishedBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	try {
		// partition.add() adds the bufferConsumer without recycling it (if not spilling)
		consumableNotifyingPartitionWriter.addBufferConsumer(bufferConsumer, 0);
		assertFalse("bufferConsumer should not be recycled (still in the queue)", bufferConsumer.isRecycled());
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
		}
		// should have been notified for pipelined partitions
		if (partitionType.isPipelined()) {
			verify(notifier, times(1))
				.notifyPartitionConsumable(eq(jobId), eq(consumableNotifyingPartitionWriter.getPartitionId()), eq(taskActions));
		}
	}
}
 
Example #3
Source File: ResultPartitionTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer} on a partition which has already been released.
 *
 * @param pipelined the result partition type to set up
 */
protected void testAddOnReleasedPartition(final ResultPartitionType pipelined)
	throws Exception {
	BufferConsumer bufferConsumer = createFilledBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	try {
		ResultPartition partition = createPartition(notifier, pipelined, true);
		partition.release();
		// partition.add() silently drops the bufferConsumer but recycles it
		partition.addBufferConsumer(bufferConsumer, 0);
		assertTrue(partition.isReleased());
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
			Assert.fail("bufferConsumer not recycled");
		}
		// should not have notified either
		verify(notifier, never()).notifyPartitionConsumable(any(JobID.class), any(ResultPartitionID.class), any(TaskActions.class));
	}
}
 
Example #4
Source File: ResultPartitionTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer(BufferConsumer, int)} on a working partition.
 *
 * @param pipelined the result partition type to set up
 */
protected void testAddOnPartition(final ResultPartitionType pipelined)
	throws Exception {
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	ResultPartition partition = createPartition(notifier, pipelined, true);
	BufferConsumer bufferConsumer = createFilledBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	try {
		// partition.add() adds the bufferConsumer without recycling it (if not spilling)
		partition.addBufferConsumer(bufferConsumer, 0);
		assertFalse("bufferConsumer should not be recycled (still in the queue)", bufferConsumer.isRecycled());
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
		}
		// should have been notified for pipelined partitions
		if (pipelined.isPipelined()) {
			verify(notifier, times(1))
				.notifyPartitionConsumable(
					eq(partition.getJobId()),
					eq(partition.getPartitionId()),
					any(TaskActions.class));
		}
	}
}
 
Example #5
Source File: ResultPartitionTest.java    From Flink-CEPplus with Apache License 2.0
private static ResultPartition createPartition(
	ResultPartitionConsumableNotifier notifier,
	ResultPartitionType type,
	boolean sendScheduleOrUpdateConsumersMessage) {
	return new ResultPartition(
		"TestTask",
		mock(TaskActions.class),
		new JobID(),
		new ResultPartitionID(),
		type,
		1,
		1,
		mock(ResultPartitionManager.class),
		notifier,
		ioManager,
		sendScheduleOrUpdateConsumersMessage);
}
 
Example #6
Source File: NetworkEnvironmentTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Helper to create simple {@link ResultPartition} instance for use by a {@link Task} inside
 * {@link NetworkEnvironment#registerTask(Task)}.
 *
 * @param partitionType
 * 		the produced partition type
 * @param channels
 * 		the number of output channels
 *
 * @return instance with minimal data set and some mocks so that it is useful for {@link
 * NetworkEnvironment#registerTask(Task)}
 */
private static ResultPartition createResultPartition(
		final ResultPartitionType partitionType, final int channels) {
	return new ResultPartition(
		"TestTask-" + partitionType + ":" + channels,
		mock(TaskActions.class),
		new JobID(),
		new ResultPartitionID(),
		partitionType,
		channels,
		channels,
		mock(ResultPartitionManager.class),
		new NoOpResultPartitionConsumableNotifier(),
		mock(IOManager.class),
		false);
}
 
Example #7
Source File: SingleInputGateTest.java    From Flink-CEPplus with Apache License 2.0
private SingleInputGate createInputGate(
		int numberOfInputChannels, ResultPartitionType partitionType) {
	SingleInputGate inputGate = new SingleInputGate(
		"Test Task Name",
		new JobID(),
		new IntermediateDataSetID(),
		partitionType,
		0,
		numberOfInputChannels,
		mock(TaskActions.class),
		UnregisteredMetricGroups.createUnregisteredTaskMetricGroup().getIOMetricGroup(),
		enableCreditBasedFlowControl);

	assertEquals(partitionType, inputGate.getConsumedPartitionType());

	return inputGate;
}
 
Example #8
Source File: ResultPartitionTest.java    From flink with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer(BufferConsumer, int)} on a working partition.
 *
 * @param partitionType the result partition type to set up
 */
private void testAddOnPartition(final ResultPartitionType partitionType) throws Exception {
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	JobID jobId = new JobID();
	TaskActions taskActions = new NoOpTaskActions();
	ResultPartitionWriter consumableNotifyingPartitionWriter = createConsumableNotifyingResultPartitionWriter(
		partitionType,
		taskActions,
		jobId,
		notifier);
	BufferConsumer bufferConsumer = createFilledBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	try {
		// partition.add() adds the bufferConsumer without recycling it (if not spilling)
		consumableNotifyingPartitionWriter.addBufferConsumer(bufferConsumer, 0);
		assertFalse("bufferConsumer should not be recycled (still in the queue)", bufferConsumer.isRecycled());
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
		}
		// should have been notified for pipelined partitions
		if (partitionType.isPipelined()) {
			verify(notifier, times(1))
				.notifyPartitionConsumable(eq(jobId), eq(consumableNotifyingPartitionWriter.getPartitionId()), eq(taskActions));
		}
	}
}
 
Example #9
Source File: InputGateFairnessTest.java    From Flink-CEPplus with Apache License 2.0
@SuppressWarnings("unchecked")
public FairnessVerifyingInputGate(
		String owningTaskName,
		JobID jobId,
		IntermediateDataSetID consumedResultId,
		int consumedSubpartitionIndex,
		int numberOfInputChannels,
		TaskActions taskActions,
		TaskIOMetricGroup metrics,
		boolean isCreditBased) {

	super(owningTaskName, jobId, consumedResultId, ResultPartitionType.PIPELINED,
		consumedSubpartitionIndex,
			numberOfInputChannels, taskActions, metrics, isCreditBased);

	try {
		Field f = SingleInputGate.class.getDeclaredField("inputChannelsWithData");
		f.setAccessible(true);
		channelsWithData = (ArrayDeque<InputChannel>) f.get(this);
	}
	catch (Exception e) {
		throw new RuntimeException(e);
	}

	this.uniquenessChecker = new HashSet<>();
}
 
Example #10
Source File: ResultPartitionTest.java    From flink with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer} on a partition which has already been released.
 *
 * @param partitionType the result partition type to set up
 */
private void testAddOnReleasedPartition(final ResultPartitionType partitionType) throws Exception {
	BufferConsumer bufferConsumer = createFilledBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	JobID jobId = new JobID();
	TaskActions taskActions = new NoOpTaskActions();
	ResultPartition partition = partitionType == ResultPartitionType.BLOCKING ?
		createPartition(partitionType, fileChannelManager) : createPartition(partitionType);
	ResultPartitionWriter consumableNotifyingPartitionWriter = ConsumableNotifyingResultPartitionWriterDecorator.decorate(
		Collections.singleton(PartitionTestUtils.createPartitionDeploymentDescriptor(partitionType)),
		new ResultPartitionWriter[] {partition},
		taskActions,
		jobId,
		notifier)[0];
	try {
		partition.release();
		// partition.add() silently drops the bufferConsumer but recycles it
		consumableNotifyingPartitionWriter.addBufferConsumer(bufferConsumer, 0);
		assertTrue(partition.isReleased());
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
			Assert.fail("bufferConsumer not recycled");
		}
		// should not have notified either
		verify(notifier, never()).notifyPartitionConsumable(eq(jobId), eq(partition.getPartitionId()), eq(taskActions));
	}
}
 
Example #11
Source File: RpcResultPartitionConsumableNotifier.java    From flink with Apache License 2.0
@Override
public void notifyPartitionConsumable(JobID jobId, ResultPartitionID partitionId, final TaskActions taskActions) {
	CompletableFuture<Acknowledge> acknowledgeFuture = jobMasterGateway.scheduleOrUpdateConsumers(partitionId, timeout);

	acknowledgeFuture.whenCompleteAsync(
		(Acknowledge ack, Throwable throwable) -> {
			if (throwable != null) {
				LOG.error("Could not schedule or update consumers at the JobManager.", throwable);

				taskActions.failExternally(new RuntimeException("Could not notify JobManager to schedule or update consumers.", throwable));
			}
		},
		executor);
}
 
Example #12
Source File: ResultPartitionTest.java    From flink with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer} on a partition which has already finished.
 *
 * @param partitionType the result partition type to set up
 */
private void testAddOnFinishedPartition(final ResultPartitionType partitionType) throws Exception {
	BufferConsumer bufferConsumer = createFilledBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	JobID jobId = new JobID();
	TaskActions taskActions = new NoOpTaskActions();
	ResultPartitionWriter consumableNotifyingPartitionWriter = createConsumableNotifyingResultPartitionWriter(
		partitionType,
		taskActions,
		jobId,
		notifier);
	try {
		consumableNotifyingPartitionWriter.finish();
		reset(notifier);
		// partition.add() should fail
		consumableNotifyingPartitionWriter.addBufferConsumer(bufferConsumer, 0);
		Assert.fail("exception expected");
	} catch (IllegalStateException e) {
		// expected => ignored
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
			Assert.fail("bufferConsumer not recycled");
		}
		// should not have notified either
		verify(notifier, never()).notifyPartitionConsumable(
			eq(jobId),
			eq(consumableNotifyingPartitionWriter.getPartitionId()),
			eq(taskActions));
	}
}
 
Example #13
Source File: SingleInputGate.java    From Flink-CEPplus with Apache License 2.0
public SingleInputGate(
	String owningTaskName,
	JobID jobId,
	IntermediateDataSetID consumedResultId,
	final ResultPartitionType consumedPartitionType,
	int consumedSubpartitionIndex,
	int numberOfInputChannels,
	TaskActions taskActions,
	TaskIOMetricGroup metrics,
	boolean isCreditBased) {

	this.owningTaskName = checkNotNull(owningTaskName);
	this.jobId = checkNotNull(jobId);

	this.consumedResultId = checkNotNull(consumedResultId);
	this.consumedPartitionType = checkNotNull(consumedPartitionType);

	checkArgument(consumedSubpartitionIndex >= 0);
	this.consumedSubpartitionIndex = consumedSubpartitionIndex;

	checkArgument(numberOfInputChannels > 0);
	this.numberOfInputChannels = numberOfInputChannels;

	this.inputChannels = new HashMap<>(numberOfInputChannels);
	this.channelsWithEndOfPartitionEvents = new BitSet(numberOfInputChannels);
	this.enqueuedInputChannelsWithData = new BitSet(numberOfInputChannels);

	this.taskActions = checkNotNull(taskActions);
	this.isCreditBased = isCreditBased;
}
 
Example #14
Source File: ResultPartitionTest.java    From flink with Apache License 2.0
private ResultPartitionWriter createConsumableNotifyingResultPartitionWriter(
		ResultPartitionType partitionType,
		TaskActions taskActions,
		JobID jobId,
		ResultPartitionConsumableNotifier notifier) {
	ResultPartition partition = partitionType == ResultPartitionType.BLOCKING ?
		createPartition(partitionType, fileChannelManager) : createPartition(partitionType);
	return ConsumableNotifyingResultPartitionWriterDecorator.decorate(
		Collections.singleton(PartitionTestUtils.createPartitionDeploymentDescriptor(partitionType)),
		new ResultPartitionWriter[] {partition},
		taskActions,
		jobId,
		notifier)[0];
}
 
Example #15
Source File: ResultPartitionTest.java    From flink with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer} on a partition which has already finished.
 *
 * @param partitionType the result partition type to set up
 */
private void testAddOnFinishedPartition(final ResultPartitionType partitionType) throws Exception {
	BufferConsumer bufferConsumer = createFilledFinishedBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	JobID jobId = new JobID();
	TaskActions taskActions = new NoOpTaskActions();
	ResultPartitionWriter consumableNotifyingPartitionWriter = createConsumableNotifyingResultPartitionWriter(
		partitionType,
		taskActions,
		jobId,
		notifier);
	try {
		consumableNotifyingPartitionWriter.finish();
		reset(notifier);
		// partition.add() should fail
		consumableNotifyingPartitionWriter.addBufferConsumer(bufferConsumer, 0);
		Assert.fail("exception expected");
	} catch (IllegalStateException e) {
		// expected => ignored
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
			Assert.fail("bufferConsumer not recycled");
		}
		// should not have notified either
		verify(notifier, never()).notifyPartitionConsumable(
			eq(jobId),
			eq(consumableNotifyingPartitionWriter.getPartitionId()),
			eq(taskActions));
	}
}
 
Example #16
Source File: ResultPartitionTest.java    From flink with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer} on a partition which has already been released.
 *
 * @param partitionType the result partition type to set up
 */
private void testAddOnReleasedPartition(final ResultPartitionType partitionType) throws Exception {
	BufferConsumer bufferConsumer = createFilledFinishedBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	JobID jobId = new JobID();
	TaskActions taskActions = new NoOpTaskActions();
	ResultPartition partition = partitionType == ResultPartitionType.BLOCKING ?
		createPartition(partitionType, fileChannelManager) : createPartition(partitionType);
	ResultPartitionWriter consumableNotifyingPartitionWriter = ConsumableNotifyingResultPartitionWriterDecorator.decorate(
		Collections.singleton(PartitionTestUtils.createPartitionDeploymentDescriptor(partitionType)),
		new ResultPartitionWriter[] {partition},
		taskActions,
		jobId,
		notifier)[0];
	try {
		partition.release();
		// partition.add() silently drops the bufferConsumer but recycles it
		consumableNotifyingPartitionWriter.addBufferConsumer(bufferConsumer, 0);
		assertTrue(partition.isReleased());
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
			Assert.fail("bufferConsumer not recycled");
		}
		// should not have notified either
		verify(notifier, never()).notifyPartitionConsumable(eq(jobId), eq(partition.getPartitionId()), eq(taskActions));
	}
}
 
Example #17
Source File: InputGateConcurrentTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testConsumptionWithLocalChannels() throws Exception {
	final int numberOfChannels = 11;
	final int buffersPerChannel = 1000;

	final ResultPartition resultPartition = mock(ResultPartition.class);

	final PipelinedSubpartition[] partitions = new PipelinedSubpartition[numberOfChannels];
	final Source[] sources = new Source[numberOfChannels];

	final ResultPartitionManager resultPartitionManager = createResultPartitionManager(partitions);

	final SingleInputGate gate = new SingleInputGate(
			"Test Task Name",
			new JobID(),
			new IntermediateDataSetID(), ResultPartitionType.PIPELINED,
			0, numberOfChannels,
			mock(TaskActions.class),
			UnregisteredMetricGroups.createUnregisteredTaskMetricGroup().getIOMetricGroup(),
			true);

	for (int i = 0; i < numberOfChannels; i++) {
		LocalInputChannel channel = new LocalInputChannel(gate, i, new ResultPartitionID(),
				resultPartitionManager, mock(TaskEventDispatcher.class), UnregisteredMetricGroups.createUnregisteredTaskMetricGroup().getIOMetricGroup());
		gate.setInputChannel(new IntermediateResultPartitionID(), channel);

		partitions[i] = new PipelinedSubpartition(0, resultPartition);
		sources[i] = new PipelinedSubpartitionSource(partitions[i]);
	}

	ProducerThread producer = new ProducerThread(sources, numberOfChannels * buffersPerChannel, 4, 10);
	ConsumerThread consumer = new ConsumerThread(gate, numberOfChannels * buffersPerChannel);
	producer.start();
	consumer.start();

	// the 'sync()' call checks for exceptions and failed assertions
	producer.sync();
	consumer.sync();
}
 
Example #18
Source File: InputGateConcurrentTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testConsumptionWithRemoteChannels() throws Exception {
	final int numberOfChannels = 11;
	final int buffersPerChannel = 1000;

	final ConnectionManager connManager = createDummyConnectionManager();
	final Source[] sources = new Source[numberOfChannels];

	final SingleInputGate gate = new SingleInputGate(
			"Test Task Name",
			new JobID(),
			new IntermediateDataSetID(), ResultPartitionType.PIPELINED,
			0,
			numberOfChannels,
			mock(TaskActions.class),
			UnregisteredMetricGroups.createUnregisteredTaskMetricGroup().getIOMetricGroup(),
			true);

	for (int i = 0; i < numberOfChannels; i++) {
		RemoteInputChannel channel = new RemoteInputChannel(
				gate, i, new ResultPartitionID(), mock(ConnectionID.class),
				connManager, 0, 0, UnregisteredMetricGroups.createUnregisteredTaskMetricGroup().getIOMetricGroup());
		gate.setInputChannel(new IntermediateResultPartitionID(), channel);

		sources[i] = new RemoteChannelSource(channel);
	}

	ProducerThread producer = new ProducerThread(sources, numberOfChannels * buffersPerChannel, 4, 10);
	ConsumerThread consumer = new ConsumerThread(gate, numberOfChannels * buffersPerChannel);
	producer.start();
	consumer.start();

	// the 'sync()' call checks for exceptions and failed assertions
	producer.sync();
	consumer.sync();
}
 
Example #19
Source File: RpcResultPartitionConsumableNotifier.java    From Flink-CEPplus with Apache License 2.0
@Override
public void notifyPartitionConsumable(JobID jobId, ResultPartitionID partitionId, final TaskActions taskActions) {
	CompletableFuture<Acknowledge> acknowledgeFuture = jobMasterGateway.scheduleOrUpdateConsumers(partitionId, timeout);

	acknowledgeFuture.whenCompleteAsync(
		(Acknowledge ack, Throwable throwable) -> {
			if (throwable != null) {
				LOG.error("Could not schedule or update consumers at the JobManager.", throwable);

				taskActions.failExternally(new RuntimeException("Could not notify JobManager to schedule or update consumers.", throwable));
			}
		},
		executor);
}
 
Example #20
Source File: PartitionRequestClientHandlerTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Creates and returns the single input gate for credit-based testing.
 *
 * @return The newly created single input gate.
 */
static SingleInputGate createSingleInputGate() {
	return new SingleInputGate(
		"InputGate",
		new JobID(),
		new IntermediateDataSetID(),
		ResultPartitionType.PIPELINED,
		0,
		1,
		mock(TaskActions.class),
		UnregisteredMetricGroups.createUnregisteredTaskMetricGroup().getIOMetricGroup(),
		true);
}
 
Example #23
Source File: RemoteInputChannelTest.java    From Flink-CEPplus with Apache License 2.0 5 votes vote down vote up
private SingleInputGate createSingleInputGate() {
	return new SingleInputGate(
		"InputGate",
		new JobID(),
		new IntermediateDataSetID(),
		ResultPartitionType.PIPELINED,
		0,
		1,
		mock(TaskActions.class),
		UnregisteredMetricGroups.createUnregisteredTaskMetricGroup().getIOMetricGroup(),
		true);
}
 
Example #22
Source File: NetworkEnvironmentTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Helper to create spy of a {@link SingleInputGate} for use by a {@link Task} inside
 * {@link NetworkEnvironment#registerTask(Task)}.
 *
 * @param partitionType
 * 		the consumed partition type
 * @param channels
 * 		the number of input channels
 *
 * @return input gate with some fake settings
 */
private SingleInputGate createSingleInputGate(
		final ResultPartitionType partitionType, final int channels) {
	return spy(new SingleInputGate(
		"Test Task Name",
		new JobID(),
		new IntermediateDataSetID(),
		partitionType,
		0,
		channels,
		mock(TaskActions.class),
		UnregisteredMetricGroups.createUnregisteredTaskMetricGroup().getIOMetricGroup(),
		enableCreditBasedFlowControl));
}
 
Example #23
Source File: SpillableSubpartitionTest.java    From Flink-CEPplus with Apache License 2.0
ResultPartitionWithCountDownLatch(
		String owningTaskName,
		TaskActions taskActions,
		JobID jobId,
		ResultPartitionID partitionId,
		ResultPartitionType partitionType,
		int numberOfSubpartitions,
		int numTargetKeyGroups,
		ResultPartitionManager partitionManager,
		ResultPartitionConsumableNotifier partitionConsumableNotifier,
		IOManager ioManager,
		boolean sendScheduleOrUpdateConsumersMessage,
		CountDownLatch blockLatch,
		CountDownLatch doneLatch) {
	super(
		owningTaskName,
		taskActions,
		jobId,
		partitionId,
		partitionType,
		numberOfSubpartitions,
		numTargetKeyGroups,
		partitionManager,
		partitionConsumableNotifier,
		ioManager,
		sendScheduleOrUpdateConsumersMessage);
	this.blockLatch = Preconditions.checkNotNull(blockLatch);
	this.doneLatch = Preconditions.checkNotNull(doneLatch);
}
 
Example #24
Source File: NoOpResultPartitionConsumableNotifier.java    From flink with Apache License 2.0
@Override
public void notifyPartitionConsumable(JobID jobId, ResultPartitionID partitionId, TaskActions taskActions) {}
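The no-op implementation above suggests that notifyPartitionConsumable(JobID, ResultPartitionID, TaskActions) is the interface's only callback. For tests that want to assert on notifications without Mockito, a small recording implementation is enough. The class below is a hypothetical sketch, not part of Flink; the import paths assume the standard Flink runtime package layout.

import java.util.ArrayList;
import java.util.List;

import org.apache.flink.api.common.JobID;
import org.apache.flink.runtime.io.network.partition.ResultPartitionConsumableNotifier;
import org.apache.flink.runtime.io.network.partition.ResultPartitionID;
import org.apache.flink.runtime.taskmanager.TaskActions;

/**
 * Records which partitions were reported consumable instead of notifying the JobMaster.
 */
public class RecordingConsumableNotifier implements ResultPartitionConsumableNotifier {

	private final List<ResultPartitionID> notifiedPartitions = new ArrayList<>();

	@Override
	public void notifyPartitionConsumable(JobID jobId, ResultPartitionID partitionId, TaskActions taskActions) {
		// Only record the call; a real notifier would schedule or update the consumers.
		notifiedPartitions.add(partitionId);
	}

	public List<ResultPartitionID> getNotifiedPartitions() {
		return notifiedPartitions;
	}
}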
 
Example #25
Source File: SingleInputGate.java    From Flink-CEPplus with Apache License 2.0
/**
 * Creates an input gate and all of its input channels.
 */
public static SingleInputGate create(
	String owningTaskName,
	JobID jobId,
	ExecutionAttemptID executionId,
	InputGateDeploymentDescriptor igdd,
	NetworkEnvironment networkEnvironment,
	TaskActions taskActions,
	TaskIOMetricGroup metrics) {

	final IntermediateDataSetID consumedResultId = checkNotNull(igdd.getConsumedResultId());
	final ResultPartitionType consumedPartitionType = checkNotNull(igdd.getConsumedPartitionType());

	final int consumedSubpartitionIndex = igdd.getConsumedSubpartitionIndex();
	checkArgument(consumedSubpartitionIndex >= 0);

	final InputChannelDeploymentDescriptor[] icdd = checkNotNull(igdd.getInputChannelDeploymentDescriptors());

	final SingleInputGate inputGate = new SingleInputGate(
		owningTaskName, jobId, consumedResultId, consumedPartitionType, consumedSubpartitionIndex,
		icdd.length, taskActions, metrics, networkEnvironment.isCreditBased());

	// Create the input channels. There is one input channel for each consumed partition.
	final InputChannel[] inputChannels = new InputChannel[icdd.length];

	int numLocalChannels = 0;
	int numRemoteChannels = 0;
	int numUnknownChannels = 0;

	for (int i = 0; i < inputChannels.length; i++) {
		final ResultPartitionID partitionId = icdd[i].getConsumedPartitionId();
		final ResultPartitionLocation partitionLocation = icdd[i].getConsumedPartitionLocation();

		if (partitionLocation.isLocal()) {
			inputChannels[i] = new LocalInputChannel(inputGate, i, partitionId,
				networkEnvironment.getResultPartitionManager(),
				networkEnvironment.getTaskEventDispatcher(),
				networkEnvironment.getPartitionRequestInitialBackoff(),
				networkEnvironment.getPartitionRequestMaxBackoff(),
				metrics
			);

			numLocalChannels++;
		}
		else if (partitionLocation.isRemote()) {
			inputChannels[i] = new RemoteInputChannel(inputGate, i, partitionId,
				partitionLocation.getConnectionId(),
				networkEnvironment.getConnectionManager(),
				networkEnvironment.getPartitionRequestInitialBackoff(),
				networkEnvironment.getPartitionRequestMaxBackoff(),
				metrics
			);

			numRemoteChannels++;
		}
		else if (partitionLocation.isUnknown()) {
			inputChannels[i] = new UnknownInputChannel(inputGate, i, partitionId,
				networkEnvironment.getResultPartitionManager(),
				networkEnvironment.getTaskEventDispatcher(),
				networkEnvironment.getConnectionManager(),
				networkEnvironment.getPartitionRequestInitialBackoff(),
				networkEnvironment.getPartitionRequestMaxBackoff(),
				metrics
			);

			numUnknownChannels++;
		}
		else {
			throw new IllegalStateException("Unexpected partition location.");
		}

		inputGate.setInputChannel(partitionId.getPartitionId(), inputChannels[i]);
	}

	LOG.debug("{}: Created {} input channels (local: {}, remote: {}, unknown: {}).",
		owningTaskName,
		inputChannels.length,
		numLocalChannels,
		numRemoteChannels,
		numUnknownChannels);

	return inputGate;
}
 
Example #26
Source File: ResultPartition.java    From Flink-CEPplus with Apache License 2.0
public ResultPartition(
	String owningTaskName,
	TaskActions taskActions, // actions on the owning task
	JobID jobId,
	ResultPartitionID partitionId,
	ResultPartitionType partitionType,
	int numberOfSubpartitions,
	int numTargetKeyGroups,
	ResultPartitionManager partitionManager,
	ResultPartitionConsumableNotifier partitionConsumableNotifier,
	IOManager ioManager,
	boolean sendScheduleOrUpdateConsumersMessage) {

	this.owningTaskName = checkNotNull(owningTaskName);
	this.taskActions = checkNotNull(taskActions);
	this.jobId = checkNotNull(jobId);
	this.partitionId = checkNotNull(partitionId);
	this.partitionType = checkNotNull(partitionType);
	this.subpartitions = new ResultSubpartition[numberOfSubpartitions];
	this.numTargetKeyGroups = numTargetKeyGroups;
	this.partitionManager = checkNotNull(partitionManager);
	this.partitionConsumableNotifier = checkNotNull(partitionConsumableNotifier);
	this.sendScheduleOrUpdateConsumersMessage = sendScheduleOrUpdateConsumersMessage;

	// Create the subpartitions.
	switch (partitionType) {
		case BLOCKING:
			for (int i = 0; i < subpartitions.length; i++) {
				subpartitions[i] = new SpillableSubpartition(i, this, ioManager);
			}

			break;

		case PIPELINED:
		case PIPELINED_BOUNDED:
			for (int i = 0; i < subpartitions.length; i++) {
				subpartitions[i] = new PipelinedSubpartition(i, this);
			}

			break;

		default:
			throw new IllegalArgumentException("Unsupported result partition type.");
	}

	// Initially, partitions should be consumed once before release.
	pin();

	LOG.debug("{}: Initialized {}", owningTaskName, this);
}
 
Example #27
Source File: TestSingleInputGate.java    From Flink-CEPplus with Apache License 2.0
public TestSingleInputGate(int numberOfInputChannels, boolean initialize) {
	checkArgument(numberOfInputChannels >= 1);

	SingleInputGate realGate = new SingleInputGate(
		"Test Task Name",
		new JobID(),
		new IntermediateDataSetID(),
		ResultPartitionType.PIPELINED,
		0,
		numberOfInputChannels,
		mock(TaskActions.class),
		UnregisteredMetricGroups.createUnregisteredTaskMetricGroup().getIOMetricGroup(),
		true);

	this.inputGate = spy(realGate);

	// Notify about late registrations (added for DataSinkTaskTest#testUnionDataSinkTask).
	// After merging registerInputOutput and invoke, we have to make sure that the test
	// notifications happen at the expected time. In real programs, this is guaranteed by
	// the instantiation and request partition life cycle.
	try {
		Field f = realGate.getClass().getDeclaredField("inputChannelsWithData");
		f.setAccessible(true);
		final ArrayDeque<InputChannel> notifications = (ArrayDeque<InputChannel>) f.get(realGate);

		doAnswer(new Answer<Void>() {
			@Override
			public Void answer(InvocationOnMock invocation) throws Throwable {
				invocation.callRealMethod();

				synchronized (notifications) {
					if (!notifications.isEmpty()) {
						InputGateListener listener = (InputGateListener) invocation.getArguments()[0];
						listener.notifyInputGateNonEmpty(inputGate);
					}
				}

				return null;
			}
		}).when(inputGate).registerListener(any(InputGateListener.class));
	} catch (Exception e) {
		throw new RuntimeException(e);
	}

	this.inputChannels = new TestInputChannel[numberOfInputChannels];

	if (initialize) {
		for (int i = 0; i < numberOfInputChannels; i++) {
			inputChannels[i] = new TestInputChannel(inputGate, i);
			inputGate.setInputChannel(new IntermediateResultPartitionID(), inputChannels[i]);
		}
	}
}