org.apache.flink.runtime.io.network.partition.ResultSubpartitionView Java Examples

The following examples show how to use org.apache.flink.runtime.io.network.partition.ResultSubpartitionView. They are drawn from the Flink project and the Flink-CEPplus fork (both Apache License 2.0) and cover creating, consuming, mocking, and releasing subpartition views; the source file is noted above each example.
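Taken together, the examples exercise a fairly small consumer-side contract: the producer notifies the consumer of data availability, the consumer polls the view with getNextBuffer(), a null result on an already released view means consumption must be cancelled, and releaseAllResources() frees the view. The sketch below condenses that pattern into a single hypothetical helper (pollOnce is not a Flink method); it uses only calls that appear in the examples on this page and mirrors the poll logic of LocalInputChannel#getNextBuffer() shown in Examples #23 and #27.

// Hypothetical helper, not Flink API: one consumer-side poll of a subpartition view.
// A null buffer on a released view signals that the producer side is gone; a null
// buffer on a live view just means nothing is available yet, and the consumer should
// wait for the next BufferAvailabilityListener notification before polling again.
static Optional<Buffer> pollOnce(ResultSubpartitionView view) throws IOException, InterruptedException {
	BufferAndBacklog next = view.getNextBuffer();
	if (next == null) {
		if (view.isReleased()) {
			throw new CancelTaskException("Consumed partition " + view + " has been released.");
		}
		return Optional.empty();
	}
	return Optional.of(next.buffer());
}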
Example #1
Source File: LocalInputChannelTest.java    From flink with Apache License 2.0
/**
 * Tests that reading from a channel after the partition has been
 * released is handled gracefully and does not lead to NPEs.
 */
@Test
public void testGetNextAfterPartitionReleased() throws Exception {
	ResultSubpartitionView subpartitionView = createResultSubpartitionView(false);
	TestingResultPartitionManager partitionManager = new TestingResultPartitionManager(subpartitionView);
	LocalInputChannel channel = createLocalInputChannel(new SingleInputGateBuilder().build(), partitionManager);

	channel.requestSubpartition(0);
	assertFalse(channel.getNextBuffer().isPresent());

	// release the subpartition view
	subpartitionView.releaseAllResources();

	try {
		channel.getNextBuffer();
		fail("Did not throw expected CancelTaskException");
	} catch (CancelTaskException ignored) {
	}

	channel.releaseAllResources();
	assertFalse(channel.getNextBuffer().isPresent());
}
 
Example #2
Source File: PartitionRequestQueueTest.java    From flink with Apache License 2.0
@Test
public void testProducerFailedException() throws Exception {
	PartitionRequestQueue queue = new PartitionRequestQueue();

	ResultSubpartitionView view = new ReleasedResultSubpartitionView();

	ResultPartitionProvider partitionProvider =
		(partitionId, index, availabilityListener) -> view;

	EmbeddedChannel ch = new EmbeddedChannel(queue);

	CreditBasedSequenceNumberingViewReader seqView = new CreditBasedSequenceNumberingViewReader(new InputChannelID(), 2, queue);
	seqView.requestSubpartitionView(partitionProvider, new ResultPartitionID(), 0);
	// Add an available buffer to trigger enqueuing the erroneous view
	seqView.notifyDataAvailable();

	ch.runPendingTasks();

	// Read the enqueued msg
	Object msg = ch.readOutbound();

	assertEquals(msg.getClass(), NettyMessage.ErrorResponse.class);

	NettyMessage.ErrorResponse err = (NettyMessage.ErrorResponse) msg;
	assertTrue(err.cause instanceof CancelTaskException);
}
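ReleasedResultSubpartitionView is a small test helper defined elsewhere in PartitionRequestQueueTest and not shown on this page. A hypothetical stand-in can be stubbed with Mockito in the style of Examples #4 to #6; whether it reproduces the exact behaviour of the real helper depends on that implementation, so treat the method below (and its failure message) as illustrative only.

// Hypothetical Mockito-based stand-in for the ReleasedResultSubpartitionView helper:
// a view that reports itself as released and exposes a failure cause. Feeding such a
// view to the PartitionRequestQueue is what the test above observes as an ErrorResponse
// whose cause is a CancelTaskException.
private static ResultSubpartitionView createReleasedViewStub() {
	ResultSubpartitionView view = mock(ResultSubpartitionView.class);
	when(view.isReleased()).thenReturn(true);
	when(view.getFailureCause()).thenReturn(new Exception("Expected test exception"));
	return view;
}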
 
Example #3
Source File: PartitionRequestQueueTest.java    From flink with Apache License 2.0
private void testBufferWriting(ResultSubpartitionView view) throws IOException {
	// setup
	ResultPartitionProvider partitionProvider =
		(partitionId, index, availabilityListener) -> view;

	final InputChannelID receiverId = new InputChannelID();
	final PartitionRequestQueue queue = new PartitionRequestQueue();
	final SequenceNumberingViewReader reader = new SequenceNumberingViewReader(receiverId, queue);
	final EmbeddedChannel channel = new EmbeddedChannel(queue);

	reader.requestSubpartitionView(partitionProvider, new ResultPartitionID(), 0);

	// notify about buffer availability and encode one buffer
	reader.notifyDataAvailable();

	channel.runPendingTasks();

	Object read = channel.readOutbound();
	assertNotNull(read);
	if (read instanceof NettyMessage.ErrorResponse) {
		((NettyMessage.ErrorResponse) read).cause.printStackTrace();
	}
	assertThat(read, instanceOf(NettyMessage.BufferResponse.class));
	read = channel.readOutbound();
	assertNull(read);
}
 
Example #4
Source File: LocalInputChannelTest.java    From flink with Apache License 2.0
@Test(expected = CancelTaskException.class)
public void testProducerFailedException() throws Exception {
	ResultSubpartitionView view = mock(ResultSubpartitionView.class);
	when(view.isReleased()).thenReturn(true);
	when(view.getFailureCause()).thenReturn(new Exception("Expected test exception"));

	ResultPartitionManager partitionManager = mock(ResultPartitionManager.class);
	when(partitionManager
			.createSubpartitionView(any(ResultPartitionID.class), anyInt(), any(BufferAvailabilityListener.class)))
			.thenReturn(view);

	SingleInputGate inputGate = mock(SingleInputGate.class);
	BufferProvider bufferProvider = mock(BufferProvider.class);
	when(inputGate.getBufferProvider()).thenReturn(bufferProvider);

	LocalInputChannel ch = createLocalInputChannel(inputGate, partitionManager);

	ch.requestSubpartition(0);

	// Should throw an instance of CancelTaskException.
	ch.getNextBuffer();
}
 
Example #5
Source File: LocalInputChannelTest.java    From Flink-CEPplus with Apache License 2.0
@Test(expected = CancelTaskException.class)
public void testProducerFailedException() throws Exception {
	ResultSubpartitionView view = mock(ResultSubpartitionView.class);
	when(view.isReleased()).thenReturn(true);
	when(view.getFailureCause()).thenReturn(new Exception("Expected test exception"));

	ResultPartitionManager partitionManager = mock(ResultPartitionManager.class);
	when(partitionManager
			.createSubpartitionView(any(ResultPartitionID.class), anyInt(), any(BufferAvailabilityListener.class)))
			.thenReturn(view);

	SingleInputGate inputGate = mock(SingleInputGate.class);
	BufferProvider bufferProvider = mock(BufferProvider.class);
	when(inputGate.getBufferProvider()).thenReturn(bufferProvider);

	LocalInputChannel ch = createLocalInputChannel(
			inputGate, partitionManager, new Tuple2<>(0, 0));

	ch.requestSubpartition(0);

	// Should throw an instance of CancelTaskException.
	ch.getNextBuffer();
}
 
Example #6
Source File: LocalInputChannelTest.java    From flink with Apache License 2.0
@Test(expected = CancelTaskException.class)
public void testProducerFailedException() throws Exception {
	ResultSubpartitionView view = mock(ResultSubpartitionView.class);
	when(view.isReleased()).thenReturn(true);
	when(view.getFailureCause()).thenReturn(new Exception("Expected test exception"));

	ResultPartitionManager partitionManager = mock(ResultPartitionManager.class);
	when(partitionManager
			.createSubpartitionView(any(ResultPartitionID.class), anyInt(), any(BufferAvailabilityListener.class)))
			.thenReturn(view);

	SingleInputGate inputGate = mock(SingleInputGate.class);
	BufferProvider bufferProvider = mock(BufferProvider.class);
	when(inputGate.getBufferProvider()).thenReturn(bufferProvider);

	LocalInputChannel ch = createLocalInputChannel(inputGate, partitionManager);

	ch.requestSubpartition(0);

	// Should throw an instance of CancelTaskException.
	ch.getNextBuffer();
}
 
Example #7
Source File: PartitionRequestQueueTest.java    From flink with Apache License 2.0
@Test
public void testProducerFailedException() throws Exception {
	PartitionRequestQueue queue = new PartitionRequestQueue();

	ResultSubpartitionView view = new ReleasedResultSubpartitionView();

	ResultPartitionProvider partitionProvider =
		(partitionId, index, availabilityListener) -> view;

	EmbeddedChannel ch = new EmbeddedChannel(queue);

	CreditBasedSequenceNumberingViewReader seqView = new CreditBasedSequenceNumberingViewReader(new InputChannelID(), 2, queue);
	seqView.requestSubpartitionView(partitionProvider, new ResultPartitionID(), 0);
	// Add an available buffer to trigger enqueuing the erroneous view
	seqView.notifyDataAvailable();

	ch.runPendingTasks();

	// Read the enqueued msg
	Object msg = ch.readOutbound();

	assertEquals(msg.getClass(), NettyMessage.ErrorResponse.class);

	NettyMessage.ErrorResponse err = (NettyMessage.ErrorResponse) msg;
	assertTrue(err.cause instanceof CancelTaskException);
}
 
Example #8
Source File: PartitionRequestQueueTest.java    From Flink-CEPplus with Apache License 2.0
private void testBufferWriting(ResultSubpartitionView view) throws IOException {
	// setup
	ResultPartitionProvider partitionProvider =
		(partitionId, index, availabilityListener) -> view;

	final InputChannelID receiverId = new InputChannelID();
	final PartitionRequestQueue queue = new PartitionRequestQueue();
	final SequenceNumberingViewReader reader = new SequenceNumberingViewReader(receiverId, queue);
	final EmbeddedChannel channel = new EmbeddedChannel(queue);

	reader.requestSubpartitionView(partitionProvider, new ResultPartitionID(), 0);

	// notify about buffer availability and encode one buffer
	reader.notifyDataAvailable();

	channel.runPendingTasks();

	Object read = channel.readOutbound();
	assertNotNull(read);
	if (read instanceof NettyMessage.ErrorResponse) {
		((NettyMessage.ErrorResponse) read).cause.printStackTrace();
	}
	assertThat(read, instanceOf(NettyMessage.BufferResponse.class));
	read = channel.readOutbound();
	assertNull(read);
}
 
Example #9
Source File: PartitionRequestQueueTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testProducerFailedException() throws Exception {
	PartitionRequestQueue queue = new PartitionRequestQueue();

	ResultSubpartitionView view = new ReleasedResultSubpartitionView();

	ResultPartitionProvider partitionProvider =
		(partitionId, index, availabilityListener) -> view;

	EmbeddedChannel ch = new EmbeddedChannel(queue);

	CreditBasedSequenceNumberingViewReader seqView = new CreditBasedSequenceNumberingViewReader(new InputChannelID(), 2, queue);
	seqView.requestSubpartitionView(partitionProvider, new ResultPartitionID(), 0);
	// Add an available buffer to trigger enqueuing the erroneous view
	seqView.notifyDataAvailable();

	ch.runPendingTasks();

	// Read the enqueued msg
	Object msg = ch.readOutbound();

	assertEquals(msg.getClass(), NettyMessage.ErrorResponse.class);

	NettyMessage.ErrorResponse err = (NettyMessage.ErrorResponse) msg;
	assertTrue(err.cause instanceof CancelTaskException);
}
 
Example #10
Source File: RecordWriterDelegateTest.java    From flink with Apache License 2.0
private void verifyAvailability(RecordWriterDelegate writerDelegate) throws Exception {
	// writer is available at the beginning
	assertTrue(writerDelegate.isAvailable());
	assertTrue(writerDelegate.getAvailableFuture().isDone());

	// request one buffer from the local pool to make it unavailable
	RecordWriter recordWriter = writerDelegate.getRecordWriter(0);
	final BufferBuilder bufferBuilder = checkNotNull(recordWriter.getBufferBuilder(0));
	assertFalse(writerDelegate.isAvailable());
	CompletableFuture future = writerDelegate.getAvailableFuture();
	assertFalse(future.isDone());

	// recycle the buffer to make the local pool available again
	BufferBuilderTestUtils.fillBufferBuilder(bufferBuilder, 1).finish();
	ResultSubpartitionView readView = recordWriter.getTargetPartition().getSubpartition(0).createReadView(new NoOpBufferAvailablityListener());
	Buffer buffer = readView.getNextBuffer().buffer();

	buffer.recycleBuffer();
	assertTrue(future.isDone());
	assertTrue(writerDelegate.isAvailable());
	assertTrue(writerDelegate.getAvailableFuture().isDone());
}
 
Example #11
Source File: LocalInputChannel.java    From flink with Apache License 2.0
private ResultSubpartitionView checkAndWaitForSubpartitionView() {
	// Synchronizing on the request lock means this blocks until the asynchronous request
	// for the partition view has completed. By then, the subpartition view is visible
	// or the channel has been released.
	synchronized (requestLock) {
		checkState(!isReleased, "released");
		checkState(subpartitionView != null, "Queried for a buffer before requesting the subpartition.");
		return subpartitionView;
	}
}
 
Example #12
Source File: PartitionRequestQueueTest.java    From flink with Apache License 2.0
private void testBufferWriting(ResultSubpartitionView view) throws IOException {
	// setup
	ResultPartitionProvider partitionProvider =
		(partitionId, index, availabilityListener) -> view;

	final InputChannelID receiverId = new InputChannelID();
	final PartitionRequestQueue queue = new PartitionRequestQueue();
	final CreditBasedSequenceNumberingViewReader reader = new CreditBasedSequenceNumberingViewReader(
		receiverId,
		Integer.MAX_VALUE,
		queue);
	final EmbeddedChannel channel = new EmbeddedChannel(queue);

	reader.requestSubpartitionView(partitionProvider, new ResultPartitionID(), 0);

	// notify about buffer availability and encode one buffer
	reader.notifyDataAvailable();

	channel.runPendingTasks();

	Object read = channel.readOutbound();
	assertNotNull(read);
	if (read instanceof NettyMessage.ErrorResponse) {
		((NettyMessage.ErrorResponse) read).cause.printStackTrace();
	}
	assertThat(read, instanceOf(NettyMessage.BufferResponse.class));
	read = channel.readOutbound();
	assertNull(read);
}
 
Example #13
Source File: LocalInputChannel.java    From flink with Apache License 2.0
@Override
public int unsynchronizedGetNumberOfQueuedBuffers() {
	ResultSubpartitionView view = subpartitionView;

	if (view != null) {
		return view.unsynchronizedGetNumberOfQueuedBuffers();
	}

	return 0;
}
 
Example #14
Source File: LocalInputChannel.java    From flink with Apache License 2.0
/**
 * Releases the partition reader.
 */
@Override
void releaseAllResources() throws IOException {
	if (!isReleased) {
		isReleased = true;

		ResultSubpartitionView view = subpartitionView;
		if (view != null) {
			view.releaseAllResources();
			subpartitionView = null;
		}
	}
}
 
Example #15
Source File: LocalInputChannelTest.java    From flink with Apache License 2.0
/**
 * Tests that reading from a channel after the partition has been
 * released is handled gracefully and does not lead to NPEs.
 */
@Test
public void testGetNextAfterPartitionReleased() throws Exception {
	ResultSubpartitionView reader = mock(ResultSubpartitionView.class);
	SingleInputGate gate = mock(SingleInputGate.class);
	ResultPartitionManager partitionManager = mock(ResultPartitionManager.class);

	when(partitionManager.createSubpartitionView(
		any(ResultPartitionID.class),
		anyInt(),
		any(BufferAvailabilityListener.class))).thenReturn(reader);

	LocalInputChannel channel = createLocalInputChannel(gate, partitionManager);

	channel.requestSubpartition(0);

	// Null buffer but not released
	when(reader.getNextBuffer()).thenReturn(null);
	when(reader.isReleased()).thenReturn(false);

	assertFalse(channel.getNextBuffer().isPresent());

	// Null buffer and released
	when(reader.getNextBuffer()).thenReturn(null);
	when(reader.isReleased()).thenReturn(true);

	try {
		channel.getNextBuffer();
		fail("Did not throw expected CancelTaskException");
	} catch (CancelTaskException ignored) {
	}

	channel.releaseAllResources();
	assertFalse(channel.getNextBuffer().isPresent());
}
 
Example #16
Source File: PartitionRequestQueueTest.java    From flink with Apache License 2.0
/**
 * Tests {@link PartitionRequestQueue#enqueueAvailableReader(NetworkSequenceViewReader)},
 * verifying that the reader is enqueued in the pipeline if the next buffer to be sent is an event,
 * even though it has no available credits.
 */
@Test
public void testEnqueueReaderByNotifyingEventBuffer() throws Exception {
	// setup
	final ResultSubpartitionView view = new NextIsEventResultSubpartitionView();

	ResultPartitionProvider partitionProvider =
		(partitionId, index, availabilityListener) -> view;

	final InputChannelID receiverId = new InputChannelID();
	final PartitionRequestQueue queue = new PartitionRequestQueue();
	final CreditBasedSequenceNumberingViewReader reader = new CreditBasedSequenceNumberingViewReader(receiverId, 0, queue);
	final EmbeddedChannel channel = new EmbeddedChannel(queue);

	reader.requestSubpartitionView(partitionProvider, new ResultPartitionID(), 0);

	// block the channel so that we see an intermediate state in the test
	ByteBuf channelBlockingBuffer = blockChannel(channel);
	assertNull(channel.readOutbound());

	// Notify about an available event buffer to trigger enqueuing the reader
	reader.notifyDataAvailable();

	channel.runPendingTasks();

	// The reader is enqueued in the pipeline because the next buffer is an event, even though no credits are available
	assertThat(queue.getAvailableReaders(), contains(reader)); // contains only (this) one!
	assertEquals(0, reader.getNumCreditsAvailable());

	// Flush the buffer to make the channel writable again and see the final results
	channel.flush();
	assertSame(channelBlockingBuffer, channel.readOutbound());

	assertEquals(0, queue.getAvailableReaders().size());
	assertEquals(0, reader.getNumCreditsAvailable());
	assertNull(channel.readOutbound());
}
 
Example #17
Source File: PartitionRequestQueueTest.java    From flink with Apache License 2.0
/**
 * Tests {@link PartitionRequestQueue#enqueueAvailableReader(NetworkSequenceViewReader)},
 * verifying that the reader is enqueued in the pipeline if the next buffer to be sent is an event,
 * even though it has no available credits.
 */
@Test
public void testEnqueueReaderByNotifyingEventBuffer() throws Exception {
	// setup
	final ResultSubpartitionView view = new NextIsEventResultSubpartitionView();

	ResultPartitionProvider partitionProvider =
		(partitionId, index, availabilityListener) -> view;

	final InputChannelID receiverId = new InputChannelID();
	final PartitionRequestQueue queue = new PartitionRequestQueue();
	final CreditBasedSequenceNumberingViewReader reader = new CreditBasedSequenceNumberingViewReader(receiverId, 0, queue);
	final EmbeddedChannel channel = new EmbeddedChannel(queue);

	reader.requestSubpartitionView(partitionProvider, new ResultPartitionID(), 0);

	// block the channel so that we see an intermediate state in the test
	ByteBuf channelBlockingBuffer = blockChannel(channel);
	assertNull(channel.readOutbound());

	// Notify about an available event buffer to trigger enqueuing the reader
	reader.notifyDataAvailable();

	channel.runPendingTasks();

	// The reader is enqueued in the pipeline because the next buffer is an event, even though no credits are available
	assertThat(queue.getAvailableReaders(), contains(reader)); // contains only (this) one!
	assertEquals(0, reader.getNumCreditsAvailable());

	// Flush the buffer to make the channel writable again and see the final results
	channel.flush();
	assertSame(channelBlockingBuffer, channel.readOutbound());

	assertEquals(0, queue.getAvailableReaders().size());
	assertEquals(0, reader.getNumCreditsAvailable());
	assertNull(channel.readOutbound());
}
 
Example #18
Source File: LocalInputChannel.java    From flink with Apache License 2.0
@Override
public int unsynchronizedGetNumberOfQueuedBuffers() {
	ResultSubpartitionView view = subpartitionView;

	if (view != null) {
		return view.unsynchronizedGetNumberOfQueuedBuffers();
	}

	return 0;
}
 
Example #19
Source File: LocalInputChannel.java    From flink with Apache License 2.0
/**
 * Releases the partition reader.
 */
@Override
void releaseAllResources() throws IOException {
	if (!isReleased) {
		isReleased = true;

		ResultSubpartitionView view = subpartitionView;
		if (view != null) {
			view.releaseAllResources();
			subpartitionView = null;
		}
	}
}
 
Example #20
Source File: LocalInputChannel.java    From Flink-CEPplus with Apache License 2.0
private ResultSubpartitionView checkAndWaitForSubpartitionView() {
	// Synchronizing on the request lock means this blocks until the asynchronous request
	// for the partition view has completed. By then, the subpartition view is visible
	// or the channel has been released.
	synchronized (requestLock) {
		checkState(!isReleased, "released");
		checkState(subpartitionView != null, "Queried for a buffer before requesting the subpartition.");
		return subpartitionView;
	}
}
 
Example #21
Source File: LocalInputChannel.java    From Flink-CEPplus with Apache License 2.0
/**
 * Releases the partition reader.
 */
@Override
void releaseAllResources() throws IOException {
	if (!isReleased) {
		isReleased = true;

		ResultSubpartitionView view = subpartitionView;
		if (view != null) {
			view.releaseAllResources();
			subpartitionView = null;
		}
	}
}
 
Example #22
Source File: PartitionRequestQueueTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests {@link PartitionRequestQueue#enqueueAvailableReader(NetworkSequenceViewReader)},
 * verifying that the reader is enqueued in the pipeline if the next buffer to be sent is an event,
 * even though it has no available credits.
 */
@Test
public void testEnqueueReaderByNotifyingEventBuffer() throws Exception {
	// setup
	final ResultSubpartitionView view = new NextIsEventResultSubpartitionView();

	ResultPartitionProvider partitionProvider =
		(partitionId, index, availabilityListener) -> view;

	final InputChannelID receiverId = new InputChannelID();
	final PartitionRequestQueue queue = new PartitionRequestQueue();
	final CreditBasedSequenceNumberingViewReader reader = new CreditBasedSequenceNumberingViewReader(receiverId, 0, queue);
	final EmbeddedChannel channel = new EmbeddedChannel(queue);

	reader.requestSubpartitionView(partitionProvider, new ResultPartitionID(), 0);

	// block the channel so that we see an intermediate state in the test
	ByteBuf channelBlockingBuffer = blockChannel(channel);
	assertNull(channel.readOutbound());

	// Notify about an available event buffer to trigger enqueuing the reader
	reader.notifyDataAvailable();

	channel.runPendingTasks();

	// The reader is enqueued in the pipeline because the next buffer is an event, even though no credits are available
	assertThat(queue.getAvailableReaders(), contains(reader)); // contains only (this) one!
	assertEquals(0, reader.getNumCreditsAvailable());

	// Flush the buffer to make the channel writable again and see the final results
	channel.flush();
	assertSame(channelBlockingBuffer, channel.readOutbound());

	assertEquals(0, queue.getAvailableReaders().size());
	assertEquals(0, reader.getNumCreditsAvailable());
	assertNull(channel.readOutbound());
}
 
Example #23
Source File: LocalInputChannel.java    From Flink-CEPplus with Apache License 2.0
@Override
Optional<BufferAndAvailability> getNextBuffer() throws IOException, InterruptedException {
	checkError();

	ResultSubpartitionView subpartitionView = this.subpartitionView;
	if (subpartitionView == null) {
		// There is a possible race condition between writing an EndOfPartitionEvent (1) and flushing (3) the local
		// channel on the sender side, and reading the EndOfPartitionEvent (2) and processing the flush notification (4).
		// When they happen in that order (1 - 2 - 3 - 4), the flush notification can re-enqueue the LocalInputChannel
		// after (or while) it is being released as a result of reading the EndOfPartitionEvent (2).
		if (isReleased) {
			return Optional.empty();
		}

		// This can happen if the request for the partition was triggered asynchronously by the time trigger.
		// It would be good to avoid that by guaranteeing that requestPartition() and getNextBuffer() always
		// come from the same thread. We could do that by letting the timer insert a special "requesting channel"
		// into the input gate's queue.
		subpartitionView = checkAndWaitForSubpartitionView();
	}

	BufferAndBacklog next = subpartitionView.getNextBuffer();

	if (next == null) {
		if (subpartitionView.isReleased()) {
			throw new CancelTaskException("Consumed partition " + subpartitionView + " has been released.");
		} else {
			return Optional.empty();
		}
	}

	numBytesIn.inc(next.buffer().getSizeUnsafe());
	numBuffersIn.inc();
	return Optional.of(new BufferAndAvailability(next.buffer(), next.isMoreAvailable(), next.buffersInBacklog()));
}
 
Example #24
Source File: LocalInputChannelTest.java    From flink with Apache License 2.0
private static ResultSubpartitionView createResultSubpartitionView(boolean addBuffer) throws IOException {
	int bufferSize = 4096;
	ResultPartition parent = PartitionTestUtils.createPartition(
		ResultPartitionType.PIPELINED,
		NoOpFileChannelManager.INSTANCE,
		true,
		bufferSize);
	ResultSubpartition subpartition = parent.getAllPartitions()[0];
	if (addBuffer) {
		subpartition.add(BufferBuilderTestUtils.createFilledFinishedBufferConsumer(bufferSize));
	}
	return subpartition.createReadView(() -> {});
}
 
Example #25
Source File: LocalInputChannelTest.java    From flink with Apache License 2.0
/**
 * Verifies that buffer is not compressed when getting from a {@link LocalInputChannel}.
 */
@Test
public void testGetBufferFromLocalChannelWhenCompressionEnabled() throws Exception {
	ResultSubpartitionView subpartitionView = createResultSubpartitionView(true);
	TestingResultPartitionManager partitionManager = new TestingResultPartitionManager(subpartitionView);
	LocalInputChannel channel = createLocalInputChannel(new SingleInputGateBuilder().build(), partitionManager);

	// request partition and get next buffer
	channel.requestSubpartition(0);
	Optional<InputChannel.BufferAndAvailability> bufferAndAvailability = channel.getNextBuffer();
	assertTrue(bufferAndAvailability.isPresent());
	assertFalse(bufferAndAvailability.get().buffer().isCompressed());
}
 
Example #26
Source File: SingleInputGateTest.java    From flink with Apache License 2.0
@Override
public ResultSubpartitionView createSubpartitionView(
		ResultPartitionID partitionId,
		int subpartitionIndex,
		BufferAvailabilityListener availabilityListener) throws IOException {
	++counter;
	return subpartitionView;
}
 
Example #27
Source File: LocalInputChannel.java    From flink with Apache License 2.0
@Override
Optional<BufferAndAvailability> getNextBuffer() throws IOException, InterruptedException {
	checkError();

	ResultSubpartitionView subpartitionView = this.subpartitionView;
	if (subpartitionView == null) {
		// There is a possible race condition between writing an EndOfPartitionEvent (1) and flushing (3) the local
		// channel on the sender side, and reading the EndOfPartitionEvent (2) and processing the flush notification (4).
		// When they happen in that order (1 - 2 - 3 - 4), the flush notification can re-enqueue the LocalInputChannel
		// after (or while) it is being released as a result of reading the EndOfPartitionEvent (2).
		if (isReleased) {
			return Optional.empty();
		}

		// This can happen if the request for the partition was triggered asynchronously by the time trigger.
		// It would be good to avoid that by guaranteeing that requestPartition() and getNextBuffer() always
		// come from the same thread. We could do that by letting the timer insert a special "requesting channel"
		// into the input gate's queue.
		subpartitionView = checkAndWaitForSubpartitionView();
	}

	BufferAndBacklog next = subpartitionView.getNextBuffer();

	if (next == null) {
		if (subpartitionView.isReleased()) {
			throw new CancelTaskException("Consumed partition " + subpartitionView + " has been released.");
		} else {
			return Optional.empty();
		}
	}

	numBytesIn.inc(next.buffer().getSize());
	numBuffersIn.inc();
	return Optional.of(new BufferAndAvailability(next.buffer(), next.isMoreAvailable(), next.buffersInBacklog()));
}
 
Example #28
Source File: LocalInputChannel.java    From flink with Apache License 2.0
private ResultSubpartitionView checkAndWaitForSubpartitionView() {
	// Synchronizing on the request lock means this blocks until the asynchronous request
	// for the partition view has completed. By then, the subpartition view is visible
	// or the channel has been released.
	synchronized (requestLock) {
		checkState(!isReleased, "released");
		checkState(subpartitionView != null, "Queried for a buffer before requesting the subpartition.");
		return subpartitionView;
	}
}
 
Example #29
Source File: SingleInputGateTest.java    From flink with Apache License 2.0
public TestingResultPartitionManager(ResultSubpartitionView subpartitionView) {
	this.subpartitionView = subpartitionView;
}
 
Example #30
Source File: PartitionRequestQueueTest.java    From flink with Apache License 2.0
/**
 * Tests {@link PartitionRequestQueue#enqueueAvailableReader(NetworkSequenceViewReader)},
 * verifying that the reader is enqueued in the pipeline if and only if it has both available credits and buffers.
 */
@Test
public void testEnqueueReaderByNotifyingBufferAndCredit() throws Exception {
	// setup
	final ResultSubpartitionView view = new DefaultBufferResultSubpartitionView(10);

	ResultPartitionProvider partitionProvider =
		(partitionId, index, availabilityListener) -> view;

	final InputChannelID receiverId = new InputChannelID();
	final PartitionRequestQueue queue = new PartitionRequestQueue();
	final CreditBasedSequenceNumberingViewReader reader = new CreditBasedSequenceNumberingViewReader(receiverId, 0, queue);
	final EmbeddedChannel channel = new EmbeddedChannel(queue);

	reader.requestSubpartitionView(partitionProvider, new ResultPartitionID(), 0);
	queue.notifyReaderCreated(reader);

	// block the channel so that we see an intermediate state in the test
	ByteBuf channelBlockingBuffer = blockChannel(channel);
	assertNull(channel.readOutbound());

	// Notify about available buffers to trigger enqueuing the reader
	final int notifyNumBuffers = 5;
	for (int i = 0; i < notifyNumBuffers; i++) {
		reader.notifyDataAvailable();
	}

	channel.runPendingTasks();

	// the reader is not enqueued in the pipeline because no credits are available
	// -> it should still have the same number of pending buffers
	assertEquals(0, queue.getAvailableReaders().size());
	assertTrue(reader.hasBuffersAvailable());
	assertFalse(reader.isRegisteredAsAvailable());
	assertEquals(0, reader.getNumCreditsAvailable());

	// Notify about available credits to trigger enqueuing the reader again
	final int notifyNumCredits = 3;
	for (int i = 1; i <= notifyNumCredits; i++) {
		queue.addCreditOrResumeConsumption(receiverId, viewReader -> viewReader.addCredit(1));

		// the reader is enqueued in the pipeline because it has both available buffers and credits
		// since the channel is blocked though, we will not process anything and only enqueue the
		// reader once
		assertTrue(reader.isRegisteredAsAvailable());
		assertThat(queue.getAvailableReaders(), contains(reader)); // contains only (this) one!
		assertEquals(i, reader.getNumCreditsAvailable());
		assertTrue(reader.hasBuffersAvailable());
	}

	// Flush the buffer to make the channel writable again and see the final results
	channel.flush();
	assertSame(channelBlockingBuffer, channel.readOutbound());

	assertEquals(0, queue.getAvailableReaders().size());
	assertEquals(0, reader.getNumCreditsAvailable());
	assertTrue(reader.hasBuffersAvailable());
	assertFalse(reader.isRegisteredAsAvailable());
	for (int i = 1; i <= notifyNumCredits; i++) {
		assertThat(channel.readOutbound(), instanceOf(NettyMessage.BufferResponse.class));
	}
	assertNull(channel.readOutbound());
}