Java Code Examples for org.apache.flink.runtime.io.network.buffer.BufferConsumer#close()

The following examples show how to use org.apache.flink.runtime.io.network.buffer.BufferConsumer#close(). The originating project, source file, and license are noted above each snippet.
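
Before the examples, here is a minimal sketch of the pattern that recurs throughout them: data is read from the BufferConsumer with build(), the resulting Buffer is recycled, and close() is called in a finally block so the consumer's memory segment is released even when an exception is thrown. The class and method names below are illustrative only and are not part of Flink's API.

import org.apache.flink.runtime.io.network.buffer.Buffer;
import org.apache.flink.runtime.io.network.buffer.BufferConsumer;

// Illustrative helper, not part of Flink: consume a BufferConsumer exactly once
// and always release its resources.
public final class BufferConsumerCloseSketch {

	private BufferConsumerCloseSketch() {
	}

	/** Reads the data currently available in the consumer and returns its size in bytes. */
	static int readAndClose(BufferConsumer bufferConsumer) {
		try {
			// build() yields a Buffer backed by the consumer's memory segment.
			Buffer buffer = bufferConsumer.build();
			try {
				return buffer.getSize();
			} finally {
				// Release the reader's reference to the underlying segment.
				buffer.recycleBuffer();
			}
		} finally {
			// Release the consumer's reference; once all references are gone,
			// the memory segment is recycled back to its buffer pool.
			bufferConsumer.close();
		}
	}
}
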
Example 1
Source File: ResultPartition.java    From flink with Apache License 2.0
@Override
public boolean addBufferConsumer(
		BufferConsumer bufferConsumer,
		int subpartitionIndex,
		boolean isPriorityEvent) throws IOException {
	checkNotNull(bufferConsumer);

	ResultSubpartition subpartition;
	try {
		checkInProduceState();
		subpartition = subpartitions[subpartitionIndex];
	}
	catch (Exception ex) {
		bufferConsumer.close();
		throw ex;
	}

	return subpartition.add(bufferConsumer, isPriorityEvent);
}
 
Example 2
Source File: PipelinedSubpartition.java    From flink with Apache License 2.0
private boolean add(BufferConsumer bufferConsumer, boolean finish, boolean insertAsHead) {
	checkNotNull(bufferConsumer);

	final boolean notifyDataAvailable;
	synchronized (buffers) {
		if (isFinished || isReleased) {
			bufferConsumer.close();
			return false;
		}

		// Add the bufferConsumer and update the stats
		handleAddingBarrier(bufferConsumer, insertAsHead);
		updateStatistics(bufferConsumer);
		increaseBuffersInBacklog(bufferConsumer);
		notifyDataAvailable = insertAsHead || finish || shouldNotifyDataAvailable();

		isFinished |= finish;
	}

	if (notifyDataAvailable) {
		notifyDataAvailable();
	}

	return true;
}
 
Example 3
Source File: ResultPartition.java    From flink with Apache License 2.0
@Override
public boolean addBufferConsumer(BufferConsumer bufferConsumer, int subpartitionIndex) throws IOException {
	checkNotNull(bufferConsumer);

	ResultSubpartition subpartition;
	try {
		checkInProduceState();
		subpartition = subpartitions[subpartitionIndex];
	}
	catch (Exception ex) {
		bufferConsumer.close();
		throw ex;
	}

	return subpartition.add(bufferConsumer);
}
 
Example 4
Source File: ResultPartitionTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer} on a partition which has already been released.
 *
 * @param pipelined the result partition type to set up
 */
protected void testAddOnReleasedPartition(final ResultPartitionType pipelined)
	throws Exception {
	BufferConsumer bufferConsumer = createFilledBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	try {
		ResultPartition partition = createPartition(notifier, pipelined, true);
		partition.release();
		// partition.add() silently drops the bufferConsumer but recycles it
		partition.addBufferConsumer(bufferConsumer, 0);
		assertTrue(partition.isReleased());
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
			Assert.fail("bufferConsumer not recycled");
		}
		// should not have notified either
		verify(notifier, never()).notifyPartitionConsumable(any(JobID.class), any(ResultPartitionID.class), any(TaskActions.class));
	}
}
 
Example 5
Source File: ResultPartitionTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer} on a partition which has already finished.
 *
 * @param pipelined the result partition type to set up
 */
protected void testAddOnFinishedPartition(final ResultPartitionType pipelined)
	throws Exception {
	BufferConsumer bufferConsumer = createFilledBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	try {
		ResultPartition partition = createPartition(notifier, pipelined, true);
		partition.finish();
		reset(notifier);
		// partition.add() should fail
		partition.addBufferConsumer(bufferConsumer, 0);
		Assert.fail("exception expected");
	} catch (IllegalStateException e) {
		// expected => ignored
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
			Assert.fail("bufferConsumer not recycled");
		}
		// should not have notified either
		verify(notifier, never()).notifyPartitionConsumable(any(JobID.class), any(ResultPartitionID.class), any(TaskActions.class));
	}
}
 
Example 6
Source File: SpillableSubpartition.java    From Flink-CEPplus with Apache License 2.0
private boolean add(BufferConsumer bufferConsumer, boolean forceFinishRemainingBuffers)
		throws IOException {
	checkNotNull(bufferConsumer);

	synchronized (buffers) {
		if (isFinished || isReleased) {
			bufferConsumer.close();
			return false;
		}

		buffers.add(bufferConsumer);
		// The number of buffers is needed later when creating
		// the read views. If you ever remove this line here,
		// make sure to still count the number of buffers.
		updateStatistics(bufferConsumer);
		increaseBuffersInBacklog(bufferConsumer);

		if (spillWriter != null) {
			spillFinishedBufferConsumers(forceFinishRemainingBuffers);
		}
	}
	return true;
}
 
Example 7
Source File: SpillableSubpartitionTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests {@link SpillableSubpartition#add(BufferConsumer)} with a released partition.
 *
 * @param spilled
 * 		whether the partition should be spilled to disk (<tt>true</tt>) or not (<tt>false</tt>,
 * 		spillable).
 */
private void testAddOnReleasedPartition(boolean spilled) throws Exception {
	SpillableSubpartition partition = createSubpartition();
	partition.release();
	if (spilled) {
		assertEquals(0, partition.releaseMemory());
	}

	BufferConsumer buffer = createFilledBufferConsumer(BUFFER_DATA_SIZE, BUFFER_DATA_SIZE);
	boolean bufferRecycled;
	try {
		partition.add(buffer);
	} finally {
		bufferRecycled = buffer.isRecycled();
		if (!bufferRecycled) {
			buffer.close();
		}
	}
	if (!bufferRecycled) {
		Assert.fail("buffer not recycled");
	}
	assertEquals(0, partition.getTotalNumberOfBuffers());
	assertEquals(0, partition.getTotalNumberOfBytes());
}
 
Example 8
Source File: PipelinedSubpartition.java    From flink with Apache License 2.0
private boolean add(BufferConsumer bufferConsumer, boolean finish) {
	checkNotNull(bufferConsumer);

	final boolean notifyDataAvailable;
	synchronized (buffers) {
		if (isFinished || isReleased) {
			bufferConsumer.close();
			return false;
		}

		// Add the bufferConsumer and update the stats
		buffers.add(bufferConsumer);
		updateStatistics(bufferConsumer);
		increaseBuffersInBacklog(bufferConsumer);
		notifyDataAvailable = shouldNotifyDataAvailable() || finish;

		isFinished |= finish;
	}

	if (notifyDataAvailable) {
		notifyDataAvailable();
	}

	return true;
}
 
Example 9
Source File: ResultPartitionTest.java    From flink with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer} on a partition which has already finished.
 *
 * @param partitionType the result partition type to set up
 */
private void testAddOnFinishedPartition(final ResultPartitionType partitionType) throws Exception {
	BufferConsumer bufferConsumer = createFilledBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	JobID jobId = new JobID();
	TaskActions taskActions = new NoOpTaskActions();
	ResultPartitionWriter consumableNotifyingPartitionWriter = createConsumableNotifyingResultPartitionWriter(
		partitionType,
		taskActions,
		jobId,
		notifier);
	try {
		consumableNotifyingPartitionWriter.finish();
		reset(notifier);
		// partition.add() should fail
		consumableNotifyingPartitionWriter.addBufferConsumer(bufferConsumer, 0);
		Assert.fail("exception expected");
	} catch (IllegalStateException e) {
		// expected => ignored
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
			Assert.fail("bufferConsumer not recycled");
		}
		// should not have notified either
		verify(notifier, never()).notifyPartitionConsumable(
			eq(jobId),
			eq(consumableNotifyingPartitionWriter.getPartitionId()),
			eq(taskActions));
	}
}
 
Example 10
Source File: BroadcastRecordWriterTest.java    From flink with Apache License 2.0
public void closeConsumer(KeepingPartitionWriter partitionWriter, int subpartitionIndex, int expectedSize) {
	BufferConsumer bufferConsumer = partitionWriter.getAddedBufferConsumers(subpartitionIndex).get(0);
	Buffer buffer = bufferConsumer.build();
	bufferConsumer.close();
	assertEquals(expectedSize, buffer.getSize());
	buffer.recycleBuffer();
}
 
Example 11
Source File: PipelinedSubpartition.java    From flink with Apache License 2.0
@Override
public void release() {
	// view reference accessible outside the lock, but assigned inside the locked scope
	final PipelinedSubpartitionView view;

	synchronized (buffers) {
		if (isReleased) {
			return;
		}

		// Release all available buffers
		for (BufferConsumer buffer : buffers) {
			buffer.close();
		}
		buffers.clear();

		view = readView;
		readView = null;

		// Make sure that no further buffers are added to the subpartition
		isReleased = true;
	}

	LOG.debug("{}: Released {}.", parent.getOwningTaskName(), this);

	if (view != null) {
		view.releaseAllResources();
	}
}
 
Example 12
Source File: BoundedBlockingSubpartition.java    From flink with Apache License 2.0
private void writeAndCloseBufferConsumer(BufferConsumer bufferConsumer) throws IOException {
	try {
		final Buffer buffer = bufferConsumer.build();
		try {
			if (canBeCompressed(buffer)) {
				final Buffer compressedBuffer = parent.bufferCompressor.compressToIntermediateBuffer(buffer);
				data.writeBuffer(compressedBuffer);
				if (compressedBuffer != buffer) {
					compressedBuffer.recycleBuffer();
				}
			} else {
				data.writeBuffer(buffer);
			}

			numBuffersAndEventsWritten++;
			if (buffer.isBuffer()) {
				numDataBuffersWritten++;
			}
		}
		finally {
			buffer.recycleBuffer();
		}
	}
	finally {
		bufferConsumer.close();
	}
}
 
Example 13
Source File: ResultPartitionTest.java    From flink with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer} on a partition which has already been released.
 *
 * @param partitionType the result partition type to set up
 */
private void testAddOnReleasedPartition(final ResultPartitionType partitionType) throws Exception {
	BufferConsumer bufferConsumer = createFilledFinishedBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	JobID jobId = new JobID();
	TaskActions taskActions = new NoOpTaskActions();
	ResultPartition partition = partitionType == ResultPartitionType.BLOCKING ?
		createPartition(partitionType, fileChannelManager) : createPartition(partitionType);
	ResultPartitionWriter consumableNotifyingPartitionWriter = ConsumableNotifyingResultPartitionWriterDecorator.decorate(
		Collections.singleton(PartitionTestUtils.createPartitionDeploymentDescriptor(partitionType)),
		new ResultPartitionWriter[] {partition},
		taskActions,
		jobId,
		notifier)[0];
	try {
		partition.release();
		// partition.add() silently drops the bufferConsumer but recycles it
		consumableNotifyingPartitionWriter.addBufferConsumer(bufferConsumer, 0);
		assertTrue(partition.isReleased());
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
			Assert.fail("bufferConsumer not recycled");
		}
		// should not have notified either
		verify(notifier, never()).notifyPartitionConsumable(eq(jobId), eq(partition.getPartitionId()), eq(taskActions));
	}
}
 
Example 14
Source File: InputGateConcurrentTest.java    From Flink-CEPplus with Apache License 2.0
@Override
void addBufferConsumer(BufferConsumer bufferConsumer) throws Exception {
	try {
		Buffer buffer = bufferConsumer.build();
		checkState(bufferConsumer.isFinished(), "Handling of non finished buffers is not yet implemented");
		channel.onBuffer(buffer, seq++, -1);
	}
	finally {
		bufferConsumer.close();
	}
}
 
Example 15
Source File: SpillableSubpartition.java    From Flink-CEPplus with Apache License 2.0
@VisibleForTesting
long spillFinishedBufferConsumers(boolean forceFinishRemainingBuffers) throws IOException {
	long spilledBytes = 0;

	while (!buffers.isEmpty()) {
		BufferConsumer bufferConsumer = buffers.getFirst();
		Buffer buffer = bufferConsumer.build();
		updateStatistics(buffer);
		int bufferSize = buffer.getSize();
		spilledBytes += bufferSize;

		// NOTE we may be in the process of finishing the subpartition where any buffer should
		// be treated as if it was finished!
		if (bufferConsumer.isFinished() || forceFinishRemainingBuffers) {
			if (bufferSize > 0) {
				spillWriter.writeBlock(buffer);
			} else {
				// If we skip a buffer for the spill writer, we need to adapt the backlog accordingly
				decreaseBuffersInBacklog(buffer);
				buffer.recycleBuffer();
			}
			bufferConsumer.close();
			buffers.poll();
		} else {
			// If there is already data, we need to spill it anyway, since we do not get this
			// slice from the buffer consumer again during the next build.
			// BEWARE: by doing so, we increase the actual number of buffers in the spill writer!
			if (bufferSize > 0) {
				spillWriter.writeBlock(buffer);
				increaseBuffersInBacklog(bufferConsumer);
			} else {
				buffer.recycleBuffer();
			}

			return spilledBytes;
		}
	}
	return spilledBytes;
}
 
Example 16
Source File: RecordWriterTest.java    From Flink-CEPplus with Apache License 2.0
@Override
public void addBufferConsumer(BufferConsumer bufferConsumer, int targetChannel) throws IOException {
	bufferConsumer.close();
}
 
Example 17
Source File: PipelinedSubpartitionTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests cleanup of {@link PipelinedSubpartition#release()}.
 *
 * @param createView
 * 		whether the partition should have a view attached to it (<tt>true</tt>) or not (<tt>false</tt>)
 */
private void testCleanupReleasedPartition(boolean createView) throws Exception {
	PipelinedSubpartition partition = createSubpartition();

	BufferConsumer buffer1 = createFilledBufferConsumer(4096);
	BufferConsumer buffer2 = createFilledBufferConsumer(4096);
	boolean buffer1Recycled;
	boolean buffer2Recycled;
	try {
		partition.add(buffer1);
		partition.add(buffer2);
		// create the read view first
		ResultSubpartitionView view = null;
		if (createView) {
			view = partition.createReadView(new NoOpBufferAvailablityListener());
		}

		partition.release();

		assertTrue(partition.isReleased());
		if (createView) {
			assertTrue(view.isReleased());
		}
		assertTrue(buffer1.isRecycled());
	} finally {
		buffer1Recycled = buffer1.isRecycled();
		if (!buffer1Recycled) {
			buffer1.close();
		}
		buffer2Recycled = buffer2.isRecycled();
		if (!buffer2Recycled) {
			buffer2.close();
		}
	}
	if (!buffer1Recycled) {
		Assert.fail("buffer 1 not recycled");
	}
	if (!buffer2Recycled) {
		Assert.fail("buffer 2 not recycled");
	}
	assertEquals(2, partition.getTotalNumberOfBuffers());
	assertEquals(0, partition.getTotalNumberOfBytes()); // buffer data is never consumed
}
 
Example 18
Source File: PipelinedSubpartitionTest.java    From flink with Apache License 2.0
/**
 * Tests cleanup of {@link PipelinedSubpartition#release()}.
 *
 * @param createView
 * 		whether the partition should have a view attached to it (<tt>true</tt>) or not (<tt>false</tt>)
 */
private void testCleanupReleasedPartition(boolean createView) throws Exception {
	PipelinedSubpartition partition = createSubpartition();

	BufferConsumer buffer1 = createFilledBufferConsumer(4096);
	BufferConsumer buffer2 = createFilledBufferConsumer(4096);
	boolean buffer1Recycled;
	boolean buffer2Recycled;
	try {
		partition.add(buffer1);
		partition.add(buffer2);
		// create the read view first
		ResultSubpartitionView view = null;
		if (createView) {
			view = partition.createReadView(new NoOpBufferAvailablityListener());
		}

		partition.release();

		assertTrue(partition.isReleased());
		if (createView) {
			assertTrue(view.isReleased());
		}
		assertTrue(buffer1.isRecycled());
	} finally {
		buffer1Recycled = buffer1.isRecycled();
		if (!buffer1Recycled) {
			buffer1.close();
		}
		buffer2Recycled = buffer2.isRecycled();
		if (!buffer2Recycled) {
			buffer2.close();
		}
	}
	if (!buffer1Recycled) {
		Assert.fail("buffer 1 not recycled");
	}
	if (!buffer2Recycled) {
		Assert.fail("buffer 2 not recycled");
	}
	assertEquals(2, partition.getTotalNumberOfBuffers());
	assertEquals(0, partition.getTotalNumberOfBytes()); // buffer data is never consumed
}
 
Example 19
Source File: PipelinedSubpartitionTest.java    From flink with Apache License 2.0
/**
 * Tests cleanup of {@link PipelinedSubpartition#release()}.
 *
 * @param createView
 * 		whether the partition should have a view attached to it (<tt>true</tt>) or not (<tt>false</tt>)
 */
private void testCleanupReleasedPartition(boolean createView) throws Exception {
	PipelinedSubpartition partition = createSubpartition();

	BufferConsumer buffer1 = createFilledFinishedBufferConsumer(4096);
	BufferConsumer buffer2 = createFilledFinishedBufferConsumer(4096);
	boolean buffer1Recycled;
	boolean buffer2Recycled;
	try {
		partition.add(buffer1);
		partition.add(buffer2);
		// create the read view first
		ResultSubpartitionView view = null;
		if (createView) {
			view = partition.createReadView(new NoOpBufferAvailablityListener());
		}

		partition.release();

		assertTrue(partition.isReleased());
		if (createView) {
			assertTrue(view.isReleased());
		}
		assertTrue(buffer1.isRecycled());
	} finally {
		buffer1Recycled = buffer1.isRecycled();
		if (!buffer1Recycled) {
			buffer1.close();
		}
		buffer2Recycled = buffer2.isRecycled();
		if (!buffer2Recycled) {
			buffer2.close();
		}
	}
	if (!buffer1Recycled) {
		Assert.fail("buffer 1 not recycled");
	}
	if (!buffer2Recycled) {
		Assert.fail("buffer 2 not recycled");
	}
	assertEquals(2, partition.getTotalNumberOfBuffers());
	assertEquals(0, partition.getTotalNumberOfBytes()); // buffer data is never consumed
}
 
Example 20
Source File: SpillableSubpartition.java    From Flink-CEPplus with Apache License 2.0
@Override
public synchronized void release() throws IOException {
	// view reference accessible outside the lock, but assigned inside the locked scope
	final ResultSubpartitionView view;

	synchronized (buffers) {
		if (isReleased) {
			return;
		}

		// Release all available buffers
		for (BufferConsumer buffer : buffers) {
			buffer.close();
		}
		buffers.clear();

		view = readView;

		// No consumer yet, we are responsible to clean everything up. If
		// one is available, the view is responsible for cleaning up (see
		// below).
		if (view == null) {

			// TODO This can block until all buffers are written out to
			// disk if a spill is in-progress before deleting the file.
			// It is possibly called from the Netty event loop threads,
			// which can bring down the network.
			if (spillWriter != null) {
				spillWriter.closeAndDelete();
			}
		}

		isReleased = true;
	}

	LOG.debug("{}: Released {}.", parent.getOwningTaskName(), this);

	if (view != null) {
		view.releaseAllResources();
	}
}