Java Code Examples for org.apache.flink.runtime.io.network.buffer.BufferConsumer#isRecycled()

The following examples show how to use org.apache.flink.runtime.io.network.buffer.BufferConsumer#isRecycled(). They are taken from open-source projects; the source file and originating project are noted above each example.
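
All of the examples share the same cleanup pattern: once a BufferConsumer has been handed to a partition, it is only recycled when the partition (or its reader) releases it, so each test checks isRecycled() in a finally block and closes the consumer itself if it was not recycled. The sketch below is not taken from the Flink sources; it only illustrates that pattern and assumes the static test helper createFilledBufferConsumer and the BufferBuilderTestUtils.BUFFER_SIZE constant used in the examples are in scope.

// Minimal sketch (hypothetical helper, not part of the Flink tests) of the recycle-or-close pattern.
private void addAndCleanUp(ResultPartition partition) throws Exception {
	BufferConsumer bufferConsumer = createFilledBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	try {
		// the partition takes over the consumer; it stays un-recycled while still queued
		partition.addBufferConsumer(bufferConsumer, 0);
	} finally {
		// if the partition has not recycled the consumer (still queued, or the add failed
		// before taking ownership), close it here to return the underlying buffer to its pool
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
		}
	}
}
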
Example 1
Source File: ResultPartitionTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer(BufferConsumer, int)} on a working partition.
 *
 * @param pipelined the result partition type to set up
 */
protected void testAddOnPartition(final ResultPartitionType pipelined)
	throws Exception {
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	ResultPartition partition = createPartition(notifier, pipelined, true);
	BufferConsumer bufferConsumer = createFilledBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	try {
		// partition.add() adds the bufferConsumer without recycling it (if not spilling)
		partition.addBufferConsumer(bufferConsumer, 0);
		assertFalse("bufferConsumer should not be recycled (still in the queue)", bufferConsumer.isRecycled());
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
		}
		// should have been notified for pipelined partitions
		if (pipelined.isPipelined()) {
			verify(notifier, times(1))
				.notifyPartitionConsumable(
					eq(partition.getJobId()),
					eq(partition.getPartitionId()),
					any(TaskActions.class));
		}
	}
}
 
Example 2
Source File: ResultPartitionTest.java    From flink with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer(BufferConsumer, int)} on a working partition.
 *
 * @param partitionType the result partition type to set up
 */
private void testAddOnPartition(final ResultPartitionType partitionType) throws Exception {
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	JobID jobId = new JobID();
	TaskActions taskActions = new NoOpTaskActions();
	ResultPartitionWriter consumableNotifyingPartitionWriter = createConsumableNotifyingResultPartitionWriter(
		partitionType,
		taskActions,
		jobId,
		notifier);
	BufferConsumer bufferConsumer = createFilledFinishedBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	try {
		// partition.add() adds the bufferConsumer without recycling it (if not spilling)
		consumableNotifyingPartitionWriter.addBufferConsumer(bufferConsumer, 0);
		assertFalse("bufferConsumer should not be recycled (still in the queue)", bufferConsumer.isRecycled());
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
		}
		// should have been notified for pipelined partitions
		if (partitionType.isPipelined()) {
			verify(notifier, times(1))
				.notifyPartitionConsumable(eq(jobId), eq(consumableNotifyingPartitionWriter.getPartitionId()), eq(taskActions));
		}
	}
}
 
Example 3
Source File: ResultPartitionTest.java    From flink with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer(BufferConsumer, int)} on a working partition.
 *
 * @param partitionType the result partition type to set up
 */
private void testAddOnPartition(final ResultPartitionType partitionType) throws Exception {
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	JobID jobId = new JobID();
	TaskActions taskActions = new NoOpTaskActions();
	ResultPartitionWriter consumableNotifyingPartitionWriter = createConsumableNotifyingResultPartitionWriter(
		partitionType,
		taskActions,
		jobId,
		notifier);
	BufferConsumer bufferConsumer = createFilledBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	try {
		// partition.add() adds the bufferConsumer without recycling it (if not spilling)
		consumableNotifyingPartitionWriter.addBufferConsumer(bufferConsumer, 0);
		assertFalse("bufferConsumer should not be recycled (still in the queue)", bufferConsumer.isRecycled());
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
		}
		// should have been notified for pipelined partitions
		if (partitionType.isPipelined()) {
			verify(notifier, times(1))
				.notifyPartitionConsumable(eq(jobId), eq(consumableNotifyingPartitionWriter.getPartitionId()), eq(taskActions));
		}
	}
}
 
Example 4
Source File: SpillableSubpartitionTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests {@link SpillableSubpartition#add(BufferConsumer)} with a finished partition.
 *
 * @param spilled
 * 		whether the partition should be spilled to disk (<tt>true</tt>) or not (<tt>false</tt>,
 * 		spillable).
 */
private void testAddOnFinishedPartition(boolean spilled) throws Exception {
	SpillableSubpartition partition = createSubpartition();
	if (spilled) {
		assertEquals(0, partition.releaseMemory());
	}
	partition.finish();
	// finish adds an EndOfPartitionEvent
	assertEquals(1, partition.getTotalNumberOfBuffers());
	// if not spilled, statistics are only updated when consuming the buffers
	assertEquals(spilled ? 4 : 0, partition.getTotalNumberOfBytes());

	BufferConsumer buffer = createFilledBufferConsumer(BUFFER_DATA_SIZE, BUFFER_DATA_SIZE);
	try {
		partition.add(buffer);
	} finally {
		if (!buffer.isRecycled()) {
			buffer.close();
			Assert.fail("buffer not recycled");
		}
	}
	// still same statistics
	assertEquals(1, partition.getTotalNumberOfBuffers());
	// if not spilled, statistics are only updated when consuming the buffers
	assertEquals(spilled ? 4 : 0, partition.getTotalNumberOfBytes());
}
 
Example 5
Source File: ResultPartitionTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer} on a partition which has already been released.
 *
 * @param pipelined the result partition type to set up
 */
protected void testAddOnReleasedPartition(final ResultPartitionType pipelined)
	throws Exception {
	BufferConsumer bufferConsumer = createFilledBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	try {
		ResultPartition partition = createPartition(notifier, pipelined, true);
		partition.release();
		// partition.add() silently drops the bufferConsumer but recycles it
		partition.addBufferConsumer(bufferConsumer, 0);
		assertTrue(partition.isReleased());
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
			Assert.fail("bufferConsumer not recycled");
		}
		// should not have notified either
		verify(notifier, never()).notifyPartitionConsumable(any(JobID.class), any(ResultPartitionID.class), any(TaskActions.class));
	}
}
 
Example 6
Source File: ResultPartitionTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer} on a partition which has already finished.
 *
 * @param pipelined the result partition type to set up
 */
protected void testAddOnFinishedPartition(final ResultPartitionType pipelined)
	throws Exception {
	BufferConsumer bufferConsumer = createFilledBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	try {
		ResultPartition partition = createPartition(notifier, pipelined, true);
		partition.finish();
		reset(notifier);
		// partition.add() should fail
		partition.addBufferConsumer(bufferConsumer, 0);
		Assert.fail("exception expected");
	} catch (IllegalStateException e) {
		// expected => ignored
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
			Assert.fail("bufferConsumer not recycled");
		}
		// should not have notified either
		verify(notifier, never()).notifyPartitionConsumable(any(JobID.class), any(ResultPartitionID.class), any(TaskActions.class));
	}
}
 
Example 7
Source File: SpillableSubpartitionTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests {@link SpillableSubpartition#add(BufferConsumer)} with a spilled partition where adding the
 * write request fails with an exception.
 */
@Test
public void testAddOnSpilledPartitionWithFailingWriter() throws Exception {
	IOManager ioManager = new IOManagerAsyncWithClosedBufferFileWriter();
	SpillableSubpartition partition = createSubpartition(ioManager);
	assertEquals(0, partition.releaseMemory());

	exception.expect(IOException.class);

	BufferConsumer buffer = createFilledBufferConsumer(BUFFER_DATA_SIZE, BUFFER_DATA_SIZE);
	boolean bufferRecycled;
	try {
		partition.add(buffer);
	} finally {
		ioManager.shutdown();
		bufferRecycled = buffer.isRecycled();
		if (!bufferRecycled) {
			buffer.close();
		}
	}
	if (!bufferRecycled) {
		Assert.fail("buffer not recycled");
	}
	assertEquals(0, partition.getTotalNumberOfBuffers());
	assertEquals(0, partition.getTotalNumberOfBytes());
}
 
Example 8
Source File: SpillableSubpartitionTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests {@link SpillableSubpartition#add(BufferConsumer)} with a spilled partition whose writer is
 * too slow to complete the write request, checking that the buffer is not recycled prematurely.
 */
@Test
public void testAddOnSpilledPartitionWithSlowWriter() throws Exception {
	// simulate slow writer by a no-op write operation
	IOManager ioManager = new IOManagerAsyncWithNoOpBufferFileWriter();
	SpillableSubpartition partition = createSubpartition(ioManager);
	assertEquals(0, partition.releaseMemory());

	BufferConsumer buffer = createFilledBufferConsumer(BUFFER_DATA_SIZE, BUFFER_DATA_SIZE);
	boolean bufferRecycled;
	try {
		partition.add(buffer);
	} finally {
		ioManager.shutdown();
		bufferRecycled = buffer.isRecycled();
		if (!bufferRecycled) {
			buffer.close();
		}
	}
	if (bufferRecycled) {
		Assert.fail("buffer recycled before the write operation completed");
	}
	assertEquals(1, partition.getTotalNumberOfBuffers());
	assertEquals(BUFFER_DATA_SIZE, partition.getTotalNumberOfBytes());
}
 
Example 9
Source File: SpillableSubpartitionTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests {@link SpillableSubpartition#add(BufferConsumer)} with a released partition.
 *
 * @param spilled
 * 		whether the partition should be spilled to disk (<tt>true</tt>) or not (<tt>false</tt>,
 * 		spillable).
 */
private void testAddOnReleasedPartition(boolean spilled) throws Exception {
	SpillableSubpartition partition = createSubpartition();
	partition.release();
	if (spilled) {
		assertEquals(0, partition.releaseMemory());
	}

	BufferConsumer buffer = createFilledBufferConsumer(BUFFER_DATA_SIZE, BUFFER_DATA_SIZE);
	boolean bufferRecycled;
	try {
		partition.add(buffer);
	} finally {
		bufferRecycled = buffer.isRecycled();
		if (!bufferRecycled) {
			buffer.close();
		}
	}
	if (!bufferRecycled) {
		Assert.fail("buffer not recycled");
	}
	assertEquals(0, partition.getTotalNumberOfBuffers());
	assertEquals(0, partition.getTotalNumberOfBytes());
}
 
Example 10
Source File: ResultPartitionTest.java    From flink with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer} on a partition which has already finished.
 *
 * @param partitionType the result partition type to set up
 */
private void testAddOnFinishedPartition(final ResultPartitionType partitionType) throws Exception {
	BufferConsumer bufferConsumer = createFilledBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	JobID jobId = new JobID();
	TaskActions taskActions = new NoOpTaskActions();
	ResultPartitionWriter consumableNotifyingPartitionWriter = createConsumableNotifyingResultPartitionWriter(
		partitionType,
		taskActions,
		jobId,
		notifier);
	try {
		consumableNotifyingPartitionWriter.finish();
		reset(notifier);
		// partition.add() should fail
		consumableNotifyingPartitionWriter.addBufferConsumer(bufferConsumer, 0);
		Assert.fail("exception expected");
	} catch (IllegalStateException e) {
		// expected => ignored
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
			Assert.fail("bufferConsumer not recycled");
		}
		// should not have notified either
		verify(notifier, never()).notifyPartitionConsumable(
			eq(jobId),
			eq(consumableNotifyingPartitionWriter.getPartitionId()),
			eq(taskActions));
	}
}
 
Example 11
Source File: ResultPartitionTest.java    From flink with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer} on a partition which has already been released.
 *
 * @param partitionType the result partition type to set up
 */
private void testAddOnReleasedPartition(final ResultPartitionType partitionType) throws Exception {
	BufferConsumer bufferConsumer = createFilledBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	JobID jobId = new JobID();
	TaskActions taskActions = new NoOpTaskActions();
	ResultPartition partition = partitionType == ResultPartitionType.BLOCKING ?
		createPartition(partitionType, fileChannelManager) : createPartition(partitionType);
	ResultPartitionWriter consumableNotifyingPartitionWriter = ConsumableNotifyingResultPartitionWriterDecorator.decorate(
		Collections.singleton(PartitionTestUtils.createPartitionDeploymentDescriptor(partitionType)),
		new ResultPartitionWriter[] {partition},
		taskActions,
		jobId,
		notifier)[0];
	try {
		partition.release();
		// partition.add() silently drops the bufferConsumer but recycles it
		consumableNotifyingPartitionWriter.addBufferConsumer(bufferConsumer, 0);
		assertTrue(partition.isReleased());
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
			Assert.fail("bufferConsumer not recycled");
		}
		// should not have notified either
		verify(notifier, never()).notifyPartitionConsumable(eq(jobId), eq(partition.getPartitionId()), eq(taskActions));
	}
}
 
Example 12
Source File: ResultPartitionTest.java    From flink with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer} on a partition which has already finished.
 *
 * @param partitionType the result partition type to set up
 */
private void testAddOnFinishedPartition(final ResultPartitionType partitionType) throws Exception {
	BufferConsumer bufferConsumer = createFilledFinishedBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	JobID jobId = new JobID();
	TaskActions taskActions = new NoOpTaskActions();
	ResultPartitionWriter consumableNotifyingPartitionWriter = createConsumableNotifyingResultPartitionWriter(
		partitionType,
		taskActions,
		jobId,
		notifier);
	try {
		consumableNotifyingPartitionWriter.finish();
		reset(notifier);
		// partition.add() should fail
		consumableNotifyingPartitionWriter.addBufferConsumer(bufferConsumer, 0);
		Assert.fail("exception expected");
	} catch (IllegalStateException e) {
		// expected => ignored
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
			Assert.fail("bufferConsumer not recycled");
		}
		// should not have notified either
		verify(notifier, never()).notifyPartitionConsumable(
			eq(jobId),
			eq(consumableNotifyingPartitionWriter.getPartitionId()),
			eq(taskActions));
	}
}
 
Example 13
Source File: ResultPartitionTest.java    From flink with Apache License 2.0
/**
 * Tests {@link ResultPartition#addBufferConsumer} on a partition which has already been released.
 *
 * @param partitionType the result partition type to set up
 */
private void testAddOnReleasedPartition(final ResultPartitionType partitionType) throws Exception {
	BufferConsumer bufferConsumer = createFilledFinishedBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
	ResultPartitionConsumableNotifier notifier = mock(ResultPartitionConsumableNotifier.class);
	JobID jobId = new JobID();
	TaskActions taskActions = new NoOpTaskActions();
	ResultPartition partition = partitionType == ResultPartitionType.BLOCKING ?
		createPartition(partitionType, fileChannelManager) : createPartition(partitionType);
	ResultPartitionWriter consumableNotifyingPartitionWriter = ConsumableNotifyingResultPartitionWriterDecorator.decorate(
		Collections.singleton(PartitionTestUtils.createPartitionDeploymentDescriptor(partitionType)),
		new ResultPartitionWriter[] {partition},
		taskActions,
		jobId,
		notifier)[0];
	try {
		partition.release();
		// partition.add() silently drops the bufferConsumer but recycles it
		consumableNotifyingPartitionWriter.addBufferConsumer(bufferConsumer, 0);
		assertTrue(partition.isReleased());
	} finally {
		if (!bufferConsumer.isRecycled()) {
			bufferConsumer.close();
			Assert.fail("bufferConsumer not recycled");
		}
		// should not have notified either
		verify(notifier, never()).notifyPartitionConsumable(eq(jobId), eq(partition.getPartitionId()), eq(taskActions));
	}
}
 
Example 14
Source File: PipelinedSubpartitionTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests cleanup of {@link PipelinedSubpartition#release()}.
 *
 * @param createView
 * 		whether the partition should have a view attached to it (<tt>true</tt>) or not (<tt>false</tt>)
 */
private void testCleanupReleasedPartition(boolean createView) throws Exception {
	PipelinedSubpartition partition = createSubpartition();

	BufferConsumer buffer1 = createFilledBufferConsumer(4096);
	BufferConsumer buffer2 = createFilledBufferConsumer(4096);
	boolean buffer1Recycled;
	boolean buffer2Recycled;
	try {
		partition.add(buffer1);
		partition.add(buffer2);
		// create the read view first
		ResultSubpartitionView view = null;
		if (createView) {
			view = partition.createReadView(new NoOpBufferAvailablityListener());
		}

		partition.release();

		assertTrue(partition.isReleased());
		if (createView) {
			assertTrue(view.isReleased());
		}
		assertTrue(buffer1.isRecycled());
	} finally {
		buffer1Recycled = buffer1.isRecycled();
		if (!buffer1Recycled) {
			buffer1.close();
		}
		buffer2Recycled = buffer2.isRecycled();
		if (!buffer2Recycled) {
			buffer2.close();
		}
	}
	if (!buffer1Recycled) {
		Assert.fail("buffer 1 not recycled");
	}
	if (!buffer2Recycled) {
		Assert.fail("buffer 2 not recycled");
	}
	assertEquals(2, partition.getTotalNumberOfBuffers());
	assertEquals(0, partition.getTotalNumberOfBytes()); // buffer data is never consumed
}
 
Example 15
Source File: SpillableSubpartitionTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests cleanup of {@link SpillableSubpartition#release()}.
 *
 * @param spilled
 * 		whether the partition should be spilled to disk (<tt>true</tt>) or not (<tt>false</tt>,
 * 		spillable)
 * @param createView
 * 		whether the partition should have a view attached to it (<tt>true</tt>) or not (<tt>false</tt>)
 */
private void testCleanupReleasedPartition(boolean spilled, boolean createView) throws Exception {
	SpillableSubpartition partition = createSubpartition();

	BufferConsumer buffer1 = createFilledBufferConsumer(BUFFER_DATA_SIZE, BUFFER_DATA_SIZE);
	BufferConsumer buffer2 = createFilledBufferConsumer(BUFFER_DATA_SIZE, BUFFER_DATA_SIZE);
	boolean buffer1Recycled;
	boolean buffer2Recycled;
	try {
		partition.add(buffer1);
		partition.add(buffer2);
		// create the read view before spilling
		// (tests both code paths since this view may then contain the spilled view)
		ResultSubpartitionView view = null;
		if (createView) {
			partition.finish();
			view = partition.createReadView(new NoOpBufferAvailablityListener());
		}
		if (spilled) {
			// note: in case we create a view, one buffer will already reside in the view and
			//       one EndOfPartitionEvent will be added instead (so overall the number of
		//       buffers to spill is the same)
			assertEquals(2, partition.releaseMemory());
		}

		partition.release();

		assertTrue(partition.isReleased());
		if (createView) {
			assertTrue(view.isReleased());
		}
		assertTrue(buffer1.isRecycled());
	} finally {
		buffer1Recycled = buffer1.isRecycled();
		if (!buffer1Recycled) {
			buffer1.close();
		}
		buffer2Recycled = buffer2.isRecycled();
		if (!buffer2Recycled) {
			buffer2.close();
		}
	}
	if (!buffer1Recycled) {
		Assert.fail("buffer 1 not recycled");
	}
	if (!buffer2Recycled) {
		Assert.fail("buffer 2 not recycled");
	}
	// note: in case we create a view, there will be an additional EndOfPartitionEvent
	assertEquals(createView ? 3 : 2, partition.getTotalNumberOfBuffers());
	if (spilled) {
		// with a view, one buffer remains in nextBuffer and is not counted yet
		assertEquals(BUFFER_DATA_SIZE + (createView ? 4 : BUFFER_DATA_SIZE),
			partition.getTotalNumberOfBytes());
	} else {
		// non-spilled byte statistics are only updated when buffers are consumed
		assertEquals(0, partition.getTotalNumberOfBytes());
	}
}
 
Example 16
Source File: SpillableSubpartitionTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests {@link SpillableSubpartition#releaseMemory()} with a spillable partition which has a
 * writer that does not do any write, to check for correct buffer recycling.
 */
private void testReleaseOnSpillablePartitionWithSlowWriter(boolean createView) throws Exception {
	// simulate slow writer by a no-op write operation
	IOManager ioManager = new IOManagerAsyncWithNoOpBufferFileWriter();
	SpillableSubpartition partition = createSubpartition(ioManager);

	BufferConsumer buffer1 = createFilledBufferConsumer(BUFFER_DATA_SIZE, BUFFER_DATA_SIZE);
	BufferConsumer buffer2 = createFilledBufferConsumer(BUFFER_DATA_SIZE, BUFFER_DATA_SIZE);
	try {
		// we need two buffers because the view will use one of them and not release it
		partition.add(buffer1);
		partition.add(buffer2);
		assertFalse("buffer1 should not be recycled (still in the queue)", buffer1.isRecycled());
		assertFalse("buffer2 should not be recycled (still in the queue)", buffer2.isRecycled());
		assertEquals(2, partition.getTotalNumberOfBuffers());
		assertEquals(0, partition.getTotalNumberOfBytes()); // only updated when buffers are consumed or spilled

		if (createView) {
			// Create a read view
			partition.finish();
			partition.createReadView(new NoOpBufferAvailablityListener());
			assertEquals(0, partition.getTotalNumberOfBytes()); // only updated when buffers are consumed or spilled
		}

		// one instance of the buffers is placed in the view's nextBuffer and not released
		// (if there is no view, there will be no additional EndOfPartitionEvent)
		assertEquals(2, partition.releaseMemory());
		assertFalse("buffer1 should not be recycled (advertised as nextBuffer)", buffer1.isRecycled());
		assertFalse("buffer2 should not be recycled (not written yet)", buffer2.isRecycled());
	} finally {
		ioManager.shutdown();
		if (!buffer1.isRecycled()) {
			buffer1.close();
		}
		if (!buffer2.isRecycled()) {
			buffer2.close();
		}
	}
	// note: a view requires a finished partition which has an additional EndOfPartitionEvent
	assertEquals(2 + (createView ? 1 : 0), partition.getTotalNumberOfBuffers());
	// with a view, one buffer remains in nextBuffer and is not counted yet
	assertEquals(BUFFER_DATA_SIZE + (createView ? 4 : BUFFER_DATA_SIZE), partition.getTotalNumberOfBytes());
}
 
Example 17
Source File: PipelinedSubpartitionTest.java    From flink with Apache License 2.0
/**
 * Tests cleanup of {@link PipelinedSubpartition#release()}.
 *
 * @param createView
 * 		whether the partition should have a view attached to it (<tt>true</tt>) or not (<tt>false</tt>)
 */
private void testCleanupReleasedPartition(boolean createView) throws Exception {
	PipelinedSubpartition partition = createSubpartition();

	BufferConsumer buffer1 = createFilledBufferConsumer(4096);
	BufferConsumer buffer2 = createFilledBufferConsumer(4096);
	boolean buffer1Recycled;
	boolean buffer2Recycled;
	try {
		partition.add(buffer1);
		partition.add(buffer2);
		// create the read view first
		ResultSubpartitionView view = null;
		if (createView) {
			view = partition.createReadView(new NoOpBufferAvailablityListener());
		}

		partition.release();

		assertTrue(partition.isReleased());
		if (createView) {
			assertTrue(view.isReleased());
		}
		assertTrue(buffer1.isRecycled());
	} finally {
		buffer1Recycled = buffer1.isRecycled();
		if (!buffer1Recycled) {
			buffer1.close();
		}
		buffer2Recycled = buffer2.isRecycled();
		if (!buffer2Recycled) {
			buffer2.close();
		}
	}
	if (!buffer1Recycled) {
		Assert.fail("buffer 1 not recycled");
	}
	if (!buffer2Recycled) {
		Assert.fail("buffer 2 not recycled");
	}
	assertEquals(2, partition.getTotalNumberOfBuffers());
	assertEquals(0, partition.getTotalNumberOfBytes()); // buffer data is never consumed
}
 
Example 18
Source File: PipelinedSubpartitionTest.java    From flink with Apache License 2.0
/**
 * Tests cleanup of {@link PipelinedSubpartition#release()}.
 *
 * @param createView
 * 		whether the partition should have a view attached to it (<tt>true</tt>) or not (<tt>false</tt>)
 */
private void testCleanupReleasedPartition(boolean createView) throws Exception {
	PipelinedSubpartition partition = createSubpartition();

	BufferConsumer buffer1 = createFilledFinishedBufferConsumer(4096);
	BufferConsumer buffer2 = createFilledFinishedBufferConsumer(4096);
	boolean buffer1Recycled;
	boolean buffer2Recycled;
	try {
		partition.add(buffer1);
		partition.add(buffer2);
		// create the read view first
		ResultSubpartitionView view = null;
		if (createView) {
			view = partition.createReadView(new NoOpBufferAvailablityListener());
		}

		partition.release();

		assertTrue(partition.isReleased());
		if (createView) {
			assertTrue(view.isReleased());
		}
		assertTrue(buffer1.isRecycled());
	} finally {
		buffer1Recycled = buffer1.isRecycled();
		if (!buffer1Recycled) {
			buffer1.close();
		}
		buffer2Recycled = buffer2.isRecycled();
		if (!buffer2Recycled) {
			buffer2.close();
		}
	}
	if (!buffer1Recycled) {
		Assert.fail("buffer 1 not recycled");
	}
	if (!buffer2Recycled) {
		Assert.fail("buffer 2 not recycled");
	}
	assertEquals(2, partition.getTotalNumberOfBuffers());
	assertEquals(0, partition.getTotalNumberOfBytes()); // buffer data is never consumed
}