Java Code Examples for org.apache.flink.runtime.io.disk.iomanager.IOManager#shutdown()

The following examples show how to use org.apache.flink.runtime.io.disk.iomanager.IOManager#shutdown(). They are drawn from open-source projects; the source file, project, and license are noted above each example.
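
A pattern shared by all of the examples below is worth calling out: the IOManager is created at the start of the test and shut down in a finally block, so that its I/O threads are stopped and leftover temporary files are deleted even when the test fails. A minimal sketch of that lifecycle, assuming the defaults of IOManagerAsync (the class IOManagerShutdownSketch and its printed output are illustrative only, not from the examples):

import org.apache.flink.runtime.io.disk.iomanager.FileIOChannel;
import org.apache.flink.runtime.io.disk.iomanager.IOManager;
import org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync;

public class IOManagerShutdownSketch {

	public static void main(String[] args) throws Exception {
		IOManager ioManager = new IOManagerAsync();
		try {
			// channels are backed by temporary files in the IOManager's temp directories
			FileIOChannel.ID channel = ioManager.createChannel();
			System.out.println("spill channel: " + channel.getPath());
		} finally {
			// stop the asynchronous I/O threads and delete remaining temp files
			ioManager.shutdown();
		}
	}
}
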
Example 1
Source File: SpillableSubpartitionTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests {@link SpillableSubpartition#add(BufferConsumer)} with a spilled partition and a slow
 * writer whose write request never completes, checking that the buffer is not recycled
 * before the write operation is done.
 */
@Test
public void testAddOnSpilledPartitionWithSlowWriter() throws Exception {
	// simulate slow writer by a no-op write operation
	IOManager ioManager = new IOManagerAsyncWithNoOpBufferFileWriter();
	SpillableSubpartition partition = createSubpartition(ioManager);
	assertEquals(0, partition.releaseMemory());

	BufferConsumer buffer = createFilledBufferConsumer(BUFFER_DATA_SIZE, BUFFER_DATA_SIZE);
	boolean bufferRecycled;
	try {
		partition.add(buffer);
	} finally {
		ioManager.shutdown();
		bufferRecycled = buffer.isRecycled();
		if (!bufferRecycled) {
			buffer.close();
		}
	}
	if (bufferRecycled) {
		Assert.fail("buffer recycled before the write operation completed");
	}
	assertEquals(1, partition.getTotalNumberOfBuffers());
	assertEquals(BUFFER_DATA_SIZE, partition.getTotalNumberOfBytes());
}
 
Example 2
Source File: SpillableSubpartitionTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests {@link SpillableSubpartition#add(BufferConsumer)} with a spilled partition where adding the
 * write request fails with an exception.
 */
@Test
public void testAddOnSpilledPartitionWithFailingWriter() throws Exception {
	IOManager ioManager = new IOManagerAsyncWithClosedBufferFileWriter();
	SpillableSubpartition partition = createSubpartition(ioManager);
	assertEquals(0, partition.releaseMemory());

	exception.expect(IOException.class);

	BufferConsumer buffer = createFilledBufferConsumer(BUFFER_DATA_SIZE, BUFFER_DATA_SIZE);
	boolean bufferRecycled;
	try {
		partition.add(buffer);
	} finally {
		ioManager.shutdown();
		bufferRecycled = buffer.isRecycled();
		if (!bufferRecycled) {
			buffer.close();
		}
	}
	if (!bufferRecycled) {
		Assert.fail("buffer not recycled");
	}
	assertEquals(0, partition.getTotalNumberOfBuffers());
	assertEquals(0, partition.getTotalNumberOfBytes());
}
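
Examples 1 and 2 rely on two test-local IOManager subclasses, IOManagerAsyncWithNoOpBufferFileWriter and IOManagerAsyncWithClosedBufferFileWriter, whose definitions are not part of this excerpt. As a rough sketch of how the failing-writer variant could be built (an assumption about its shape, not the verbatim helper): return a BufferFileWriter that has already been closed, so that enqueuing any write request fails with an IOException.

// Sketch only: one plausible shape for the helper used in Example 2.
private static class IOManagerAsyncWithClosedBufferFileWriter extends IOManagerAsync {
	@Override
	public BufferFileWriter createBufferFileWriter(FileIOChannel.ID channelID) throws IOException {
		BufferFileWriter writer = super.createBufferFileWriter(channelID);
		writer.close(); // write requests added to a closed writer fail with an IOException
		return writer;
	}
}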
 
Example 3
Source File: HashTableTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests the case where no additional partition buffers are in use at the point when spilling
 * is triggered, verifying that overflow bucket buffers are taken into account when deciding
 * which partition to spill.
 */
@Test
public void testSpillingFreesOnlyOverflowSegments() {
	final IOManager ioMan = new IOManagerAsync();
	
	final TypeSerializer<ByteValue> serializer = ByteValueSerializer.INSTANCE;
	final TypeComparator<ByteValue> buildComparator = new ValueComparator<>(true, ByteValue.class);
	final TypeComparator<ByteValue> probeComparator = new ValueComparator<>(true, ByteValue.class);
	
	@SuppressWarnings("unchecked")
	final TypePairComparator<ByteValue, ByteValue> pairComparator = Mockito.mock(TypePairComparator.class);
	
	try {
		final int pageSize = 32*1024;
		final int numSegments = 34;

		List<MemorySegment> memory = getMemory(numSegments, pageSize);

		MutableHashTable<ByteValue, ByteValue> table = new MutableHashTable<>(
				serializer, serializer, buildComparator, probeComparator,
				pairComparator, memory, ioMan, 1, false);

		table.open(new ByteValueIterator(100000000), new ByteValueIterator(1));
		
		table.close();
		
		checkNoTempFilesRemain(ioMan);
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
	finally {
		ioMan.shutdown();
	}
}
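
Examples 3 and 6 call a getMemory(numSegments, pageSize) helper that is not shown in this excerpt. A minimal sketch of what such a helper could look like, assuming unpooled heap segments (the implementation is inferred from how the returned list is passed to the MutableHashTable constructor):

// Sketch only: allocate plain heap memory segments for the hash table.
private static List<MemorySegment> getMemory(int numSegments, int segmentSize) {
	List<MemorySegment> memory = new ArrayList<>(numSegments);
	for (int i = 0; i < numSegments; i++) {
		memory.add(MemorySegmentFactory.allocateUnpooledSegment(segmentSize));
	}
	return memory;
}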
 
Example 4
Source File: FileChannelStreamsTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testCloseAndDeleteOutputView() {
	final IOManager ioManager = new IOManagerAsync();
	try {
		MemoryManager memMan = new MemoryManager(4 * 16*1024, 1, 16*1024, MemoryType.HEAP, true);
		List<MemorySegment> memory = new ArrayList<MemorySegment>();
		memMan.allocatePages(new DummyInvokable(), memory, 4);
		
		FileIOChannel.ID channel = ioManager.createChannel();
		BlockChannelWriter<MemorySegment> writer = ioManager.createBlockChannelWriter(channel);
		
		FileChannelOutputView out = new FileChannelOutputView(writer, memMan, memory, memMan.getPageSize());
		new StringValue("Some test text").write(out);
		
		// close for the first time, make sure all memory returns
		out.close();
		assertTrue(memMan.verifyEmpty());
		
		// close again, should not cause an exception
		out.close();
		
		// delete, make sure file is removed
		out.closeAndDelete();
		assertFalse(new File(channel.getPath()).exists());
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
	finally {
		ioManager.shutdown();
	}
}
 
Example 5
Source File: FileChannelStreamsTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testCloseAndDeleteInputView() {
	final IOManager ioManager = new IOManagerAsync();
	try {
		MemoryManager memMan = new MemoryManager(4 * 16*1024, 1, 16*1024, MemoryType.HEAP, true);
		List<MemorySegment> memory = new ArrayList<MemorySegment>();
		memMan.allocatePages(new DummyInvokable(), memory, 4);
		
		FileIOChannel.ID channel = ioManager.createChannel();
		
		// add some test data
		try (FileWriter wrt = new FileWriter(channel.getPath())) {
			wrt.write("test data");
		}
		
		BlockChannelReader<MemorySegment> reader = ioManager.createBlockChannelReader(channel);
		FileChannelInputView in = new FileChannelInputView(reader, memMan, memory, 9);
		
		// read just something
		in.readInt();
		
		// close for the first time, make sure all memory returns
		in.close();
		assertTrue(memMan.verifyEmpty());
		
		// close again, should not cause an exception
		in.close();
		
		// delete, make sure file is removed
		in.closeAndDelete();
		assertFalse(new File(channel.getPath()).exists());
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
	finally {
		ioManager.shutdown();
	}
}
 
Example 6
Source File: HashTableTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that the MutableHashTable spills its partitions when creating the initial table
 * without overflow segments in the partitions, i.e. when the records are large.
 */
@Test
public void testSpillingWhenBuildingTableWithoutOverflow() throws Exception {
	final IOManager ioMan = new IOManagerAsync();

	try {
		final TypeSerializer<byte[]> serializer = BytePrimitiveArraySerializer.INSTANCE;
		final TypeComparator<byte[]> buildComparator = new BytePrimitiveArrayComparator(true);
		final TypeComparator<byte[]> probeComparator = new BytePrimitiveArrayComparator(true);

		@SuppressWarnings("unchecked") final TypePairComparator<byte[], byte[]> pairComparator =
			new GenericPairComparator<>(
				new BytePrimitiveArrayComparator(true), new BytePrimitiveArrayComparator(true));

		final int pageSize = 128;
		final int numSegments = 33;

		List<MemorySegment> memory = getMemory(numSegments, pageSize);

		MutableHashTable<byte[], byte[]> table = new MutableHashTable<byte[], byte[]>(
			serializer,
			serializer,
			buildComparator,
			probeComparator,
			pairComparator,
			memory,
			ioMan,
			1,
			false);

		int numElements = 9;

		table.open(
			new CombiningIterator<byte[]>(
				new ByteArrayIterator(numElements, 128, (byte) 0),
				new ByteArrayIterator(numElements, 128, (byte) 1)),
			new CombiningIterator<byte[]>(
				new ByteArrayIterator(1, 128, (byte) 0),
				new ByteArrayIterator(1, 128, (byte) 1)));

		while (table.nextRecord()) {
			MutableObjectIterator<byte[]> iterator = table.getBuildSideIterator();

			int counter = 0;

			while (iterator.next() != null) {
				counter++;
			}

			// check that we retrieve all our elements
			Assert.assertEquals(numElements, counter);
		}

		table.close();
	} finally {
		ioMan.shutdown();
	}
}
 
Example 7
Source File: HashTablePerformanceComparison.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testMutableHashMapPerformance() {
	final IOManager ioManager = new IOManagerAsync();
	try {
		final int NUM_MEM_PAGES = SIZE * NUM_PAIRS / PAGE_SIZE;
		
		MutableObjectIterator<IntPair> buildInput = new UniformIntPairGenerator(NUM_PAIRS, 1, false);

		MutableObjectIterator<IntPair> probeInput = new UniformIntPairGenerator(0, 1, false);
		
		MutableObjectIterator<IntPair> probeTester = new UniformIntPairGenerator(NUM_PAIRS, 1, false);
		
		MutableObjectIterator<IntPair> updater = new UniformIntPairGenerator(NUM_PAIRS, 1, false);

		MutableObjectIterator<IntPair> updateTester = new UniformIntPairGenerator(NUM_PAIRS, 1, false);
		
		long start;
		long end;
		
		long first = System.currentTimeMillis();
		
		System.out.println("Creating and filling MutableHashMap...");
		start = System.currentTimeMillis();
		MutableHashTable<IntPair, IntPair> table = new MutableHashTable<IntPair, IntPair>(
				serializer, serializer, comparator, comparator, pairComparator,
				getMemory(NUM_MEM_PAGES, PAGE_SIZE), ioManager);
		table.open(buildInput, probeInput);
		end = System.currentTimeMillis();
		System.out.println("HashMap ready. Time: " + (end-start) + " ms");
		
		System.out.println("Starting first probing run...");
		start = System.currentTimeMillis();
		IntPair compare = new IntPair();
		HashBucketIterator<IntPair, IntPair> iter;
		IntPair target = new IntPair(); 
		while(probeTester.next(compare) != null) {
			iter = table.getMatchesFor(compare);
			iter.next(target);
			assertEquals(target.getKey(), compare.getKey());
			assertEquals(target.getValue(), compare.getValue());
			assertTrue(iter.next(target) == null);
		}
		end = System.currentTimeMillis();
		System.out.println("Probing done. Time: " + (end-start) + " ms");

		System.out.println("Starting update...");
		start = System.currentTimeMillis();
		while(updater.next(compare) != null) {
			compare.setValue(compare.getValue() + 1);
			iter = table.getMatchesFor(compare);
			iter.next(target);
			iter.writeBack(compare);
			//assertFalse(iter.next(target));
		}
		end = System.currentTimeMillis();
		System.out.println("Update done. Time: " + (end-start) + " ms");
		
		System.out.println("Starting second probing run...");
		start = System.currentTimeMillis();
		while(updateTester.next(compare) != null) {
			compare.setValue(compare.getValue() + 1);
			iter = table.getMatchesFor(compare);
			iter.next(target);
			assertEquals(target.getKey(), compare.getKey());
			assertEquals(target.getValue(), compare.getValue());
			assertTrue(iter.next(target) == null);
		}
		end = System.currentTimeMillis();
		System.out.println("Probing done. Time: " + (end-start) + " ms");
		
		table.close();
		
		end = System.currentTimeMillis();
		System.out.println("Overall time: " + (end-first) + " ms");
		
		assertEquals("Memory lost", NUM_MEM_PAGES, table.getFreedMemory().size());
	}
	catch (Exception e) {
		e.printStackTrace();
		fail("Error: " + e.getMessage());
	} finally {
		ioManager.shutdown();
	}
}
 
Example 8
Source File: SpillableSubpartitionTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests {@link SpillableSubpartition#releaseMemory()} with a spillable partition whose writer
 * does not perform any writes, checking for correct buffer recycling.
 */
private void testReleaseOnSpillablePartitionWithSlowWriter(boolean createView) throws Exception {
	// simulate slow writer by a no-op write operation
	IOManager ioManager = new IOManagerAsyncWithNoOpBufferFileWriter();
	SpillableSubpartition partition = createSubpartition(ioManager);

	BufferConsumer buffer1 = createFilledBufferConsumer(BUFFER_DATA_SIZE, BUFFER_DATA_SIZE);
	BufferConsumer buffer2 = createFilledBufferConsumer(BUFFER_DATA_SIZE, BUFFER_DATA_SIZE);
	try {
		// we need two buffers because the view will use one of them and not release it
		partition.add(buffer1);
		partition.add(buffer2);
		assertFalse("buffer1 should not be recycled (still in the queue)", buffer1.isRecycled());
		assertFalse("buffer2 should not be recycled (still in the queue)", buffer2.isRecycled());
		assertEquals(2, partition.getTotalNumberOfBuffers());
		assertEquals(0, partition.getTotalNumberOfBytes()); // only updated when buffers are consumed or spilled

		if (createView) {
			// Create a read view
			partition.finish();
			partition.createReadView(new NoOpBufferAvailablityListener());
			assertEquals(0, partition.getTotalNumberOfBytes()); // only updated when buffers are consumed or spilled
		}

		// one instance of the buffers is placed in the view's nextBuffer and not released
		// (if there is no view, there will be no additional EndOfPartitionEvent)
		assertEquals(2, partition.releaseMemory());
		assertFalse("buffer1 should not be recycled (advertised as nextBuffer)", buffer1.isRecycled());
		assertFalse("buffer2 should not be recycled (not written yet)", buffer2.isRecycled());
	} finally {
		ioManager.shutdown();
		if (!buffer1.isRecycled()) {
			buffer1.close();
		}
		if (!buffer2.isRecycled()) {
			buffer2.close();
		}
	}
	// note: a view requires a finished partition which has an additional EndOfPartitionEvent
	assertEquals(2 + (createView ? 1 : 0), partition.getTotalNumberOfBuffers());
	// with a view, one buffer remains in nextBuffer and is not counted yet
	assertEquals(BUFFER_DATA_SIZE + (createView ? 4 : BUFFER_DATA_SIZE), partition.getTotalNumberOfBytes());
}