Java Code Examples for org.apache.flink.runtime.io.disk.iomanager.FileIOChannel#ID

The following examples show how to use org.apache.flink.runtime.io.disk.iomanager.FileIOChannel#ID. The source project and license are noted above each example.
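Before the project examples, here is a minimal, self-contained sketch of the typical FileIOChannel.ID lifecycle, distilled from the examples below: create a channel with the IOManager, write blocks through a BlockChannelWriter, read them back through a BlockChannelReader, and delete the backing file. The class name, the segment size, and the use of MemorySegmentFactory.allocateUnpooledSegment are illustrative choices for this sketch, not taken from any of the examples.

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;
import org.apache.flink.runtime.io.disk.iomanager.BlockChannelReader;
import org.apache.flink.runtime.io.disk.iomanager.BlockChannelWriter;
import org.apache.flink.runtime.io.disk.iomanager.FileIOChannel;
import org.apache.flink.runtime.io.disk.iomanager.IOManager;
import org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync;

public class FileIOChannelIdRoundTrip {

	public static void main(String[] args) throws Exception {
		final int segmentSize = 32 * 1024; // arbitrary page size for this sketch

		try (IOManager ioManager = new IOManagerAsync()) {
			// a FileIOChannel.ID names a temporary file in one of the IOManager's spill directories
			FileIOChannel.ID channel = ioManager.createChannel();

			// write a single memory segment as one block
			BlockChannelWriter<MemorySegment> writer = ioManager.createBlockChannelWriter(channel);
			writer.writeBlock(MemorySegmentFactory.allocateUnpooledSegment(segmentSize));
			writer.close();

			// read the block back; reads are asynchronous, so take the segment from the return queue
			BlockChannelReader<MemorySegment> reader = ioManager.createBlockChannelReader(channel);
			reader.readBlock(MemorySegmentFactory.allocateUnpooledSegment(segmentSize));
			MemorySegment readBack = reader.getReturnQueue().take();

			// close the reader and delete the backing file
			reader.closeAndDelete();
			System.out.println("read back " + readBack.size() + " bytes from " + channel.getPath());
		}
	}
}

As Examples 5 and 7 show, newer Flink versions let you close the IOManager via try-with-resources, while older versions call shutdown() in a finally block.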
Example 1
Source File: LongHashPartition.java    From flink with Apache License 2.0
int spillPartition(IOManager ioAccess, FileIOChannel.ID targetChannel,
		LinkedBlockingQueue<MemorySegment> bufferReturnQueue) throws IOException {
	// sanity checks
	if (!isInMemory()) {
		throw new RuntimeException("Bug in Hybrid Hash Join: " +
				"Request to spill a partition that has already been spilled.");
	}
	if (getNumOccupiedMemorySegments() < 2) {
		throw new RuntimeException("Bug in Hybrid Hash Join: " +
				"Request to spill a partition with less than two buffers.");
	}

	// create the channel block writer and spill the current buffers;
	// the build side's current block is kept, as it is most likely not yet full;
	// we return the number of blocks that become available
	this.buildSideChannel = FileChannelUtil.createBlockChannelWriter(
			ioAccess,
			targetChannel,
			bufferReturnQueue,
			longTable.compressionEnable(),
			longTable.compressionCodecFactory(),
			longTable.compressionBlockSize(),
			segmentSize);
	return this.buildSideWriteBuffer.spill(this.buildSideChannel);
}
 
Example 2
Source File: ReOpenableHashPartition.java    From Flink-CEPplus with Apache License 2.0
/**
 * Spills this partition to disk. This method is invoked once after the initial open() method.
 *
 * @return The number of memory segments added to the writeBehindBuffers.
 */
int spillInMemoryPartition(FileIOChannel.ID targetChannel, IOManager ioManager, LinkedBlockingQueue<MemorySegment> writeBehindBuffers) throws IOException {
	this.initialPartitionBuffersCount = partitionBuffers.length; // for ReOpenableHashMap
	this.initialBuildSideChannel = targetChannel;
	
	initialBuildSideWriter = ioManager.createBlockChannelWriter(targetChannel, writeBehindBuffers);
	
	final int numSegments = this.partitionBuffers.length;
	for (int i = 0; i < numSegments; i++) {
		initialBuildSideWriter.writeBlock(partitionBuffers[i]);
	}
	this.partitionBuffers = null;
	initialBuildSideWriter.close();
	// the partition's buffers are now in the writeBehindBuffers; we propagate this count back
	return numSegments;
}
 
Example 3
Source File: BaseHybridHashTable.java    From flink with Apache License 2.0
protected List<MemorySegment> readAllBuffers(FileIOChannel.ID id, int blockCount) throws IOException {
	// we are guaranteed to stay in memory
	ensureNumBuffersReturned(blockCount);

	LinkedBlockingQueue<MemorySegment> retSegments = new LinkedBlockingQueue<>();
	BlockChannelReader<MemorySegment> reader = FileChannelUtil.createBlockChannelReader(
			ioManager, id, retSegments,
			compressionEnable, compressionCodecFactory, compressionBlockSize, segmentSize);
	for (int i = 0; i < blockCount; i++) {
		reader.readBlock(availableMemory.remove(availableMemory.size() - 1));
	}
	reader.closeAndDelete();

	final List<MemorySegment> buffers = new ArrayList<>();
	retSegments.drainTo(buffers);
	return buffers;
}
 
Example 4
Source File: HashPartition.java    From flink with Apache License 2.0
/**
 * Spills this partition to disk and sets it up such that it continues spilling records that are added to
 * it. The spilling process must free at least one buffer, either in the partition's record buffers, or in
 * the memory segments for overflow buckets.
 * The partition immediately takes back one buffer to use it for further spilling.
 * 
 * @param target The list to which memory segments from overflow buckets are added.
 * @param ioAccess The I/O manager to be used to create a writer to disk.
 * @param targetChannel The id of the target channel for this partition.
 * @return The number of buffers that were freed by spilling this partition.
 * @throws IOException Thrown, if the writing failed.
 */
public int spillPartition(List<MemorySegment> target, IOManager ioAccess, FileIOChannel.ID targetChannel,
		LinkedBlockingQueue<MemorySegment> bufferReturnQueue) throws IOException {
	// sanity checks
	if (!isInMemory()) {
		throw new RuntimeException("Bug in Hybrid Hash Join: " +
				"Request to spill a partition that has already been spilled.");
	}
	if (getNumOccupiedMemorySegments() < 2) {
		throw new RuntimeException("Bug in Hybrid Hash Join: " +
			"Request to spill a partition with less than two buffers.");
	}
	
	// return the memory from the overflow segments
	for (int i = 0; i < this.numOverflowSegments; i++) {
		target.add(this.overflowSegments[i]);
	}
	this.overflowSegments = null;
	this.numOverflowSegments = 0;
	this.nextOverflowBucket = 0;
	
	// create the channel block writer and spill the current buffers;
	// the build side's current block is kept, as it is most likely not yet full;
	// we return the number of blocks that become available
	this.buildSideChannel = ioAccess.createBlockChannelWriter(targetChannel, bufferReturnQueue);
	return this.buildSideWriteBuffer.spill(this.buildSideChannel);
}
 
Example 5
Source File: FileChannelStreamsTest.java    From flink with Apache License 2.0
@Test
public void testCloseAndDeleteInputView() {
	try (IOManager ioManager = new IOManagerAsync()) {
		MemoryManager memMan = MemoryManagerBuilder.newBuilder().build();
		List<MemorySegment> memory = new ArrayList<MemorySegment>();
		memMan.allocatePages(new DummyInvokable(), memory, 4);
		
		FileIOChannel.ID channel = ioManager.createChannel();
		
		// add some test data
		try (FileWriter wrt = new FileWriter(channel.getPath())) {
			wrt.write("test data");
		}
		
		BlockChannelReader<MemorySegment> reader = ioManager.createBlockChannelReader(channel);
		FileChannelInputView in = new FileChannelInputView(reader, memMan, memory, 9);
		
		// read just something
		in.readInt();
		
		// close for the first time, make sure all memory returns
		in.close();
		assertTrue(memMan.verifyEmpty());
		
		// close again, should not cause an exception
		in.close();
		
		// delete, make sure file is removed
		in.closeAndDelete();
		assertFalse(new File(channel.getPath()).exists());
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 6
Source File: CompressedHeaderlessChannelTest.java    From flink with Apache License 2.0
@Test
public void testCompressedView() throws IOException {
	for (int testTime = 0; testTime < 10; testTime++) {
		int testRounds = new Random().nextInt(20000);
		FileIOChannel.ID channel = ioManager.createChannel();
		BufferFileWriter writer = this.ioManager.createBufferFileWriter(channel);
		CompressedHeaderlessChannelWriterOutputView outputView =
				new CompressedHeaderlessChannelWriterOutputView(
						writer,
						compressionFactory,
						BUFFER_SIZE
				);

		for (int i = 0; i < testRounds; i++) {
			outputView.writeInt(i);
		}
		outputView.close();
		int blockCount = outputView.getBlockCount();

		CompressedHeaderlessChannelReaderInputView inputView =
				new CompressedHeaderlessChannelReaderInputView(
						channel,
						ioManager,
						compressionFactory,
						BUFFER_SIZE,
						blockCount
				);

		for (int i = 0; i < testRounds; i++) {
			assertEquals(i, inputView.readInt());
		}
		inputView.close();
	}
}
 
Example 7
Source File: FileChannelStreamsTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testCloseAndDeleteInputView() {
	final IOManager ioManager = new IOManagerAsync();
	try {
		MemoryManager memMan = new MemoryManager(4 * 16 * 1024, 1, 16 * 1024, MemoryType.HEAP, true);
		List<MemorySegment> memory = new ArrayList<MemorySegment>();
		memMan.allocatePages(new DummyInvokable(), memory, 4);
		
		FileIOChannel.ID channel = ioManager.createChannel();
		
		// add some test data
		try (FileWriter wrt = new FileWriter(channel.getPath())) {
			wrt.write("test data");
		}
		
		BlockChannelReader<MemorySegment> reader = ioManager.createBlockChannelReader(channel);
		FileChannelInputView in = new FileChannelInputView(reader, memMan, memory, 9);
		
		// read just something
		in.readInt();
		
		// close for the first time, make sure all memory returns
		in.close();
		assertTrue(memMan.verifyEmpty());
		
		// close again, should not cause an exception
		in.close();
		
		// delete, make sure file is removed
		in.closeAndDelete();
		assertFalse(new File(channel.getPath()).exists());
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
	finally {
		ioManager.shutdown();
	}
}
 
Example 8
Source File: ResettableExternalBuffer.java    From flink with Apache License 2.0
private void spill() throws IOException {
	FileIOChannel.ID channel = ioManager.createChannel();

	final BlockChannelWriter<MemorySegment> writer = this.ioManager.createBlockChannelWriter(channel);
	int numRecordBuffers = inMemoryBuffer.getNumRecordBuffers();
	ArrayList<MemorySegment> segments = inMemoryBuffer.getRecordBufferSegments();
	try {
		// spill the in-memory buffer segments directly (zero-copy)
		for (int i = 0; i < numRecordBuffers; i++) {
			writer.writeBlock(segments.get(i));
		}
		LOG.info("Spilling the reset buffer data ({} bytes).", writer.getSize());
		writer.close();
	} catch (IOException e) {
		writer.closeAndDelete();
		throw e;
	}

	spillSize += numRecordBuffers * segmentSize;
	spilledChannelIDs.add(new ChannelWithMeta(
		channel,
		numRecordBuffers,
		inMemoryBuffer.getNumBytesInLastBuffer()));
	this.spilledChannelRowOffsets.add(numRows);

	inMemoryBuffer.reset();
}
 
Example 9
Source File: SpillableSubpartitionTest.java    From Flink-CEPplus with Apache License 2.0
@Override
public BufferFileWriter createBufferFileWriter(FileIOChannel.ID channelID) throws IOException {
	blockLatch.countDown();
	try {
		doneLatch.await();
	} catch (InterruptedException e) {
		throw new IOException("Blocking operation was interrupted.", e);
	}

	return super.createBufferFileWriter(channelID);
}
 
Example 10
Source File: FixedLengthRecordSorterTest.java    From flink with Apache License 2.0
@Test
public void testFlushPartialMemoryPage() throws Exception {
	// insert IntPairs that fill exactly two memory pages (8 bytes per record)
	final int NUM_RECORDS = 2 * MEMORY_PAGE_SIZE / 8;
	final List<MemorySegment> memory = this.memoryManager.allocatePages(new DummyInvokable(), 3);

	FixedLengthRecordSorter<IntPair> sorter = newSortBuffer(memory);
	UniformIntPairGenerator generator = new UniformIntPairGenerator(Integer.MAX_VALUE, 1, false);

	// write the records
	IntPair record = new IntPair();
	int num = -1;
	do {
		generator.next(record);
		num++;
	}
	while (sorter.write(record) && num < NUM_RECORDS);

	FileIOChannel.ID channelID = this.ioManager.createChannelEnumerator().next();
	BlockChannelWriter<MemorySegment> blockChannelWriter = this.ioManager.createBlockChannelWriter(channelID);
	final List<MemorySegment> writeBuffer = this.memoryManager.allocatePages(new DummyInvokable(), 3);
	ChannelWriterOutputView outputView = new ChannelWriterOutputView(blockChannelWriter, writeBuffer, writeBuffer.get(0).size());

	sorter.writeToOutput(outputView, 1, NUM_RECORDS - 1);

	this.memoryManager.release(outputView.close());

	BlockChannelReader<MemorySegment> blockChannelReader = this.ioManager.createBlockChannelReader(channelID);
	final List<MemorySegment> readBuffer = this.memoryManager.allocatePages(new DummyInvokable(), 3);
	ChannelReaderInputView readerInputView = new ChannelReaderInputView(blockChannelReader, readBuffer, false);
	final List<MemorySegment> dataBuffer = this.memoryManager.allocatePages(new DummyInvokable(), 3);
	ChannelReaderInputViewIterator<IntPair> iterator = new ChannelReaderInputViewIterator<>(readerInputView, dataBuffer, this.serializer);

	record = iterator.next(record);
	int i = 1;
	while (record != null) {
		Assert.assertEquals(i, record.getKey());
		record = iterator.next(record);
		i++;
	}

	Assert.assertEquals(NUM_RECORDS, i);

	this.memoryManager.release(dataBuffer);
	// release the memory occupied by the buffers
	sorter.dispose();
	this.memoryManager.release(memory);
}
 
Example 11
Source File: SpillChannelManager.java    From flink with Apache License 2.0
/**
 * Adds a new file channel.
 */
public synchronized void addChannel(FileIOChannel.ID id) {
	checkArgument(!closed);
	channels.add(id);
}
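
Taken together with the removeChannel variant shown in Example 14, a typical usage of SpillChannelManager might look like the following sketch. It assumes a no-argument constructor and a close() method that disposes of any channels still registered; neither appears in these examples, so treat them as assumptions to verify against your Flink version.

// a minimal sketch, assuming SpillChannelManager() and close() exist as described above
SpillChannelManager spillManager = new SpillChannelManager();
FileIOChannel.ID channel = ioManager.createChannel();

spillManager.addChannel(channel);    // track the spill channel for coordinated cleanup
// ... spill to the channel and read it back ...
spillManager.removeChannel(channel); // channel fully consumed; stop tracking it

spillManager.close();                // assumed to clean up any channels still registered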
 
Example 12
Source File: FileChannelStreamsITCase.java    From flink with Apache License 2.0
@Test
public void testWriteReadSmallRecords() {
	try {
		List<MemorySegment> memory = memManager.allocatePages(new DummyInvokable(), NUM_MEMORY_SEGMENTS);
		
		final PairGenerator generator = new PairGenerator(SEED, KEY_MAX, VALUE_SHORT_LENGTH, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
		final FileIOChannel.ID channel = ioManager.createChannel();
		
		// create the writer output view
		final BlockChannelWriter<MemorySegment> writer = ioManager.createBlockChannelWriter(channel);
		final FileChannelOutputView outView = new FileChannelOutputView(writer, memManager, memory, MEMORY_PAGE_SIZE);
		
		// write a number of pairs
		Pair pair = new Pair();
		for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
			generator.next(pair);
			pair.write(outView);
		}
		outView.close();
		
		// create the reader input view
		List<MemorySegment> readMemory = memManager.allocatePages(new DummyInvokable(), NUM_MEMORY_SEGMENTS);
		
		final BlockChannelReader<MemorySegment> reader = ioManager.createBlockChannelReader(channel);
		final FileChannelInputView inView = new FileChannelInputView(reader, memManager, readMemory, outView.getBytesInLatestSegment());
		generator.reset();
		
		// read and re-generate all records and compare them
		Pair readPair = new Pair();
		for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
			generator.next(pair);
			readPair.read(inView);
			assertEquals("The re-generated and the read record do not match.", pair, readPair);
		}
		
		inView.close();
		reader.deleteChannel();
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 13
Source File: FileChannelStreamsITCase.java    From flink with Apache License 2.0
@Test
public void testWriteReadOneBufferOnly() {
	try {
		final List<MemorySegment> memory = memManager.allocatePages(new DummyInvokable(), 1);
		
		final PairGenerator generator = new PairGenerator(SEED, KEY_MAX, VALUE_SHORT_LENGTH, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
		final FileIOChannel.ID channel = this.ioManager.createChannel();
		
		// create the writer output view
		final BlockChannelWriter<MemorySegment> writer = this.ioManager.createBlockChannelWriter(channel);
		final FileChannelOutputView outView = new FileChannelOutputView(writer, memManager, memory, MEMORY_PAGE_SIZE);
		
		// write a number of pairs
		Pair pair = new Pair();
		for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
			generator.next(pair);
			pair.write(outView);
		}
		outView.close();
		
		// create the reader input view
		List<MemorySegment> readMemory = memManager.allocatePages(new DummyInvokable(), 1);
		
		final BlockChannelReader<MemorySegment> reader = ioManager.createBlockChannelReader(channel);
		final FileChannelInputView inView = new FileChannelInputView(reader, memManager, readMemory, outView.getBytesInLatestSegment());
		generator.reset();
		
		// read and re-generate all records and compare them
		Pair readPair = new Pair();
		for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
			generator.next(pair);
			readPair.read(inView);
			assertEquals("The re-generated and the read record do not match.", pair, readPair);
		}
		
		inView.close();
		reader.deleteChannel();
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 14
Source File: SpillChannelManager.java    From flink with Apache License 2.0
public synchronized void removeChannel(FileIOChannel.ID id) {
	checkArgument(!closed);
	channels.remove(id);
}
 
Example 15
Source File: ChannelViewsTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testWriteReadSmallRecords() throws Exception {
	final TestData.TupleGenerator generator = new TestData.TupleGenerator(SEED, KEY_MAX, VALUE_SHORT_LENGTH, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
	final FileIOChannel.ID channel = this.ioManager.createChannel();
	final TypeSerializer<Tuple2<Integer, String>> serializer = TestData.getIntStringTupleSerializer();
	
	// create the writer output view
	List<MemorySegment> memory = this.memoryManager.allocatePages(this.parentTask, NUM_MEMORY_SEGMENTS);
	final BlockChannelWriter<MemorySegment> writer = this.ioManager.createBlockChannelWriter(channel);
	final ChannelWriterOutputView outView = new ChannelWriterOutputView(writer, memory, MEMORY_PAGE_SIZE);
	// write a number of pairs
	final Tuple2<Integer, String> rec = new Tuple2<>();
	for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
		generator.next(rec);
		serializer.serialize(rec, outView);
	}
	this.memoryManager.release(outView.close());
	
	// create the reader input view
	memory = this.memoryManager.allocatePages(this.parentTask, NUM_MEMORY_SEGMENTS);
	final BlockChannelReader<MemorySegment> reader = this.ioManager.createBlockChannelReader(channel);
	final ChannelReaderInputView inView = new ChannelReaderInputView(reader, memory, outView.getBlockCount(), true);
	generator.reset();
	
	// read and re-generate all records and compare them
	final Tuple2<Integer, String> readRec = new Tuple2<>();
	for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
		generator.next(rec);
		serializer.deserialize(readRec, inView);
		
		int k1 = rec.f0;
		String v1 = rec.f1;
		
		int k2 = readRec.f0;
		String v2 = readRec.f1;
		
		Assert.assertTrue("The re-generated and the read record do not match.", k1 == k2 && v1.equals(v2));
	}
	
	this.memoryManager.release(inView.close());
	reader.deleteChannel();
}
 
Example 16
Source File: FileChannelStreamsITCase.java    From flink with Apache License 2.0
@Test
public void testReadTooMany() {
	try {
		final List<MemorySegment> memory = memManager.allocatePages(new DummyInvokable(), NUM_MEMORY_SEGMENTS);
		
		final PairGenerator generator = new PairGenerator(SEED, KEY_MAX, VALUE_SHORT_LENGTH, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
		final FileIOChannel.ID channel = this.ioManager.createChannel();
		
		// create the writer output view
		final BlockChannelWriter<MemorySegment> writer = this.ioManager.createBlockChannelWriter(channel);
		final FileChannelOutputView outView = new FileChannelOutputView(writer, memManager, memory, MEMORY_PAGE_SIZE);

		// write a number of pairs
		Pair pair = new Pair();
		for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
			generator.next(pair);
			pair.write(outView);
		}
		outView.close();

		// create the reader input view
		List<MemorySegment> readMemory = memManager.allocatePages(new DummyInvokable(), NUM_MEMORY_SEGMENTS);
		
		final BlockChannelReader<MemorySegment> reader = ioManager.createBlockChannelReader(channel);
		final FileChannelInputView inView = new FileChannelInputView(reader, memManager, readMemory, outView.getBytesInLatestSegment());
		generator.reset();

		// read and re-generate all records and compare them
		try {
			Pair readPair = new Pair();
			for (int i = 0; i < NUM_PAIRS_SHORT + 1; i++) {
				generator.next(pair);
				readPair.read(inView);
				assertEquals("The re-generated and the read record do not match.", pair, readPair);
			}
			fail("Expected an EOFException which did not occur.");
		}
		catch (EOFException eofex) {
			// expected
		}
		
		inView.close();
		reader.deleteChannel();
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 17
Source File: FileChannelStreamsITCase.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testWriteReadNotAll() {
	try {
		final List<MemorySegment> memory = memManager.allocatePages(new DummyInvokable(), NUM_MEMORY_SEGMENTS);
		
		final PairGenerator generator = new PairGenerator(SEED, KEY_MAX, VALUE_SHORT_LENGTH, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
		final FileIOChannel.ID channel = this.ioManager.createChannel();
		
		// create the writer output view
		final BlockChannelWriter<MemorySegment> writer = this.ioManager.createBlockChannelWriter(channel);
		final FileChannelOutputView outView = new FileChannelOutputView(writer, memManager, memory, MEMORY_PAGE_SIZE);
		
		// write a number of pairs
		Pair pair = new Pair();
		for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
			generator.next(pair);
			pair.write(outView);
		}
		outView.close();
		
		// create the reader input view
		List<MemorySegment> readMemory = memManager.allocatePages(new DummyInvokable(), NUM_MEMORY_SEGMENTS);
		
		final BlockChannelReader<MemorySegment> reader = ioManager.createBlockChannelReader(channel);
		final FileChannelInputView inView = new FileChannelInputView(reader, memManager, readMemory, outView.getBytesInLatestSegment());
		generator.reset();
		
		// read and re-generate all records and compare them
		Pair readPair = new Pair();
		for (int i = 0; i < NUM_PAIRS_SHORT / 2; i++) {
			generator.next(pair);
			readPair.read(inView);
			assertEquals("The re-generated and the read record do not match.", pair, readPair);
		}
		
		inView.close();
		reader.deleteChannel();
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 18
Source File: ChannelViewsTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testWriteReadOneBufferOnly() throws Exception {
	final TestData.TupleGenerator generator = new TestData.TupleGenerator(SEED, KEY_MAX, VALUE_SHORT_LENGTH, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
	final FileIOChannel.ID channel = this.ioManager.createChannel();
	final TypeSerializer<Tuple2<Integer, String>> serializer = TestData.getIntStringTupleSerializer();
	
	// create the writer output view
	List<MemorySegment> memory = this.memoryManager.allocatePages(this.parentTask, 1);
	final BlockChannelWriter<MemorySegment> writer = this.ioManager.createBlockChannelWriter(channel);
	final ChannelWriterOutputView outView = new ChannelWriterOutputView(writer, memory, MEMORY_PAGE_SIZE);
	
	// write a number of pairs
	final Tuple2<Integer, String> rec = new Tuple2<>();
	for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
		generator.next(rec);
		serializer.serialize(rec, outView);
	}
	this.memoryManager.release(outView.close());
	
	// create the reader input view
	memory = this.memoryManager.allocatePages(this.parentTask, 1);
	final BlockChannelReader<MemorySegment> reader = this.ioManager.createBlockChannelReader(channel);
	final ChannelReaderInputView inView = new ChannelReaderInputView(reader, memory, outView.getBlockCount(), true);
	generator.reset();
	
	// read and re-generate all records and compare them
	final Tuple2<Integer, String> readRec = new Tuple2<>();
	for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
		generator.next(rec);
		serializer.deserialize(readRec, inView);
		
		int k1 = rec.f0;
		String v1 = rec.f1;
		
		int k2 = readRec.f0;
		String v2 = readRec.f1;
		
		Assert.assertTrue("The re-generated and the read record do not match.", k1 == k2 && v1.equals(v2));
	}
	
	this.memoryManager.release(inView.close());
	reader.deleteChannel();
}
 
Example 19
Source File: ChannelViewsTest.java    From flink with Apache License 2.0
@Test
public void testReadWithoutKnownBlockCount() throws Exception {
	final TestData.TupleGenerator generator = new TestData.TupleGenerator(SEED, KEY_MAX, VALUE_SHORT_LENGTH, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
	final FileIOChannel.ID channel = this.ioManager.createChannel();
	final TypeSerializer<Tuple2<Integer, String>> serializer = TestData.getIntStringTupleSerializer();
	
	// create the writer output view
	List<MemorySegment> memory = this.memoryManager.allocatePages(this.parentTask, NUM_MEMORY_SEGMENTS);
	final BlockChannelWriter<MemorySegment> writer = this.ioManager.createBlockChannelWriter(channel);
	final ChannelWriterOutputView outView = new ChannelWriterOutputView(writer, memory, MEMORY_PAGE_SIZE);
	
	// write a number of pairs
	final Tuple2<Integer, String> rec = new Tuple2<>();
	for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
		generator.next(rec);
		serializer.serialize(rec, outView);
	}
	this.memoryManager.release(outView.close());
	
	// create the reader input view
	memory = this.memoryManager.allocatePages(this.parentTask, NUM_MEMORY_SEGMENTS);
	final BlockChannelReader<MemorySegment> reader = this.ioManager.createBlockChannelReader(channel);
	final ChannelReaderInputView inView = new ChannelReaderInputView(reader, memory, true);
	generator.reset();
	
	// read and re-generate all records and compare them
	final Tuple2<Integer, String> readRec = new Tuple2<>();
	for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
		generator.next(rec);
		serializer.deserialize(readRec, inView);
		
		int k1 = rec.f0;
		String v1 = rec.f1;
		
		int k2 = readRec.f0;
		String v2 = readRec.f1;
		
		Assert.assertTrue("The re-generated and the read record do not match.", k1 == k2 && v1.equals(v2));
	}
	
	this.memoryManager.release(inView.close());
	reader.deleteChannel();
}