org.apache.flink.runtime.io.disk.iomanager.BlockChannelWriter Java Examples

The following examples show how to use org.apache.flink.runtime.io.disk.iomanager.BlockChannelWriter. You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage in the sidebar.
Example #1
Source File: FileChannelUtil.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Creates a block channel writer for the given channel, optionally wrapping it
 * with block compression.
 *
 * @param ioManager the I/O manager that owns the underlying channel
 * @param channel id of the target file channel
 * @param bufferReturnQueue queue to which written segments are returned
 * @param compressionEnable whether written blocks are compressed
 * @param compressionCodecFactory codec factory (only used when compression is enabled)
 * @param compressionBlockSize compressor block size (only used when compression is enabled)
 * @param segmentSize size of the memory segments being written
 * @return a plain or compressing {@link BlockChannelWriter}
 * @throws IOException if the underlying channel writer cannot be created
 */
public static BlockChannelWriter<MemorySegment> createBlockChannelWriter(
		IOManager ioManager,
		FileIOChannel.ID channel,
		LinkedBlockingQueue<MemorySegment> bufferReturnQueue,
		boolean compressionEnable,
		BlockCompressionFactory compressionCodecFactory,
		int compressionBlockSize,
		int segmentSize) throws IOException {
	if (!compressionEnable) {
		// No compression requested: delegate directly to the IOManager.
		return ioManager.createBlockChannelWriter(channel, bufferReturnQueue);
	}
	return new CompressedBlockChannelWriter(
			ioManager,
			channel,
			bufferReturnQueue,
			compressionCodecFactory,
			compressionBlockSize,
			segmentSize);
}
 
Example #2
Source File: FileChannelUtil.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Creates a headerless output view over the given channel, optionally compressed.
 *
 * @param ioManager the I/O manager that creates the underlying writers
 * @param channel id of the target file channel
 * @param compressionEnable whether the written data is compressed per block
 * @param compressionCodecFactory codec factory (only used when compression is enabled)
 * @param compressionBlockSize compressor block size (only used when compression is enabled)
 * @param segmentSize size of the write-buffer segments for the uncompressed path
 * @return an {@link AbstractChannelWriterOutputView} over the channel
 * @throws IOException if creating the channel writer fails
 */
public static AbstractChannelWriterOutputView createOutputView(
		IOManager ioManager,
		FileIOChannel.ID channel,
		boolean compressionEnable,
		BlockCompressionFactory compressionCodecFactory,
		int compressionBlockSize,
		int segmentSize) throws IOException {
	if (!compressionEnable) {
		// Plain path: block writer with two freshly allocated unpooled write buffers.
		BlockChannelWriter<MemorySegment> plainWriter = ioManager.createBlockChannelWriter(channel);
		return new HeaderlessChannelWriterOutputView(
				plainWriter,
				Arrays.asList(
						allocateUnpooledSegment(segmentSize),
						allocateUnpooledSegment(segmentSize)),
				segmentSize);
	}
	// Compressed path: data is funneled through a buffer-file writer and compressed per block.
	return new CompressedHeaderlessChannelWriterOutputView(
			ioManager.createBufferFileWriter(channel),
			compressionCodecFactory,
			compressionBlockSize);
}
 
Example #3
Source File: FileChannelOutputView.java    From Flink-CEPplus with Apache License 2.0 6 votes vote down vote up
/**
 * Creates an output view that writes its pages to the given block channel writer,
 * cycling the provided memory segments as write buffers.
 *
 * @param writer the channel writer receiving full pages; must not be closed
 * @param memManager the memory manager that owns the given segments
 * @param memory the segments used as write buffers
 * @param segmentSize the size of each memory segment
 * @throws IOException if advancing to the first page fails
 */
public FileChannelOutputView(BlockChannelWriter<MemorySegment> writer, MemoryManager memManager, List<MemorySegment> memory, int segmentSize) throws IOException {
	super(segmentSize, 0);
	
	// Validate arguments up front, in the same order as before.
	checkNotNull(writer);
	checkNotNull(memManager);
	checkNotNull(memory);
	checkArgument(!writer.isClosed());
	
	this.writer = writer;
	this.memManager = memManager;
	this.memory = memory;
	
	// Seed the writer's return queue so advance() can obtain write buffers.
	writer.getReturnQueue().addAll(memory);
	
	// move to the first page
	advance();
}
 
Example #4
Source File: FileChannelOutputView.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Creates an output view that writes its pages to the given block channel writer,
 * using the provided memory segments as write buffers.
 *
 * @param writer the channel writer receiving the full pages; must not be closed
 * @param memManager the memory manager that owns {@code memory}
 *                   (presumably used to release the segments later — confirm in class)
 * @param memory the segments cycled between this view and the writer
 * @param segmentSize the size of each memory segment
 * @throws IOException if advancing to the first page fails
 */
public FileChannelOutputView(BlockChannelWriter<MemorySegment> writer, MemoryManager memManager, List<MemorySegment> memory, int segmentSize) throws IOException {
	super(segmentSize, 0);
	
	// validate all arguments before touching any state
	checkNotNull(writer);
	checkNotNull(memManager);
	checkNotNull(memory);
	checkArgument(!writer.isClosed());
	
	this.writer = writer;
	this.memManager = memManager;
	this.memory = memory;
	
	
	// seed the writer's return queue so advance() can obtain write buffers
	for (MemorySegment next : memory) {
		writer.getReturnQueue().add(next);
	}
	
	// move to the first page
	advance();
}
 
Example #5
Source File: FileChannelOutputView.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Creates an output view whose pages are handed to the given block channel writer,
 * with the provided memory segments serving as the rotating write buffers.
 *
 * @param writer the open channel writer that full pages are handed to
 * @param memManager the memory manager owning the given segments
 * @param memory the write-buffer segments
 * @param segmentSize the size of each segment
 * @throws IOException if obtaining the first page fails
 */
public FileChannelOutputView(BlockChannelWriter<MemorySegment> writer, MemoryManager memManager, List<MemorySegment> memory, int segmentSize) throws IOException {
	super(segmentSize, 0);
	
	// argument validation; the writer must still be open
	checkNotNull(writer);
	checkNotNull(memManager);
	checkNotNull(memory);
	checkArgument(!writer.isClosed());
	
	this.writer = writer;
	this.memManager = memManager;
	this.memory = memory;
	
	
	// make all segments available to advance() via the writer's return queue
	for (MemorySegment next : memory) {
		writer.getReturnQueue().add(next);
	}
	
	// move to the first page
	advance();
}
 
Example #6
Source File: FileChannelUtil.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Creates a headerless output view over the given channel, optionally compressed.
 *
 * <p>With compression enabled, the data goes through a buffer-file writer and a
 * compressing output view; otherwise a plain block channel writer with two freshly
 * allocated unpooled segments is used.
 *
 * @param ioManager the I/O manager that creates the underlying writers
 * @param channel id of the target file channel
 * @param compressionEnable whether the written blocks are compressed
 * @param compressionCodecFactory codec factory (only used when compression is enabled)
 * @param compressionBlockSize compressor block size (only used when compression is enabled)
 * @param segmentSize size of the write-buffer segments for the uncompressed path
 * @throws IOException if creating the channel writer fails
 */
public static AbstractChannelWriterOutputView createOutputView(
		IOManager ioManager,
		FileIOChannel.ID channel,
		boolean compressionEnable,
		BlockCompressionFactory compressionCodecFactory,
		int compressionBlockSize,
		int segmentSize) throws IOException {
	if (compressionEnable) {
		BufferFileWriter bufferWriter = ioManager.createBufferFileWriter(channel);
		return new CompressedHeaderlessChannelWriterOutputView(
				bufferWriter,
				compressionCodecFactory,
				compressionBlockSize);
	} else {
		BlockChannelWriter<MemorySegment> blockWriter =
				ioManager.createBlockChannelWriter(channel);
		return new HeaderlessChannelWriterOutputView(
				blockWriter,
				// two unpooled segments serve as the view's write buffers
				Arrays.asList(
						allocateUnpooledSegment(segmentSize),
						allocateUnpooledSegment(segmentSize)
				),
				segmentSize
		);
	}
}
 
Example #7
Source File: FileChannelUtil.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Creates a block channel writer for the given channel, optionally wrapping it
 * with block compression.
 *
 * @param ioManager the I/O manager that owns the underlying channel
 * @param channel id of the target file channel
 * @param bufferReturnQueue queue to which written segments are returned
 * @param compressionEnable whether written blocks are compressed
 * @param compressionCodecFactory codec factory (only used when compression is enabled)
 * @param compressionBlockSize compressor block size (only used when compression is enabled)
 * @param segmentSize size of the memory segments being written
 * @throws IOException if the underlying channel writer cannot be created
 */
public static BlockChannelWriter<MemorySegment> createBlockChannelWriter(
		IOManager ioManager,
		FileIOChannel.ID channel,
		LinkedBlockingQueue<MemorySegment> bufferReturnQueue,
		boolean compressionEnable,
		BlockCompressionFactory compressionCodecFactory,
		int compressionBlockSize,
		int segmentSize) throws IOException {
	if (compressionEnable) {
		return new CompressedBlockChannelWriter(
				ioManager,
				channel,
				bufferReturnQueue,
				compressionCodecFactory,
				compressionBlockSize,
				segmentSize
		);
	} else {
		// no compression: delegate directly to the IOManager
		return ioManager.createBlockChannelWriter(channel, bufferReturnQueue);
	}
}
 
Example #8
Source File: HeaderlessChannelWriterOutputView.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Creates a headerless output view over the given block channel writer, using the
 * provided segments as write buffers.
 *
 * @param writer the channel writer that receives the full pages
 * @param memory the write-buffer segments; every segment must have {@code segmentSize} bytes
 * @param segmentSize the page size of this view
 * @throws NullPointerException if {@code writer} or {@code memory} is null
 * @throws IllegalArgumentException if any segment's size differs from {@code segmentSize}
 * @throws RuntimeException wrapping the {@link IOException} if obtaining the first page fails
 */
public HeaderlessChannelWriterOutputView(
		BlockChannelWriter<MemorySegment> writer,
		List<MemorySegment> memory,
		int segmentSize) {
	super(segmentSize, 0);

	// consistent Preconditions-style null checks (still throws NullPointerException)
	this.writer = Preconditions.checkNotNull(writer);

	Preconditions.checkNotNull(memory);

	// load the segments into the queue
	final LinkedBlockingQueue<MemorySegment> queue = writer.getReturnQueue();
	for (int i = memory.size() - 1; i >= 0; --i) {
		final MemorySegment seg = memory.get(i);
		if (seg.size() != segmentSize) {
			// informative, grammatical error message including both sizes
			throw new IllegalArgumentException(
					"Segment size mismatch: expected " + segmentSize + " bytes, but segment has " + seg.size() + " bytes.");
		}
		queue.add(seg);
	}

	// get the first segment
	try {
		advance();
	} catch (IOException ioex) {
		throw new RuntimeException(ioex);
	}
}
 
Example #9
Source File: ResettableExternalBuffer.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Spills the in-memory buffer to a new file channel and resets the buffer.
 *
 * <p>The record buffer segments are written block-by-block (zero-copy). On failure,
 * the partially written channel is deleted on a best-effort basis and the original
 * exception is rethrown with any cleanup failure attached as suppressed.
 *
 * @throws IOException if writing or closing the channel fails
 */
private void spill() throws IOException {
	FileIOChannel.ID channel = ioManager.createChannel();

	final BlockChannelWriter<MemorySegment> writer = this.ioManager.createBlockChannelWriter(channel);
	int numRecordBuffers = inMemoryBuffer.getNumRecordBuffers();
	ArrayList<MemorySegment> segments = inMemoryBuffer.getRecordBufferSegments();
	try {
		// spill in memory buffer in zero-copy.
		for (int i = 0; i < numRecordBuffers; i++) {
			writer.writeBlock(segments.get(i));
		}
		LOG.info("Spilled in-memory buffer ({} bytes) to channel {}.", writer.getSize(), channel);
		writer.close();
	} catch (IOException e) {
		// best-effort cleanup; never let a cleanup failure mask the original error
		try {
			writer.closeAndDelete();
		} catch (IOException cleanupFailure) {
			e.addSuppressed(cleanupFailure);
		}
		throw e;
	}

	// widen before multiplying to avoid int overflow for large spills
	spillSize += (long) numRecordBuffers * segmentSize;
	spilledChannelIDs.add(new ChannelWithMeta(
		channel,
		inMemoryBuffer.getNumRecordBuffers(),
		inMemoryBuffer.getNumBytesInLastBuffer()));
	this.spilledChannelRowOffsets.add(numRows);

	inMemoryBuffer.reset();
}
 
Example #10
Source File: BinaryHashPartition.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Writes all buffered target segments to the given channel writer, retains the
 * writer for subsequent writes, and clears the internal segment list.
 *
 * @param writer the writer this partition is spilled to
 * @return the number of segments that were written
 * @throws IOException if writing a block fails
 */
int spill(BlockChannelWriter<MemorySegment> writer) throws IOException {
	this.writer = writer;
	final int spilledSegments = this.targetList.size();
	for (int i = 0; i < spilledSegments; i++) {
		this.writer.writeBlock(this.targetList.get(i));
	}
	this.targetList.clear();
	return spilledSegments;
}
 
Example #11
Source File: HashPartition.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Spills every buffered target segment through the given channel writer and
 * empties the internal buffer; the writer is kept for later use.
 *
 * @param writer the writer receiving the spilled blocks
 * @return how many segments were written
 * @throws IOException if a block write fails
 */
int spill(BlockChannelWriter<MemorySegment> writer) throws IOException {
	this.writer = writer;
	final int writtenSegments = this.targetList.size();
	for (MemorySegment segment : this.targetList) {
		this.writer.writeBlock(segment);
	}
	this.targetList.clear();
	return writtenSegments;
}
 
Example #12
Source File: LongHashPartition.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Flushes all buffered target segments to the supplied channel writer and clears
 * the buffer, keeping the writer for subsequent writes.
 *
 * @param writer the destination channel writer
 * @return the count of segments written
 * @throws IOException if writing a block fails
 */
int spill(BlockChannelWriter<MemorySegment> writer) throws IOException {
	this.writer = writer;
	final int count = this.targetList.size();
	for (int i = 0; i < count; i++) {
		this.writer.writeBlock(this.targetList.get(i));
	}
	this.targetList.clear();
	return count;
}
 
Example #13
Source File: HashPartition.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Writes all buffered target segments to the given channel writer and clears
 * the internal buffer; the writer is retained for later writes.
 *
 * @param writer the writer this partition is spilled to
 * @return the number of segments that were written
 * @throws IOException if writing a block fails
 */
int spill(BlockChannelWriter<MemorySegment> writer) throws IOException {
	this.writer = writer;
	final int numSegments = this.targetList.size();
	for (int i = 0; i < numSegments; i++) {
		this.writer.writeBlock(this.targetList.get(i));
	}
	// segments are now owned by the writer; drop our references
	this.targetList.clear();
	return numSegments;
}
 
Example #14
Source File: FileChannelStreamsTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Verifies that a {@code FileChannelOutputView} returns all memory on {@code close()},
 * tolerates being closed twice, and removes the backing file on {@code closeAndDelete()}.
 */
@Test
public void testCloseAndDeleteOutputView() {
	try (IOManager ioManager = new IOManagerAsync()) {
		MemoryManager memMan = MemoryManagerBuilder.newBuilder().build();
		List<MemorySegment> memory = new ArrayList<MemorySegment>();
		memMan.allocatePages(new DummyInvokable(), memory, 4);
		
		FileIOChannel.ID channel = ioManager.createChannel();
		BlockChannelWriter<MemorySegment> writer = ioManager.createBlockChannelWriter(channel);
		
		FileChannelOutputView out = new FileChannelOutputView(writer, memMan, memory, memMan.getPageSize());
		// write something so the view actually produces a file
		new StringValue("Some test text").write(out);
		
		// close for the first time, make sure all memory returns
		out.close();
		assertTrue(memMan.verifyEmpty());
		
		// close again, should not cause an exception
		out.close();
		
		// delete, make sure file is removed
		out.closeAndDelete();
		assertFalse(new File(channel.getPath()).exists());
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example #15
Source File: HeaderlessChannelWriterOutputView.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Creates a headerless output view over the given block channel writer, using the
 * provided segments as write buffers.
 *
 * @param writer the channel writer that receives the full pages
 * @param memory the write-buffer segments; every segment must have {@code segmentSize} bytes
 * @param segmentSize the page size of this view
 * @throws NullPointerException if {@code writer} or {@code memory} is null
 * @throws IllegalArgumentException if any segment's size differs from {@code segmentSize}
 * @throws RuntimeException wrapping the {@link IOException} if obtaining the first page fails
 */
public HeaderlessChannelWriterOutputView(
		BlockChannelWriter<MemorySegment> writer,
		List<MemorySegment> memory,
		int segmentSize) {
	super(segmentSize, 0);

	// consistent Preconditions-style null checks (still throws NullPointerException)
	this.writer = Preconditions.checkNotNull(writer);

	Preconditions.checkNotNull(memory);

	// load the segments into the queue
	final LinkedBlockingQueue<MemorySegment> queue = writer.getReturnQueue();
	for (int i = memory.size() - 1; i >= 0; --i) {
		final MemorySegment seg = memory.get(i);
		if (seg.size() != segmentSize) {
			// informative, grammatical error message including both sizes
			throw new IllegalArgumentException(
					"Segment size mismatch: expected " + segmentSize + " bytes, but segment has " + seg.size() + " bytes.");
		}
		queue.add(seg);
	}

	// get the first segment
	try {
		advance();
	} catch (IOException ioex) {
		throw new RuntimeException(ioex);
	}
}
 
Example #16
Source File: FileChannelStreamsTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Checks the lifecycle of {@code FileChannelOutputView}: closing returns all pages
 * to the memory manager, a second close is a no-op, and closeAndDelete removes
 * the spill file from disk.
 */
@Test
public void testCloseAndDeleteOutputView() {
	try (IOManager ioManager = new IOManagerAsync()) {
		MemoryManager memMan = new MemoryManager(4 * 16*1024, 1, 16*1024, MemoryType.HEAP, true);
		List<MemorySegment> memory = new ArrayList<MemorySegment>();
		memMan.allocatePages(new DummyInvokable(), memory, 4);
		
		FileIOChannel.ID channel = ioManager.createChannel();
		BlockChannelWriter<MemorySegment> writer = ioManager.createBlockChannelWriter(channel);
		
		FileChannelOutputView out = new FileChannelOutputView(writer, memMan, memory, memMan.getPageSize());
		// write something so the view actually produces a file
		new StringValue("Some test text").write(out);
		
		// close for the first time, make sure all memory returns
		out.close();
		assertTrue(memMan.verifyEmpty());
		
		// close again, should not cause an exception
		out.close();
		
		// delete, make sure file is removed
		out.closeAndDelete();
		assertFalse(new File(channel.getPath()).exists());
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example #17
Source File: ResettableExternalBuffer.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Spills the in-memory buffer to a new file channel and resets the buffer.
 *
 * <p>The record buffer segments are written block-by-block (zero-copy). On failure,
 * the partially written channel is deleted on a best-effort basis and the original
 * exception is rethrown with any cleanup failure attached as suppressed.
 *
 * @throws IOException if writing or closing the channel fails
 */
private void spill() throws IOException {
	FileIOChannel.ID channel = ioManager.createChannel();

	final BlockChannelWriter<MemorySegment> writer = this.ioManager.createBlockChannelWriter(channel);
	int numRecordBuffers = inMemoryBuffer.getNumRecordBuffers();
	ArrayList<MemorySegment> segments = inMemoryBuffer.getRecordBufferSegments();
	try {
		// spill in memory buffer in zero-copy.
		for (int i = 0; i < numRecordBuffers; i++) {
			writer.writeBlock(segments.get(i));
		}
		LOG.info("Spilled in-memory buffer ({} bytes) to channel {}.", writer.getSize(), channel);
		writer.close();
	} catch (IOException e) {
		// best-effort cleanup; never let a cleanup failure mask the original error
		try {
			writer.closeAndDelete();
		} catch (IOException cleanupFailure) {
			e.addSuppressed(cleanupFailure);
		}
		throw e;
	}

	// widen before multiplying to avoid int overflow for large spills
	spillSize += (long) numRecordBuffers * segmentSize;
	spilledChannelIDs.add(new ChannelWithMeta(
		channel,
		inMemoryBuffer.getNumRecordBuffers(),
		inMemoryBuffer.getNumBytesInLastBuffer()));
	this.spilledChannelRowOffsets.add(numRows);

	inMemoryBuffer.reset();
}
 
Example #18
Source File: BinaryHashPartition.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Writes every buffered target segment to the given channel writer and clears
 * the internal buffer; the writer is retained for subsequent writes.
 *
 * @param writer the writer this partition is spilled to
 * @return the number of segments that were written
 * @throws IOException if writing a block fails
 */
int spill(BlockChannelWriter<MemorySegment> writer) throws IOException {
	this.writer = writer;
	final int numSegments = this.targetList.size();
	for (MemorySegment segment : this.targetList) {
		this.writer.writeBlock(segment);
	}
	// segments are now owned by the writer; drop our references
	this.targetList.clear();
	return numSegments;
}
 
Example #19
Source File: LongHashPartition.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Spills all buffered target segments through the given channel writer and
 * empties the internal buffer, keeping the writer for later writes.
 *
 * @param writer the destination channel writer
 * @return the number of segments written
 * @throws IOException if a block write fails
 */
int spill(BlockChannelWriter<MemorySegment> writer) throws IOException {
	this.writer = writer;
	final int numSegments = this.targetList.size();
	for (MemorySegment segment : this.targetList) {
		this.writer.writeBlock(segment);
	}
	// segments are now owned by the writer; drop our references
	this.targetList.clear();
	return numSegments;
}
 
Example #20
Source File: FileChannelStreamsTest.java    From Flink-CEPplus with Apache License 2.0 5 votes vote down vote up
/**
 * Verifies the close/delete lifecycle of {@code FileChannelOutputView}: memory is
 * fully returned on close, double-close is harmless, and closeAndDelete removes
 * the spill file. The IOManager is shut down in the finally block.
 */
@Test
public void testCloseAndDeleteOutputView() {
	final IOManager ioManager = new IOManagerAsync();
	try {
		MemoryManager memMan = new MemoryManager(4 * 16*1024, 1, 16*1024, MemoryType.HEAP, true);
		List<MemorySegment> memory = new ArrayList<MemorySegment>();
		memMan.allocatePages(new DummyInvokable(), memory, 4);
		
		FileIOChannel.ID channel = ioManager.createChannel();
		BlockChannelWriter<MemorySegment> writer = ioManager.createBlockChannelWriter(channel);
		
		FileChannelOutputView out = new FileChannelOutputView(writer, memMan, memory, memMan.getPageSize());
		// write something so the view actually produces a file
		new StringValue("Some test text").write(out);
		
		// close for the first time, make sure all memory returns
		out.close();
		assertTrue(memMan.verifyEmpty());
		
		// close again, should not cause an exception
		out.close();
		
		// delete, make sure file is removed
		out.closeAndDelete();
		assertFalse(new File(channel.getPath()).exists());
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
	finally {
		ioManager.shutdown();
	}
}
 
Example #21
Source File: HashPartition.java    From Flink-CEPplus with Apache License 2.0 5 votes vote down vote up
/**
 * Spills the buffered target segments through the given channel writer, clears
 * the buffer, and remembers the writer for subsequent writes.
 *
 * @param writer the writer receiving the spilled blocks
 * @return the number of segments written
 * @throws IOException if a block write fails
 */
int spill(BlockChannelWriter<MemorySegment> writer) throws IOException {
	this.writer = writer;
	final int written = this.targetList.size();
	for (MemorySegment segment : this.targetList) {
		this.writer.writeBlock(segment);
	}
	this.targetList.clear();
	return written;
}
 
Example #22
Source File: HashPartition.java    From flink with Apache License 2.0 4 votes vote down vote up
/**
 * Returns the block channel writer used for this partition's build side.
 * NOTE(review): presumably {@code null} while the partition has not been spilled — confirm.
 */
public BlockChannelWriter<MemorySegment> getBuildSideChannel() {
	return this.buildSideChannel;
}
 
Example #23
Source File: FileChannelStreamsITCase.java    From flink with Apache License 2.0 4 votes vote down vote up
/**
 * Round-trip test: writes a sequence of short random pairs through a
 * {@code FileChannelOutputView}, reads them back via a {@code FileChannelInputView},
 * and checks every record matches the re-generated original.
 */
@Test
public void testWriteReadSmallRecords() {
	try {
		List<MemorySegment> memory = memManager.allocatePages(new DummyInvokable(), NUM_MEMORY_SEGMENTS);
		
		final PairGenerator generator = new PairGenerator(SEED, KEY_MAX, VALUE_SHORT_LENGTH, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
		final FileIOChannel.ID channel = ioManager.createChannel();
		
		// create the writer output view
		final BlockChannelWriter<MemorySegment> writer = ioManager.createBlockChannelWriter(channel);
		final FileChannelOutputView outView = new FileChannelOutputView(writer, memManager, memory, MEMORY_PAGE_SIZE);
		
		// write a number of pairs
		Pair pair = new Pair();
		for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
			generator.next(pair);
			pair.write(outView);
		}
		outView.close();
		
		// create the reader input view
		List<MemorySegment> readMemory = memManager.allocatePages(new DummyInvokable(), NUM_MEMORY_SEGMENTS);
		
		final BlockChannelReader<MemorySegment> reader = ioManager.createBlockChannelReader(channel);
		final FileChannelInputView inView = new FileChannelInputView(reader, memManager, readMemory, outView.getBytesInLatestSegment());
		// reset so the generator reproduces the exact same sequence for comparison
		generator.reset();
		
		// read and re-generate all records and compare them
		Pair readPair = new Pair();
		for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
			generator.next(pair);
			readPair.read(inView);
			assertEquals("The re-generated and the read record do not match.", pair, readPair);
		}
		
		inView.close();
		reader.deleteChannel();
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example #24
Source File: FileChannelStreamsITCase.java    From flink with Apache License 2.0 4 votes vote down vote up
/**
 * Round-trip test with long values: writes random pairs (long value length) through
 * a {@code FileChannelOutputView}, reads them back, and checks every record matches
 * the re-generated original.
 */
@Test
public void testWriteAndReadLongRecords() {
	try {
		final List<MemorySegment> memory = memManager.allocatePages(new DummyInvokable(), NUM_MEMORY_SEGMENTS);
		
		final PairGenerator generator = new PairGenerator(SEED, KEY_MAX, VALUE_LONG_LENGTH, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
		final FileIOChannel.ID channel = this.ioManager.createChannel();
		
		// create the writer output view
		final BlockChannelWriter<MemorySegment> writer = this.ioManager.createBlockChannelWriter(channel);
		final FileChannelOutputView outView = new FileChannelOutputView(writer, memManager, memory, MEMORY_PAGE_SIZE);
		
		// write a number of pairs
		Pair pair = new Pair();
		for (int i = 0; i < NUM_PAIRS_LONG; i++) {
			generator.next(pair);
			pair.write(outView);
		}
		outView.close();
		
		// create the reader input view
		List<MemorySegment> readMemory = memManager.allocatePages(new DummyInvokable(), NUM_MEMORY_SEGMENTS);
		
		final BlockChannelReader<MemorySegment> reader = ioManager.createBlockChannelReader(channel);
		final FileChannelInputView inView = new FileChannelInputView(reader, memManager, readMemory, outView.getBytesInLatestSegment());
		// reset so the generator reproduces the exact same sequence for comparison
		generator.reset();
		
		// read and re-generate all records and compare them
		Pair readPair = new Pair();
		for (int i = 0; i < NUM_PAIRS_LONG; i++) {
			generator.next(pair);
			readPair.read(inView);
			assertEquals("The re-generated and the read record do not match.", pair, readPair);
		}
		
		inView.close();
		reader.deleteChannel();
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example #25
Source File: FileChannelStreamsITCase.java    From flink with Apache License 2.0 4 votes vote down vote up
/**
 * Round-trip test using a single memory buffer on each side, forcing the views
 * to cycle one segment; verifies all records survive the write/read cycle.
 */
@Test
public void testWriteReadOneBufferOnly() {
	try {
		final List<MemorySegment> memory = memManager.allocatePages(new DummyInvokable(), 1);
		
		final PairGenerator generator = new PairGenerator(SEED, KEY_MAX, VALUE_SHORT_LENGTH, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
		final FileIOChannel.ID channel = this.ioManager.createChannel();
		
		// create the writer output view
		final BlockChannelWriter<MemorySegment> writer = this.ioManager.createBlockChannelWriter(channel);
		final FileChannelOutputView outView = new FileChannelOutputView(writer, memManager, memory, MEMORY_PAGE_SIZE);
		
		// write a number of pairs
		Pair pair = new Pair();
		for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
			generator.next(pair);
			pair.write(outView);
		}
		outView.close();
		
		// create the reader input view
		List<MemorySegment> readMemory = memManager.allocatePages(new DummyInvokable(), 1);
		
		final BlockChannelReader<MemorySegment> reader = ioManager.createBlockChannelReader(channel);
		final FileChannelInputView inView = new FileChannelInputView(reader, memManager, readMemory, outView.getBytesInLatestSegment());
		// reset so the generator reproduces the exact same sequence for comparison
		generator.reset();
		
		// read and re-generate all records and compare them
		Pair readPair = new Pair();
		for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
			generator.next(pair);
			readPair.read(inView);
			assertEquals("The re-generated and the read record do not match.", pair, readPair);
		}
		
		inView.close();
		reader.deleteChannel();
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example #26
Source File: FileChannelStreamsITCase.java    From flink with Apache License 2.0 4 votes vote down vote up
/**
 * Verifies that reading one record past the number actually written raises an
 * {@code EOFException} rather than returning garbage or hanging.
 */
@Test
public void testReadTooMany() {
	try {
		final List<MemorySegment> memory = memManager.allocatePages(new DummyInvokable(), NUM_MEMORY_SEGMENTS);
		
		final PairGenerator generator = new PairGenerator(SEED, KEY_MAX, VALUE_SHORT_LENGTH, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
		final FileIOChannel.ID channel = this.ioManager.createChannel();
		
		// create the writer output view
		final BlockChannelWriter<MemorySegment> writer = this.ioManager.createBlockChannelWriter(channel);
		final FileChannelOutputView outView = new FileChannelOutputView(writer, memManager, memory, MEMORY_PAGE_SIZE);

		// write a number of pairs
		Pair pair = new Pair();
		for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
			generator.next(pair);
			pair.write(outView);
		}
		outView.close();

		// create the reader input view
		List<MemorySegment> readMemory = memManager.allocatePages(new DummyInvokable(), NUM_MEMORY_SEGMENTS);
		
		final BlockChannelReader<MemorySegment> reader = ioManager.createBlockChannelReader(channel);
		final FileChannelInputView inView = new FileChannelInputView(reader, memManager, readMemory, outView.getBytesInLatestSegment());
		generator.reset();

		// read and re-generate all records and compare them
		// deliberately read one record too many: the extra read must hit EOF
		try {
			Pair readPair = new Pair();
			for (int i = 0; i < NUM_PAIRS_SHORT + 1; i++) {
				generator.next(pair);
				readPair.read(inView);
				assertEquals("The re-generated and the read record do not match.", pair, readPair);
			}
			fail("Expected an EOFException which did not occur.");
		}
		catch (EOFException eofex) {
			// expected
		}
		
		inView.close();
		reader.deleteChannel();
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example #27
Source File: BinaryHashPartition.java    From flink with Apache License 2.0 4 votes vote down vote up
/**
 * Returns the block channel writer for this partition's build side.
 * NOTE(review): presumably {@code null} until the partition is spilled — confirm.
 */
BlockChannelWriter<MemorySegment> getBuildSideChannel() {
	return this.buildSideChannel;
}
 
Example #28
Source File: LongHashPartition.java    From flink with Apache License 2.0 4 votes vote down vote up
/**
 * Returns the build-side block channel writer of this partition.
 * NOTE(review): presumably {@code null} for an in-memory partition — confirm.
 */
BlockChannelWriter<MemorySegment> getBuildSideChannel() {
	return this.buildSideChannel;
}
 
Example #29
Source File: FileChannelStreamsITCase.java    From flink with Apache License 2.0 4 votes vote down vote up
@Test
public void testWriteReadNotAll() {
	try {
		final List<MemorySegment> memory = memManager.allocatePages(new DummyInvokable(), NUM_MEMORY_SEGMENTS);
		
		final PairGenerator generator = new PairGenerator(SEED, KEY_MAX, VALUE_SHORT_LENGTH, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
		final FileIOChannel.ID channel = this.ioManager.createChannel();
		
		// create the writer output view
		final BlockChannelWriter<MemorySegment> writer = this.ioManager.createBlockChannelWriter(channel);
		final FileChannelOutputView outView = new FileChannelOutputView(writer, memManager, memory, MEMORY_PAGE_SIZE);
		
		// write a number of pairs
		Pair pair = new Pair();
		for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
			generator.next(pair);
			pair.write(outView);
		}
		outView.close();
		
		// create the reader input view
		List<MemorySegment> readMemory = memManager.allocatePages(new DummyInvokable(), NUM_MEMORY_SEGMENTS);
		
		final BlockChannelReader<MemorySegment> reader = ioManager.createBlockChannelReader(channel);
		final FileChannelInputView inView = new FileChannelInputView(reader, memManager, readMemory, outView.getBytesInLatestSegment());
		generator.reset();
		
		// read and re-generate all records and compare them
		Pair readPair = new Pair();
		for (int i = 0; i < NUM_PAIRS_SHORT / 2; i++) {
			generator.next(pair);
			readPair.read(inView);
			assertEquals("The re-generated and the read record do not match.", pair, readPair);
		}
		
		inView.close();
		reader.deleteChannel();
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example #30
Source File: ChannelViewsTest.java    From flink with Apache License 2.0 4 votes vote down vote up
@Test
public void testReadTooMany() throws Exception
{
	final TestData.TupleGenerator generator = new TestData.TupleGenerator(SEED, KEY_MAX, VALUE_SHORT_LENGTH, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
	final FileIOChannel.ID channel = this.ioManager.createChannel();
	final TypeSerializer<Tuple2<Integer, String>> serializer = TestData.getIntStringTupleSerializer();
	
	// create the writer output view
	List<MemorySegment> memory = this.memoryManager.allocatePages(this.parentTask, NUM_MEMORY_SEGMENTS);
	final BlockChannelWriter<MemorySegment> writer = this.ioManager.createBlockChannelWriter(channel);
	final ChannelWriterOutputView outView = new ChannelWriterOutputView(writer, memory, MEMORY_PAGE_SIZE);

	// write a number of pairs
	final Tuple2<Integer, String> rec = new Tuple2<>();
	for (int i = 0; i < NUM_PAIRS_SHORT; i++) {
		generator.next(rec);
		serializer.serialize(rec, outView);
	}
	this.memoryManager.release(outView.close());

	// create the reader input view
	memory = this.memoryManager.allocatePages(this.parentTask, NUM_MEMORY_SEGMENTS);
	final BlockChannelReader<MemorySegment> reader = this.ioManager.createBlockChannelReader(channel);
	final ChannelReaderInputView inView = new ChannelReaderInputView(reader, memory, outView.getBlockCount(), true);
	generator.reset();

	// read and re-generate all records and compare them
	try {
		final Tuple2<Integer, String> readRec = new Tuple2<>();
		for (int i = 0; i < NUM_PAIRS_SHORT + 1; i++) {
			generator.next(rec);
			serializer.deserialize(readRec, inView);
			final int k1 = rec.f0;
			final String v1 = rec.f1;
			final int k2 = readRec.f0;
			final String v2 = readRec.f1;
			Assert.assertTrue("The re-generated and the read record do not match.", k1 == k2 && v1.equals(v2));
		}
		Assert.fail("Expected an EOFException which did not occur.");
	}
	catch (EOFException eofex) {
		// expected
	}
	catch (Throwable t) {
		// unexpected
		Assert.fail("Unexpected Exception: " + t.getMessage());
	}
	
	this.memoryManager.release(inView.close());
	reader.deleteChannel();
}