org.apache.flink.runtime.io.disk.iomanager.BufferFileWriter Java Examples

The following examples show how to use org.apache.flink.runtime.io.disk.iomanager.BufferFileWriter. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example #1
Source File: SpilledSubpartitionView.java    From Flink-CEPplus with Apache License 2.0 6 votes vote down vote up
/**
 * Creates a view over a (possibly still being) spilled subpartition.
 *
 * <p>If the spill writer has already processed all write requests, the availability
 * listener is notified immediately; otherwise notification is deferred to the
 * writer's all-requests-processed callback.
 *
 * @param parent subpartition whose spilled buffers this view reads; must not be null
 * @param memorySegmentSize size of each segment in the local read buffer pool
 * @param spillWriter writer that spilled (or is still spilling) the buffers; must not be null
 * @param numberOfSpilledBuffers total number of buffers written to disk; must be >= 0
 * @param availabilityListener listener notified once spilled data is readable; must not be null
 * @throws IOException if opening the synchronous reader on the spill channel fails
 */
SpilledSubpartitionView(
	SpillableSubpartition parent,
	int memorySegmentSize,
	BufferFileWriter spillWriter,
	long numberOfSpilledBuffers,
	BufferAvailabilityListener availabilityListener) throws IOException {

	this.parent = checkNotNull(parent);
	// Small fixed pool (2 segments) used to read spilled buffers back in.
	this.bufferPool = new SpillReadBufferPool(2, memorySegmentSize);
	this.spillWriter = checkNotNull(spillWriter);
	// Reader is opened on the same channel the spill writer wrote to.
	this.fileReader = new SynchronousBufferFileReader(spillWriter.getChannelID(), false);
	checkArgument(numberOfSpilledBuffers >= 0);
	this.numberOfSpilledBuffers = numberOfSpilledBuffers;
	this.availabilityListener = checkNotNull(availabilityListener);

	// Check whether async spilling is still in progress. If not, this returns
	// false and we can notify our availability listener about all available buffers.
	// Otherwise, we notify only when the spill writer callback happens.
	if (!spillWriter.registerAllRequestsProcessedListener(this)) {
		isSpillInProgress = false;
		availabilityListener.notifyDataAvailable();
		LOG.debug("No spilling in progress. Notified about {} available buffers.", numberOfSpilledBuffers);
	} else {
		LOG.debug("Spilling in progress. Waiting with notification about {} available buffers.", numberOfSpilledBuffers);
	}
}
 
Example #2
Source File: FileChannelUtil.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Creates a channel writer output view for the given channel, either compressed
 * (headerless, block-compressed) or plain (headerless with two write segments).
 *
 * @param ioManager I/O manager used to create the underlying channel writer
 * @param channel channel the view writes to
 * @param compressionEnable whether to wrap the channel in block compression
 * @param compressionCodecFactory codec factory used when compression is enabled
 * @param compressionBlockSize block size for the compressed view
 * @param segmentSize segment size for the uncompressed view's write buffers
 * @throws IOException if creating the channel writer fails
 */
public static AbstractChannelWriterOutputView createOutputView(
		IOManager ioManager,
		FileIOChannel.ID channel,
		boolean compressionEnable,
		BlockCompressionFactory compressionCodecFactory,
		int compressionBlockSize,
		int segmentSize) throws IOException {
	if (!compressionEnable) {
		// Plain path: block writer backed by two freshly allocated unpooled segments.
		BlockChannelWriter<MemorySegment> plainWriter = ioManager.createBlockChannelWriter(channel);
		return new HeaderlessChannelWriterOutputView(
				plainWriter,
				Arrays.asList(allocateUnpooledSegment(segmentSize), allocateUnpooledSegment(segmentSize)),
				segmentSize);
	}
	// Compressed path: headerless view on top of a buffer file writer.
	BufferFileWriter compressedWriter = ioManager.createBufferFileWriter(channel);
	return new CompressedHeaderlessChannelWriterOutputView(
			compressedWriter, compressionCodecFactory, compressionBlockSize);
}
 
Example #3
Source File: CompressedHeaderlessChannelWriterOutputView.java    From flink with Apache License 2.0 6 votes vote down vote up
public CompressedHeaderlessChannelWriterOutputView(
		BufferFileWriter writer,
		BlockCompressionFactory compressionCodecFactory,
		int compressionBlockSize) {
	super(compressionBlockSize, 0);

	this.compressionBlockSize = compressionBlockSize;
	buffer = MemorySegmentFactory.wrap(new byte[compressionBlockSize]);
	compressor = compressionCodecFactory.getCompressor();
	for (int i = 0; i < 2; i++) {
		compressedBuffers.add(MemorySegmentFactory.wrap(
				new byte[compressor.getMaxCompressedSize(compressionBlockSize)]));
	}
	this.writer = writer;

	try {
		advance();
	} catch (IOException ioex) {
		throw new RuntimeException(ioex);
	}
}
 
Example #4
Source File: FileChannelUtil.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Creates a channel writer output view for the given channel, either compressed
 * (headerless, block-compressed) or plain (headerless with two write segments).
 *
 * @param ioManager I/O manager used to create the underlying channel writer
 * @param channel channel the view writes to
 * @param compressionEnable whether to wrap the channel in block compression
 * @param compressionCodecFactory codec factory used when compression is enabled
 * @param compressionBlockSize block size for the compressed view
 * @param segmentSize segment size for the uncompressed view's write buffers
 * @throws IOException if creating the channel writer fails
 */
public static AbstractChannelWriterOutputView createOutputView(
		IOManager ioManager,
		FileIOChannel.ID channel,
		boolean compressionEnable,
		BlockCompressionFactory compressionCodecFactory,
		int compressionBlockSize,
		int segmentSize) throws IOException {
	if (!compressionEnable) {
		// Plain path: block writer backed by two freshly allocated unpooled segments.
		BlockChannelWriter<MemorySegment> plainWriter = ioManager.createBlockChannelWriter(channel);
		return new HeaderlessChannelWriterOutputView(
				plainWriter,
				Arrays.asList(allocateUnpooledSegment(segmentSize), allocateUnpooledSegment(segmentSize)),
				segmentSize);
	}
	// Compressed path: headerless view on top of a buffer file writer.
	BufferFileWriter compressedWriter = ioManager.createBufferFileWriter(channel);
	return new CompressedHeaderlessChannelWriterOutputView(
			compressedWriter, compressionCodecFactory, compressionBlockSize);
}
 
Example #5
Source File: CompressedHeaderlessChannelWriterOutputView.java    From flink with Apache License 2.0 6 votes vote down vote up
public CompressedHeaderlessChannelWriterOutputView(
		BufferFileWriter writer,
		BlockCompressionFactory compressionCodecFactory,
		int compressionBlockSize) {
	super(compressionBlockSize, 0);

	this.compressionBlockSize = compressionBlockSize;
	buffer = MemorySegmentFactory.wrap(new byte[compressionBlockSize]);
	compressor = compressionCodecFactory.getCompressor();
	for (int i = 0; i < 2; i++) {
		compressedBuffers.add(MemorySegmentFactory.wrap(
				new byte[compressor.getMaxCompressedSize(compressionBlockSize)]));
	}
	this.writer = writer;

	try {
		advance();
	} catch (IOException ioex) {
		throw new RuntimeException(ioex);
	}
}
 
Example #6
Source File: SpillableSubpartitionTest.java    From Flink-CEPplus with Apache License 2.0 5 votes vote down vote up
/**
 * Test fixture override: returns a writer that has already been closed, so that
 * the first write attempt by the code under test fails.
 */
@Override
public BufferFileWriter createBufferFileWriter(FileIOChannel.ID channelID)
		throws IOException {
	final BufferFileWriter closedWriter = super.createBufferFileWriter(channelID);
	closedWriter.close();
	return closedWriter;
}
 
Example #7
Source File: SpillableSubpartitionTest.java    From Flink-CEPplus with Apache License 2.0 5 votes vote down vote up
/**
 * Test fixture override: signals via {@code blockLatch} that writer creation has
 * been reached, then blocks on {@code doneLatch} until the test releases it
 * (used to provoke a specific interleaving in the test).
 *
 * @throws IOException if the wait is interrupted or the super call fails
 */
@Override
public BufferFileWriter createBufferFileWriter(FileIOChannel.ID channelID) throws IOException {
	blockLatch.countDown();
	try {
		doneLatch.await();
	} catch (InterruptedException e) {
		// Restore the interrupt status before translating the exception, so
		// callers further up the stack can still observe the interruption.
		Thread.currentThread().interrupt();
		throw new IOException("Blocking operation was interrupted.", e);
	}

	return super.createBufferFileWriter(channelID);
}
 
Example #8
Source File: CompressedHeaderlessChannelTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Round-trips random-length sequences of ints through the compressed writer
 * output view and the compressed reader input view, verifying the values survive
 * compression and decompression across 10 repetitions.
 */
@Test
public void testCompressedView() throws IOException {
	// One RNG for all repetitions; allocating a new Random per loop iteration
	// is wasteful and can reduce seed diversity between iterations.
	final Random random = new Random();
	for (int testTime = 0; testTime < 10; testTime++) {
		int testRounds = random.nextInt(20000);
		FileIOChannel.ID channel = ioManager.createChannel();
		BufferFileWriter writer = this.ioManager.createBufferFileWriter(channel);
		CompressedHeaderlessChannelWriterOutputView outputView =
				new CompressedHeaderlessChannelWriterOutputView(
						writer,
						compressionFactory,
						BUFFER_SIZE
				);

		// Write 0..testRounds-1, then close to flush all compressed blocks.
		for (int i = 0; i < testRounds; i++) {
			outputView.writeInt(i);
		}
		outputView.close();
		int blockCount = outputView.getBlockCount();

		CompressedHeaderlessChannelReaderInputView inputView =
				new CompressedHeaderlessChannelReaderInputView(
						channel,
						ioManager,
						compressionFactory,
						BUFFER_SIZE,
						blockCount
				);

		// Read back and verify every value in order.
		for (int i = 0; i < testRounds; i++) {
			assertEquals(i, inputView.readInt());
		}
		inputView.close();
	}
}
 
Example #9
Source File: CompressedHeaderlessChannelTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Round-trips random-length sequences of ints through the compressed writer
 * output view and the compressed reader input view, verifying the values survive
 * compression and decompression across 10 repetitions.
 */
@Test
public void testCompressedView() throws IOException {
	// One RNG for all repetitions; allocating a new Random per loop iteration
	// is wasteful and can reduce seed diversity between iterations.
	final Random random = new Random();
	for (int testTime = 0; testTime < 10; testTime++) {
		int testRounds = random.nextInt(20000);
		FileIOChannel.ID channel = ioManager.createChannel();
		BufferFileWriter writer = this.ioManager.createBufferFileWriter(channel);
		CompressedHeaderlessChannelWriterOutputView outputView =
				new CompressedHeaderlessChannelWriterOutputView(
						writer,
						compressionFactory,
						BUFFER_SIZE
				);

		// Write 0..testRounds-1, then close to flush all compressed blocks.
		for (int i = 0; i < testRounds; i++) {
			outputView.writeInt(i);
		}
		outputView.close();
		int blockCount = outputView.getBlockCount();

		CompressedHeaderlessChannelReaderInputView inputView =
				new CompressedHeaderlessChannelReaderInputView(
						channel,
						ioManager,
						compressionFactory,
						BUFFER_SIZE,
						blockCount
				);

		// Read back and verify every value in order.
		for (int i = 0; i < testRounds; i++) {
			assertEquals(i, inputView.readInt());
		}
		inputView.close();
	}
}
 
Example #10
Source File: SpillableSubpartitionView.java    From Flink-CEPplus with Apache License 2.0 4 votes vote down vote up
/**
 * Spills all queued in-memory buffers (except the already-announced
 * {@code nextBuffer}) to disk and installs a {@link SpilledSubpartitionView}
 * that serves subsequent reads from the spill file.
 *
 * @return the number of buffers written to disk, or 0 if already spilled
 *         or nothing is buffered in memory
 * @throws IOException if creating the spill writer or writing a block fails
 */
int releaseMemory() throws IOException {
	synchronized (buffers) {
		if (spilledView != null || nextBuffer == null) {
			// Already spilled or nothing in-memory
			return 0;
		} else {
			// We don't touch next buffer, because a notification has
			// already been sent for it. Only when it is consumed, will
			// it be recycled.

			// Create the spill writer and write all buffers to disk
			BufferFileWriter spillWriter = ioManager.createBufferFileWriter(ioManager.createChannel());

			long spilledBytes = 0;

			// Snapshot the count first: buffers shrinks as we remove from it.
			int numBuffers = buffers.size();
			for (int i = 0; i < numBuffers; i++) {
				// try-with-resources closes (releases) each consumer after its
				// buffer has been handed to the spill writer.
				try (BufferConsumer bufferConsumer = buffers.remove()) {
					Buffer buffer = bufferConsumer.build();
					checkState(bufferConsumer.isFinished(), "BufferConsumer must be finished before " +
						"spilling. Otherwise we would not be able to simply remove it from the queue. This should " +
						"be guaranteed by creating ResultSubpartitionView only once Subpartition isFinished.");
					parent.updateStatistics(buffer);
					spilledBytes += buffer.getSize();
					spillWriter.writeBlock(buffer);
				}
			}

			// Delegate all further reads to the spilled view; the writer may
			// still be flushing asynchronously (see SpilledSubpartitionView).
			spilledView = new SpilledSubpartitionView(
				parent,
				memorySegmentSize,
				spillWriter,
				numBuffers,
				listener);

			LOG.debug("Spilling {} bytes for sub partition {} of {}.",
				spilledBytes,
				parent.index,
				parent.parent.getPartitionId());

			return numBuffers;
		}
	}
}