org.apache.flink.runtime.io.network.buffer.NetworkBuffer Java Examples

The following examples show how to use org.apache.flink.runtime.io.network.buffer.NetworkBuffer. The examples are taken from open source projects; the source file and originating project are noted above each example's code.
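Before the project examples, here is a minimal, self-contained sketch of the pattern most of them share: allocate a MemorySegment, wrap it in a NetworkBuffer together with a recycler, write through the buffer's Netty ByteBuf API, and recycle it when done. This sketch is not taken from any of the projects below; it assumes flink-runtime is on the classpath, and the class name NetworkBufferSketch is invented for illustration.

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;
import org.apache.flink.runtime.io.network.buffer.FreeingBufferRecycler;
import org.apache.flink.runtime.io.network.buffer.NetworkBuffer;

public class NetworkBufferSketch {

	public static void main(String[] args) {
		// Wrap an unpooled 4 KiB segment; FreeingBufferRecycler frees the segment
		// as soon as the buffer's reference count drops to zero.
		MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(4096);
		NetworkBuffer buffer = new NetworkBuffer(segment, FreeingBufferRecycler.INSTANCE);

		buffer.writeInt(42);                        // NetworkBuffer is also a Netty ByteBuf
		System.out.println(buffer.readableBytes()); // 4

		buffer.recycleBuffer();                     // release the only reference
		System.out.println(buffer.isRecycled());    // true
	}
}
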
Example #1
Source File: AsynchronousBufferFileWriterTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testAddWithFailingWriter() throws Exception {
	AsynchronousBufferFileWriter writer =
		new AsynchronousBufferFileWriter(ioManager.createChannel(), new RequestQueue<>());
	writer.close();

	exception.expect(IOException.class);

	Buffer buffer = new NetworkBuffer(MemorySegmentFactory.allocateUnpooledSegment(4096),
		FreeingBufferRecycler.INSTANCE);
	try {
		writer.writeBlock(buffer);
	} finally {
		if (!buffer.isRecycled()) {
			buffer.recycleBuffer();
			Assert.fail("buffer not recycled");
		}
	assertEquals("Shouldn't increment number of outstanding requests.", 0, writer.getNumberOfOutstandingRequests());
	}
}
 
Example #2
Source File: CheckpointBarrierAlignerTestBase.java    From flink with Apache License 2.0
private static BufferOrEvent createBuffer(int channel) {
	final int size = sizeCounter++;
	byte[] bytes = new byte[size];
	RND.nextBytes(bytes);

	MemorySegment memory = MemorySegmentFactory.allocateUnpooledSegment(PAGE_SIZE);
	memory.put(0, bytes);

	Buffer buf = new NetworkBuffer(memory, FreeingBufferRecycler.INSTANCE);
	buf.setSize(size);

	// retain an additional time so it does not get disposed after being read by the input gate
	buf.retainBuffer();

	return new BufferOrEvent(buf, new InputChannelInfo(0, channel));
}
 
Example #3
Source File: ChannelStateWriterImplTest.java    From flink with Apache License 2.0
@Test(expected = TestException.class)
public void testBuffersRecycledOnError() throws Exception {
	unwrappingError(TestException.class, () -> {
		NetworkBuffer buffer = getBuffer();
		try (ChannelStateWriterImpl writer = new ChannelStateWriterImpl(
				TASK_NAME,
				new ConcurrentHashMap<>(),
				failingWorker(),
				5)) {
			writer.open();
			callAddInputData(writer, buffer);
		} finally {
			assertTrue(buffer.isRecycled());
		}
	});
}
 
Example #4
Source File: ChannelStateReaderImplTest.java    From flink with Apache License 2.0
private void readAndVerify(int bufferSize, InputChannelInfo channelInfo, byte[] data, ChannelStateReader reader) throws IOException {
	int dataSize = data.length;
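	// iterations = ceil(dataSize / bufferSize): the unsigned shift contributes 1 exactly when dataSize % bufferSize != 0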
	int iterations = dataSize / bufferSize + (-(dataSize % bufferSize) >>> 31);
	NetworkBuffer buffer = getBuffer(bufferSize);
	try {
		for (int i = 0; i < iterations; i++) {
			String hint = String.format("dataSize=%d, bufferSize=%d, iteration=%d/%d", dataSize, bufferSize, i + 1, iterations);
			boolean isLast = i == iterations - 1;
			assertEquals(hint, isLast ? NO_MORE_DATA : HAS_MORE_DATA, reader.readInputData(channelInfo, buffer));
			assertEquals(hint, isLast ? dataSize - bufferSize * i : bufferSize, buffer.readableBytes());
			assertArrayEquals(hint, Arrays.copyOfRange(data, i * bufferSize, Math.min(dataSize, (i + 1) * bufferSize)), toBytes(buffer));
			buffer.resetReaderIndex();
			buffer.resetWriterIndex();
		}
	} finally {
		buffer.release();
	}
}
 
Example #5
Source File: RemoteInputChannel.java    From Flink-CEPplus with Apache License 2.0
/**
 * An exclusive buffer is recycled to this input channel directly; this may trigger returning an
 * extra floating buffer and announcing the increased credit to the producer.
 *
 * @param segment The exclusive segment of this channel.
 */
@Override
public void recycle(MemorySegment segment) {
	int numAddedBuffers;

	synchronized (bufferQueue) {
		// Similar to notifyBufferAvailable(), make sure that we never add a buffer
		// after releaseAllResources() released all buffers (see below for details).
		if (isReleased.get()) {
			try {
				inputGate.returnExclusiveSegments(Collections.singletonList(segment));
				return;
			} catch (Throwable t) {
				ExceptionUtils.rethrow(t);
			}
		}
		numAddedBuffers = bufferQueue.addExclusiveBuffer(new NetworkBuffer(segment, this), numRequiredBuffers);
	}

	if (numAddedBuffers > 0 && unannouncedCredit.getAndAdd(numAddedBuffers) == 0) {
		notifyCreditAvailable();
	}
}
 
Example #6
Source File: RemoteInputChannel.java    From Flink-CEPplus with Apache License 2.0
/**
 * Assigns exclusive buffers to this input channel, and this method should be called only once
 * after this input channel is created.
 */
void assignExclusiveSegments(List<MemorySegment> segments) {
	checkState(this.initialCredit == 0, "Bug in input channel setup logic: exclusive buffers have " +
		"already been set for this input channel.");

	checkNotNull(segments);
	checkArgument(segments.size() > 0, "The number of exclusive buffers per channel should be larger than 0.");

	this.initialCredit = segments.size();
	this.numRequiredBuffers = segments.size();

	synchronized (bufferQueue) {
		for (MemorySegment segment : segments) {
			bufferQueue.addExclusiveBuffer(new NetworkBuffer(segment, this), numRequiredBuffers);
		}
	}
}
 
Example #7
Source File: ChannelStateCheckpointWriterTest.java    From flink with Apache License 2.0
@Test
@SuppressWarnings("ConstantConditions")
public void testSmallFilesNotWritten() throws Exception {
	int threshold = 100;
	File checkpointsDir = temporaryFolder.newFolder("checkpointsDir");
	File sharedStateDir = temporaryFolder.newFolder("sharedStateDir");
	FsCheckpointStreamFactory checkpointStreamFactory = new FsCheckpointStreamFactory(getSharedInstance(), fromLocalFile(checkpointsDir), fromLocalFile(sharedStateDir), threshold, threshold);
	ChannelStateWriteResult result = new ChannelStateWriteResult();
	ChannelStateCheckpointWriter writer = createWriter(result, checkpointStreamFactory.createCheckpointStateOutputStream(EXCLUSIVE));
	NetworkBuffer buffer = new NetworkBuffer(HeapMemorySegment.FACTORY.allocateUnpooledSegment(threshold / 2, null), FreeingBufferRecycler.INSTANCE);
	writer.writeInput(new InputChannelInfo(1, 2), buffer);
	writer.completeOutput();
	writer.completeInput();
	assertTrue(result.isDone());
	assertEquals(0, checkpointsDir.list().length);
	assertEquals(0, sharedStateDir.list().length);
}
 
Example #8
Source File: BarrierBufferTestBase.java    From Flink-CEPplus with Apache License 2.0
private static BufferOrEvent createBuffer(int channel, int pageSize) {
	final int size = sizeCounter++;
	byte[] bytes = new byte[size];
	RND.nextBytes(bytes);

	MemorySegment memory = MemorySegmentFactory.allocateUnpooledSegment(pageSize);
	memory.put(0, bytes);

	Buffer buf = new NetworkBuffer(memory, FreeingBufferRecycler.INSTANCE);
	buf.setSize(size);

	// retain an additional time so it does not get disposed after being read by the input gate
	buf.retainBuffer();

	return new BufferOrEvent(buf, channel);
}
 
Example #9
Source File: BufferReaderWriterUtil.java    From flink with Apache License 2.0
@Nullable
static Buffer sliceNextBuffer(ByteBuffer memory) {
	final int remaining = memory.remaining();

	// we only check the correct case where data is exhausted
	// all other cases can only occur if our write logic is wrong and will already throw
	// buffer underflow exceptions which will cause the read to fail.
	if (remaining == 0) {
		return null;
	}

	final boolean isEvent = memory.getShort() == HEADER_VALUE_IS_EVENT;
	final boolean isCompressed = memory.getShort() == BUFFER_IS_COMPRESSED;
	final int size = memory.getInt();

	memory.limit(memory.position() + size);
	ByteBuffer buf = memory.slice();
	memory.position(memory.limit());
	memory.limit(memory.capacity());

	MemorySegment memorySegment = MemorySegmentFactory.wrapOffHeapMemory(buf);

	Buffer.DataType dataType = isEvent ? Buffer.DataType.EVENT_BUFFER : Buffer.DataType.DATA_BUFFER;
	return new NetworkBuffer(memorySegment, FreeingBufferRecycler.INSTANCE, dataType, isCompressed, size);
}
 
Example #10
Source File: CompressedBlockChannelReader.java    From flink with Apache License 2.0
public CompressedBlockChannelReader(
		IOManager ioManager,
		ID channel,
		LinkedBlockingQueue<MemorySegment> blockQueue,
		BlockCompressionFactory codecFactory,
		int preferBlockSize,
		int segmentSize) throws IOException {
	this.reader = ioManager.createBufferFileReader(channel, this);
	this.blockQueue = blockQueue;
	copyCompress = preferBlockSize > segmentSize * 2;
	int blockSize = copyCompress ? preferBlockSize : segmentSize;
	this.decompressor = codecFactory.getDecompressor();
	cause = new AtomicReference<>();

	if (copyCompress) {
		this.buf = new byte[blockSize];
		this.bufWrapper = ByteBuffer.wrap(buf);
	}

	BlockCompressor compressor = codecFactory.getCompressor();
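	// pre-issue two asynchronous block reads; recycling a consumed buffer back to this reader triggers the next read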
	for (int i = 0; i < 2; i++) {
		MemorySegment segment = MemorySegmentFactory.wrap(new byte[compressor.getMaxCompressedSize(blockSize)]);
		reader.readInto(new NetworkBuffer(segment, this));
	}
}
 
Example #11
Source File: RemoteInputChannel.java    From flink with Apache License 2.0
/**
 * Assigns exclusive buffers to this input channel, and this method should be called only once
 * after this input channel is created.
 */
void assignExclusiveSegments() throws IOException {
	checkState(initialCredit == 0, "Bug in input channel setup logic: exclusive buffers have " +
		"already been set for this input channel.");

	Collection<MemorySegment> segments = checkNotNull(memorySegmentProvider.requestMemorySegments());
	checkArgument(!segments.isEmpty(), "The number of exclusive buffers per channel should be larger than 0.");

	initialCredit = segments.size();
	numRequiredBuffers = segments.size();

	synchronized (bufferQueue) {
		for (MemorySegment segment : segments) {
			bufferQueue.addExclusiveBuffer(new NetworkBuffer(segment, this), numRequiredBuffers);
		}
	}
}
 
Example #12
Source File: RemoteInputChannel.java    From flink with Apache License 2.0
/**
 * An exclusive buffer is recycled to this input channel directly; this may trigger returning an
 * extra floating buffer and announcing the increased credit to the producer.
 *
 * @param segment The exclusive segment of this channel.
 */
@Override
public void recycle(MemorySegment segment) {
	int numAddedBuffers;

	synchronized (bufferQueue) {
		// Similar to notifyBufferAvailable(), make sure that we never add a buffer
		// after releaseAllResources() released all buffers (see below for details).
		if (isReleased.get()) {
			try {
				memorySegmentProvider.recycleMemorySegments(Collections.singletonList(segment));
				return;
			} catch (Throwable t) {
				ExceptionUtils.rethrow(t);
			}
		}
		numAddedBuffers = bufferQueue.addExclusiveBuffer(new NetworkBuffer(segment, this), numRequiredBuffers);
	}

	if (numAddedBuffers > 0 && unannouncedCredit.getAndAdd(numAddedBuffers) == 0) {
		notifyCreditAvailable();
	}
}
 
Example #13
Source File: AsynchronousBufferFileWriterTest.java    From flink with Apache License 2.0
@Test
public void testAddWithFailingWriter() throws Exception {
	AsynchronousBufferFileWriter writer =
		new AsynchronousBufferFileWriter(ioManager.createChannel(), new RequestQueue<>());
	writer.close();

	exception.expect(IOException.class);

	Buffer buffer = new NetworkBuffer(MemorySegmentFactory.allocateUnpooledSegment(4096),
		FreeingBufferRecycler.INSTANCE);
	try {
		writer.writeBlock(buffer);
	} finally {
		if (!buffer.isRecycled()) {
			buffer.recycleBuffer();
			Assert.fail("buffer not recycled");
		}
	assertEquals("Shouldn't increment number of outstanding requests.", 0, writer.getNumberOfOutstandingRequests());
	}
}
 
Example #14
Source File: ChannelStateReaderImplTest.java    From flink with Apache License 2.0
private byte[] writeSomeBytes(int bytesCount, DataOutputStream out, ChannelStateSerializer serializer) throws IOException {
	byte[] bytes = generateData(bytesCount);
	NetworkBuffer buf = getBuffer(bytesCount);
	try {
		buf.writeBytes(bytes);
		serializer.writeData(out, buf);
		return bytes;
	} finally {
		buf.release();
	}
}
 
Example #15
Source File: ChannelStateCheckpointWriterTest.java    From flink with Apache License 2.0
@Test
public void testRecyclingBuffers() throws Exception {
	ChannelStateCheckpointWriter writer = createWriter(new ChannelStateWriteResult());
	NetworkBuffer buffer = new NetworkBuffer(HeapMemorySegment.FACTORY.allocateUnpooledSegment(10, null), FreeingBufferRecycler.INSTANCE);
	writer.writeInput(new InputChannelInfo(1, 2), buffer);
	assertTrue(buffer.isRecycled());
}
 
Example #16
Source File: ChannelStateSerializerImplTest.java    From flink with Apache License 2.0
private void write(byte[] data, ChannelStateSerializerImpl serializer, OutputStream baos) throws IOException {
	DataOutputStream out = new DataOutputStream(baos);
	serializer.writeHeader(out);
	NetworkBuffer buffer = new NetworkBuffer(MemorySegmentFactory.allocateUnpooledSegment(data.length), FreeingBufferRecycler.INSTANCE);
	try {
		buffer.writeBytes(data);
		serializer.writeData(out, buffer);
		out.flush();
	} finally {
		buffer.release();
	}
}
 
Example #17
Source File: ChannelStateSerializerImplTest.java    From flink with Apache License 2.0
private void readAndCheck(byte[] data, ChannelStateSerializerImpl serializer, ByteArrayInputStream is) throws IOException {
	serializer.readHeader(is);
	int size = serializer.readLength(is);
	assertEquals(data.length, size);
	NetworkBuffer buffer = new NetworkBuffer(MemorySegmentFactory.allocateUnpooledSegment(data.length), FreeingBufferRecycler.INSTANCE);
	try {
		int read = serializer.readData(is, wrap(buffer), size);
		assertEquals(size, read);
		assertArrayEquals(data, readBytes(buffer));
	} finally {
		buffer.release();
	}
}
 
Example #18
Source File: ChannelStateWriterImplTest.java    From flink with Apache License 2.0
@Test
public void testAbort() throws Exception {
	NetworkBuffer buffer = getBuffer();
	runWithSyncWorker((writer, worker) -> {
		callStart(writer);
		ChannelStateWriteResult result = writer.getAndRemoveWriteResult(CHECKPOINT_ID);
		callAddInputData(writer, buffer);
		callAbort(writer);
		worker.processAllRequests();
		assertTrue(result.isDone());
		assertTrue(buffer.isRecycled());
	});
}
 
Example #19
Source File: ChannelStateWriterImplTest.java    From flink with Apache License 2.0
@Test(expected = IllegalArgumentException.class)
public void testAddEventBuffer() throws Exception {

	NetworkBuffer dataBuf = getBuffer();
	NetworkBuffer eventBuf = getBuffer();
	eventBuf.setDataType(Buffer.DataType.EVENT_BUFFER);
	try {
		runWithSyncWorker(writer -> {
			callStart(writer);
			writer.addInputData(CHECKPOINT_ID, new InputChannelInfo(1, 1), 1, ofElements(Buffer::recycleBuffer, eventBuf, dataBuf));
		});
	} finally {
		assertTrue(dataBuf.isRecycled());
	}
}
 
Example #20
Source File: ChannelPersistenceITCase.java    From flink with Apache License 2.0
@Test
public void testReadWritten() throws Exception {
	long checkpointId = 1L;

	InputChannelInfo inputChannelInfo = new InputChannelInfo(2, 3);
	byte[] inputChannelInfoData = randomBytes(1024);

	ResultSubpartitionInfo resultSubpartitionInfo = new ResultSubpartitionInfo(4, 5);
	byte[] resultSubpartitionInfoData = randomBytes(1024);

	ChannelStateWriteResult handles = write(
		checkpointId,
		singletonMap(inputChannelInfo, inputChannelInfoData),
		singletonMap(resultSubpartitionInfo, resultSubpartitionInfoData)
	);

	assertArrayEquals(inputChannelInfoData, read(
		toTaskStateSnapshot(handles),
		inputChannelInfoData.length,
		(reader, mem) -> reader.readInputData(inputChannelInfo, new NetworkBuffer(mem, FreeingBufferRecycler.INSTANCE))
	));

	assertArrayEquals(resultSubpartitionInfoData, read(
		toTaskStateSnapshot(handles),
		resultSubpartitionInfoData.length,
		(reader, mem) -> reader.readOutputData(resultSubpartitionInfo, new BufferBuilder(mem, FreeingBufferRecycler.INSTANCE))
	));
}
 
Example #21
Source File: CompressedHeaderlessChannelWriterOutputView.java    From flink with Apache License 2.0
private void writeCompressed(MemorySegment current, int size) throws IOException {
	MemorySegment compressedBuffer;
	try {
		compressedBuffer = compressedBuffers.take();
	} catch (InterruptedException e) {
		throw new IOException(e);
	}
	int compressedLen = compressor.compress(current.getArray(), 0, size, compressedBuffer.getArray(), 0);
	NetworkBuffer networkBuffer = new NetworkBuffer(compressedBuffer, this);
	networkBuffer.setSize(compressedLen);
	writer.writeBlock(networkBuffer);
	blockCount++;
	numBytes += size;
	numCompressedBytes += compressedLen;
}
 
Example #22
Source File: CompressedHeaderlessChannelReaderInputView.java    From flink with Apache License 2.0
@Override
public void recycle(MemorySegment segment) {
	try {
		reader.readInto(new NetworkBuffer(segment, this));
	}
	catch (IOException e) {
		throw new RuntimeException(e);
	}
}
 
Example #23
Source File: NettyMessageClientDecoderDelegateTest.java    From flink with Apache License 2.0
private Buffer createDataBuffer(int size, Buffer.DataType dataType) {
	MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(size);
	NetworkBuffer buffer = new NetworkBuffer(segment, FreeingBufferRecycler.INSTANCE, dataType);
	for (int i = 0; i < size / 4; ++i) {
		buffer.writeInt(i);
	}

	return buffer;
}
 
Example #24
Source File: CompressedBlockChannelWriter.java    From flink with Apache License 2.0
private void compressBuffer(ByteBuffer buffer, int len) throws IOException {
	MemorySegment compressedBuffer;
	try {
		compressedBuffer = compressedBuffers.take();
	} catch (InterruptedException e) {
		throw new IOException(e);
	}
	int compressedLen = compressor.compress(
			buffer, 0, len,
			compressedBuffer.wrap(0, compressedBuffer.size()), 0);
	NetworkBuffer networkBuffer = new NetworkBuffer(compressedBuffer, this);
	networkBuffer.setSize(compressedLen);
	writer.writeBlock(networkBuffer);
}
 
Example #25
Source File: BufferStorageTestBase.java    From flink with Apache License 2.0
public static BufferOrEvent generateRandomBuffer(int size, int channelIndex) {
	MemorySegment seg = MemorySegmentFactory.allocateUnpooledSegment(PAGE_SIZE);
	for (int i = 0; i < size; i++) {
		seg.put(i, (byte) i);
	}

	Buffer buf = new NetworkBuffer(seg, FreeingBufferRecycler.INSTANCE);
	buf.setSize(size);
	return new BufferOrEvent(buf, channelIndex);
}
 
Example #26
Source File: CheckpointBarrierAlignerAlignmentLimitTest.java    From flink with Apache License 2.0
private static BufferOrEvent createBuffer(int channel, int size) {
	byte[] bytes = new byte[size];
	RND.nextBytes(bytes);

	MemorySegment memory = MemorySegmentFactory.allocateUnpooledSegment(PAGE_SIZE);
	memory.put(0, bytes);

	Buffer buf = new NetworkBuffer(memory, FreeingBufferRecycler.INSTANCE);
	buf.setSize(size);

	// retain an additional time so it does not get disposed after being read by the input gate
	buf.retainBuffer();

	return new BufferOrEvent(buf, channel);
}
 
Example #27
Source File: NettyMessageClientSideSerializationTest.java    From flink with Apache License 2.0
private Buffer decompress(Buffer buffer) {
	MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(BUFFER_SIZE);
	Buffer compressedBuffer = new NetworkBuffer(segment, FreeingBufferRecycler.INSTANCE);
	buffer.asByteBuf().readBytes(compressedBuffer.asByteBuf(), buffer.readableBytes());
	compressedBuffer.setCompressed(true);
	return DECOMPRESSOR.decompressToOriginalBuffer(compressedBuffer);
}
 
Example #28
Source File: CompressedBlockChannelReader.java    From flink with Apache License 2.0
@Override
public void recycle(MemorySegment segment) {
	try {
		reader.readInto(new NetworkBuffer(segment, this));
	} catch (IOException e) {
		throw new RuntimeException(e);
	}
}