Java Code Examples for org.apache.flink.runtime.io.network.buffer.Buffer

The following examples show how to use org.apache.flink.runtime.io.network.buffer.Buffer. These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source Project: flink   Source File: BufferManager.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Returns every buffer in the given queue to its owner: exclusive buffers are handed back
 * to the global pool in a single batch, all other buffers are recycled individually.
 */
void releaseAllBuffers(ArrayDeque<Buffer> buffers) throws IOException {
	// Collect exclusive segments so they can be returned to the global pool in one call,
	// avoiding a buffer redistribution per recycled segment.
	final List<MemorySegment> segmentsForGlobalPool = new ArrayList<>();

	for (Buffer buffer = buffers.poll(); buffer != null; buffer = buffers.poll()) {
		if (buffer.getRecycler() == this) {
			segmentsForGlobalPool.add(buffer.getMemorySegment());
		} else {
			buffer.recycleBuffer();
		}
	}

	synchronized (bufferQueue) {
		bufferQueue.releaseAll(segmentsForGlobalPool);
		// Wake up any thread blocked in a buffer-request wait loop.
		bufferQueue.notifyAll();
	}

	if (!segmentsForGlobalPool.isEmpty()) {
		globalPool.recycleMemorySegments(segmentsForGlobalPool);
	}
}
 
Example 2
Source Project: flink   Source File: BufferReaderWriterUtilTest.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testReadFromByteBufferNotEnoughData() {
	// Serialize one buffer, then present a view that is one byte short of the full
	// serialized form; deserialization must fail rather than yield a truncated buffer.
	final ByteBuffer memory = ByteBuffer.allocateDirect(1200);
	final Buffer buffer = createTestBuffer();
	BufferReaderWriterUtil.writeBuffer(buffer, memory);

	memory.flip().limit(memory.limit() - 1);
	ByteBuffer tooSmall = memory.slice();

	try {
		BufferReaderWriterUtil.sliceNextBuffer(tooSmall);
		// Give the failure a message so a regression is diagnosable from the report.
		fail("sliceNextBuffer() should have thrown on truncated serialized data");
	}
	catch (Exception e) {
		// expected: the serialized buffer is incomplete
	}
}
 
Example 3
Source Project: flink   Source File: RemoteInputChannel.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public void spillInflightBuffers(long checkpointId, ChannelStateWriter channelStateWriter) throws IOException {
	synchronized (receivedBuffers) {
		checkState(checkpointId > lastRequestedCheckpointId, "Need to request the next checkpointId");

		// Snapshot every data buffer received before this checkpoint's barrier; buffers at
		// or after a barrier with id >= checkpointId belong to a later checkpoint.
		final List<Buffer> inflightBuffers = new ArrayList<>(receivedBuffers.size());
		for (Buffer buffer : receivedBuffers) {
			CheckpointBarrier checkpointBarrier = parseCheckpointBarrierOrNull(buffer);
			if (checkpointBarrier != null && checkpointBarrier.getId() >= checkpointId) {
				break;
			}
			if (buffer.isBuffer()) {
				// Retain: the buffer stays in receivedBuffers for normal consumption while
				// the state writer holds its own reference.
				inflightBuffers.add(buffer.retainBuffer());
			}
		}

		lastRequestedCheckpointId = checkpointId;

		// Hand the snapshot over; the closeable iterator recycles the retained buffers
		// once they are consumed or the iterator is closed.
		channelStateWriter.addInputData(
			checkpointId,
			channelInfo,
			ChannelStateWriter.SEQUENCE_NUMBER_UNKNOWN,
			CloseableIterator.fromList(inflightBuffers, Buffer::recycleBuffer));
	}
}
 
Example 4
Source Project: flink   Source File: RemoteInputChannel.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Releases all exclusive and floating buffers, closes the partition request client.
 */
@Override
void releaseAllResources() throws IOException {
	// compareAndSet guarantees the release logic runs at most once, even if called
	// concurrently.
	if (isReleased.compareAndSet(false, true)) {

		// Drain receivedBuffers under the lock, but recycle outside of it to keep the
		// critical section short.
		final ArrayDeque<Buffer> releasedBuffers;
		synchronized (receivedBuffers) {
			releasedBuffers = new ArrayDeque<>(receivedBuffers);
			receivedBuffers.clear();
		}
		bufferManager.releaseAllBuffers(releasedBuffers);

		// The released flag has to be set before closing the connection to ensure that
		// buffers received concurrently with closing are properly recycled.
		if (partitionRequestClient != null) {
			partitionRequestClient.close(this);
		} else {
			// No request client was ever established; close any open connections directly.
			connectionManager.closeOpenChannelConnections(connectionId);
		}
	}
}
 
Example 5
Source Project: flink   Source File: PartitionRequestClientHandlerTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Serializes and immediately re-deserializes a {@link BufferResponse}, mimicking the
 * message as it would be received over the wire at runtime.
 */
static BufferResponse createBufferResponse(
		Buffer buffer,
		int sequenceNumber,
		InputChannelID receivingChannelId,
		int backlog) throws IOException {

	// Build the message and write it into an unpooled Netty buffer.
	BufferResponse response = new BufferResponse(buffer, sequenceNumber, receivingChannelId, backlog);
	ByteBuf serialized = response.write(UnpooledByteBufAllocator.DEFAULT);

	// Drop the generic frame header, as the runtime decoder would already have consumed it.
	serialized.readBytes(NettyMessage.FRAME_HEADER_LENGTH);

	// Re-read the message. This mirrors the runtime path, which only partly deserializes
	// the header and later copies the payload into a buffer from the buffer pool.
	return BufferResponse.readFrom(serialized);
}
 
Example 6
Source Project: flink   Source File: BufferManager.java    License: Apache License 2.0 6 votes vote down vote up
// Blocks until a buffer is available for this channel, either from the local buffer
// queue or as a freshly requested floating buffer from the input gate's pool.
Buffer requestBufferBlocking() throws IOException, InterruptedException {
	synchronized (bufferQueue) {
		Buffer buffer;
		while ((buffer = bufferQueue.takeBuffer()) == null) {
			// Released channels must not block forever; abort the task instead.
			if (inputChannel.isReleased()) {
				throw new CancelTaskException("Input channel [" + inputChannel.channelInfo + "] has already been released.");
			}
			if (!isWaitingForFloatingBuffers) {
				// Not yet registered as a pool listener: try to grab a floating buffer directly.
				BufferPool bufferPool = inputChannel.inputGate.getBufferPool();
				buffer = bufferPool.requestBuffer();
				if (buffer == null && shouldContinueRequest(bufferPool)) {
					// NOTE(review): presumably shouldContinueRequest() registers this manager
					// as a buffer listener or re-checks pool state — confirm in BufferManager.
					continue;
				}
			}

			if (buffer != null) {
				return buffer;
			}
			// Wait until a recycled buffer triggers notifyAll() on bufferQueue.
			bufferQueue.wait();
		}
		return buffer;
	}
}
 
Example 7
Source Project: Flink-CEPplus   Source File: SpilledSubpartitionView.java    License: Apache License 2.0 6 votes vote down vote up
@Nullable
@Override
public BufferAndBacklog getNextBuffer() throws IOException, InterruptedException {
	// While the subpartition data is still being spilled, nothing can be served yet.
	if (isSpillInProgress) {
		return null;
	}

	Buffer current;
	boolean nextBufferIsEvent;
	synchronized (this) {
		// Serve the prefetched buffer if one exists, otherwise read the next one now;
		// in both cases prefetch the following buffer for one-element look-ahead.
		if (nextBuffer == null) {
			current = requestAndFillBuffer();
		} else {
			current = nextBuffer;
		}
		nextBuffer = requestAndFillBuffer();
		// Look-ahead flag: tells the consumer whether the upcoming element is an event.
		nextBufferIsEvent = nextBuffer != null && !nextBuffer.isBuffer();
	}

	// null here means the spilled data is exhausted.
	if (current == null) {
		return null;
	}

	int newBacklog = parent.decreaseBuffersInBacklog(current);
	return new BufferAndBacklog(current, newBacklog > 0 || nextBufferIsEvent, newBacklog, nextBufferIsEvent);
}
 
Example 8
Source Project: flink   Source File: CheckpointBarrierAlignerTestBase.java    License: Apache License 2.0 6 votes vote down vote up
// Creates a BufferOrEvent holding random payload of a strictly increasing size.
private static BufferOrEvent createBuffer(int channel) {
	final int size = sizeCounter++;
	final byte[] payload = new byte[size];
	RND.nextBytes(payload);

	final MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(PAGE_SIZE);
	segment.put(0, payload);

	final Buffer networkBuffer = new NetworkBuffer(segment, FreeingBufferRecycler.INSTANCE);
	networkBuffer.setSize(size);

	// retain an additional time so it does not get disposed after being read by the input gate
	networkBuffer.retainBuffer();

	return new BufferOrEvent(networkBuffer, channel);
}
 
Example 9
/**
 * Tests a simple fake-back pressured task. Back pressure is assumed when
 * sampled stack traces are in blocking buffer requests.
 */
@Test
public void testBackPressureShouldBeReflectedInStats() throws Exception {
	// Claim every buffer up-front so the job's tasks block on buffer requests.
	final List<Buffer> claimedBuffers = requestAllBuffers();
	try {
		final JobGraph jobGraph = createJobWithBackPressure();
		testingMiniCluster.submitJob(jobGraph).get(TIMEOUT_SECONDS, TimeUnit.SECONDS);

		final OperatorBackPressureStats stats = getBackPressureStatsForTestVertex();

		assertThat(stats.getNumberOfSubTasks(), is(equalTo(JOB_PARALLELISM)));
		assertThat(stats, isFullyBackpressured());
	} finally {
		// Always return the buffers so the cluster can shut down cleanly.
		releaseBuffers(claimedBuffers);
	}
}
 
Example 10
Source Project: Flink-CEPplus   Source File: EventSerializerTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Tests {@link EventSerializer#isEvent(Buffer, Class)}
 * whether it peeks into the buffer only, i.e. after the call, the buffer
 * is still de-serializable.
 */
@Test
public void testIsEventPeakOnly() throws Exception {
	final Buffer serializedEvent =
		EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE);
	try {
		final ClassLoader cl = getClass().getClassLoader();
		// isEvent() must not consume the buffer's content ...
		assertTrue(
			EventSerializer.isEvent(serializedEvent, EndOfPartitionEvent.class));
		// ... so a full deserialization afterwards must still succeed.
		EndOfPartitionEvent event = (EndOfPartitionEvent) EventSerializer
			.fromBuffer(serializedEvent, cl);
		assertEquals(EndOfPartitionEvent.INSTANCE, event);
	} finally {
		serializedEvent.recycleBuffer();
	}
}
 
Example 11
Source Project: Flink-CEPplus   Source File: RecordWriterTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Tests that broadcasted events' buffers are independent (in their (reader) indices) once they
 * are put into the queue for Netty when broadcasting events to multiple channels.
 */
@Test
public void testBroadcastEventBufferIndependence() throws Exception {
	@SuppressWarnings("unchecked")
	ArrayDeque<BufferConsumer>[] channelQueues =
		new ArrayDeque[]{new ArrayDeque(), new ArrayDeque()};

	ResultPartitionWriter partition =
		new CollectingPartitionWriter(channelQueues, new TestPooledBufferProvider(Integer.MAX_VALUE));
	RecordWriter<?> writer = new RecordWriter<>(partition);

	writer.broadcastEvent(EndOfPartitionEvent.INSTANCE);

	// The event must have been enqueued once per channel.
	assertEquals(1, channelQueues[0].size());
	assertEquals(1, channelQueues[1].size());

	// The two buffers may share their memory, but must not share reader indices.
	Buffer firstBuffer = buildSingleBuffer(channelQueues[0].remove());
	Buffer secondBuffer = buildSingleBuffer(channelQueues[1].remove());
	assertEquals(0, firstBuffer.getReaderIndex());
	assertEquals(0, secondBuffer.getReaderIndex());

	// Advancing one reader index must leave the other untouched.
	firstBuffer.setReaderIndex(1);
	assertEquals("Buffer 2 shares the same reader index as buffer 1", 0, secondBuffer.getReaderIndex());
}
 
Example 12
Source Project: flink   Source File: BufferManager.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Requests floating buffers from the buffer pool based on the given required amount, and returns the actual
 * requested amount. If the required amount is not fully satisfied, it will register as a listener.
 */
int requestFloatingBuffers(int numRequired) throws IOException {
	int numRequestedBuffers = 0;
	synchronized (bufferQueue) {
		// Similar to notifyBufferAvailable(), make sure that we never add a buffer after channel
		// released all buffers via releaseAllResources().
		if (inputChannel.isReleased()) {
			return numRequestedBuffers;
		}

		numRequiredBuffers = numRequired;

		// Keep requesting until the queue holds enough buffers, or the pool runs dry and we
		// register as a listener (or are already waiting for floating buffers).
		while (bufferQueue.getAvailableBufferSize() < numRequiredBuffers && !isWaitingForFloatingBuffers) {
			BufferPool bufferPool = inputChannel.inputGate.getBufferPool();
			Buffer buffer = bufferPool.requestBuffer();
			if (buffer != null) {
				bufferQueue.addFloatingBuffer(buffer);
				numRequestedBuffers++;
			} else if (bufferPool.addBufferListener(this)) {
				// Pool exhausted: the pool will notify us when buffers become available.
				isWaitingForFloatingBuffers = true;
				break;
			}
		}
	}
	return numRequestedBuffers;
}
 
Example 13
// Drains finished buffer consumers from the head of the queue, deserializing the data
// each one produces; stops at the first consumer that still has data pending.
private void processBufferConsumers() throws IOException {
	while (!bufferConsumers.isEmpty()) {
		BufferConsumer bufferConsumer = bufferConsumers.peek();
		Buffer buffer = bufferConsumer.build();
		try {
			deserializeBuffer(buffer);
			if (!bufferConsumer.isFinished()) {
				// Consumer still has data to produce; revisit it on the next call.
				break;
			}
			bufferConsumers.pop().close();
		}
		finally {
			// The built buffer is recycled on every path, including the break above.
			buffer.recycleBuffer();
		}
	}
}
 
Example 14
@Override
public void setNextBuffer(Buffer buffer) throws IOException {
	currentBuffer = buffer;

	final MemorySegment segment = buffer.getMemorySegment();
	final int offset = buffer.getMemorySegmentOffset();
	final int numBytes = buffer.getSize();

	if (spanningWrapper.getNumGatheredBytes() > 0) {
		// A partial record is pending from a previous buffer; keep accumulating into it.
		spanningWrapper.addNextChunkFromMemorySegment(segment, offset, numBytes);
	} else {
		// No pending record: start reading fresh from this buffer's data region.
		nonSpanningWrapper.initializeFromMemorySegment(segment, offset, numBytes + offset);
	}
}
 
Example 15
Source Project: Flink-CEPplus   Source File: TestPooledBufferProvider.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public Buffer requestBuffer() throws IOException {
	// Serve from the pool of returned buffers first; fall back to creating a new one.
	final Buffer pooled = buffers.poll();
	return pooled != null ? pooled : bufferFactory.create();
}
 
Example 16
Source Project: flink   Source File: BufferReaderWriterUtil.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Writes the buffer (header plus data) to the channel if it fits into {@code bytesLeft},
 * returning the number of bytes written, or {@code -1L} if it does not fit.
 */
static long writeToByteChannelIfBelowSize(
		FileChannel channel,
		Buffer buffer,
		ByteBuffer[] arrayWithHeaderBuffer,
		long bytesLeft) throws IOException {

	// Force long arithmetic: HEADER_LENGTH and getSize() are ints, so their int sum
	// could overflow for buffer sizes close to Integer.MAX_VALUE.
	if (bytesLeft >= HEADER_LENGTH + (long) buffer.getSize()) {
		return writeToByteChannel(channel, buffer, arrayWithHeaderBuffer);
	}

	return -1L;
}
 
Example 17
Source Project: Flink-CEPplus   Source File: NettyMessage.java    License: Apache License 2.0 5 votes vote down vote up
BufferResponse(
		Buffer buffer,
		int sequenceNumber,
		InputChannelID receiverId,
		int backlog) {
	this.buffer = checkNotNull(buffer).asByteBuf();
	this.isBuffer = buffer.isBuffer();
	this.sequenceNumber = sequenceNumber;
	this.receiverId = checkNotNull(receiverId);
	this.backlog = backlog;
}
 
Example 18
Source Project: Flink-CEPplus   Source File: StreamTwoInputProcessor.java    License: Apache License 2.0 5 votes vote down vote up
// Releases all resources held by this processor: deserializer buffers first, then the
// barrier handler.
public void cleanup() throws IOException {
	// Release any buffers still held by the deserializers. This part must never fail.
	for (RecordDeserializer<?> deserializer : recordDeserializers) {
		final Buffer held = deserializer.getCurrentBuffer();
		if (held != null && !held.isRecycled()) {
			held.recycleBuffer();
		}
		deserializer.clear();
	}

	// Then release the barrier handler's resources.
	barrierHandler.cleanup();
}
 
Example 19
Source Project: flink   Source File: MockChannelStateWriter.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public void addOutputData(long checkpointId, ResultSubpartitionInfo info, int startSeqNum, Buffer... data) {
	checkCheckpointId(checkpointId);
	// This mock persists nothing; it simply releases every buffer it was handed.
	for (final Buffer b : data) {
		b.recycleBuffer();
	}
}
 
Example 20
@Override
protected MemorySegment nextSegment(MemorySegment current) throws IOException {
	// Propagate any asynchronous failure recorded by the reading side.
	if (cause.get() != null) {
		throw cause.get();
	}

	// check for end-of-stream
	if (this.numBlocksRemaining <= 0) {
		this.reader.close();
		throw new EOFException();
	}

	try {
		Buffer buffer;
		// Poll with a timeout so asynchronous failures are noticed while waiting.
		while ((buffer = retBuffers.poll(1, TimeUnit.SECONDS)) == null) {
			if (cause.get() != null) {
				throw cause.get();
			}
		}
		// Decompress the block into the shared uncompressed segment and remember how
		// many of its bytes are valid.
		this.currentSegmentLimit = decompressor.decompress(
				buffer.getMemorySegment().getArray(), 0, buffer.getSize(),
				uncompressedBuffer.getArray(), 0
		);

		buffer.recycleBuffer();
		this.numBlocksRemaining--;
		return uncompressedBuffer;
	}
	catch (InterruptedException e) {
		// NOTE(review): the interrupt status is not restored here — consider calling
		// Thread.currentThread().interrupt() before wrapping; confirm caller expectations.
		throw new IOException(e);
	}
}
 
Example 21
Source Project: flink   Source File: ChannelStateSerializer.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public void writeData(DataOutputStream stream, Buffer... flinkBuffers) throws IOException {
	// Length-prefix the payload with the combined size of all buffers.
	stream.writeInt(getSize(flinkBuffers));
	for (final Buffer flinkBuffer : flinkBuffers) {
		final ByteBuf byteBuf = flinkBuffer.asByteBuf();
		// getBytes is an absolute read: the buffer's reader index stays untouched.
		byteBuf.getBytes(byteBuf.readerIndex(), stream, byteBuf.readableBytes());
	}
}
 
Example 22
Source Project: flink   Source File: TestBufferFactory.java    License: Apache License 2.0 5 votes vote down vote up
// Creates a fresh unpooled buffer, or returns null once the configured pool size
// (total number of created buffers) has been reached.
public synchronized Buffer create() {
	if (numberOfCreatedBuffers < poolSize) {
		numberOfCreatedBuffers++;
		return new NetworkBuffer(MemorySegmentFactory.allocateUnpooledSegment(bufferSize), bufferRecycler);
	}
	// Quota exhausted: signal with null, as buffer providers conventionally do.
	return null;
}
 
Example 23
@Test
public void testBarrierOvertaking() throws Exception {
	// NOTE(review): the first add produces no notification, the second one does —
	// presumably notification only fires once data is actually consumable; confirm
	// against the subpartition's notification contract.
	subpartition.add(createFilledFinishedBufferConsumer(1));
	assertEquals(0, availablityListener.getNumNotifications());
	assertEquals(0, availablityListener.getNumPriorityEvents());

	subpartition.add(createFilledFinishedBufferConsumer(2));
	assertEquals(1, availablityListener.getNumNotifications());
	assertEquals(0, availablityListener.getNumPriorityEvents());

	// A regular event does not trigger an extra notification.
	BufferConsumer eventBuffer = EventSerializer.toBufferConsumer(EndOfSuperstepEvent.INSTANCE);
	subpartition.add(eventBuffer);
	assertEquals(1, availablityListener.getNumNotifications());
	assertEquals(0, availablityListener.getNumPriorityEvents());

	subpartition.add(createFilledFinishedBufferConsumer(4));
	assertEquals(1, availablityListener.getNumNotifications());
	assertEquals(0, availablityListener.getNumPriorityEvents());

	// Insert a checkpoint barrier with priority (second argument true): it triggers a
	// second notification even though regular additions did not.
	CheckpointOptions options = new CheckpointOptions(
		CheckpointType.CHECKPOINT,
		new CheckpointStorageLocationReference(new byte[]{0, 1, 2}),
		true,
		true);
	BufferConsumer barrierBuffer = EventSerializer.toBufferConsumer(new CheckpointBarrier(0, 0, options));
	subpartition.add(barrierBuffer, true);
	assertEquals(2, availablityListener.getNumNotifications());
	assertEquals(0, availablityListener.getNumPriorityEvents());

	// The in-flight snapshot contains exactly the data buffers (sizes 1, 2, 4).
	List<Buffer> inflight = subpartition.requestInflightBufferSnapshot();
	assertEquals(Arrays.asList(1, 2, 4), inflight.stream().map(Buffer::getSize).collect(Collectors.toList()));
	inflight.forEach(Buffer::recycleBuffer);

	// Read order: the barrier comes out first (it overtook the queued data), then the
	// buffers and events in their original FIFO order.
	assertNextEvent(readView, barrierBuffer.getWrittenBytes(), CheckpointBarrier.class, true, 2, false, true);
	assertNextBuffer(readView, 1, true, 1, false, true);
	assertNextBuffer(readView, 2, true, 0, true, true);
	assertNextEvent(readView, eventBuffer.getWrittenBytes(), EndOfSuperstepEvent.class, false, 0, false, true);
	assertNextBuffer(readView, 4, false, 0, false, true);
	assertNoNextBuffer(readView);
}
 
Example 24
Source Project: flink   Source File: ChannelStateWriteRequest.java    License: Apache License 2.0 5 votes vote down vote up
// Builds a cleanup callback that releases every given buffer; the throwable argument
// passed to the consumer is ignored.
static ThrowingConsumer<Throwable, Exception> recycle(Buffer[] flinkBuffers) {
	return ignored -> {
		for (final Buffer buffer : flinkBuffers) {
			buffer.recycleBuffer();
		}
	};
}
 
Example 25
Source Project: Flink-CEPplus   Source File: BufferBlockerTestBase.java    License: Apache License 2.0 5 votes vote down vote up
// Builds a BufferOrEvent whose first 'size' bytes follow a deterministic 0,1,2,... pattern.
public static BufferOrEvent generateRandomBuffer(int size, int channelIndex) {
	final MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(PAGE_SIZE);
	for (int pos = 0; pos < size; pos++) {
		segment.put(pos, (byte) pos);
	}

	final Buffer networkBuffer = new NetworkBuffer(segment, FreeingBufferRecycler.INSTANCE);
	networkBuffer.setSize(size);
	return new BufferOrEvent(networkBuffer, channelIndex);
}
 
Example 26
Source Project: flink   Source File: RemoteInputChannelTest.java    License: Apache License 2.0 5 votes vote down vote up
@Test
public void testExceptionOnReordering() throws Exception {
	// Setup
	final SingleInputGate inputGate = mock(SingleInputGate.class);
	final RemoteInputChannel inputChannel = createRemoteInputChannel(inputGate);
	final Buffer buffer = TestBufferFactory.createBuffer(TestBufferFactory.BUFFER_SIZE);

	// The test: deliver sequence number 0 first, then jump to 29 (out of order).
	inputChannel.onBuffer(buffer.retainBuffer(), 0, -1);

	// This does not yet throw the exception, but sets the error at the channel.
	inputChannel.onBuffer(buffer, 29, -1);

	try {
		inputChannel.getNextBuffer();

		fail("Did not throw expected exception after enqueuing an out-of-order buffer.");
	}
	catch (Exception expected) {
		// The channel must still hold the buffer until its resources are released ...
		assertFalse(buffer.isRecycled());
		// free remaining buffer instances
		inputChannel.releaseAllResources();
		// ... and release must recycle it.
		assertTrue(buffer.isRecycled());
	}

	// Need to notify the input gate for the out-of-order buffer as well. Otherwise the
	// receiving task will not notice the error.
	verify(inputGate, times(2)).notifyChannelNonEmpty(eq(inputChannel));
}
 
Example 27
Source Project: flink   Source File: FileChannelMemoryMappedBoundedData.java    License: Apache License 2.0 5 votes vote down vote up
// Writes the buffer to the file channel if it fits into the space left in the current
// memory-mapped region; returns false (without writing) when it does not fit.
private boolean tryWriteBuffer(Buffer buffer) throws IOException {
	final long remaining = endOfCurrentRegion - pos;
	final long written = BufferReaderWriterUtil.writeToByteChannelIfBelowSize(
			fileChannel, buffer, headerAndBufferArray, remaining);

	if (written < 0) {
		// A negative result signals that the buffer did not fit into this region.
		return false;
	}

	pos += written;
	return true;
}
 
Example 28
Source Project: flink   Source File: StreamTaskNetworkInput.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public void close() throws IOException {
	// Release any buffers still held by the deserializers. This part must never fail.
	for (RecordDeserializer<?> deserializer : recordDeserializers) {
		final Buffer held = deserializer.getCurrentBuffer();
		if (held != null && !held.isRecycled()) {
			held.recycleBuffer();
		}
		deserializer.clear();
	}

	// Then release the checkpointed input gate's resources.
	checkpointedInputGate.cleanup();
}
 
Example 29
Source Project: flink   Source File: BufferReaderWriterUtil.java    License: Apache License 2.0 5 votes vote down vote up
@Nullable
static Buffer readFromByteChannel(
		FileChannel channel,
		ByteBuffer headerBuffer,
		MemorySegment memorySegment,
		BufferRecycler bufferRecycler) throws IOException {

	// Read the fixed-size header first; a clean end-of-channel before a full header
	// means there are no more buffers.
	headerBuffer.clear();
	if (!tryReadByteBuffer(channel, headerBuffer)) {
		return null;
	}
	headerBuffer.flip();

	final ByteBuffer targetBuf;
	final boolean isEvent;
	final boolean isCompressed;
	final int size;

	try {
		// Header layout: short (event flag), short (compression flag), int (payload size).
		isEvent = headerBuffer.getShort() == HEADER_VALUE_IS_EVENT;
		isCompressed = headerBuffer.getShort() == BUFFER_IS_COMPRESSED;
		size = headerBuffer.getInt();
		targetBuf = memorySegment.wrap(0, size);
	}
	catch (BufferUnderflowException | IllegalArgumentException e) {
		// buffer underflow if header buffer is undersized
		// IllegalArgumentException if size is outside memory segment size
		throwCorruptDataException();
		return null; // silence compiler
	}

	// Read exactly 'size' payload bytes; anything short of that is treated as corrupt.
	readByteBufferFully(channel, targetBuf);

	Buffer.DataType dataType = isEvent ? Buffer.DataType.EVENT_BUFFER : Buffer.DataType.DATA_BUFFER;
	return new NetworkBuffer(memorySegment, bufferRecycler, dataType, isCompressed, size);
}
 
Example 30
Source Project: flink   Source File: RecordWriterTest.java    License: Apache License 2.0 5 votes vote down vote up
// Converts a buffer consumer's content into a BufferOrEvent: data buffers are passed
// through, serialized events are deserialized and their backing buffer released.
static BufferOrEvent parseBuffer(BufferConsumer bufferConsumer, int targetChannel) throws IOException {
	final Buffer built = buildSingleBuffer(bufferConsumer);
	if (!built.isBuffer()) {
		// Event path: deserialize, then release the backing buffer — no longer needed.
		final AbstractEvent event = EventSerializer.fromBuffer(built, RecordWriterTest.class.getClassLoader());
		built.recycleBuffer();
		return new BufferOrEvent(event, new InputChannelInfo(0, targetChannel));
	}
	return new BufferOrEvent(built, new InputChannelInfo(0, targetChannel));
}