org.apache.flink.runtime.io.network.buffer.Buffer Java Examples

The following examples show how to use org.apache.flink.runtime.io.network.buffer.Buffer. They are taken from open source projects (Apache Flink and Flink-CEPplus); the source file and license are noted above each example.
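Before diving into the examples: a Buffer wraps a MemorySegment together with a BufferRecycler and is managed by reference counting (retainBuffer() / recycleBuffer()). The following is a minimal, self-contained sketch of that lifecycle, assembled only from the APIs that appear in the examples below; it is illustrative and not taken from the Flink or Flink-CEPplus sources, and the class name is arbitrary.

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;
import org.apache.flink.runtime.io.network.buffer.Buffer;
import org.apache.flink.runtime.io.network.buffer.FreeingBufferRecycler;
import org.apache.flink.runtime.io.network.buffer.NetworkBuffer;

public class BufferLifecycleSketch {

	public static void main(String[] args) {
		// Allocate an unpooled segment and wrap it in a NetworkBuffer (same pattern as Example #12).
		MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(4096);
		Buffer buffer = new NetworkBuffer(segment, FreeingBufferRecycler.INSTANCE);

		// Write a payload and record how many bytes of the segment are valid.
		segment.putInt(0, 42);
		buffer.setSize(4);

		// A data buffer reports isBuffer() == true; serialized events report false.
		System.out.println("is data buffer: " + buffer.isBuffer());

		// Recycling decrements the reference count; once it reaches zero the
		// segment is handed back to the recycler and isRecycled() returns true.
		buffer.recycleBuffer();
		System.out.println("recycled: " + buffer.isRecycled());
	}
}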
Example #1
Source File: BufferManager.java    From flink with Apache License 2.0
/**
 * Recycles all the exclusive and floating buffers from the given buffer queue.
 */
void releaseAllBuffers(ArrayDeque<Buffer> buffers) throws IOException {
	// Gather all exclusive buffers and recycle them to the global pool in one batch, because
	// we do not want to trigger a redistribution of buffers after each recycle.
	final List<MemorySegment> exclusiveRecyclingSegments = new ArrayList<>();

	Buffer buffer;
	while ((buffer = buffers.poll()) != null) {
		if (buffer.getRecycler() == this) {
			exclusiveRecyclingSegments.add(buffer.getMemorySegment());
		} else {
			buffer.recycleBuffer();
		}
	}
	synchronized (bufferQueue) {
		bufferQueue.releaseAll(exclusiveRecyclingSegments);
		bufferQueue.notifyAll();
	}

	if (exclusiveRecyclingSegments.size() > 0) {
		globalPool.recycleMemorySegments(exclusiveRecyclingSegments);
	}
}
 
Example #2
Source File: RemoteInputChannel.java    From flink with Apache License 2.0
@Override
public void spillInflightBuffers(long checkpointId, ChannelStateWriter channelStateWriter) throws IOException {
	synchronized (receivedBuffers) {
		checkState(checkpointId > lastRequestedCheckpointId, "Need to request the next checkpointId");

		final List<Buffer> inflightBuffers = new ArrayList<>(receivedBuffers.size());
		for (Buffer buffer : receivedBuffers) {
			CheckpointBarrier checkpointBarrier = parseCheckpointBarrierOrNull(buffer);
			if (checkpointBarrier != null && checkpointBarrier.getId() >= checkpointId) {
				break;
			}
			if (buffer.isBuffer()) {
				inflightBuffers.add(buffer.retainBuffer());
			}
		}

		lastRequestedCheckpointId = checkpointId;

		channelStateWriter.addInputData(
			checkpointId,
			channelInfo,
			ChannelStateWriter.SEQUENCE_NUMBER_UNKNOWN,
			CloseableIterator.fromList(inflightBuffers, Buffer::recycleBuffer));
	}
}
 
Example #3
Source File: AbstractCollectingResultPartitionWriter.java    From flink with Apache License 2.0
private void processBufferConsumers() throws IOException {
	while (!bufferConsumers.isEmpty()) {
		BufferConsumer bufferConsumer = bufferConsumers.peek();
		Buffer buffer = bufferConsumer.build();
		try {
			deserializeBuffer(buffer);
			if (!bufferConsumer.isFinished()) {
				break;
			}
			bufferConsumers.pop().close();
		}
		finally {
			buffer.recycleBuffer();
		}
	}
}
 
Example #4
Source File: BufferReaderWriterUtilTest.java    From flink with Apache License 2.0
@Test
public void testReadFromByteBufferNotEnoughData() {
	final ByteBuffer memory = ByteBuffer.allocateDirect(1200);
	final Buffer buffer = createTestBuffer();
	BufferReaderWriterUtil.writeBuffer(buffer, memory);

	memory.flip().limit(memory.limit() - 1);
	ByteBuffer tooSmall = memory.slice();

	try {
		BufferReaderWriterUtil.sliceNextBuffer(tooSmall);
		fail();
	}
	catch (Exception e) {
		// expected
	}
}
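For contrast, a successful round trip through the same utility could look like the sketch below. This is a hypothetical companion test, not part of BufferReaderWriterUtilTest: it assumes it sits in the same package (writeBuffer and sliceNextBuffer are package-private helpers, see Example #23) and that the imports and JUnit assertions used by the test above are in scope.

@Test
public void testWriteAndSliceRoundTrip() {
	final ByteBuffer memory = ByteBuffer.allocateDirect(1200);

	// Build a small data buffer by hand (same pattern as Example #12).
	final MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(128);
	final Buffer buffer = new NetworkBuffer(segment, FreeingBufferRecycler.INSTANCE);
	buffer.setSize(64);

	// Write header plus payload, then flip the ByteBuffer for reading.
	assertTrue(BufferReaderWriterUtil.writeBuffer(buffer, memory));
	memory.flip();

	// With the complete header and payload present, slicing succeeds.
	final Buffer sliced = BufferReaderWriterUtil.sliceNextBuffer(memory);
	assertNotNull(sliced);
	assertEquals(64, sliced.getSize());
	assertTrue(sliced.isBuffer());

	// The original buffer is no longer needed.
	buffer.recycleBuffer();
}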
 
Example #5
Source File: RemoteInputChannel.java    From flink with Apache License 2.0
/**
 * Releases all exclusive and floating buffers, closes the partition request client.
 */
@Override
void releaseAllResources() throws IOException {
	if (isReleased.compareAndSet(false, true)) {

		final ArrayDeque<Buffer> releasedBuffers;
		synchronized (receivedBuffers) {
			releasedBuffers = new ArrayDeque<>(receivedBuffers);
			receivedBuffers.clear();
		}
		bufferManager.releaseAllBuffers(releasedBuffers);

		// The released flag has to be set before closing the connection to ensure that
		// buffers received concurrently with closing are properly recycled.
		if (partitionRequestClient != null) {
			partitionRequestClient.close(this);
		} else {
			connectionManager.closeOpenChannelConnections(connectionId);
		}
	}
}
 
Example #6
Source File: BufferManager.java    From flink with Apache License 2.0
/**
 * Requests floating buffers from the buffer pool based on the given required amount, and returns the number
 * of buffers actually requested. If the required amount cannot be fully satisfied, this channel registers
 * itself as a buffer listener.
 */
int requestFloatingBuffers(int numRequired) throws IOException {
	int numRequestedBuffers = 0;
	synchronized (bufferQueue) {
		// Similar to notifyBufferAvailable(), make sure that we never add a buffer after the channel
		// has released all buffers via releaseAllResources().
		if (inputChannel.isReleased()) {
			return numRequestedBuffers;
		}

		numRequiredBuffers = numRequired;

		while (bufferQueue.getAvailableBufferSize() < numRequiredBuffers && !isWaitingForFloatingBuffers) {
			BufferPool bufferPool = inputChannel.inputGate.getBufferPool();
			Buffer buffer = bufferPool.requestBuffer();
			if (buffer != null) {
				bufferQueue.addFloatingBuffer(buffer);
				numRequestedBuffers++;
			} else if (bufferPool.addBufferListener(this)) {
				isWaitingForFloatingBuffers = true;
				break;
			}
		}
	}
	return numRequestedBuffers;
}
 
Example #7
Source File: RecordWriterTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that the buffers of a broadcast event are independent (in their reader indices) once they
 * are put into the queues for Netty when the event is broadcast to multiple channels.
 */
@Test
public void testBroadcastEventBufferIndependence() throws Exception {
	@SuppressWarnings("unchecked")
	ArrayDeque<BufferConsumer>[] queues =
		new ArrayDeque[]{new ArrayDeque(), new ArrayDeque()};

	ResultPartitionWriter partition =
		new CollectingPartitionWriter(queues, new TestPooledBufferProvider(Integer.MAX_VALUE));
	RecordWriter<?> writer = new RecordWriter<>(partition);

	writer.broadcastEvent(EndOfPartitionEvent.INSTANCE);

	// Verify added to all queues
	assertEquals(1, queues[0].size());
	assertEquals(1, queues[1].size());

	// these two buffers may share the memory but not the indices!
	Buffer buffer1 = buildSingleBuffer(queues[0].remove());
	Buffer buffer2 = buildSingleBuffer(queues[1].remove());
	assertEquals(0, buffer1.getReaderIndex());
	assertEquals(0, buffer2.getReaderIndex());
	buffer1.setReaderIndex(1);
	assertEquals("Buffer 2 shares the same reader index as buffer 1", 0, buffer2.getReaderIndex());
}
 
Example #8
Source File: PartitionRequestClientHandlerTest.java    From flink with Apache License 2.0
/**
 * Returns a deserialized buffer message as it would be received during runtime.
 */
static BufferResponse createBufferResponse(
		Buffer buffer,
		int sequenceNumber,
		InputChannelID receivingChannelId,
		int backlog) throws IOException {

	// Mock buffer to serialize
	BufferResponse resp = new BufferResponse(buffer, sequenceNumber, receivingChannelId, backlog);

	ByteBuf serialized = resp.write(UnpooledByteBufAllocator.DEFAULT);

	// Skip general header bytes
	serialized.readBytes(NettyMessage.FRAME_HEADER_LENGTH);

	// Deserialize the bytes again. We have to go this way, because we only partly deserialize
	// the header of the response and wait for a buffer from the buffer pool to copy the payload
	// data into.
	BufferResponse deserialized = BufferResponse.readFrom(serialized);

	return deserialized;
}
 
Example #9
Source File: EventSerializerTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests {@link EventSerializer#isEvent(Buffer, Class)}
 * whether it peeks into the buffer only, i.e. after the call, the buffer
 * is still de-serializable.
 */
@Test
public void testIsEventPeakOnly() throws Exception {
	final Buffer serializedEvent =
		EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE);
	try {
		final ClassLoader cl = getClass().getClassLoader();
		assertTrue(
			EventSerializer.isEvent(serializedEvent, EndOfPartitionEvent.class));
		EndOfPartitionEvent event = (EndOfPartitionEvent) EventSerializer
			.fromBuffer(serializedEvent, cl);
		assertEquals(EndOfPartitionEvent.INSTANCE, event);
	} finally {
		serializedEvent.recycleBuffer();
	}
}
 
Example #10
Source File: BackPressureStatsTrackerImplITCase.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests a simple, artificially back-pressured task. Back pressure is assumed when
 * the sampled stack traces are in blocking buffer requests.
 */
@Test
public void testBackPressureShouldBeReflectedInStats() throws Exception {
	final List<Buffer> buffers = requestAllBuffers();
	try {
		final JobGraph jobGraph = createJobWithBackPressure();
		testingMiniCluster.submitJob(jobGraph).get(TIMEOUT_SECONDS, TimeUnit.SECONDS);

		final OperatorBackPressureStats stats = getBackPressureStatsForTestVertex();

		assertThat(stats.getNumberOfSubTasks(), is(equalTo(JOB_PARALLELISM)));
		assertThat(stats, isFullyBackpressured());
	} finally {
		releaseBuffers(buffers);
	}
}
 
Example #11
Source File: BufferManager.java    From flink with Apache License 2.0
Buffer requestBufferBlocking() throws IOException, InterruptedException {
	synchronized (bufferQueue) {
		Buffer buffer;
		while ((buffer = bufferQueue.takeBuffer()) == null) {
			if (inputChannel.isReleased()) {
				throw new CancelTaskException("Input channel [" + inputChannel.channelInfo + "] has already been released.");
			}
			if (!isWaitingForFloatingBuffers) {
				BufferPool bufferPool = inputChannel.inputGate.getBufferPool();
				buffer = bufferPool.requestBuffer();
				if (buffer == null && shouldContinueRequest(bufferPool)) {
					continue;
				}
			}

			if (buffer != null) {
				return buffer;
			}
			bufferQueue.wait();
		}
		return buffer;
	}
}
 
Example #12
Source File: CheckpointBarrierAlignerTestBase.java    From flink with Apache License 2.0
private static BufferOrEvent createBuffer(int channel) {
	final int size = sizeCounter++;
	byte[] bytes = new byte[size];
	RND.nextBytes(bytes);

	MemorySegment memory = MemorySegmentFactory.allocateUnpooledSegment(PAGE_SIZE);
	memory.put(0, bytes);

	Buffer buf = new NetworkBuffer(memory, FreeingBufferRecycler.INSTANCE);
	buf.setSize(size);

	// retain an additional time so it does not get disposed after being read by the input gate
	buf.retainBuffer();

	return new BufferOrEvent(buf, channel);
}
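The extra retainBuffer() call above works because a Buffer is reference counted: every retain must be balanced by a recycleBuffer() before the underlying segment is actually released. Below is a minimal sketch of that contract, assembled from the same APIs; it is illustrative only, not from the Flink sources, and the method name is hypothetical.

// Hypothetical illustration of the retain/recycle contract.
private static void retainRecycleSketch() {
	MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(4096);
	Buffer buffer = new NetworkBuffer(segment, FreeingBufferRecycler.INSTANCE);

	buffer.retainBuffer();          // reference count goes from 1 to 2

	buffer.recycleBuffer();         // back to 1: the buffer is still live
	assert !buffer.isRecycled();

	buffer.recycleBuffer();         // reaches 0: the segment goes back to the recycler
	assert buffer.isRecycled();
}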
 
Example #13
Source File: SpilledSubpartitionView.java    From Flink-CEPplus with Apache License 2.0
@Nullable
@Override
public BufferAndBacklog getNextBuffer() throws IOException, InterruptedException {
	if (isSpillInProgress) {
		return null;
	}

	Buffer current;
	boolean nextBufferIsEvent;
	synchronized (this) {
		if (nextBuffer == null) {
			current = requestAndFillBuffer();
		} else {
			current = nextBuffer;
		}
		nextBuffer = requestAndFillBuffer();
		nextBufferIsEvent = nextBuffer != null && !nextBuffer.isBuffer();
	}

	if (current == null) {
		return null;
	}

	int newBacklog = parent.decreaseBuffersInBacklog(current);
	return new BufferAndBacklog(current, newBacklog > 0 || nextBufferIsEvent, newBacklog, nextBufferIsEvent);
}
 
Example #14
Source File: RecoveredInputChannel.java    From flink with Apache License 2.0
protected void readRecoveredState(ChannelStateReader reader) throws IOException, InterruptedException {
	ReadResult result = ReadResult.HAS_MORE_DATA;
	while (result == ReadResult.HAS_MORE_DATA) {
		Buffer buffer = bufferManager.requestBufferBlocking();
		result = internalReaderRecoveredState(reader, buffer);
	}
	finishReadRecoveredState();
}
 
Example #15
Source File: TestPooledBufferProvider.java    From Flink-CEPplus with Apache License 2.0
@Override
public Buffer requestBufferBlocking() throws IOException, InterruptedException {
	Buffer buffer = buffers.poll();
	if (buffer != null) {
		return buffer;
	}

	buffer = bufferFactory.create();
	if (buffer != null) {
		return buffer;
	}

	return buffers.take();
}
 
Example #16
Source File: BufferManager.java    From flink with Apache License 2.0
/**
 * The buffer pool notifies this listener of an available floating buffer. If the listener is released or
 * currently does not need extra buffers, the buffer should be returned to the buffer pool. Otherwise,
 * the buffer will be added into the <tt>bufferQueue</tt>.
 *
 * @param buffer Buffer that becomes available in buffer pool.
 * @return NotificationResult indicates whether this channel accepts the buffer and is waiting for
 * more floating buffers.
 */
@Override
public BufferListener.NotificationResult notifyBufferAvailable(Buffer buffer) {
	BufferListener.NotificationResult notificationResult = BufferListener.NotificationResult.BUFFER_NOT_USED;
	try {
		synchronized (bufferQueue) {
			checkState(isWaitingForFloatingBuffers, "This channel should be waiting for floating buffers.");

			// Important: make sure that we never add a buffer after releaseAllResources()
			// released all buffers. Following scenarios exist:
			// 1) releaseAllBuffers() already released buffers inside bufferQueue
			// -> while isReleased is set correctly in InputChannel
			// 2) releaseAllBuffers() did not yet release buffers from bufferQueue
			// -> we may or may not have set isReleased yet but will always wait for the
			// lock on bufferQueue to release buffers
			if (inputChannel.isReleased() || bufferQueue.getAvailableBufferSize() >= numRequiredBuffers) {
				isWaitingForFloatingBuffers = false;
				return notificationResult;
			}

			bufferQueue.addFloatingBuffer(buffer);
			bufferQueue.notifyAll();

			if (bufferQueue.getAvailableBufferSize() == numRequiredBuffers) {
				isWaitingForFloatingBuffers = false;
				notificationResult = BufferListener.NotificationResult.BUFFER_USED_NO_NEED_MORE;
			} else {
				notificationResult = BufferListener.NotificationResult.BUFFER_USED_NEED_MORE;
			}
		}

		if (notificationResult != NotificationResult.BUFFER_NOT_USED) {
			inputChannel.notifyBufferAvailable(1);
		}
	} catch (Throwable t) {
		inputChannel.setError(t);
	}

	return notificationResult;
}
 
Example #17
Source File: TestInputChannel.java    From flink with Apache License 2.0
private void assertReturnedBuffersAreRecycled(boolean assertBuffers, boolean assertEvents) {
	for (Buffer b : allReturnedBuffers) {
		if (b.isBuffer() && assertBuffers && !b.isRecycled()) {
			fail("Data Buffer " + b + " not recycled");
		}
		if (!b.isBuffer() && assertEvents && !b.isRecycled()) {
			fail("Event Buffer " + b + " not recycled");
		}
	}
}
 
Example #18
Source File: TestPooledBufferProvider.java    From Flink-CEPplus with Apache License 2.0
@Override
public void recycle(MemorySegment segment) {
	synchronized (listenerRegistrationLock) {
		final Buffer buffer = new NetworkBuffer(segment, this);

		BufferListener listener = registeredListeners.poll();

		if (listener == null) {
			buffers.add(buffer);
		}
		else {
			listener.notifyBufferAvailable(buffer);
		}
	}
}
 
Example #19
Source File: RecordWriterTest.java    From flink with Apache License 2.0
protected void verifyDeserializationResults(
		Queue<BufferConsumer> queue,
		RecordDeserializer<SerializationTestType> deserializer,
		ArrayDeque<SerializationTestType> expectedRecords,
		int numRequiredBuffers,
		int numValues) throws Exception {
	int assertRecords = 0;
	for (int j = 0; j < numRequiredBuffers; j++) {
		Buffer buffer = buildSingleBuffer(queue.remove());
		deserializer.setNextBuffer(buffer);

		assertRecords += DeserializationUtils.deserializeRecords(expectedRecords, deserializer);
	}
	Assert.assertEquals(numValues, assertRecords);
}
 
Example #20
Source File: BufferOrEvent.java    From flink with Apache License 2.0
public BufferOrEvent(Buffer buffer, int channelIndex, boolean moreAvailable) {
	this.buffer = checkNotNull(buffer);
	this.event = null;
	this.channelIndex = channelIndex;
	this.moreAvailable = moreAvailable;
	this.size = buffer.getSize();
}
 
Example #21
Source File: RecordingChannelStateWriter.java    From flink with Apache License 2.0
public void reset() {
	lastStartedCheckpointId = -1;
	lastFinishedCheckpointId = -1;
	addedInput.values().forEach(Buffer::recycleBuffer);
	addedInput.clear();
	addedOutput.values().forEach(Buffer::recycleBuffer);
	addedOutput.clear();
}
 
Example #22
Source File: NettyMessage.java    From flink with Apache License 2.0
/**
 * Parses the message header part and composes a new BufferResponse with an empty data buffer. The
 * data buffer will be filled in later.
 *
 * @param messageHeader the serialized message header.
 * @param bufferAllocator the allocator for network buffers.
 * @return a BufferResponse object with the header parsed and the data buffer to fill in later. The
 *			data buffer will be null if the target channel has been released or the buffer size is 0.
 */
static BufferResponse readFrom(ByteBuf messageHeader, NetworkBufferAllocator bufferAllocator) {
	InputChannelID receiverId = InputChannelID.fromByteBuf(messageHeader);
	int sequenceNumber = messageHeader.readInt();
	int backlog = messageHeader.readInt();
	Buffer.DataType dataType = Buffer.DataType.values()[messageHeader.readByte()];
	boolean isCompressed = messageHeader.readBoolean();
	int size = messageHeader.readInt();

	Buffer dataBuffer = null;

	if (size != 0) {
		if (dataType.isBuffer()) {
			dataBuffer = bufferAllocator.allocatePooledNetworkBuffer(receiverId);
		} else {
			dataBuffer = bufferAllocator.allocateUnPooledNetworkBuffer(size, dataType);
		}
	}

	if (dataBuffer != null) {
		dataBuffer.setCompressed(isCompressed);
	}

	return new BufferResponse(
		dataBuffer,
		dataType,
		isCompressed,
		sequenceNumber,
		receiverId,
		backlog,
		size);
}
 
Example #23
Source File: BufferReaderWriterUtil.java    From flink with Apache License 2.0
static boolean writeBuffer(Buffer buffer, ByteBuffer memory) {
	final int bufferSize = buffer.getSize();

	if (memory.remaining() < bufferSize + HEADER_LENGTH) {
		return false;
	}

	memory.putInt(buffer.isBuffer() ? HEADER_VALUE_IS_BUFFER : HEADER_VALUE_IS_EVENT);
	memory.putInt(bufferSize);
	memory.put(buffer.getNioBufferReadable());
	return true;
}
 
Example #24
Source File: EventSerializerTest.java    From flink with Apache License 2.0
/**
 * Returns the result of
 * {@link EventSerializer#isEvent(Buffer, Class)} on a buffer
 * that encodes the given <tt>event</tt>.
 *
 * @param event the event to encode
 * @param eventClass the event class to check against
 *
 * @return whether {@link EventSerializer#isEvent(Buffer, Class)}
 * 		thinks the encoded buffer matches the class
 */
private boolean checkIsEvent(
		AbstractEvent event,
		Class<?> eventClass) throws IOException {

	final Buffer serializedEvent = EventSerializer.toBuffer(event);
	try {
		return EventSerializer.isEvent(serializedEvent, eventClass);
	} finally {
		serializedEvent.recycleBuffer();
	}
}
 
Example #25
Source File: RecordWriterTest.java    From flink with Apache License 2.0
static BufferOrEvent parseBuffer(BufferConsumer bufferConsumer, int targetChannel) throws IOException {
	Buffer buffer = buildSingleBuffer(bufferConsumer);
	if (buffer.isBuffer()) {
		return new BufferOrEvent(buffer, new InputChannelInfo(0, targetChannel));
	} else {
		// is event:
		AbstractEvent event = EventSerializer.fromBuffer(buffer, RecordWriterTest.class.getClassLoader());
		buffer.recycleBuffer(); // the buffer is not needed anymore
		return new BufferOrEvent(event, new InputChannelInfo(0, targetChannel));
	}
}
 
Example #26
Source File: BufferReaderWriterUtil.java    From flink with Apache License 2.0
@Nullable
static Buffer readFromByteChannel(
		FileChannel channel,
		ByteBuffer headerBuffer,
		MemorySegment memorySegment,
		BufferRecycler bufferRecycler) throws IOException {

	headerBuffer.clear();
	if (!tryReadByteBuffer(channel, headerBuffer)) {
		return null;
	}
	headerBuffer.flip();

	final ByteBuffer targetBuf;
	final boolean isEvent;
	final boolean isCompressed;
	final int size;

	try {
		isEvent = headerBuffer.getShort() == HEADER_VALUE_IS_EVENT;
		isCompressed = headerBuffer.getShort() == BUFFER_IS_COMPRESSED;
		size = headerBuffer.getInt();
		targetBuf = memorySegment.wrap(0, size);
	}
	catch (BufferUnderflowException | IllegalArgumentException e) {
		// buffer underflow if header buffer is undersized
		// IllegalArgumentException if size is outside memory segment size
		throwCorruptDataException();
		return null; // silence compiler
	}

	readByteBufferFully(channel, targetBuf);

	Buffer.DataType dataType = isEvent ? Buffer.DataType.EVENT_BUFFER : Buffer.DataType.DATA_BUFFER;
	return new NetworkBuffer(memorySegment, bufferRecycler, dataType, isCompressed, size);
}
 
Example #27
Source File: StreamTaskNetworkInput.java    From flink with Apache License 2.0
@Override
public void close() throws IOException {
	// Clear the buffers. This part should never fail.
	for (RecordDeserializer<?> deserializer : recordDeserializers) {
		Buffer buffer = deserializer.getCurrentBuffer();
		if (buffer != null && !buffer.isRecycled()) {
			buffer.recycleBuffer();
		}
		deserializer.clear();
	}

	checkpointedInputGate.cleanup();
}
 
Example #28
Source File: FileChannelMemoryMappedBoundedData.java    From flink with Apache License 2.0
private boolean tryWriteBuffer(Buffer buffer) throws IOException {
	final long spaceLeft = endOfCurrentRegion - pos;
	final long bytesWritten = BufferReaderWriterUtil.writeToByteChannelIfBelowSize(
			fileChannel, buffer, headerAndBufferArray, spaceLeft);

	if (bytesWritten >= 0) {
		pos += bytesWritten;
		return true;
	}
	else {
		return false;
	}
}
 
Example #29
Source File: SpillingAdaptiveSpanningRecordDeserializer.java    From flink with Apache License 2.0
@Override
public void setNextBuffer(Buffer buffer) throws IOException {
	currentBuffer = buffer;

	int offset = buffer.getMemorySegmentOffset();
	MemorySegment segment = buffer.getMemorySegment();
	int numBytes = buffer.getSize();

	// check if some spanning record deserialization is pending
	if (spanningWrapper.getNumGatheredBytes() > 0) {
		spanningWrapper.addNextChunkFromMemorySegment(segment, offset, numBytes);
	} else {
		nonSpanningWrapper.initializeFromMemorySegment(segment, offset, numBytes + offset);
	}
}
 
Example #30
Source File: TestConsumerCallback.java    From Flink-CEPplus with Apache License 2.0
@Override
public void onBuffer(Buffer buffer) {
	final MemorySegment segment = buffer.getMemorySegment();

	int expected = getNumberOfReadBuffers() * (segment.size() / 4);

	for (int i = 0; i < segment.size(); i += 4) {
		assertEquals(expected, segment.getInt(i));

		expected++;
	}

	super.onBuffer(buffer);
}