org.apache.flink.runtime.io.network.api.serialization.RecordDeserializer Java Examples

The following examples show how to use org.apache.flink.runtime.io.network.api.serialization.RecordDeserializer. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example #1
Source File: RecordCollectingResultPartitionWriter.java    From Flink-CEPplus with Apache License 2.0 6 votes vote down vote up
@Override
protected void deserializeBuffer(Buffer buffer) throws IOException {
	// Hand the buffer to the deserializer and drain every complete record from it.
	deserializer.setNextBuffer(buffer);

	boolean moreDataInBuffer = deserializer.hasUnfinishedData();
	while (moreDataInBuffer) {
		final RecordDeserializer.DeserializationResult deserializationResult =
			deserializer.getNextRecord(record);

		if (deserializationResult.isFullRecord()) {
			// Store a copy, since `record` serves as the reusable deserialization target.
			output.add(record.createCopy());
		}

		// Stop once the buffer is exhausted or only a partial record remains.
		moreDataInBuffer =
			deserializationResult != RecordDeserializer.DeserializationResult.LAST_RECORD_FROM_BUFFER
				&& deserializationResult != RecordDeserializer.DeserializationResult.PARTIAL_RECORD
				&& deserializer.hasUnfinishedData();
	}
}
 
Example #2
Source File: RecordOrEventCollectingResultPartitionWriter.java    From Flink-CEPplus with Apache License 2.0 6 votes vote down vote up
@Override
protected void deserializeBuffer(Buffer buffer) throws IOException {
	// Event buffers are decoded directly; data buffers are drained record by record.
	if (!buffer.isBuffer()) {
		// is event
		AbstractEvent event = EventSerializer.fromBuffer(buffer, getClass().getClassLoader());
		output.add(event);
		return;
	}

	deserializer.setNextBuffer(buffer);

	boolean continueDraining = deserializer.hasUnfinishedData();
	while (continueDraining) {
		final RecordDeserializer.DeserializationResult deserializationResult =
			deserializer.getNextRecord(delegate);

		if (deserializationResult.isFullRecord()) {
			output.add(delegate.getInstance());
		}

		// Stop once the buffer is exhausted or only a partial record remains.
		continueDraining =
			deserializationResult != RecordDeserializer.DeserializationResult.LAST_RECORD_FROM_BUFFER
				&& deserializationResult != RecordDeserializer.DeserializationResult.PARTIAL_RECORD
				&& deserializer.hasUnfinishedData();
	}
}
 
Example #3
Source File: DeserializationUtils.java    From Flink-CEPplus with Apache License 2.0 6 votes vote down vote up
/**
 * Iterates over the provided records to deserialize, verifies the results and counts
 * the number of full records.
 *
 * @param records records to be deserialized
 * @param deserializer the record deserializer
 * @return the number of fully deserialized records
 * @throws Exception if instantiating a record instance or deserialization fails
 */
public static int deserializeRecords(
		ArrayDeque<SerializationTestType> records,
		RecordDeserializer<SerializationTestType> deserializer) throws Exception {
	int deserializedRecords = 0;

	while (!records.isEmpty()) {
		SerializationTestType expected = records.poll();
		// Class#newInstance() is deprecated since Java 9 (it propagates checked
		// exceptions unsoundly); go through the no-arg constructor explicitly.
		SerializationTestType actual = expected.getClass().getDeclaredConstructor().newInstance();

		if (deserializer.getNextRecord(actual).isFullRecord()) {
			Assert.assertEquals(expected, actual);
			deserializedRecords++;
		} else {
			// Not enough buffered data for a full record: put it back and stop.
			records.addFirst(expected);
			break;
		}
	}

	return deserializedRecords;
}
 
Example #4
Source File: StreamTaskNetworkInput.java    From flink with Apache License 2.0 6 votes vote down vote up
@Override
public CompletableFuture<Void> prepareSnapshot(
		ChannelStateWriter channelStateWriter,
		long checkpointId) throws IOException {
	// Persist partially deserialized data per channel, then spill in-flight buffers.
	for (int idx = 0; idx < recordDeserializers.length; idx++) {
		final InputChannel inputChannel = checkpointedInputGate.getChannel(idx);

		// Assumption for retrieving buffers = one concurrent checkpoint
		final RecordDeserializer<?> channelDeserializer = recordDeserializers[idx];
		if (channelDeserializer != null) {
			channelStateWriter.addInputData(
				checkpointId,
				inputChannel.getChannelInfo(),
				ChannelStateWriter.SEQUENCE_NUMBER_UNKNOWN,
				channelDeserializer.getUnconsumedBuffer());
		}

		checkpointedInputGate.spillInflightBuffers(checkpointId, idx, channelStateWriter);
	}
	return checkpointedInputGate.getAllBarriersReceivedFuture(checkpointId);
}
 
Example #5
Source File: StreamTaskNetworkInput.java    From flink with Apache License 2.0 6 votes vote down vote up
@VisibleForTesting
StreamTaskNetworkInput(
	CheckpointedInputGate checkpointedInputGate,
	TypeSerializer<?> inputSerializer,
	StatusWatermarkValve statusWatermarkValve,
	int inputIndex,
	RecordDeserializer<DeserializationDelegate<StreamElement>>[] recordDeserializers) {

	// Testing constructor: lets callers inject pre-built per-channel deserializers.
	this.checkpointedInputGate = checkpointedInputGate;
	this.inputIndex = inputIndex;
	this.statusWatermarkValve = statusWatermarkValve;
	this.recordDeserializers = recordDeserializers;
	this.channelIndexes = getChannelIndexes(checkpointedInputGate);
	this.deserializationDelegate =
		new NonReusingDeserializationDelegate<>(new StreamElementSerializer<>(inputSerializer));
}
 
Example #6
Source File: DeserializationUtils.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Iterates over the provided records to deserialize, verifies the results and stats
 * the number of full records.
 *
 * @param records records to be deserialized
 * @param deserializer the record deserializer
 * @return the number of full deserialized records
 */
public static int deserializeRecords(
		ArrayDeque<SerializationTestType> records,
		RecordDeserializer<SerializationTestType> deserializer) throws Exception {
	int deserializedRecords = 0;

	while (!records.isEmpty()) {
		SerializationTestType expected = records.poll();
		SerializationTestType actual = expected.getClass().newInstance();

		if (deserializer.getNextRecord(actual).isFullRecord()) {
			Assert.assertEquals(expected, actual);
			deserializedRecords++;
		} else {
			records.addFirst(expected);
			break;
		}
	}

	return deserializedRecords;
}
 
Example #7
Source File: RecordCollectingResultPartitionWriter.java    From flink with Apache License 2.0 6 votes vote down vote up
@Override
protected void deserializeBuffer(Buffer buffer) throws IOException {
	// Hand the buffer to the deserializer and drain every complete record from it.
	deserializer.setNextBuffer(buffer);

	boolean moreDataInBuffer = deserializer.hasUnfinishedData();
	while (moreDataInBuffer) {
		final RecordDeserializer.DeserializationResult deserializationResult =
			deserializer.getNextRecord(record);

		if (deserializationResult.isFullRecord()) {
			// Store a copy, since `record` serves as the reusable deserialization target.
			output.add(record.createCopy());
		}

		// Stop once the buffer is exhausted or only a partial record remains.
		moreDataInBuffer =
			deserializationResult != RecordDeserializer.DeserializationResult.LAST_RECORD_FROM_BUFFER
				&& deserializationResult != RecordDeserializer.DeserializationResult.PARTIAL_RECORD
				&& deserializer.hasUnfinishedData();
	}
}
 
Example #8
Source File: RecordOrEventCollectingResultPartitionWriter.java    From flink with Apache License 2.0 6 votes vote down vote up
@Override
protected void deserializeBuffer(Buffer buffer) throws IOException {
	// Event buffers are decoded directly; data buffers are drained record by record.
	if (!buffer.isBuffer()) {
		// is event
		AbstractEvent event = EventSerializer.fromBuffer(buffer, getClass().getClassLoader());
		output.add(event);
		return;
	}

	deserializer.setNextBuffer(buffer);

	boolean continueDraining = deserializer.hasUnfinishedData();
	while (continueDraining) {
		final RecordDeserializer.DeserializationResult deserializationResult =
			deserializer.getNextRecord(delegate);

		if (deserializationResult.isFullRecord()) {
			output.add(delegate.getInstance());
		}

		// Stop once the buffer is exhausted or only a partial record remains.
		continueDraining =
			deserializationResult != RecordDeserializer.DeserializationResult.LAST_RECORD_FROM_BUFFER
				&& deserializationResult != RecordDeserializer.DeserializationResult.PARTIAL_RECORD
				&& deserializer.hasUnfinishedData();
	}
}
 
Example #9
Source File: RecordOrEventCollectingResultPartitionWriter.java    From flink with Apache License 2.0 6 votes vote down vote up
@Override
protected void deserializeBuffer(Buffer buffer) throws IOException {
	// Event buffers are decoded directly; data buffers are drained record by record.
	if (!buffer.isBuffer()) {
		// is event
		AbstractEvent event = EventSerializer.fromBuffer(buffer, getClass().getClassLoader());
		output.add(event);
		return;
	}

	deserializer.setNextBuffer(buffer);

	boolean continueDraining = deserializer.hasUnfinishedData();
	while (continueDraining) {
		final RecordDeserializer.DeserializationResult deserializationResult =
			deserializer.getNextRecord(delegate);

		if (deserializationResult.isFullRecord()) {
			output.add(delegate.getInstance());
		}

		// Stop once the buffer is exhausted or only a partial record remains.
		continueDraining =
			deserializationResult != RecordDeserializer.DeserializationResult.LAST_RECORD_FROM_BUFFER
				&& deserializationResult != RecordDeserializer.DeserializationResult.PARTIAL_RECORD
				&& deserializer.hasUnfinishedData();
	}
}
 
Example #10
Source File: DeserializationUtils.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Iterates over the provided records to deserialize, verifies the results and stats
 * the number of full records.
 *
 * @param records records to be deserialized
 * @param deserializer the record deserializer
 * @return the number of full deserialized records
 */
public static int deserializeRecords(
		ArrayDeque<SerializationTestType> records,
		RecordDeserializer<SerializationTestType> deserializer) throws Exception {
	int deserializedRecords = 0;

	while (!records.isEmpty()) {
		SerializationTestType expected = records.poll();
		SerializationTestType actual = expected.getClass().newInstance();

		if (deserializer.getNextRecord(actual).isFullRecord()) {
			Assert.assertEquals(expected, actual);
			deserializedRecords++;
		} else {
			records.addFirst(expected);
			break;
		}
	}

	return deserializedRecords;
}
 
Example #11
Source File: RecordCollectingResultPartitionWriter.java    From flink with Apache License 2.0 6 votes vote down vote up
@Override
protected void deserializeBuffer(Buffer buffer) throws IOException {
	// Hand the buffer to the deserializer and drain every complete record from it.
	deserializer.setNextBuffer(buffer);

	boolean moreDataInBuffer = deserializer.hasUnfinishedData();
	while (moreDataInBuffer) {
		final RecordDeserializer.DeserializationResult deserializationResult =
			deserializer.getNextRecord(record);

		if (deserializationResult.isFullRecord()) {
			// Store a copy, since `record` serves as the reusable deserialization target.
			output.add(record.createCopy());
		}

		// Stop once the buffer is exhausted or only a partial record remains.
		moreDataInBuffer =
			deserializationResult != RecordDeserializer.DeserializationResult.LAST_RECORD_FROM_BUFFER
				&& deserializationResult != RecordDeserializer.DeserializationResult.PARTIAL_RECORD
				&& deserializer.hasUnfinishedData();
	}
}
 
Example #12
Source File: StreamTaskNetworkInput.java    From flink with Apache License 2.0 5 votes vote down vote up
private void releaseDeserializer(int channelIndex) {
	// Drop the per-channel deserializer, recycling any buffer it still holds.
	final RecordDeserializer<?> channelDeserializer = recordDeserializers[channelIndex];
	if (channelDeserializer == null) {
		return;
	}

	final Buffer currentBuffer = channelDeserializer.getCurrentBuffer();
	if (currentBuffer != null && !currentBuffer.isRecycled()) {
		currentBuffer.recycleBuffer();
	}
	channelDeserializer.clear();

	recordDeserializers[channelIndex] = null;
}
 
Example #13
Source File: AbstractRecordReader.java    From Flink-CEPplus with Apache License 2.0 5 votes vote down vote up
public void clearBuffers() {
	// Recycle any buffer still held by a deserializer and reset its state.
	for (RecordDeserializer<?> recordDeserializer : recordDeserializers) {
		final Buffer currentBuffer = recordDeserializer.getCurrentBuffer();
		if (currentBuffer != null && !currentBuffer.isRecycled()) {
			currentBuffer.recycleBuffer();
		}
		recordDeserializer.clear();
	}
}
 
Example #14
Source File: RecordWriterTest.java    From flink with Apache License 2.0 5 votes vote down vote up
protected void verifyDeserializationResults(
		Queue<BufferConsumer> queue,
		RecordDeserializer<SerializationTestType> deserializer,
		ArrayDeque<SerializationTestType> expectedRecords,
		int numRequiredBuffers,
		int numValues) throws Exception {
	// Deserialize every queued buffer and check that all expected values come back.
	int recordCount = 0;
	for (int bufferIndex = 0; bufferIndex < numRequiredBuffers; bufferIndex++) {
		final Buffer singleBuffer = buildSingleBuffer(queue.remove());
		deserializer.setNextBuffer(singleBuffer);

		recordCount += DeserializationUtils.deserializeRecords(expectedRecords, deserializer);
	}
	Assert.assertEquals(numValues, recordCount);
}
 
Example #15
Source File: RecordWriterTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Tests that records are broadcast via {@link RecordWriter#broadcastEmit(IOReadableWritable)}.
 */
@Test
public void testBroadcastEmitRecord() throws Exception {
	final int numberOfChannels = 4;
	final int bufferSize = 32;
	final int numValues = 8;
	final int serializationLength = 4;

	@SuppressWarnings("unchecked")
	final Queue<BufferConsumer>[] queues = new Queue[numberOfChannels];
	for (int i = 0; i < numberOfChannels; i++) {
		queues[i] = new ArrayDeque<>();
	}

	final TestPooledBufferProvider bufferProvider = new TestPooledBufferProvider(Integer.MAX_VALUE, bufferSize);
	final ResultPartitionWriter partitionWriter = new CollectingPartitionWriter(queues, bufferProvider);
	final RecordWriter<SerializationTestType> writer = createRecordWriter(partitionWriter);
	final RecordDeserializer<SerializationTestType> deserializer = new SpillingAdaptiveSpanningRecordDeserializer<>(
		new String[]{ tempFolder.getRoot().getAbsolutePath() });

	final ArrayDeque<SerializationTestType> serializedRecords = new ArrayDeque<>();
	final Iterable<SerializationTestType> records = Util.randomRecords(numValues, SerializationTestTypeFactory.INT);
	for (SerializationTestType record : records) {
		serializedRecords.add(record);
		writer.broadcastEmit(record);
	}

	final int numRequiredBuffers = numValues / (bufferSize / (4 + serializationLength));
	if (isBroadcastWriter) {
		assertEquals(numRequiredBuffers, bufferProvider.getNumberOfCreatedBuffers());
	} else {
		assertEquals(numRequiredBuffers * numberOfChannels, bufferProvider.getNumberOfCreatedBuffers());
	}

	for (int i = 0; i < numberOfChannels; i++) {
		assertEquals(numRequiredBuffers, queues[i].size());
		verifyDeserializationResults(queues[i], deserializer, serializedRecords.clone(), numRequiredBuffers, numValues);
	}
}
 
Example #16
Source File: AbstractRecordReader.java    From flink with Apache License 2.0 5 votes vote down vote up
public void clearBuffers() {
	// Recycle any buffer still held by a deserializer and reset its state.
	for (RecordDeserializer<?> recordDeserializer : recordDeserializers.values()) {
		final Buffer currentBuffer = recordDeserializer.getCurrentBuffer();
		if (currentBuffer != null && !currentBuffer.isRecycled()) {
			currentBuffer.recycleBuffer();
		}
		recordDeserializer.clear();
	}
}
 
Example #17
Source File: StreamTwoInputProcessor.java    From flink with Apache License 2.0 5 votes vote down vote up
@Override
public void close() throws IOException {
	// clear the buffers first. this part should not ever fail
	for (RecordDeserializer<?> recordDeserializer : recordDeserializers) {
		final Buffer currentBuffer = recordDeserializer.getCurrentBuffer();
		if (currentBuffer != null && !currentBuffer.isRecycled()) {
			currentBuffer.recycleBuffer();
		}
		recordDeserializer.clear();
	}

	// cleanup the barrier handler resources
	barrierHandler.cleanup();
}
 
Example #18
Source File: StreamTaskNetworkInput.java    From flink with Apache License 2.0 5 votes vote down vote up
@Override
public void close() throws IOException {
	// clear the buffers. this part should not ever fail
	for (RecordDeserializer<?> recordDeserializer : recordDeserializers) {
		final Buffer currentBuffer = recordDeserializer.getCurrentBuffer();
		if (currentBuffer != null && !currentBuffer.isRecycled()) {
			currentBuffer.recycleBuffer();
		}
		recordDeserializer.clear();
	}

	// release the input gate's checkpoint-related resources afterwards
	checkpointedInputGate.cleanup();
}
 
Example #19
Source File: AbstractRecordReader.java    From flink with Apache License 2.0 5 votes vote down vote up
public void clearBuffers() {
	// Recycle any buffer still held by a deserializer and reset its state.
	for (RecordDeserializer<?> recordDeserializer : recordDeserializers) {
		final Buffer currentBuffer = recordDeserializer.getCurrentBuffer();
		if (currentBuffer != null && !currentBuffer.isRecycled()) {
			currentBuffer.recycleBuffer();
		}
		recordDeserializer.clear();
	}
}
 
Example #20
Source File: StreamTwoInputProcessor.java    From Flink-CEPplus with Apache License 2.0 5 votes vote down vote up
public void cleanup() throws IOException {
	// clear the buffers first. this part should not ever fail
	for (RecordDeserializer<?> recordDeserializer : recordDeserializers) {
		final Buffer currentBuffer = recordDeserializer.getCurrentBuffer();
		if (currentBuffer != null && !currentBuffer.isRecycled()) {
			currentBuffer.recycleBuffer();
		}
		recordDeserializer.clear();
	}

	// cleanup the barrier handler resources
	barrierHandler.cleanup();
}
 
Example #21
Source File: StreamInputProcessor.java    From Flink-CEPplus with Apache License 2.0 5 votes vote down vote up
public void cleanup() throws IOException {
	// clear the buffers first. this part should not ever fail
	for (RecordDeserializer<?> recordDeserializer : recordDeserializers) {
		final Buffer currentBuffer = recordDeserializer.getCurrentBuffer();
		if (currentBuffer != null && !currentBuffer.isRecycled()) {
			currentBuffer.recycleBuffer();
		}
		recordDeserializer.clear();
	}

	// cleanup the barrier handler resources
	barrierHandler.cleanup();
}
 
Example #22
Source File: BroadcastRecordWriterTest.java    From flink with Apache License 2.0 4 votes vote down vote up
/**
 * Tests the number of requested buffers and results are correct in the case of switching
 * modes between {@link BroadcastRecordWriter#broadcastEmit(IOReadableWritable)} and
 * {@link BroadcastRecordWriter#randomEmit(IOReadableWritable)}.
 */
@Test
public void testBroadcastMixedRandomEmitRecord() throws Exception {
	final int numberOfChannels = 4;
	final int numberOfRecords = 8;
	final int bufferSize = 32;

	@SuppressWarnings("unchecked")
	final Queue<BufferConsumer>[] queues = new Queue[numberOfChannels];
	for (int i = 0; i < numberOfChannels; i++) {
		queues[i] = new ArrayDeque<>();
	}

	final TestPooledBufferProvider bufferProvider = new TestPooledBufferProvider(Integer.MAX_VALUE, bufferSize);
	final ResultPartitionWriter partitionWriter = new CollectingPartitionWriter(queues, bufferProvider);
	final BroadcastRecordWriter<SerializationTestType> writer = new BroadcastRecordWriter<>(partitionWriter, 0, "test");
	final RecordDeserializer<SerializationTestType> deserializer = new SpillingAdaptiveSpanningRecordDeserializer<>(
		new String[]{ tempFolder.getRoot().getAbsolutePath() });

	// generate the configured number of int values as global record set
	final Iterable<SerializationTestType> records = Util.randomRecords(numberOfRecords, SerializationTestTypeFactory.INT);
	// restore the corresponding record set for every input channel
	final Map<Integer, ArrayDeque<SerializationTestType>> serializedRecords = new HashMap<>();
	for (int i = 0; i < numberOfChannels; i++) {
		serializedRecords.put(i, new ArrayDeque<>());
	}

	// every record in global set would both emit into one random channel and broadcast to all the channels
	int index = 0;
	for (SerializationTestType record : records) {
		int randomChannel = index++ % numberOfChannels;
		writer.randomEmit(record, randomChannel);
		serializedRecords.get(randomChannel).add(record);

		writer.broadcastEmit(record);
		for (int i = 0; i < numberOfChannels; i++) {
			serializedRecords.get(i).add(record);
		}
	}

	final int numberOfCreatedBuffers = bufferProvider.getNumberOfCreatedBuffers();
	// verify the expected number of requested buffers, and it would always request a new buffer while random emitting
	assertEquals(numberOfRecords, numberOfCreatedBuffers);

	for (int i = 0; i < numberOfChannels; i++) {
		// every channel would queue the number of above crated buffers
		assertEquals(numberOfRecords, queues[i].size());

		final int excessRandomRecords = i < numberOfRecords % numberOfChannels ? 1 : 0;
		final int numberOfRandomRecords = numberOfRecords / numberOfChannels + excessRandomRecords;
		final int numberOfTotalRecords = numberOfRecords + numberOfRandomRecords;
		// verify the data correctness in every channel queue
		verifyDeserializationResults(
			queues[i],
			deserializer,
			serializedRecords.get(i),
			numberOfCreatedBuffers,
			numberOfTotalRecords);
	}
}
 
Example #23
Source File: RecordWriterTest.java    From flink with Apache License 2.0 4 votes vote down vote up
/**
 * The results of emitting records via BroadcastPartitioner or broadcasting records directly are the same,
 * that is all the target channels can receive the whole outputs.
 *
 * @param isBroadcastEmit whether using {@link RecordWriter#broadcastEmit(IOReadableWritable)} or not
 */
private void emitRecordWithBroadcastPartitionerOrBroadcastEmitRecord(boolean isBroadcastEmit) throws Exception {
	final int numberOfChannels = 4;
	final int bufferSize = 32;
	final int numValues = 8;
	final int serializationLength = 4;

	@SuppressWarnings("unchecked")
	final Queue<BufferConsumer>[] queues = new Queue[numberOfChannels];
	for (int i = 0; i < numberOfChannels; i++) {
		queues[i] = new ArrayDeque<>();
	}

	final TestPooledBufferProvider bufferProvider = new TestPooledBufferProvider(Integer.MAX_VALUE, bufferSize);
	final ResultPartitionWriter partitionWriter = new CollectingPartitionWriter(queues, bufferProvider);
	final ChannelSelector selector = new OutputEmitter(ShipStrategyType.BROADCAST, 0);
	final RecordWriter<SerializationTestType> writer = new RecordWriterBuilder()
		.setChannelSelector(selector)
		.setTimeout(0)
		.build(partitionWriter);
	final RecordDeserializer<SerializationTestType> deserializer = new SpillingAdaptiveSpanningRecordDeserializer<>(
		new String[]{ tempFolder.getRoot().getAbsolutePath() });

	final ArrayDeque<SerializationTestType> serializedRecords = new ArrayDeque<>();
	final Iterable<SerializationTestType> records = Util.randomRecords(numValues, SerializationTestTypeFactory.INT);
	for (SerializationTestType record : records) {
		serializedRecords.add(record);

		if (isBroadcastEmit) {
			writer.broadcastEmit(record);
		} else {
			writer.emit(record);
		}
	}

	final int requiredBuffers = numValues / (bufferSize / (4 + serializationLength));
	for (int i = 0; i < numberOfChannels; i++) {
		assertEquals(requiredBuffers, queues[i].size());

		final ArrayDeque<SerializationTestType> expectedRecords = serializedRecords.clone();
		int assertRecords = 0;
		for (int j = 0; j < requiredBuffers; j++) {
			Buffer buffer = buildSingleBuffer(queues[i].remove());
			deserializer.setNextBuffer(buffer);

			assertRecords += DeserializationUtils.deserializeRecords(expectedRecords, deserializer);
		}
		Assert.assertEquals(numValues, assertRecords);
	}
}
 
Example #24
Source File: RecordWriterTest.java    From Flink-CEPplus with Apache License 2.0 4 votes vote down vote up
/**
 * The results of emitting records via BroadcastPartitioner or broadcasting records directly are the same,
 * that is all the target channels can receive the whole outputs.
 *
 * @param isBroadcastEmit whether using {@link RecordWriter#broadcastEmit(IOReadableWritable)} or not
 */
private void emitRecordWithBroadcastPartitionerOrBroadcastEmitRecord(boolean isBroadcastEmit) throws Exception {
	final int numberOfChannels = 4;
	final int bufferSize = 32;
	final int numValues = 8;
	final int serializationLength = 4;

	@SuppressWarnings("unchecked")
	final Queue<BufferConsumer>[] queues = new Queue[numberOfChannels];
	for (int i = 0; i < numberOfChannels; i++) {
		queues[i] = new ArrayDeque<>();
	}

	final TestPooledBufferProvider bufferProvider = new TestPooledBufferProvider(Integer.MAX_VALUE, bufferSize);
	final ResultPartitionWriter partitionWriter = new CollectingPartitionWriter(queues, bufferProvider);
	final ChannelSelector selector = new OutputEmitter(ShipStrategyType.BROADCAST, 0);
	final RecordWriter<SerializationTestType> writer = RecordWriter.createRecordWriter(partitionWriter, selector, 0, "test");
	final RecordDeserializer<SerializationTestType> deserializer = new SpillingAdaptiveSpanningRecordDeserializer<>(
		new String[]{ tempFolder.getRoot().getAbsolutePath() });

	final ArrayDeque<SerializationTestType> serializedRecords = new ArrayDeque<>();
	final Iterable<SerializationTestType> records = Util.randomRecords(numValues, SerializationTestTypeFactory.INT);
	for (SerializationTestType record : records) {
		serializedRecords.add(record);

		if (isBroadcastEmit) {
			writer.broadcastEmit(record);
		} else {
			writer.emit(record);
		}
	}

	final int requiredBuffers = numValues / (bufferSize / (4 + serializationLength));
	for (int i = 0; i < numberOfChannels; i++) {
		assertEquals(requiredBuffers, queues[i].size());

		final ArrayDeque<SerializationTestType> expectedRecords = serializedRecords.clone();
		int assertRecords = 0;
		for (int j = 0; j < requiredBuffers; j++) {
			Buffer buffer = buildSingleBuffer(queues[i].remove());
			deserializer.setNextBuffer(buffer);

			assertRecords += DeserializationUtils.deserializeRecords(expectedRecords, deserializer);
		}
		Assert.assertEquals(numValues, assertRecords);
	}
}