com.amazonaws.services.kinesis.clientlibrary.types.UserRecord Java Examples

The following examples show how to use com.amazonaws.services.kinesis.clientlibrary.types.UserRecord. The examples are drawn from open-source projects; the originating source file, project, and license are noted above each one.
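Before the project examples, here is a minimal, self-contained sketch of the most common pattern they illustrate: fetch a batch with GetRecords and pass it through UserRecord.deaggregate to unpack any KPL-aggregated records. The client construction and the shard iterator below are placeholders for illustration only, not part of any project listed here.

import java.util.List;

import com.amazonaws.services.kinesis.AmazonKinesis;
import com.amazonaws.services.kinesis.AmazonKinesisClientBuilder;
import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord;
import com.amazonaws.services.kinesis.model.GetRecordsRequest;
import com.amazonaws.services.kinesis.model.GetRecordsResult;

public class DeaggregationSketch {

  public static void main(String[] args) {
    AmazonKinesis kinesis = AmazonKinesisClientBuilder.defaultClient();

    // A real consumer would obtain this from a prior GetShardIterator call; placeholder here.
    String shardIterator = args[0];

    GetRecordsResult response =
        kinesis.getRecords(new GetRecordsRequest().withShardIterator(shardIterator).withLimit(100));

    // deaggregate() passes non-aggregated records through unchanged, so it is safe on every batch.
    List<UserRecord> userRecords = UserRecord.deaggregate(response.getRecords());
    for (UserRecord userRecord : userRecords) {
      System.out.printf("%s / %s%n", userRecord.getPartitionKey(), userRecord.getSequenceNumber());
    }
  }
}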
Example #1
Source File: SimplifiedKinesisClient.java    From beam with Apache License 2.0
/**
 * Gets records from Kinesis and deaggregates them if needed.
 *
 * @return list of deaggregated records
 * @throws TransientKinesisException in case of a recoverable situation
 */
public GetKinesisRecordsResult getRecords(
    final String shardIterator,
    final String streamName,
    final String shardId,
    final Integer limit)
    throws TransientKinesisException {
  return wrapExceptions(
      () -> {
        GetRecordsResult response =
            kinesis.getRecords(
                new GetRecordsRequest().withShardIterator(shardIterator).withLimit(limit));
        return new GetKinesisRecordsResult(
            UserRecord.deaggregate(response.getRecords()),
            response.getNextShardIterator(),
            response.getMillisBehindLatest(),
            streamName,
            shardId);
      });
}
 
Example #2
Source File: EchoHandler.java    From kinesis-aggregation with Apache License 2.0
@Override
public Void handleRequest(KinesisEvent event, Context context) {
	LambdaLogger logger = context.getLogger();

	// extract the records from the event
	List<KinesisEventRecord> records = event.getRecords();

	logger.log(String.format("Recieved %s Raw Records", records.size()));

	// now deaggregate the message contents
	List<UserRecord> deaggregated = new RecordDeaggregator<KinesisEventRecord>().deaggregate(records);
	logger.log(String.format("Received %s Deaggregated User Records", deaggregated.size()));
	
	deaggregated.stream().forEachOrdered(rec -> {
		logger.log(rec.getPartitionKey());
	});

	return null;
}
 
Example #3
Source File: GetKinesisRecordsResult.java    From beam with Apache License 2.0
public GetKinesisRecordsResult(
    List<UserRecord> records,
    String nextShardIterator,
    long millisBehindLatest,
    final String streamName,
    final String shardId) {
  this.records =
      records.stream()
          .map(
              input -> {
                assert input != null; // to make FindBugs happy
                return new KinesisRecord(input, streamName, shardId);
              })
          .collect(Collectors.toList());
  this.nextShardIterator = nextShardIterator;
  this.millisBehindLatest = millisBehindLatest;
}
 
Example #4
Source File: TestLambdaDeaggregation.java    From kinesis-aggregation with Apache License 2.0
@Test
public void testAggregatedRecord() {
	// create a new KinesisEvent.Record from the aggregated data
	KinesisEvent.Record r = new KinesisEvent.Record();
	r.setPartitionKey(aggregated.getPartitionKey());
	r.setApproximateArrivalTimestamp(new Date(System.currentTimeMillis()));
	r.setData(ByteBuffer.wrap(aggregated.toRecordBytes()));
	r.setKinesisSchemaVersion("1.0");
	KinesisEventRecord ker = new KinesisEventRecord();
	ker.setKinesis(r);

	// deaggregate the record
	List<UserRecord> userRecords = deaggregator.deaggregate(Arrays.asList(ker));

	assertEquals("Deaggregated Count Matches", aggregated.getNumUserRecords(), userRecords.size());
	verifyOneToOneMapping(userRecords);
}
 
Example #5
Source File: KinesisLambdaReceiver.java    From kinesis-aggregation with Apache License 2.0
/**
 * @see com.amazonaws.services.lambda.runtime.RequestHandler#handleRequest(java.lang.Object, com.amazonaws.services.lambda.runtime.Context)
 */
public Void handleRequestWithLists(KinesisEvent event, Context context) {
	LambdaLogger logger = context.getLogger();
	logger.log("Received " + event.getRecords().size() + " raw Event Records.");

	try {
		// process the user records with an anonymous record processor
		// instance
		RecordDeaggregator.processRecords(event.getRecords(), new KinesisUserRecordProcessor() {
			public Void process(List<UserRecord> userRecords) {
				for (UserRecord userRecord : userRecords) {
					// Your User Record Processing Code Here!
					logger.log(new String(userRecord.getData().array()));
				}

				return null;
			}
		});
	} catch (Exception e) {
		logger.log(e.getMessage());
	}

	return null;
}
 
Example #6
Source File: KinesisLambdaReceiver.java    From kinesis-aggregation with Apache License 2.0
/**
 * @see com.amazonaws.services.lambda.runtime.RequestHandler#handleRequest(java.lang.Object, com.amazonaws.services.lambda.runtime.Context)
 */
public Void handleRequestBulkList(KinesisEvent event, Context context) {
	LambdaLogger logger = context.getLogger();
	logger.log("Received " + event.getRecords().size() + " raw Event Records.");

	try {
		List<UserRecord> userRecords = RecordDeaggregator.deaggregate(event.getRecords());
		for (UserRecord userRecord : userRecords) {
			// Your User Record Processing Code Here!
			logger.log(new String(userRecord.getData().array()));
		}
	} catch (Exception e) {
		logger.log(e.getMessage());
	}

	return null;
}
 
Example #7
Source File: TestLambdaDeaggregation.java    From kinesis-aggregation with Apache License 2.0
@Test
public void testOne() {
	// invoke deaggregation on the static records, returning a List of UserRecord
	List<UserRecord> records = deaggregator.deaggregate(recordList.get(0));

	assertEquals("Processed Record Count Correct", records.size(), 1);
	verifyOneToOneMapping(records);
}
 
Example #8
Source File: TestLambdaDeaggregation.java    From kinesis-aggregation with Apache License 2.0
@Test
public void testEmpty() {
	// invoke deaggregation on the static records, returning a List of UserRecord
	List<UserRecord> records = deaggregator.deaggregate(new ArrayList<KinesisEventRecord>());

	assertEquals("Processed Record Count Correct", records.size(), 0);
	verifyOneToOneMapping(records);
}
 
Example #9
Source File: RecordAggregator.java    From kinesis-aggregation with Apache License 2.0
/**
 * Add a new user record to this aggregated record (will trigger a callback
 * via onRecordComplete if aggregated record is full).
 *
 * @param userRecord
 *            The Kinesis user record to add to this aggregated record
 * @return An AggRecord if this aggregated record is full and ready to be
 *         transmitted or null otherwise.
 * @throws Exception If the user record can't be added to the current agg record
 */
public AggRecord addUserRecord(UserRecord userRecord) throws Exception {
	if (userRecord == null) {
		throw new IllegalArgumentException("Input user record cannot be null.");
	} else if (!userRecord.getData().hasArray()) {
		throw new IllegalStateException(
				"The addUserRecord method only works for UserRecord objects whose data ByteBuffer "
						+ " has a backing byte[] available.");
	}
	return addUserRecord(userRecord.getPartitionKey(), userRecord.getExplicitHashKey(),
			userRecord.getData().array());
}
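As the Javadoc above notes, addUserRecord returns a non-null AggRecord once the aggregate is full. Below is a hedged sketch of how a caller might drain it; the clearAndGet() and toPutRecordRequest(...) calls, and the com.amazonaws.kinesis.agg package, are assumptions about the kinesis-aggregation library and should be checked against the version in use.

import java.util.List;

import com.amazonaws.kinesis.agg.AggRecord;
import com.amazonaws.kinesis.agg.RecordAggregator;
import com.amazonaws.services.kinesis.AmazonKinesis;
import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord;

public class AggregatorSketch {

  public static void forwardAggregated(
      List<UserRecord> userRecords, AmazonKinesis kinesis, String streamName) throws Exception {
    RecordAggregator aggregator = new RecordAggregator();

    for (UserRecord userRecord : userRecords) {
      // addUserRecord hands back a full AggRecord once the Kinesis record size limit is reached
      AggRecord full = aggregator.addUserRecord(userRecord);
      if (full != null) {
        kinesis.putRecord(full.toPutRecordRequest(streamName)); // assumed AggRecord helper
      }
    }

    // flush whatever is still buffered in the aggregator (assumed API)
    AggRecord remainder = aggregator.clearAndGet();
    if (remainder != null && remainder.getNumUserRecords() > 0) {
      kinesis.putRecord(remainder.toPutRecordRequest(streamName));
    }
  }
}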
 
Example #10
Source File: TestLambdaDeaggregation.java    From kinesis-aggregation with Apache License 2.0
@Test
public void testList() {
	// invoke deaggregation on the static records, returning a List of UserRecord
	List<UserRecord> records = deaggregator.deaggregate(recordList);

	assertEquals("Processed Record Count Correct", records.size(), recordList.size());
	verifyOneToOneMapping(records);
}
 
Example #11
Source File: TestLambdaDeaggregation.java    From kinesis-aggregation with Apache License 2.0
private void verifyOneToOneMapping(List<UserRecord> userRecords) {
	userRecords.stream().forEachOrdered(userRecord -> {
		// get the original checkset record by ID
		KinesisEventRecord toCheck = checkset.get(userRecord.getPartitionKey());

		// confirm that toCheck is not null
		assertNotNull("Found Original CheckSet Record", toCheck);

		// confirm that the data is the same
		assertEquals("Data Correct", userRecord.getData().toString(), toCheck.getKinesis().getData().toString());
	});
}
 
Example #12
Source File: ShardConsumer.java    From Flink-CEPplus with Apache License 2.0
protected String getShardIteratorForAggregatedSequenceNumber(SequenceNumber sequenceNumber)
		throws Exception {

	String itrForLastAggregatedRecord =
			kinesis.getShardIterator(
					subscribedShard,
					ShardIteratorType.AT_SEQUENCE_NUMBER.toString(),
					sequenceNumber.getSequenceNumber());

	// get only the last aggregated record
	GetRecordsResult getRecordsResult = getRecords(itrForLastAggregatedRecord, 1);

	List<UserRecord> fetchedRecords = deaggregateRecords(
			getRecordsResult.getRecords(),
			subscribedShard.getShard().getHashKeyRange().getStartingHashKey(),
			subscribedShard.getShard().getHashKeyRange().getEndingHashKey());

	long lastSubSequenceNum = sequenceNumber.getSubSequenceNumber();
	for (UserRecord record : fetchedRecords) {
		// we have found a dangling sub-record if it has a larger subsequence number
		// than our last sequence number; if so, collect the record and update state
		if (record.getSubSequenceNumber() > lastSubSequenceNum) {
			deserializeRecordForCollectionAndUpdateState(record);
		}
	}

	return getRecordsResult.getNextShardIterator();
}
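The deaggregateRecords helper used above is presumably a thin wrapper around the KCL overload of UserRecord.deaggregate that takes the shard's hash key range and drops sub-records whose explicit hash key falls outside it (for example after a reshard). A short sketch under that assumption:

import java.math.BigInteger;
import java.util.List;

import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord;
import com.amazonaws.services.kinesis.model.Record;

public class ShardAwareDeaggregation {

  // startingHashKey / endingHashKey are the strings returned by Shard#getHashKeyRange()
  public static List<UserRecord> deaggregateForShard(
      List<Record> rawRecords, String startingHashKey, String endingHashKey) {
    return UserRecord.deaggregate(
        rawRecords, new BigInteger(startingHashKey), new BigInteger(endingHashKey));
  }
}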
 
Example #13
Source File: KinesisRecord.java    From beam with Apache License 2.0
public KinesisRecord(UserRecord record, String streamName, String shardId) {
  this(
      record.getData(),
      record.getSequenceNumber(),
      record.getSubSequenceNumber(),
      record.getPartitionKey(),
      new Instant(record.getApproximateArrivalTimestamp()),
      Instant.now(),
      streamName,
      shardId);
}
 
Example #14
Source File: TestDirectDeaggregation.java    From kinesis-aggregation with Apache License 2.0
private void verifyOneToOneMapping(List<UserRecord> userRecords) {
	userRecords.stream().forEachOrdered(userRecord -> {
		// get the original checkset record by ID
		Record toCheck = checkset.get(userRecord.getPartitionKey());

		// confirm that toCheck is not null
		assertNotNull("Found Original CheckSet Record", toCheck);

		// confirm that the data is the same
		assertEquals("Data Correct", userRecord.getData().toString(), toCheck.getData().toString());
	});
}
 
Example #15
Source File: TestDirectDeaggregation.java    From kinesis-aggregation with Apache License 2.0
@Test
public void testAggregatedRecord() {
	// create a new KinesisEvent.Record from the aggregated data
	Record r = new Record();
	r.setPartitionKey(aggregated.getPartitionKey());
	r.setApproximateArrivalTimestamp(new Date(System.currentTimeMillis()));
	r.setData(ByteBuffer.wrap(aggregated.toRecordBytes()));

	// deaggregate the record
	List<UserRecord> userRecords = deaggregator.deaggregate(Arrays.asList(r));

	assertEquals("Deaggregated Count Matches", aggregated.getNumUserRecords(), userRecords.size());
	verifyOneToOneMapping(userRecords);
}
 
Example #16
Source File: TestDirectDeaggregation.java    From kinesis-aggregation with Apache License 2.0
@Test
public void testOne() {
	// invoke deaggregation on the static records, returning a List of UserRecord
	List<UserRecord> records = deaggregator.deaggregate(recordList.get(0));

	assertEquals("Processed Record Count Correct", records.size(), 1);
	verifyOneToOneMapping(records);
}
 
Example #17
Source File: TestDirectDeaggregation.java    From kinesis-aggregation with Apache License 2.0
@Test
public void testEmpty() {
	// invoke deaggregation on the static records, returning a List of UserRecord
	List<UserRecord> records = deaggregator.deaggregate(new ArrayList<Record>());

	assertEquals("Processed Record Count Correct", records.size(), 0);
	verifyOneToOneMapping(records);
}
 
Example #18
Source File: TestDirectDeaggregation.java    From kinesis-aggregation with Apache License 2.0
@Test
public void testList() {
	// invoke deaggregation on the static records, returning a List of UserRecord
	List<UserRecord> records = deaggregator.deaggregate(recordList);

	assertEquals("Processed Record Count Correct", records.size(), recordList.size());
	verifyOneToOneMapping(records);
}
 
Example #19
Source File: StartingPointShardsFinderTest.java    From beam with Apache License 2.0
private void prepareShard(
    Shard shard,
    String nextIterator,
    ShardIteratorType shardIteratorType,
    Instant startTimestamp) {
  try {
    String shardIterator = shardIteratorType + shard.getShardId() + "-current";
    if (shardIteratorType == ShardIteratorType.AT_TIMESTAMP) {
      when(kinesis.getShardIterator(
              STREAM_NAME,
              shard.getShardId(),
              ShardIteratorType.AT_TIMESTAMP,
              null,
              startTimestamp))
          .thenReturn(shardIterator);
    } else {
      when(kinesis.getShardIterator(
              STREAM_NAME, shard.getShardId(), shardIteratorType, null, null))
          .thenReturn(shardIterator);
    }
    GetKinesisRecordsResult result =
        new GetKinesisRecordsResult(
            Collections.<UserRecord>emptyList(),
            nextIterator,
            0,
            STREAM_NAME,
            shard.getShardId());
    when(kinesis.getRecords(shardIterator, STREAM_NAME, shard.getShardId())).thenReturn(result);
  } catch (TransientKinesisException e) {
    throw new RuntimeException(e);
  }
}
 
Example #20
Source File: SpringBootKinesisEventHandler.java    From spring-cloud-function with Apache License 2.0
@SuppressWarnings("unchecked")
private E deserializeUserRecord(UserRecord userRecord) {
	try {
		byte[] jsonBytes = userRecord.getData().array();
		return (E) this.mapper.readValue(jsonBytes, getInputType());
	}
	catch (Exception e) {
		throw new IllegalStateException("Cannot convert event", e);
	}
}
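One caveat about the userRecord.getData().array() call above: it only works when the ByteBuffer is array-backed with the payload starting at offset zero, which is why the ShardConsumer examples further down copy the buffer instead. A small sketch of that safer copy, usable with any UserRecord payload:

import java.nio.ByteBuffer;

public final class PayloadBytes {

  // Copies the record payload without assuming an accessible backing array.
  public static byte[] of(ByteBuffer data) {
    byte[] bytes = new byte[data.remaining()];
    data.duplicate().get(bytes); // duplicate() leaves the caller's buffer position untouched
    return bytes;
  }
}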
 
Example #21
Source File: KinesisSource.java    From datacollector with Apache License 2.0
private void previewProcess(
    int maxBatchSize,
    BatchMaker batchMaker
) throws IOException, StageException {
  ClientConfiguration awsClientConfig = AWSUtil.getClientConfiguration(conf.proxyConfig);

  String shardId = KinesisUtil.getLastShardId(awsClientConfig, conf, conf.streamName);

  GetShardIteratorRequest getShardIteratorRequest = new GetShardIteratorRequest();
  getShardIteratorRequest.setStreamName(conf.streamName);
  getShardIteratorRequest.setShardId(shardId);
  getShardIteratorRequest.setShardIteratorType(conf.initialPositionInStream.name());

  if (conf.initialPositionInStream == InitialPositionInStream.AT_TIMESTAMP) {
    getShardIteratorRequest.setTimestamp(new Date(conf.initialTimestamp));
  }

  if (!getContext().isPreview() && conf.maxBatchSize > maxBatchSize) {
    getContext().reportError(Errors.KINESIS_18, maxBatchSize);
  }

  List<com.amazonaws.services.kinesis.model.Record> results = KinesisUtil.getPreviewRecords(
      awsClientConfig,
      conf,
      Math.min(conf.maxBatchSize, maxBatchSize),
      getShardIteratorRequest
  );

  int batchSize = results.size() > maxBatchSize ? maxBatchSize : results.size();

  for (int index = 0; index < batchSize; index++) {
    com.amazonaws.services.kinesis.model.Record record = results.get(index);
    UserRecord userRecord = new UserRecord(record);
    KinesisUtil.processKinesisRecord(
        getShardIteratorRequest.getShardId(),
        userRecord,
        parserFactory
    ).forEach(batchMaker::addRecord);
  }
}
 
Example #22
Source File: KinesisTestUtil.java    From datacollector with Apache License 2.0
public static List<com.amazonaws.services.kinesis.model.Record> getConsumerTestRecords(int i) {
  List<com.amazonaws.services.kinesis.model.Record> records = new ArrayList<>(i);

  for (int j = 0; j < i; j++) {
    com.amazonaws.services.kinesis.model.Record record = new com.amazonaws.services.kinesis.model.Record()
        .withData(ByteBuffer.wrap(String.format("{\"seq\": %s}", j).getBytes()))
        .withPartitionKey(StringUtils.repeat("0", 19))
        .withSequenceNumber(String.valueOf(j))
        .withApproximateArrivalTimestamp(Calendar.getInstance().getTime());
    records.add(new UserRecord(record));
  }

  return records;
}
 
Example #23
Source File: KinesisTestUtil.java    From datacollector with Apache License 2.0
public static com.amazonaws.services.kinesis.model.Record getBadConsumerTestRecord(int seqNo) {
  com.amazonaws.services.kinesis.model.Record record = new com.amazonaws.services.kinesis.model.Record()
      .withData(ByteBuffer.wrap(String.format("{\"seq\": %s", seqNo).getBytes()))
      .withPartitionKey(StringUtils.repeat("0", 19))
      .withSequenceNumber(String.valueOf(seqNo))
      .withApproximateArrivalTimestamp(Calendar.getInstance().getTime());

  return new UserRecord(record);
}
 
Example #24
Source File: KinesisTestUtil.java    From datacollector with Apache License 2.0
public static com.amazonaws.services.kinesis.model.Record getConsumerTestRecord(int seqNo) {
  com.amazonaws.services.kinesis.model.Record record = new com.amazonaws.services.kinesis.model.Record()
      .withData(ByteBuffer.wrap(String.format("{\"seq\": %s}", seqNo).getBytes()))
      .withPartitionKey(StringUtils.repeat("0", 19))
      .withSequenceNumber(String.valueOf(seqNo))
      .withApproximateArrivalTimestamp(Calendar.getInstance().getTime());

  return new UserRecord(record);
}
 
Example #25
Source File: ShardConsumer.java    From flink with Apache License 2.0
protected String getShardIteratorForAggregatedSequenceNumber(SequenceNumber sequenceNumber)
		throws Exception {

	String itrForLastAggregatedRecord =
			kinesis.getShardIterator(
					subscribedShard,
					ShardIteratorType.AT_SEQUENCE_NUMBER.toString(),
					sequenceNumber.getSequenceNumber());

	// get only the last aggregated record
	GetRecordsResult getRecordsResult = getRecords(itrForLastAggregatedRecord, 1);

	List<UserRecord> fetchedRecords = deaggregateRecords(
			getRecordsResult.getRecords(),
			subscribedShard.getShard().getHashKeyRange().getStartingHashKey(),
			subscribedShard.getShard().getHashKeyRange().getEndingHashKey());

	long lastSubSequenceNum = sequenceNumber.getSubSequenceNumber();
	for (UserRecord record : fetchedRecords) {
		// we have found a dangling sub-record if it has a larger subsequence number
		// than our last sequence number; if so, collect the record and update state
		if (record.getSubSequenceNumber() > lastSubSequenceNum) {
			deserializeRecordForCollectionAndUpdateState(record);
		}
	}

	return getRecordsResult.getNextShardIterator();
}
 
Example #26
Source File: ShardConsumer.java    From flink with Apache License 2.0
/**
 * Deserializes a record for collection, and accordingly updates the shard state in the fetcher. The last
 * successfully collected sequence number in this shard consumer is also updated so that
 * {@link ShardConsumer#getRecords(String, int)} may be able to use the correct sequence number to refresh shard
 * iterators if necessary.
 *
 * <p>Note that the server-side Kinesis timestamp is attached to the record when collected. When the
 * user program uses {@link TimeCharacteristic#EventTime}, this timestamp will be used by default.
 *
 * @param record record to deserialize and collect
 * @throws IOException
 */
private void deserializeRecordForCollectionAndUpdateState(UserRecord record)
	throws IOException {
	ByteBuffer recordData = record.getData();

	byte[] dataBytes = new byte[recordData.remaining()];
	recordData.get(dataBytes);

	final long approxArrivalTimestamp = record.getApproximateArrivalTimestamp().getTime();

	final T value = deserializer.deserialize(
		dataBytes,
		record.getPartitionKey(),
		record.getSequenceNumber(),
		approxArrivalTimestamp,
		subscribedShard.getStreamName(),
		subscribedShard.getShard().getShardId());

	SequenceNumber collectedSequenceNumber = (record.isAggregated())
		? new SequenceNumber(record.getSequenceNumber(), record.getSubSequenceNumber())
		: new SequenceNumber(record.getSequenceNumber());

	fetcherRef.emitRecordAndUpdateState(
		value,
		approxArrivalTimestamp,
		subscribedShardStateIndex,
		collectedSequenceNumber);

	lastSequenceNum = collectedSequenceNumber;
}
 
Example #27
Source File: ShardConsumer.java    From flink with Apache License 2.0
protected String getShardIteratorForAggregatedSequenceNumber(SequenceNumber sequenceNumber)
		throws Exception {

	String itrForLastAggregatedRecord =
			kinesis.getShardIterator(
					subscribedShard,
					ShardIteratorType.AT_SEQUENCE_NUMBER.toString(),
					sequenceNumber.getSequenceNumber());

	// get only the last aggregated record
	GetRecordsResult getRecordsResult = getRecords(itrForLastAggregatedRecord, 1);

	List<UserRecord> fetchedRecords = deaggregateRecords(
			getRecordsResult.getRecords(),
			subscribedShard.getShard().getHashKeyRange().getStartingHashKey(),
			subscribedShard.getShard().getHashKeyRange().getEndingHashKey());

	long lastSubSequenceNum = sequenceNumber.getSubSequenceNumber();
	for (UserRecord record : fetchedRecords) {
		// we have found a dangling sub-record if it has a larger subsequence number
		// than our last sequence number; if so, collect the record and update state
		if (record.getSubSequenceNumber() > lastSubSequenceNum) {
			deserializeRecordForCollectionAndUpdateState(record);
		}
	}

	return getRecordsResult.getNextShardIterator();
}
 
Example #28
Source File: ShardConsumer.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Deserializes a record for collection, and accordingly updates the shard state in the fetcher. The last
 * successfully collected sequence number in this shard consumer is also updated so that
 * {@link ShardConsumer#getRecords(String, int)} may be able to use the correct sequence number to refresh shard
 * iterators if necessary.
 *
 * <p>Note that the server-side Kinesis timestamp is attached to the record when collected. When the
 * user program uses {@link TimeCharacteristic#EventTime}, this timestamp will be used by default.
 *
 * @param record record to deserialize and collect
 * @throws IOException
 */
private void deserializeRecordForCollectionAndUpdateState(UserRecord record)
	throws IOException {
	ByteBuffer recordData = record.getData();

	byte[] dataBytes = new byte[recordData.remaining()];
	recordData.get(dataBytes);

	final long approxArrivalTimestamp = record.getApproximateArrivalTimestamp().getTime();

	final T value = deserializer.deserialize(
		dataBytes,
		record.getPartitionKey(),
		record.getSequenceNumber(),
		approxArrivalTimestamp,
		subscribedShard.getStreamName(),
		subscribedShard.getShard().getShardId());

	SequenceNumber collectedSequenceNumber = (record.isAggregated())
		? new SequenceNumber(record.getSequenceNumber(), record.getSubSequenceNumber())
		: new SequenceNumber(record.getSequenceNumber());

	fetcherRef.emitRecordAndUpdateState(
		value,
		approxArrivalTimestamp,
		subscribedShardStateIndex,
		collectedSequenceNumber);

	lastSequenceNum = collectedSequenceNumber;
}
 
Example #29
Source File: ShardConsumer.java    From Flink-CEPplus with Apache License 2.0
/**
 * Deserializes a record for collection, and accordingly updates the shard state in the fetcher. The last
 * successfully collected sequence number in this shard consumer is also updated so that
 * {@link ShardConsumer#getRecords(String, int)} may be able to use the correct sequence number to refresh shard
 * iterators if necessary.
 *
 * <p>Note that the server-side Kinesis timestamp is attached to the record when collected. When the
 * user program uses {@link TimeCharacteristic#EventTime}, this timestamp will be used by default.
 *
 * @param record record to deserialize and collect
 * @throws IOException
 */
private void deserializeRecordForCollectionAndUpdateState(UserRecord record)
	throws IOException {
	ByteBuffer recordData = record.getData();

	byte[] dataBytes = new byte[recordData.remaining()];
	recordData.get(dataBytes);

	final long approxArrivalTimestamp = record.getApproximateArrivalTimestamp().getTime();

	final T value = deserializer.deserialize(
		dataBytes,
		record.getPartitionKey(),
		record.getSequenceNumber(),
		approxArrivalTimestamp,
		subscribedShard.getStreamName(),
		subscribedShard.getShard().getShardId());

	SequenceNumber collectedSequenceNumber = (record.isAggregated())
		? new SequenceNumber(record.getSequenceNumber(), record.getSubSequenceNumber())
		: new SequenceNumber(record.getSequenceNumber());

	fetcherRef.emitRecordAndUpdateState(
		value,
		approxArrivalTimestamp,
		subscribedShardStateIndex,
		collectedSequenceNumber);

	lastSequenceNum = collectedSequenceNumber;
}
 
Example #30
Source File: TestLambdaDeaggregation.java    From kinesis-aggregation with Apache License 2.0
@Override
public Void process(List<UserRecord> userRecords) {
	recordsProcessed += userRecords.size();

	return null;
}