com.amazonaws.services.kinesis.model.Record Java Examples

The following examples show how to use com.amazonaws.services.kinesis.model.Record. Each example is an excerpt from an open-source project; the source file, project, and license are noted above each snippet.
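
Before the project excerpts, here is a minimal, self-contained sketch (not taken from any of the projects below; the class name RecordExample is made up for illustration) showing how a Record is typically built and read back. It uses only the Record methods that appear throughout the examples on this page: withData, withPartitionKey, withSequenceNumber, withApproximateArrivalTimestamp, and the matching getters.

import com.amazonaws.services.kinesis.model.Record;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Date;

public class RecordExample {
  public static void main(String[] args) {
    // Build a Record, much like the mock/fake Kinesis clients in the examples below do.
    Record record = new Record()
        .withData(ByteBuffer.wrap("hello kinesis".getBytes(StandardCharsets.UTF_8)))
        .withPartitionKey("example-partition-key")
        .withSequenceNumber("0")
        .withApproximateArrivalTimestamp(new Date());

    // Read the payload back without assuming the ByteBuffer exposes a backing array.
    ByteBuffer data = record.getData();
    byte[] bytes = new byte[data.remaining()];
    data.get(bytes);

    System.out.println(record.getPartitionKey() + " / " + record.getSequenceNumber()
        + " -> " + new String(bytes, StandardCharsets.UTF_8));
  }
}
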
Example #1
Source File: SimplifiedKinesisClientTest.java    From beam with Apache License 2.0
@Test
public void shouldReturnLimitedNumberOfRecords() throws Exception {
  final Integer limit = 100;

  doAnswer(
          (Answer<GetRecordsResult>)
              invocation -> {
                GetRecordsRequest request = (GetRecordsRequest) invocation.getArguments()[0];
                List<Record> records = generateRecords(request.getLimit());
                return new GetRecordsResult().withRecords(records).withMillisBehindLatest(1000L);
              })
      .when(kinesis)
      .getRecords(any(GetRecordsRequest.class));

  GetKinesisRecordsResult result = underTest.getRecords(SHARD_ITERATOR, STREAM, SHARD_1, limit);
  assertThat(result.getRecords().size()).isEqualTo(limit);
}
 
Example #2
Source File: KinesisRecordToTupleMapper.java    From streamline with Apache License 2.0
@Override
public List<Object> getTuple(Record record) {
    CharsetDecoder decoder = Charset.forName("UTF-8").newDecoder();
    List<Object> tuple = new ArrayList<>();
    tuple.add(record.getPartitionKey());
    tuple.add(record.getSequenceNumber());
    try {
        String data = decoder.decode(record.getData()).toString();
        tuple.add(data);
    } catch (CharacterCodingException e) {
        e.printStackTrace();
        LOG.warn("Exception occured. Emitting tuple with empty string data", e);
        tuple.add("");
    }
    return tuple;
}
 
Example #3
Source File: KinesisTestConsumer.java    From attic-apex-malhar with Apache License 2.0
public String processNextIterator(String iterator)
{
  GetRecordsRequest getRequest = new GetRecordsRequest();
  getRequest.setLimit(1000);

  getRequest.setShardIterator(iterator);
  // call "get" operation and get everything in this shard range
  GetRecordsResult getResponse = client.getRecords(getRequest);

  iterator = getResponse.getNextShardIterator();

  List<Record> records = getResponse.getRecords();
  processResponseRecords(records);

  return iterator;
}
 
Example #4
Source File: FakeKinesisBehavioursFactory.java    From Flink-CEPplus with Apache License 2.0
@Override
public GetRecordsResult getRecords(String shardIterator, int maxRecordsToGet) {
	BlockingQueue<String> queue = Preconditions.checkNotNull(this.shardIteratorToQueueMap.get(shardIterator),
	"no queue for iterator %s", shardIterator);
	List<Record> records = Collections.emptyList();
	try {
		String data = queue.take();
		Record record = new Record()
			.withData(
				ByteBuffer.wrap(String.valueOf(data).getBytes(ConfigConstants.DEFAULT_CHARSET)))
			.withPartitionKey(UUID.randomUUID().toString())
			.withApproximateArrivalTimestamp(new Date(System.currentTimeMillis()))
			.withSequenceNumber(String.valueOf(0));
		records = Collections.singletonList(record);
	} catch (InterruptedException e) {
		shardIterator = null;
	}
	return new GetRecordsResult()
		.withRecords(records)
		.withMillisBehindLatest(0L)
		.withNextShardIterator(shardIterator);
}
 
Example #5
Source File: FakeKinesisBehavioursFactory.java    From Flink-CEPplus with Apache License 2.0
public static List<Record> createRecordBatchWithRange(int min, int max) {
	List<Record> batch = new LinkedList<>();
	long sumRecordBatchBytes = 0L;
	// Create a 10 KB payload shared by every record in the batch
	String data = createDataSize(10 * 1024L);

	for (int i = min; i < max; i++) {
		Record record = new Record()
						.withData(
								ByteBuffer.wrap(String.valueOf(data).getBytes(ConfigConstants.DEFAULT_CHARSET)))
						.withPartitionKey(UUID.randomUUID().toString())
						.withApproximateArrivalTimestamp(new Date(System.currentTimeMillis()))
						.withSequenceNumber(String.valueOf(i));
		batch.add(record);
		sumRecordBatchBytes += record.getData().remaining();
	}
	if (batch.size() != 0) {
		averageRecordSizeBytes = sumRecordBatchBytes / batch.size();
	}

	return batch;
}
 
Example #6
Source File: KinesisUtil.java    From datacollector with Apache License 2.0
public static List<com.streamsets.pipeline.api.Record> processKinesisRecord(
    String shardId,
    Record kRecord,
    DataParserFactory parserFactory
) throws DataParserException, IOException {
  final String recordId = createKinesisRecordId(shardId, kRecord);
  DataParser parser = parserFactory.getParser(recordId, kRecord.getData().array());

  List<com.streamsets.pipeline.api.Record> records = new ArrayList<>();
  com.streamsets.pipeline.api.Record r;
  while ((r = parser.parse()) != null) {
    records.add(r);
  }
  parser.close();
  return records;
}
 
Example #7
Source File: KinesisClientLibraryPipelinedRecordProcessor.java    From amazon-kinesis-connectors with Apache License 2.0
/**
 * Processes the records in the queue using the wrapped {@link IRecordProcessor}.
 */
private void consumeQueue() {
    final List<Record> records = new ArrayList<Record>();
    int drained = 0;
    // Use blocking queue's poll with timeout to wait for new records
    Record polled = null;
    try {
        polled = recordQueue.poll(maxQueueWaitTimeMs, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        LOG.error(e);
        Thread.currentThread().interrupt();
    }
    // Check if queue contained records.
    if (polled == null) {
        processRecords(records /* Empty list */, checkpointer);
        return;
    }
    records.add(polled);
    drained++;
    // Drain the remaining records from the queue
    drained += recordQueue.drainTo(records);
    recordProcessor.processRecords(records, checkpointer /* Protected checkpointer */);
    LOG.info("Consumed " + drained + " records");
}
 
Example #8
Source File: MockKinesisClient.java    From presto with Apache License 2.0
public PutRecordResult putRecord(ByteBuffer data, String partitionKey)
{
    // Create record and insert into the shards.  Initially just do it
    // on a round robin basis.
    long timestamp = System.currentTimeMillis() - 50000;
    Record record = new Record();
    record = record.withData(data).withPartitionKey(partitionKey).withSequenceNumber(String.valueOf(sequenceNo));
    record.setApproximateArrivalTimestamp(new Date(timestamp));

    if (nextShard == shards.size()) {
        nextShard = 0;
    }
    InternalShard shard = shards.get(nextShard);
    shard.addRecord(record);

    PutRecordResult result = new PutRecordResult();
    result.setSequenceNumber(String.valueOf(sequenceNo));
    result.setShardId(shard.getShardId());

    nextShard++;
    sequenceNo++;

    return result;
}
 
Example #9
Source File: MockKinesisClient.java    From presto-kinesis with Apache License 2.0
public PutRecordResult putRecord(ByteBuffer data, String partitionKey)
{
    // Create record and insert into the shards.  Initially just do it
    // on a round robin basis.
    long ts = System.currentTimeMillis() - 50000;
    Record rec = new Record();
    rec = rec.withData(data).withPartitionKey(partitionKey).withSequenceNumber(String.valueOf(sequenceNo));
    rec.setApproximateArrivalTimestamp(new Date(ts));

    if (nextShard == shards.size()) {
        nextShard = 0;
    }
    InternalShard shard = shards.get(nextShard);
    shard.addRecord(rec);

    PutRecordResult result = new PutRecordResult();
    result.setSequenceNumber(String.valueOf(sequenceNo));
    result.setShardId(shard.getShardId());

    nextShard++;
    sequenceNo++;

    return result;
}
 
Example #10
Source File: KinesisTestConsumer.java    From attic-apex-malhar with Apache License 2.0
protected void processResponseRecords( List<Record> records )
{
  if ( records == null || records.isEmpty() ) {
    return;
  }
  receiveCount += records.size();
  logger.debug("ReceiveCount= {}", receiveCount);

  for ( Record record : records ) {
    holdingBuffer.add(record);
    if ( shouldProcessRecord ) {
      processRecord( record );
    }

    if ( doneLatch != null ) {
      doneLatch.countDown();
    }
  }
}
 
Example #11
Source File: FakeKinesisBehavioursFactory.java    From flink with Apache License 2.0
@Override
public GetRecordsResult getRecords(String shardIterator, int maxRecordsToGet) {
	BlockingQueue<String> queue = Preconditions.checkNotNull(this.shardIteratorToQueueMap.get(shardIterator),
	"no queue for iterator %s", shardIterator);
	List<Record> records = Collections.emptyList();
	try {
		String data = queue.take();
		Record record = new Record()
			.withData(
				ByteBuffer.wrap(data.getBytes(ConfigConstants.DEFAULT_CHARSET)))
			.withPartitionKey(UUID.randomUUID().toString())
			.withApproximateArrivalTimestamp(new Date(System.currentTimeMillis()))
			.withSequenceNumber(String.valueOf(0));
		records = Collections.singletonList(record);
	} catch (InterruptedException e) {
		shardIterator = null;
	}
	return new GetRecordsResult()
		.withRecords(records)
		.withMillisBehindLatest(0L)
		.withNextShardIterator(shardIterator);
}
 
Example #12
Source File: KinesisVideoRekognitionLambdaExample.java    From amazon-kinesis-video-streams-parser-library with Apache License 2.0
/**
 * Handle request for each lambda event.
 *
 * @param kinesisEvent Each kinesis event which describes the Rekognition output.
 * @param context      Lambda context
 * @return context
 */
@Override
public Context handleRequest(final KinesisEvent kinesisEvent, final Context context) {
    try {
        initialize(System.getProperty("KVSStreamName"), Regions.fromName(System.getenv("AWS_REGION")));
        loadProducerJNI(context);

        final List<Record> records = kinesisEvent.getRecords()
                .stream()
                .map(KinesisEvent.KinesisEventRecord::getKinesis)
                .collect(Collectors.toList());
        processRecordsWithRetries(records);
        processRekognizedOutputs();

    } catch (final Exception e) {
        log.error("Unable to process lambda request !. Exiting... ", e);
    }
    return context;
}
 
Example #13
Source File: KinesisVideoRekognitionLambdaExample.java    From amazon-kinesis-video-streams-parser-library with Apache License 2.0
/**
 * Process records performing retries as needed. Skip "poison pill" records.
 *
 * @param records Data records to be processed.
 */
private void processRecordsWithRetries(final List<Record> records) {
    for (final Record record : records) {
        boolean processedSuccessfully = false;
        for (int i = 0; i < NUM_RETRIES; i++) {
            try {
                log.info("Processing single record...");
                processSingleRecord(record);
                processedSuccessfully = true;
                break;
            } catch (final Throwable t) {
                log.error("Caught throwable while processing record {}", record, t);
            }
        }
        if (!processedSuccessfully) {
            log.warn("Couldn't processRekognizedOutputs record {}. Skipping the record.", record);
        }
    }
    log.info("Processed all {} KDS records.", records.size());
}
 
Example #14
Source File: KinesisWrapper.java    From bender with Apache License 2.0
private KinesisWrapper(final InternalEvent internal) {
  KinesisEventRecord eventRecord = ((KinesisInternalEvent) internal).getRecord();
  Record record = eventRecord.getKinesis();

  this.partitionKey = record.getPartitionKey();
  this.sequenceNumber = record.getSequenceNumber();
  this.eventSource = eventRecord.getEventSource();
  this.sourceArn = eventRecord.getEventSourceARN();
  this.functionName = internal.getCtx().getContext().getFunctionName();
  this.functionVersion = internal.getCtx().getContext().getFunctionVersion();
  this.processingTime = System.currentTimeMillis();
  this.arrivalTime = record.getApproximateArrivalTimestamp().getTime();
  this.timestamp = internal.getEventTime();
  this.processingDelay = processingTime - timestamp;

  if (internal.getEventObj() != null) {
    this.payload = internal.getEventObj().getPayload();
  } else {
    this.payload = null;
  }
}
 
Example #15
Source File: KinesisSpanProcessorTest.java    From zipkin-aws with Apache License 2.0
@Test
public void collectorFailsWhenRecordEncodedAsSingleSpan() {
  Span span = TestObjects.LOTS_OF_SPANS[0];
  byte[] encodedSpan = SpanBytesEncoder.THRIFT.encode(span);
  Record kinesisRecord = new Record().withData(ByteBuffer.wrap(encodedSpan));
  ProcessRecordsInput kinesisInput =
      new ProcessRecordsInput().withRecords(Collections.singletonList(kinesisRecord));

  kinesisSpanProcessor.processRecords(kinesisInput);

  assertThat(storage.spanStore().getTraces().size()).isEqualTo(0);

  assertThat(metrics.messages()).isEqualTo(1);
  assertThat(metrics.messagesDropped()).isEqualTo(1);
  assertThat(metrics.bytes()).isEqualTo(encodedSpan.length);
}
 
Example #16
Source File: TestKinesisRecordProcessor.java    From samza with Apache License 2.0
static Map<KinesisRecordProcessor, List<Record>> generateRecords(int numRecordsPerShard,
    List<KinesisRecordProcessor> processors) {
  Map<KinesisRecordProcessor, List<Record>> processorRecordMap = new HashMap<>();
  processors.forEach(processor -> {
    try {
      // Create records and call process records
      IRecordProcessorCheckpointer checkpointer = Mockito.mock(IRecordProcessorCheckpointer.class);
      doNothing().when(checkpointer).checkpoint(anyString());
      doNothing().when(checkpointer).checkpoint();
      ProcessRecordsInput processRecordsInput = Mockito.mock(ProcessRecordsInput.class);
      when(processRecordsInput.getCheckpointer()).thenReturn(checkpointer);
      when(processRecordsInput.getMillisBehindLatest()).thenReturn(1000L);
      List<Record> inputRecords = createRecords(numRecordsPerShard);
      processorRecordMap.put(processor, inputRecords);
      when(processRecordsInput.getRecords()).thenReturn(inputRecords);
      processor.processRecords(processRecordsInput);
    } catch (ShutdownException | InvalidStateException ex) {
      throw new RuntimeException(ex);
    }
  });
  return processorRecordMap;
}
 
Example #17
Source File: TestKinesisSystemConsumer.java    From samza with Apache License 2.0
private void verifyRecords(List<IncomingMessageEnvelope> outputRecords, List<Record> inputRecords, String shardId) {
  Iterator outputRecordsIter = outputRecords.iterator();
  inputRecords.forEach(record -> {
    IncomingMessageEnvelope envelope = (IncomingMessageEnvelope) outputRecordsIter.next();
    String outputKey = (String) envelope.getKey();
    KinesisIncomingMessageEnvelope kinesisMessageEnvelope = (KinesisIncomingMessageEnvelope) envelope;
    Assert.assertEquals(outputKey, record.getPartitionKey());
    Assert.assertEquals(kinesisMessageEnvelope.getSequenceNumber(), record.getSequenceNumber());
    Assert.assertEquals(kinesisMessageEnvelope.getApproximateArrivalTimestamp(),
        record.getApproximateArrivalTimestamp());
    Assert.assertEquals(kinesisMessageEnvelope.getShardId(), shardId);
    ByteBuffer outputData = ByteBuffer.wrap((byte[]) kinesisMessageEnvelope.getMessage());
    record.getData().rewind();
    Assert.assertEquals(outputData, record.getData());
    verifyOffset(envelope.getOffset(), record, shardId);
  });
}
 
Example #18
Source File: RecordDeaggregator.java    From kinesis-aggregation with Apache License 2.0
@SuppressWarnings("unchecked")
private List<Record> convertType(List<T> inputRecords) {
	List<Record> records = null;

	if (inputRecords.size() > 0 && inputRecords.get(0) instanceof KinesisEventRecord) {
		records = convertToKinesis((List<KinesisEventRecord>) inputRecords);
	} else if (inputRecords.size() > 0 && inputRecords.get(0) instanceof Record) {
		records = (List<Record>) inputRecords;
	} else {
		if (inputRecords.size() == 0) {
			return new ArrayList<Record>();
		} else {
			throw new InvalidArgumentException("Input Types must be Kinesis Event or Model Records");
		}
	}

	return records;
}
 
Example #19
Source File: StreamsRecordProcessor.java    From pulsar with Apache License 2.0
@Override
public void processRecords(ProcessRecordsInput processRecordsInput) {

    log.debug("Processing {} records from {}", processRecordsInput.getRecords().size(), kinesisShardId);

    for (Record record : processRecordsInput.getRecords()) {
        try {
            queue.put(new StreamsRecord(record));
        } catch (InterruptedException e) {
            log.warn("unable to create KinesisRecord ", e);
        }
    }

    // Checkpoint once every checkpoint interval.
    if (System.nanoTime() > nextCheckpointTimeInNanos) {
        checkpoint(processRecordsInput.getCheckpointer());
        nextCheckpointTimeInNanos = System.nanoTime() + checkpointInterval;
    }
}
 
Example #20
Source File: AbstractKinesisInputOperator.java    From attic-apex-malhar with Apache License 2.0
/**
 * Implement InputOperator Interface.
 */
@Override
public void emitTuples()
{
  if (currentWindowId <= windowDataManager.getLargestCompletedWindow()) {
    return;
  }
  int count = consumer.getQueueSize();
  if (maxTuplesPerWindow > 0) {
    count = Math.min(count, maxTuplesPerWindow - emitCount);
  }
  for (int i = 0; i < count; i++) {
    Pair<String, Record> data = consumer.pollRecord();
    String shardId = data.getFirst();
    String recordId = data.getSecond().getSequenceNumber();
    emitTuple(data);
    MutablePair<String, Integer> shardOffsetAndCount = currentWindowRecoveryState.get(shardId);
    if (shardOffsetAndCount == null) {
      currentWindowRecoveryState.put(shardId, new MutablePair<String, Integer>(recordId, 1));
    } else {
      shardOffsetAndCount.setRight(shardOffsetAndCount.right + 1);
    }
    shardPosition.put(shardId, recordId);
  }
  emitCount += count;
}
 
Example #21
Source File: TestLambdaDeaggregation.java    From kinesis-aggregation with Apache License 2.0
@Test
public void testAggregatedRecord() {
	// create a new KinesisEvent.Record from the aggregated data
	KinesisEvent.Record r = new KinesisEvent.Record();
	r.setPartitionKey(aggregated.getPartitionKey());
	r.setApproximateArrivalTimestamp(new Date(System.currentTimeMillis()));
	r.setData(ByteBuffer.wrap(aggregated.toRecordBytes()));
	r.setKinesisSchemaVersion("1.0");
	KinesisEventRecord ker = new KinesisEventRecord();
	ker.setKinesis(r);

	// deaggregate the record
	List<UserRecord> userRecords = deaggregator.deaggregate(Arrays.asList(ker));

	assertEquals("Deaggregated Count Matches", aggregated.getNumUserRecords(), userRecords.size());
	verifyOneToOneMapping(userRecords);
}
 
Example #22
Source File: StreamsRecordProcessor.java    From aws-big-data-blog with Apache License 2.0
private void processRecordsWithRetries(List<Record> records) throws Exception {
	for (Record record : records) {
		int tryCount = 0;
		boolean processedOk = false;
		while (tryCount < NUM_RETRIES && !processedOk) {
			try {
				processSingleRecord(record);
				processedOk = true;
			} catch (Throwable t) {
				System.out.println("Caught throwable " + t + " while processing record " + record);
				// exponential backoff before the next attempt; InterruptedException propagates to the caller
				Thread.sleep((long) (Math.pow(2, tryCount) * BACKOFF_TIME_IN_MILLIS));
			}
			// count the attempt so the loop terminates after NUM_RETRIES failures
			tryCount++;
		}

		if (!processedOk) {
			throw new Exception("Unable to process record " + record.getPartitionKey() + " after " + NUM_RETRIES + " attempts");
		}
	}
}
 
Example #23
Source File: KinesisRecordProcessor.java    From aws-big-data-blog with Apache License 2.0
/**
 * {@inheritDoc}
 */
public void processRecords(List<Record> records, IRecordProcessorCheckpointer checkpointer) {
    LOG.info("Processing " + records.size() + " records from " + kinesisShardId);
    
    // Process records and perform all exception handling.
    processRecordsWithRetries(records);
    
    // Checkpoint once every checkpoint interval.
    if (System.currentTimeMillis() > nextCheckpointTimeInMillis) {
        checkpoint(checkpointer);
        nextCheckpointTimeInMillis = System.currentTimeMillis() + CHECKPOINT_INTERVAL_MILLIS;
    }
    
}
 
Example #24
Source File: StreamsRecordProcessor.java    From aws-dynamodb-examples with Apache License 2.0
@Override
public void processRecords(List<Record> records,
        IRecordProcessorCheckpointer checkpointer) {
    for(Record record : records) {
        String data = new String(record.getData().array(), Charset.forName("UTF-8"));
        System.out.println(data);
        if(record instanceof RecordAdapter) {
            com.amazonaws.services.dynamodbv2.model.Record streamRecord = ((RecordAdapter) record).getInternalObject();
            
            switch(streamRecord.getEventName()) { 
            case "INSERT" : case "MODIFY" :
                StreamsAdapterDemoHelper.putItem(dynamoDBClient, tableName, streamRecord.getDynamodb().getNewImage());
                break;
            case "REMOVE" :
                StreamsAdapterDemoHelper.deleteItem(dynamoDBClient, tableName, streamRecord.getDynamodb().getKeys().get("Id").getN());
            }
        }
        checkpointCounter += 1;
        if(checkpointCounter % 10 == 0) {
            try {
                checkpointer.checkpoint();
            } catch(Exception e) {
                e.printStackTrace();
            }
        }
    }

}
 
Example #25
Source File: MockKinesisClient.java    From presto-kinesis with Apache License 2.0
protected ShardIterator getNextShardIterator(ShardIterator previousIter, ArrayList<Record> records)
{
    ShardIterator newIter = null;
    if (records.size() == 0) {
        newIter = previousIter;
    }
    else {
        Record rec = records.get(records.size() - 1);
        int lastSeq = Integer.valueOf(rec.getSequenceNumber());
        newIter = new ShardIterator(previousIter.streamId, previousIter.shardIndex, lastSeq + 1);
    }

    return newIter;
}
 
Example #26
Source File: SimplifiedKinesisClientTest.java    From beam with Apache License 2.0
private List<Record> generateRecords(int num) {
  List<Record> records = new ArrayList<>();
  for (int i = 0; i < num; i++) {
    byte[] value = new byte[1024];
    Arrays.fill(value, (byte) i);
    records.add(
        new Record()
            .withSequenceNumber(String.valueOf(i))
            .withPartitionKey("key")
            .withData(ByteBuffer.wrap(value)));
  }
  return records;
}
 
Example #27
Source File: KinesisTestConsumer.java    From attic-apex-malhar with Apache License 2.0
public String getData(Record rc)
{
  ByteBuffer buffer = rc.getData();
  byte[] bytes = new byte[buffer.remaining()];
  buffer.get(bytes);
  return new String(bytes);
}
 
Example #28
Source File: KinesisSystemConsumer.java    From samza with Apache License 2.0
private IncomingMessageEnvelope translate(SystemStreamPartition ssp, Record record) {
  String shardId = processors.get(ssp).getShardId();
  byte[] payload = new byte[record.getData().remaining()];

  metrics.updateMetrics(ssp.getStream(), record);
  record.getData().get(payload);
  KinesisSystemConsumerOffset offset = new KinesisSystemConsumerOffset(shardId, record.getSequenceNumber());
  return new KinesisIncomingMessageEnvelope(ssp, offset.toString(), record.getPartitionKey(),
      payload, shardId, record.getSequenceNumber(), record.getApproximateArrivalTimestamp());
}
 
Example #29
Source File: KinesisRecordProcessor.java    From samza with Apache License 2.0
/**
 * Process data records. The Amazon Kinesis Client Library will invoke this method to deliver data records to the
 * application. Upon failover, the new instance will get records with sequence numbers greater than the checkpoint
 * position for each partition key.
 *
 * @param processRecordsInput Provides the records to be processed as well as information and capabilities related
 *        to them (e.g., checkpointing).
 */
@Override
public void processRecords(ProcessRecordsInput processRecordsInput) {
  // KCL does not send any records to a processor that has been shut down.
  Validate.isTrue(!shutdownRequested,
      String.format("KCL returned records after shutdown is called on the processor %s.", this));
  // KCL always gives a reference to the same checkpointer instance for a given processor instance.
  checkpointer = processRecordsInput.getCheckpointer();
  List<Record> records = processRecordsInput.getRecords();
  // Empty records are expected when KCL config has CallProcessRecordsEvenForEmptyRecordList set to true.
  if (!records.isEmpty()) {
    lastProcessedRecordSeqNumber = new ExtendedSequenceNumber(records.get(records.size() - 1).getSequenceNumber());
    listener.onReceiveRecords(ssp, records, processRecordsInput.getMillisBehindLatest());
  }
}
 
Example #30
Source File: AmazonKinesisMock.java    From beam with Apache License 2.0
@Override
public GetRecordsResult getRecords(GetRecordsRequest getRecordsRequest) {
  List<String> shardIteratorParts =
      Splitter.on(':').splitToList(getRecordsRequest.getShardIterator());
  int shardId = parseInt(shardIteratorParts.get(0));
  int startingRecord = parseInt(shardIteratorParts.get(1));
  List<Record> shardData = shardedData.get(shardId);

  int toIndex = min(startingRecord + numberOfRecordsPerGet, shardData.size());
  int fromIndex = min(startingRecord, toIndex);
  return new GetRecordsResult()
      .withRecords(shardData.subList(fromIndex, toIndex))
      .withNextShardIterator(String.format("%s:%s", shardId, toIndex))
      .withMillisBehindLatest(0L);
}