Java Code Examples for com.amazonaws.services.kinesis.producer.KinesisProducer#addUserRecord()

The following examples show how to use com.amazonaws.services.kinesis.producer.KinesisProducer#addUserRecord(). Each example is taken from an open-source project; the source file and license are noted above the code.
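Before the full examples, here is a minimal usage sketch (the stream name and keys are illustrative placeholders, not taken from the projects below) of the two overloads exercised in these examples; both return a Guava ListenableFuture<UserRecordResult> that completes once the Kinesis Producer Library has finished the put:

// Minimal sketch; "my-stream" and the keys below are illustrative placeholders.
KinesisProducer producer = new KinesisProducer();
ByteBuffer payload = ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8));

// Overload without an explicit hash key: the partition key alone determines the shard.
ListenableFuture<UserRecordResult> withPartitionKeyOnly =
        producer.addUserRecord("my-stream", "my-partition-key", payload);

// Overload with an explicit hash key: the hash key overrides the shard mapping
// that would otherwise be derived from the partition key.
ListenableFuture<UserRecordResult> withExplicitHashKey =
        producer.addUserRecord("my-stream", "my-partition-key", "0", payload);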
Example 1
Source File: AmazonKinesisSinkTask.java    From kinesis-kafka-connector with Apache License 2.0
private ListenableFuture<UserRecordResult> addUserRecord(KinesisProducer kp, String streamName, String partitionKey,
		boolean usePartitionAsHashKey, SinkRecord sinkRecord) {

	// If configured, use the Kafka partition as the explicit hash key.
	// This is useful for routing records from the same Kafka partition
	// into the same shard.
	if (usePartitionAsHashKey)
		return kp.addUserRecord(streamName, partitionKey, Integer.toString(sinkRecord.kafkaPartition()),
				DataUtility.parseValue(sinkRecord.valueSchema(), sinkRecord.value()));
	else
		return kp.addUserRecord(streamName, partitionKey,
				DataUtility.parseValue(sinkRecord.valueSchema(), sinkRecord.value()));

}
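The connector task returns the ListenableFuture to its caller without attaching a callback. A minimal sketch of how a caller might observe the result, assuming Guava's Futures and MoreExecutors are on the classpath (the variable names here are hypothetical, not part of the connector):

// Hypothetical caller sketch: attach a callback to the future returned above.
ListenableFuture<UserRecordResult> future =
        addUserRecord(kp, streamName, partitionKey, usePartitionAsHashKey, sinkRecord);
Futures.addCallback(future, new FutureCallback<UserRecordResult>() {
    @Override
    public void onSuccess(UserRecordResult result) {
        // e.g. record the destination shard and sequence number
    }

    @Override
    public void onFailure(Throwable t) {
        // e.g. log the failure and retry or fail the sink task
    }
}, MoreExecutors.directExecutor());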
 
Example 2
Source File: SampleProducer.java    From real-time-analytics-spark-streaming with Apache License 2.0
/** The main method.
 *  @param args  The command-line args for the Sample Producer. It takes 3 optional positional parameters:
 *  1. The stream name to use (default-data-stream is the default)
 *  2. The region name to use (us-east-1 is the default)
 *  3. The duration of the test in seconds (5 is the default)
 */
public static void main(String[] args) throws Exception {
    final String streamName = getArgIfPresent(args, 0, STREAM_NAME);
    final String region = getArgIfPresent(args, 1, REGION);
    final String secondsToRunString = getArgIfPresent(args, 2, String.valueOf(SECONDS_TO_RUN_DEFAULT));
    final int secondsToRun = Integer.parseInt(secondsToRunString);
    if (secondsToRun <= 0) {
        LOGGER.error("Seconds to Run should be a positive integer");
        System.exit(1);
    }

    final KinesisProducer producer = getKinesisProducer(region);
    final AtomicLong sequenceNumber = new AtomicLong(0);
    final AtomicLong completed = new AtomicLong(0);

    FutureCallback<UserRecordResult> callback = new FutureCallback<UserRecordResult>() {
        @Override public void onFailure(Throwable t) {
            // If we see any failures, we will log them.
            if (t instanceof UserRecordFailedException) {
                Attempt last = Iterables.getLast(((UserRecordFailedException) t).getResult().getAttempts());
                LOGGER.error(String.format("Record failed to put - %s : %s", last.getErrorCode(), last.getErrorMessage()));
            }
            LOGGER.error("Exception during put", t);
        }

        @Override public void onSuccess(UserRecordResult result) {
            completed.getAndIncrement();
        }
    };

    final ExecutorService callbackThreadPool = Executors.newCachedThreadPool();

    // The lines within run() are the essence of the KPL API.
    final Runnable putOneRecord = new Runnable() {
        @Override
        public void run() {
            ByteBuffer data = generateData();
            // TIMESTAMP is our partition key
            ListenableFuture<UserRecordResult> f = producer.addUserRecord(streamName, TIMESTAMP, randomExplicitHashKey(), data);
            Futures.addCallback(f, callback, callbackThreadPool);
        }
    };

    EXECUTOR.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            long put = sequenceNumber.get();
            long total = RECORDS_PER_SECOND * secondsToRun;
            double putPercent = 100.0 * put / total;
            long done = completed.get();
            double donePercent = 100.0 * done / total;
            LOGGER.info(String.format(
                "Put %d of %d so far (%.2f %%), %d have completed (%.2f %%)",
                put, total, putPercent, done, donePercent
            ));
        }
    }, 1, 1, TimeUnit.SECONDS);

    executeAtTargetRate(EXECUTOR, putOneRecord, sequenceNumber, secondsToRun, RECORDS_PER_SECOND);

    EXECUTOR.awaitTermination(secondsToRun + 1, TimeUnit.SECONDS);

    LOGGER.info("Waiting for remaining puts to finish...");
    producer.flushSync();
    LOGGER.info("All records complete.");

    producer.destroy();
    LOGGER.info("Finished.");
}
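The getKinesisProducer(region) helper is not shown in this excerpt. A plausible minimal sketch, assuming the standard KinesisProducerConfiguration from the KPL (the specific settings are illustrative, not the project's actual configuration):

// Hypothetical sketch of a getKinesisProducer helper; not the original source.
private static KinesisProducer getKinesisProducer(String region) {
    KinesisProducerConfiguration config = new KinesisProducerConfiguration()
            .setRegion(region)                  // target AWS region for the stream
            .setRecordMaxBufferedTime(3000)     // flush buffered records after at most 3 s
            .setAggregationEnabled(true);       // pack multiple user records per Kinesis record
    return new KinesisProducer(config);
}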
 
Example 3
Source File: SampleKPLProducer.java    From kinesis-aggregation with Apache License 2.0
public static void main(String[] args) throws Exception
{
    if (args.length != 2)
    {
        System.err.println("Usage SampleKPLProducer <stream name> <region>");
        System.exit(1);
    }

    String streamName = args[0];
    String regionName = args[1];

    final KinesisProducer producer = getKinesisProducer(regionName);

    final AtomicLong sequenceNumber = new AtomicLong(0);
    final AtomicLong completed = new AtomicLong(0);

    final FutureCallback<UserRecordResult> callback = new FutureCallback<UserRecordResult>()
    {
        @Override
        public void onFailure(Throwable t)
        {
            if (t instanceof UserRecordFailedException)
            {
                Attempt last = Iterables.getLast(((UserRecordFailedException) t).getResult().getAttempts());
                System.err.println(String.format("Record failed to put - %s : %s", last.getErrorCode(), last.getErrorMessage()));
            }
            System.err.println("Exception during put: " + t.getMessage());
            t.printStackTrace();
            System.exit(1);
        }

        @Override
        public void onSuccess(UserRecordResult result)
        {
            completed.getAndIncrement();
        }
    };

    // Puts one record with random data, a random partition key, and a random explicit
    // hash key, so records are spread evenly across the stream's shards.
    final Runnable putOneRecord = new Runnable()
    {
        @Override
        public void run()
        {
            byte[] data = ProducerUtils.randomData(sequenceNumber.get(), ProducerConfig.RECORD_SIZE_BYTES);
            ListenableFuture<UserRecordResult> f = producer.addUserRecord(streamName, ProducerUtils.randomPartitionKey(),
                    ProducerUtils.randomExplicitHashKey(), ByteBuffer.wrap(data));
            Futures.addCallback(f, callback);
        }
    };

    EXECUTOR.scheduleAtFixedRate(new Runnable()
    {
        @Override
        public void run()
        {
            long put = sequenceNumber.get();
            long total = RECORDS_PER_SECOND * SECONDS_TO_RUN;
            double putPercent = 100.0 * put / total;
            long done = completed.get();
            double donePercent = 100.0 * done / total;
            System.out.println(String.format("Put %d of %d so far (%.2f %%), %d have completed (%.2f %%)", put, total, putPercent, done, donePercent));
        }
    }, 1, 1, TimeUnit.SECONDS);

    System.out.println(String.format("Starting puts... will run for %d seconds at %d records per second", SECONDS_TO_RUN, RECORDS_PER_SECOND));

    executeAtTargetRate(EXECUTOR, putOneRecord, sequenceNumber, SECONDS_TO_RUN, RECORDS_PER_SECOND);

    EXECUTOR.awaitTermination(SECONDS_TO_RUN + 1, TimeUnit.SECONDS);

    System.out.println("Waiting for remaining puts to finish...");
    producer.flushSync();
    System.out.println("All records complete.");

    producer.destroy();
    System.out.println("Finished.");
}
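Note that this example uses the two-argument Futures.addCallback(f, callback) overload, which ran the callback on the thread that completed the future and has been deprecated and later removed in newer Guava releases. With a recent Guava on the classpath, the call would need an explicit executor, for example:

// Equivalent call for newer Guava versions; directExecutor() preserves the old behavior.
Futures.addCallback(f, callback, MoreExecutors.directExecutor());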