com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker Java Examples

The following examples show how to use com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker, the Kinesis Client Library (KCL) 1.x class that drives record processing for a Kinesis data stream (or, via the Streams adapter, a DynamoDB stream). Each example notes the source file and the open-source project it was taken from.
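All of the examples below follow the same basic pattern: construct a KinesisClientLibConfiguration, supply an IRecordProcessorFactory, build a Worker, and run it (Worker implements Runnable). The following minimal sketch shows that pattern in isolation; the application name, stream name, and processing logic are placeholders, not taken from any of the projects below.

import java.util.UUID;

import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessorFactory;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.InitialPositionInStream;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker;
import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput;
import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput;
import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownInput;

public class MinimalKclWorker {

    public static void main(String[] args) {
        // the application name doubles as the DynamoDB lease table name
        KinesisClientLibConfiguration config = new KinesisClientLibConfiguration(
                "my-sample-app",                           // placeholder
                "my-stream",                               // placeholder
                new DefaultAWSCredentialsProviderChain(),
                "worker-" + UUID.randomUUID())
                .withInitialPositionInStream(InitialPositionInStream.LATEST);

        // a trivial record processor that just prints sequence numbers
        IRecordProcessorFactory factory = () -> new IRecordProcessor() {
            @Override
            public void initialize(InitializationInput input) {
            }

            @Override
            public void processRecords(ProcessRecordsInput input) {
                input.getRecords().forEach(r -> System.out.println(r.getSequenceNumber()));
            }

            @Override
            public void shutdown(ShutdownInput input) {
            }
        };

        Worker worker = new Worker.Builder()
                .recordProcessorFactory(factory)
                .config(config)
                .build();

        // blocks until the worker is shut down (e.g. worker.shutdown() from another thread)
        worker.run();
    }
}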
Example #1
Source File: KinesisCollector.java    From zipkin-aws with Apache License 2.0
@Override
public KinesisCollector start() {
  String workerId;
  try {
    workerId = InetAddress.getLocalHost().getCanonicalHostName() + ":" + UUID.randomUUID();
  } catch (UnknownHostException e) {
    workerId = UUID.randomUUID().toString();
  }

  KinesisClientLibConfiguration config =
      new KinesisClientLibConfiguration(appName, streamName, credentialsProvider, workerId);
  config.withRegionName(regionName);

  processor = new KinesisRecordProcessorFactory(collector, metrics);
  worker = new Worker.Builder().recordProcessorFactory(processor).config(config).build();

  executor.execute(worker);
  return this;
}
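This start() launches the worker on an executor but never stops it. A matching stop method (a sketch, hypothetical for this class; compare Example #6 below) could look like:

public void close() {
    worker.shutdown();    // stop the KCL worker loop
    executor.shutdown();  // then the executor that was running it
}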
 
Example #2
Source File: CommandLineInterface.java    From dynamodb-cross-region-library with Apache License 2.0
static Optional<Worker> mainUnsafe(String[] args) {
    // Initialize command line arguments and JCommander parser
    CommandLineArgs params = new CommandLineArgs();
    JCommander cmd = new JCommander(params);

    // parse given arguments
    cmd.parse(args);

    // show usage information if help flag exists
    if (params.isHelp()) {
        cmd.usage();
        return Optional.absent();
    }

    final CommandLineInterface cli = new CommandLineInterface(params);
    // create worker
    return Optional.of(cli.createWorker());
}
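Example #12 below shows the createWorker() implementation this method delegates to, and Example #13 drives mainUnsafe from an integration test.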
 
Example #3
Source File: DynamoDBTableReplicator.java    From podyn with Apache License 2.0
public void startReplicatingChanges() throws StreamNotEnabledException {
	if (tableSchema == null) {
		throw new TableExistsException("table %s does not exist in destination", dynamoTableName);
	}

	String tableStreamArn = getStreamArn();

	if (tableStreamArn == null) {
		throw new StreamNotEnabledException("table %s does not have a stream enabled\n", dynamoTableName);
	}

	AmazonDynamoDBStreamsAdapterClient adapterClient = new AmazonDynamoDBStreamsAdapterClient(streamsClient);
	AmazonCloudWatch cloudWatchClient = AmazonCloudWatchClientBuilder.standard().build();

	String workerId = generateWorkerId();

	final KinesisClientLibConfiguration workerConfig = new KinesisClientLibConfiguration(
			APPLICATION_NAME, tableStreamArn, awsCredentialsProvider, workerId).
			withMaxRecords(1000).
			withIdleTimeBetweenReadsInMillis(500).
			withCallProcessRecordsEvenForEmptyRecordList(false).
			withCleanupLeasesUponShardCompletion(false).
			withFailoverTimeMillis(20000).
			withTableName(LEASE_TABLE_PREFIX + dynamoTableName).
			withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON);

	Worker worker = new Worker.Builder().
			recordProcessorFactory(recordProcessorFactory).
			config(workerConfig).
			kinesisClient(adapterClient).
			cloudWatchClient(cloudWatchClient).
			dynamoDBClient(dynamoDBClient).
			execService(executor).
			build();

	executor.execute(worker);
}
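The generateWorkerId() helper is not shown in the snippet. A plausible implementation (an assumption; the podyn source may differ) mirrors the hostname-plus-UUID pattern of Examples #1 and #4:

private static String generateWorkerId() {
    try {
        return InetAddress.getLocalHost().getCanonicalHostName() + ":" + UUID.randomUUID();
    } catch (UnknownHostException e) {
        // no resolvable hostname; fall back to a purely random id
        return UUID.randomUUID().toString();
    }
}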
 
Example #4
Source File: KinesisDataStreamsWorker.java    From amazon-kinesis-video-streams-parser-library with Apache License 2.0
@Override
public void run() {

    try {
        String workerId = InetAddress.getLocalHost().getCanonicalHostName() + ":" + UUID.randomUUID();
        KinesisClientLibConfiguration kinesisClientLibConfiguration =
                new KinesisClientLibConfiguration(APPLICATION_NAME,
                        kdsStreamName,
                        credentialsProvider,
                        workerId);
        kinesisClientLibConfiguration.withInitialPositionInStream(SAMPLE_APPLICATION_INITIAL_POSITION_IN_STREAM)
                .withRegionName(region.getName());

        final IRecordProcessorFactory recordProcessorFactory =
                () -> new KinesisRecordProcessor(rekognizedFragmentsIndex, credentialsProvider);
        final Worker worker = new Worker(recordProcessorFactory, kinesisClientLibConfiguration);

        System.out.printf("Running %s to process stream %s as worker %s...",
                APPLICATION_NAME,
                kdsStreamName,
                workerId);

        int exitCode = 0;
        try {
            worker.run();
        } catch (Throwable t) {
            System.err.println("Caught throwable while processing data.");
            t.printStackTrace();
            exitCode = 1;
        }
        System.out.println("Exit code : " + exitCode);
    } catch (Exception e) {
        e.printStackTrace();
    }
}
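This example uses the Worker(IRecordProcessorFactory, KinesisClientLibConfiguration) constructor, which is deprecated in recent KCL 1.x releases. The equivalent Builder call (as in Examples #1, #5, and #8) would be the following sketch; note the Builder expects the com.amazonaws.services.kinesis.clientlibrary.interfaces.v2 factory interface, so a factory written against the older interface would need migrating first:

final Worker worker = new Worker.Builder()
        .recordProcessorFactory(recordProcessorFactory)
        .config(kinesisClientLibConfiguration)
        .build();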
 
Example #5
Source File: KinesisSystemConsumer.java    From samza with Apache License 2.0
@Override
public void start() {
  LOG.info("Start samza consumer for system {}.", system);

  metrics.initializeMetrics(streams);

  ThreadFactory namedThreadFactory = new ThreadFactoryBuilder()
      .setNameFormat("kinesis-worker-thread-" + system + "-%d")
      .build();
  // launch kinesis workers in separate threads, one per stream
  executorService = Executors.newFixedThreadPool(streams.size(), namedThreadFactory);

  for (String stream : streams) {
    // KCL Dynamodb table is used for storing the state of processing. By default, the table name is the same as the
    // application name. Dynamodb table name must be unique for a given account and region (even across different
    // streams). So, let's create the default one with the combination of job name, job id and stream name. The table
    // name could be changed by providing a different TableName via KCL specific config.
    String kinesisApplicationName =
        kConfig.get(JobConfig.JOB_NAME) + "-" + kConfig.get(JobConfig.JOB_ID) + "-" + stream;

    Worker worker = new Worker.Builder()
        .recordProcessorFactory(createRecordProcessorFactory(stream))
        .config(kConfig.getKinesisClientLibConfig(system, stream, kinesisApplicationName))
        .build();

    workers.add(worker);

    // launch kinesis workers in separate thread-pools, one per stream
    executorService.execute(worker);
    LOG.info("Started worker for system {} stream {}.", system, stream);
  }
}
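The comment above explains that the KCL lease table defaults to the application name. At the KCL API level the override it mentions is a single setter (a sketch, assuming getKinesisClientLibConfig returns the KinesisClientLibConfiguration directly):

kConfig.getKinesisClientLibConfig(system, stream, kinesisApplicationName)
        .withTableName("my-custom-lease-table"); // hypothetical explicit lease table name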
 
Example #6
Source File: KinesisSystemConsumer.java    From samza with Apache License 2.0
@Override
public void stop() {
  LOG.info("Stop samza consumer for system {}.", system);
  workers.forEach(Worker::shutdown);
  workers.clear();
  executorService.shutdownNow();
  LOG.info("Kinesis system consumer executor service for system {} is shutdown.", system);
}
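Worker.shutdown() returns without waiting for in-flight record processors to finish. Recent KCL 1.x releases (1.7.1 and later) also offer startGracefulShutdown(), which returns a Future you can block on; a sketch of a more patient stop():

for (Worker w : workers) {
    try {
        // wait up to 30 seconds for record processors to checkpoint and shut down
        w.startGracefulShutdown().get(30, TimeUnit.SECONDS);
    } catch (Exception e) {
        LOG.warn("Worker did not shut down gracefully.", e);
    }
}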
 
Example #7
Source File: KinesisEventConsumer.java    From koupler with MIT License
public KinesisEventConsumer(String propertiesFile, String streamName, String appName, String initialPosition) {
    KinesisProducerConfiguration config = KinesisProducerConfiguration.fromPropertiesFile(propertiesFile);

    InitialPositionInStream position = InitialPositionInStream.valueOf(initialPosition);
    
    KinesisClientLibConfiguration clientConfig = new KinesisClientLibConfiguration(appName, streamName,
            new DefaultAWSCredentialsProviderChain(), appName)
                    .withRegionName(config.getRegion())
                    .withInitialPositionInStream(position);
    
    this.builder = new Worker.Builder().recordProcessorFactory(this).config(clientConfig);
}
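The constructor only prepares the builder; nothing is built or started yet. A companion method (hypothetical, not part of the koupler snippet) would finish the job:

public void start() {
    Worker worker = this.builder.build();
    new Thread(worker, "kinesis-event-consumer").start(); // Worker implements Runnable
}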
 
Example #8
Source File: KinesisSource.java    From datacollector with Apache License 2.0
private Worker createKinesisWorker(IRecordProcessorFactory recordProcessorFactory, int maxBatchSize) {
  KinesisClientLibConfiguration kclConfig =
      new KinesisClientLibConfiguration(
          conf.applicationName,
          conf.streamName,
          credentials,
          getWorkerId()
      );

  kclConfig
      .withMaxRecords(maxBatchSize)
      .withCallProcessRecordsEvenForEmptyRecordList(false)
      .withIdleTimeBetweenReadsInMillis(conf.idleTimeBetweenReads)
      .withKinesisClientConfig(clientConfiguration);

  if (conf.initialPositionInStream == InitialPositionInStream.AT_TIMESTAMP) {
    kclConfig.withTimestampAtInitialPositionInStream(new Date(conf.initialTimestamp));
  } else if (conf.initialPositionInStream == InitialPositionInStream.LATEST || conf.initialPositionInStream == InitialPositionInStream.TRIM_HORIZON) {
    kclConfig.withInitialPositionInStream(conf.initialPositionInStream);
  }

  if (conf.region == AwsRegion.OTHER) {
    kclConfig.withKinesisEndpoint(conf.endpoint);
  } else {
    kclConfig.withRegionName(conf.region.getId());
  }

  return new Worker.Builder()
      .recordProcessorFactory(recordProcessorFactory)
      .metricsFactory(metricsFactory)
      .dynamoDBClient(dynamoDBClient)
      .cloudWatchClient(cloudWatchClient)
      .execService(executor)
      .config(kclConfig)
      .build();
}
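Since Worker implements Runnable, the caller can hand the built worker to the same executor that was wired in via execService (a sketch; the actual call site and the maxBatchSize value are assumptions):

Worker worker = createKinesisWorker(recordProcessorFactory, 1000);
executor.execute(worker);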
 
Example #9
Source File: ManagedConsumer.java    From aws-big-data-blog with Apache License 2.0
public int run() throws Exception {
    configure();

    System.out.println(String.format("Starting %s", appName));
    LOG.info(String.format("Running %s to process stream %s", appName, streamName));

    IRecordProcessorFactory recordProcessorFactory = new ManagedClientProcessorFactory(
            this.templateProcessor);
    Worker worker = new Worker(recordProcessorFactory, this.config);

    int exitCode = 0;
    int failures = 0;

    // run the worker, tolerating as many failures as is configured
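    // (note: worker.run() blocks until the worker shuts down; if it returns normally,
    // this loop will invoke run() again on the same worker instance)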
    while (failures < failuresToTolerate || failuresToTolerate == -1) {
        try {
            worker.run();
        } catch (Throwable t) {
            LOG.error("Caught throwable while processing data.", t);

            failures++;

            if (failures < failuresToTolerate) {
                LOG.error("Restarting...");
            }
            exitCode = 1;
        }
    }

    return exitCode;
}
 
Example #10
Source File: KinesisApplication.java    From aws-big-data-blog with Apache License 2.0
/**
 * @param args Property file with config overrides (e.g. application name, stream name)
 * @throws IOException Thrown if we can't read properties from the specified properties file
 */
public static void main(String[] args) throws IOException {
    String propertiesFile = null;

    if (args.length > 1) {
        System.err.println("Usage: java " + KinesisApplication.class.getName() + " <propertiesFile>");
        System.exit(1);
    } else if (args.length == 1) {
        propertiesFile = args[0];
    }

    configure(propertiesFile);

    System.out.println("Starting " + applicationName);
    LOG.info("Running " + applicationName + " to process stream " + streamName);

    IRecordProcessorFactory recordProcessorFactory = new KinesisRecordProcessorFactory(redisEndpoint, redisPort);
    Worker worker = new Worker(recordProcessorFactory, kinesisClientLibConfiguration);

    int exitCode = 0;
    try {
        worker.run();
    } catch (Throwable t) {
        LOG.error("Caught throwable while processing data.", t);
        exitCode = 1;
    }
    System.exit(exitCode);
}
 
Example #11
Source File: DynamoStreamsManager.java    From dynamo-cassandra-proxy with Apache License 2.0
public void configure(DCProxyConfiguration config) {

    //TODO make table name dynamic
    String tableName = "test";

    this.dynamodbEndpoint = config.getAwsDynamodbEndpoint();
    this.streamsEndpoint = config.getStreamsEndpoint();
    this.signinRegion = config.getDynamoRegion();
    this.accessKey = config.getDynamoAccessKey();
    this.secretKey = config.getDynamoSecretKey();

    Properties props = System.getProperties();
    props.setProperty("aws.accessKeyId", accessKey);
    props.setProperty("aws.secretKey", secretKey);

    AwsClientBuilder.EndpointConfiguration endpointConfiguration =
            new AwsClientBuilder.EndpointConfiguration(streamsEndpoint, signinRegion);
    SystemPropertiesCredentialsProvider spcp = new SystemPropertiesCredentialsProvider();

    realDDB = AmazonDynamoDBClientBuilder.standard()
            .withRegion(Regions.US_EAST_2)
            //.withEndpointConfiguration(endpointConfiguration)
            .withCredentials(spcp)
            .build();

    DescribeTableResult tableResult = realDDB.describeTable(tableName);
    streamArn = tableResult.getTable().getLatestStreamArn();
    //streamSpec = tableResult.getTable().getStreamSpecification();
    streamsClient = AmazonDynamoDBStreamsClientBuilder.standard()
            .withEndpointConfiguration(endpointConfiguration)
            .build();

    adapterClient = new AmazonDynamoDBStreamsAdapterClient(streamsClient);

    recordProcessorFactory = new StreamsRecordProcessorFactory(ddbProxy, tableName);

    workerConfig = new KinesisClientLibConfiguration("test-app",
            streamArn,
            spcp,
            "streams-worker")
            .withMaxRecords(1000)
            .withIdleTimeBetweenReadsInMillis(500)
            .withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON);

    AmazonCloudWatch cloudWatchClient = AmazonCloudWatchClientBuilder.standard()
            .withRegion(signinRegion)
            .build();

    System.out.println("Creating worker for stream: " + streamArn);

    /*
    DescribeStreamRequest request = new DescribeStreamRequest();
    DescribeStreamRequestAdapter describeStreamResult = new DescribeStreamRequestAdapter(request);
    String id = describeStreamResult.getExclusiveStartShardId();
    String id2 = describeStreamResult.withStreamArn(streamArn).getExclusiveStartShardId();
    */

    Worker worker = StreamsWorkerFactory.createDynamoDbStreamsWorker(
            recordProcessorFactory,
            workerConfig,
            adapterClient,
            realDDB,
            cloudWatchClient
    );

    System.out.println("Starting worker...");
    Thread t = new Thread(worker);
    t.start();
}
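The worker thread started above is never stopped. One way (not in the original source) to let the KCL worker checkpoint and release its lease on process exit is a JVM shutdown hook:

Runtime.getRuntime().addShutdownHook(new Thread(worker::shutdown, "kcl-shutdown-hook"));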
 
Example #12
Source File: CommandLineInterface.java    From dynamodb-cross-region-library with Apache License 2.0
public Worker createWorker() {

        // use default credential provider chain to locate appropriate credentials
        final AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();

        // initialize DynamoDB client and set the endpoint properly for source table / region
        final AmazonDynamoDB dynamodbClient = AmazonDynamoDBClientBuilder.standard()
                .withCredentials(credentialsProvider)
                .withEndpointConfiguration(createEndpointConfiguration(sourceRegion, sourceDynamodbEndpoint, AmazonDynamoDB.ENDPOINT_PREFIX))
                .build();

        // initialize Streams client
        final AwsClientBuilder.EndpointConfiguration streamsEndpointConfiguration = createEndpointConfiguration(sourceRegion,
                sourceDynamodbStreamsEndpoint, AmazonDynamoDBStreams.ENDPOINT_PREFIX);
        final ClientConfiguration streamsClientConfig = new ClientConfiguration().withGzip(false);
        final AmazonDynamoDBStreams streamsClient = AmazonDynamoDBStreamsClientBuilder.standard()
                .withCredentials(credentialsProvider)
                .withEndpointConfiguration(streamsEndpointConfiguration)
                .withClientConfiguration(streamsClientConfig)
                .build();

        // obtain the Stream ID associated with the source table
        final String streamArn = dynamodbClient.describeTable(sourceTable).getTable().getLatestStreamArn();
        final boolean streamEnabled = DynamoDBConnectorUtilities.isStreamsEnabled(streamsClient, streamArn, DynamoDBConnectorConstants.NEW_AND_OLD);
        Preconditions.checkArgument(streamArn != null, DynamoDBConnectorConstants.MSG_NO_STREAMS_FOUND);
        Preconditions.checkState(streamEnabled, DynamoDBConnectorConstants.STREAM_NOT_READY);

        // initialize DynamoDB client for KCL
        final AmazonDynamoDB kclDynamoDBClient = AmazonDynamoDBClientBuilder.standard()
                .withCredentials(credentialsProvider)
                .withEndpointConfiguration(createKclDynamoDbEndpointConfiguration())
                .build();

        // initialize DynamoDB Streams Adapter client and set the Streams endpoint properly
        final AmazonDynamoDBStreamsAdapterClient streamsAdapterClient = new AmazonDynamoDBStreamsAdapterClient(streamsClient);

        // initialize CloudWatch client and set the region to emit metrics to
        final AmazonCloudWatch kclCloudWatchClient;
        if (isPublishCloudWatch) {
            kclCloudWatchClient = AmazonCloudWatchClientBuilder.standard()
                    .withCredentials(credentialsProvider)
                    .withRegion(kclRegion.or(sourceRegion).getName()).build();
        } else {
            kclCloudWatchClient = new NoopCloudWatch();
        }

        // try to get taskname from command line arguments, auto generate one if needed
        final AwsClientBuilder.EndpointConfiguration destinationEndpointConfiguration = createEndpointConfiguration(destinationRegion,
                destinationDynamodbEndpoint, AmazonDynamoDB.ENDPOINT_PREFIX);
        final String actualTaskName = DynamoDBConnectorUtilities.getTaskName(sourceRegion, destinationRegion, taskName, sourceTable, destinationTable);

        // set the appropriate Connector properties for the destination KCL configuration
        final Properties properties = new Properties();
        properties.put(DynamoDBStreamsConnectorConfiguration.PROP_APP_NAME, actualTaskName);
        properties.put(DynamoDBStreamsConnectorConfiguration.PROP_DYNAMODB_ENDPOINT, destinationEndpointConfiguration.getServiceEndpoint());
        properties.put(DynamoDBStreamsConnectorConfiguration.PROP_DYNAMODB_DATA_TABLE_NAME, destinationTable);
        properties.put(DynamoDBStreamsConnectorConfiguration.PROP_REGION_NAME, destinationRegion.getName());

        // create the record processor factory based on given pipeline and connector configurations
        // use the master to replicas pipeline
        final KinesisConnectorRecordProcessorFactory<Record, Record> factory = new KinesisConnectorRecordProcessorFactory<>(
                new DynamoDBMasterToReplicasPipeline(), new DynamoDBStreamsConnectorConfiguration(properties, credentialsProvider));

        // create the KCL configuration with default values
        final KinesisClientLibConfiguration kclConfig = new KinesisClientLibConfiguration(actualTaskName,
                streamArn,
                credentialsProvider,
                DynamoDBConnectorConstants.WORKER_LABEL + actualTaskName + UUID.randomUUID().toString())
                // worker will use checkpoint table if available, otherwise it is safer
                // to start at beginning of the stream
                .withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON)
                // we want the maximum batch size to avoid network transfer latency overhead
                .withMaxRecords(getRecordsLimit.or(DynamoDBConnectorConstants.STREAMS_RECORDS_LIMIT))
                // wait a reasonable amount of time - default 0.5 seconds
                .withIdleTimeBetweenReadsInMillis(DynamoDBConnectorConstants.IDLE_TIME_BETWEEN_READS)
                // Remove calls to GetShardIterator
                .withValidateSequenceNumberBeforeCheckpointing(false)
                // make parent shard poll interval tunable to decrease time to run integration test
                .withParentShardPollIntervalMillis(parentShardPollIntervalMillis.or(DynamoDBConnectorConstants.DEFAULT_PARENT_SHARD_POLL_INTERVAL_MILLIS))
                // avoid losing leases too often - default 60 seconds
                .withFailoverTimeMillis(DynamoDBConnectorConstants.KCL_FAILOVER_TIME);

        // create the KCL worker for this connector
        return new Worker(factory, kclConfig, streamsAdapterClient, kclDynamoDBClient, kclCloudWatchClient);
    }
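Note that Optional and Preconditions here are Guava's (compare Optional.absent() in Example #2, which wraps this method, and the Guava-style getRecordsLimit.or(...) default), and the Worker returned here is the one the integration test in Example #13 runs on a dedicated thread.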
 
Example #13
Source File: CrossRegionReplicationIntegrationTests.java    From dynamodb-cross-region-library with Apache License 2.0
@Test
public void testBothTablesAddStreamAfterCreation() throws InterruptedException {
    //create table one in one region
    final CreateTableRequest iadCreateTableRequest = createTableRequest(INVENTORY_TABLE_IAD);
    dynamoDbIad.createTable(iadCreateTableRequest
            .withStreamSpecification(new StreamSpecification()
                    .withStreamViewType(StreamViewType.NEW_AND_OLD_IMAGES)
                    .withStreamEnabled(true)));
    //create table two in another region
    final CreateTableRequest pdxCreateTableRequest = createTableRequest(INVENTORY_TABLE_PDX);
    dynamoDbPdx.createTable(pdxCreateTableRequest);

    //create and start the command line client and worker
    final List<String> commandLineArgs = Lists.newArrayList(
            "--sourceEndpoint",
            DYNAMODB_LOCAL_ENDPOINT,
            // override the signing region as DynamoDB Local uses it to create different table namespaces
            "--sourceSigningRegion",
            Regions.US_EAST_1.getName(),
            "--sourceTable",
            INVENTORY_TABLE_IAD,
            "--destinationEndpoint",
            DYNAMODB_LOCAL_ENDPOINT,
            // override the signing region as DynamoDB Local uses it to create different table namespaces
            "--destinationSigningRegion",
            Regions.US_WEST_2.getName(),
            "--destinationTable",
            INVENTORY_TABLE_PDX,
            "--taskName",
            CRR_INTEGRATION_TEST,
            // 100ms - override to reduce the time to sleep
            "--parentShardPollIntervalMillis",
            "100",
            "--dontPublishCloudwatch");
    final String[] args = commandLineArgs.toArray(new String[commandLineArgs.size()]);
    final Worker worker = CommandLineInterface.mainUnsafe(args).get();
    final Thread workerThread = new Thread(worker, "KCLWorker");
    workerThread.start();

    //perform the updates on the source table
    final Item asin1sea = new Item().withString(SKU_CODE, ASIN_1).withString(STORE, SEA);
    iadTable.putItem(asin1sea);
    final Item asin1seaRead = iadTable.getItem(SKU_CODE, ASIN_1, STORE, SEA);
    assertEquals(asin1sea, asin1seaRead);

    //verify the updates on the destination table
    //wait for the worker to start and the update to propagate
    Thread.sleep(10000);
    final List<Item> pdxItems = new ArrayList<>();
    for(Item item : pdxTable.scan()) {
        pdxItems.add(item);
    }
    assertEquals(1, pdxItems.size());
    final Item copied = Iterables.getOnlyElement(pdxItems);
    assertEquals(asin1sea, copied);

    //close the worker
    worker.shutdown(); //this leaks threads, I wonder
}
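The fixed Thread.sleep(10000) makes this test timing-sensitive. A polling loop (a sketch reusing the snippet's own names) is usually more robust:

final List<Item> pdxItems = new ArrayList<>();
final long deadline = System.currentTimeMillis() + 30_000;
while (pdxItems.isEmpty() && System.currentTimeMillis() < deadline) {
    Thread.sleep(500);
    for (Item item : pdxTable.scan()) {
        pdxItems.add(item);
    }
}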
 
Example #14
Source File: AmazonDynamoDBStreamstoIgnite.java    From aws-big-data-blog with Apache License 2.0
public void run() throws Exception {
	adapterClient = new AmazonDynamoDBStreamsAdapterClient(new ClientConfiguration());
	adapterClient.setEndpoint(streamsEndpoint);
	dynamoDBClient = new AmazonDynamoDBClient(new ClientConfiguration());
	dynamoDBClient.setEndpoint(dynamodbEndpoint);

	cloudWatchClient = new AmazonCloudWatchClient(dynamoDBCredentials, new ClientConfiguration());

	TcpDiscoverySpi spi = new TcpDiscoverySpi();
	TcpDiscoveryVmIpFinder ipFinder = new TcpDiscoveryVmIpFinder();
	List<String> hostList = Arrays.asList(Properties.getString("hostList").split(","));
	ipFinder.setAddresses(hostList);
	spi.setIpFinder(ipFinder);
	IgniteConfiguration cfg = new IgniteConfiguration();
	cfg.setDiscoverySpi(spi);
	cfg.setClientMode(true);
	cfg.setPeerClassLoadingEnabled(true);

	@SuppressWarnings("unused")
	Ignite ignite = Ignition.start(cfg);
	cache = Ignition.ignite().cache(Properties.getString("cacheName"));
	LOG.info(">>> cache acquired");

	recordProcessorFactory = new StreamsRecordProcessorFactory(cache);
	workerConfig = new KinesisClientLibConfiguration(Properties.getString("applicationName"), streamArn,
			streamsCredentials, "ddbstreamsworker")
					.withMaxRecords(Integer.parseInt(Properties.getString("maxRecords")))
					.withInitialPositionInStream(
							InitialPositionInStream.valueOf(Properties.getString("initialPositionInStream")));

	LOG.info("Creating worker for stream: " + streamArn);
	worker = new Worker(recordProcessorFactory, workerConfig, adapterClient, dynamoDBClient, cloudWatchClient);
	LOG.info("Starting worker...");

	int exitCode = 0;
	try {
		worker.run();
	} catch (Throwable t) {
		LOG.error("Caught throwable while processing data.");
		t.printStackTrace();
		exitCode = 1;
	}
	System.exit(exitCode);
}
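Note that this example passes the DynamoDB Streams adapter client into a deprecated five-argument Worker constructor; newer versions of the dynamodb-streams-kinesis-adapter library provide StreamsWorkerFactory.createDynamoDbStreamsWorker(...) for exactly this wiring, as shown in Example #11.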
 
Example #15
Source File: StreamsAdapterDemo.java    From aws-dynamodb-examples with Apache License 2.0
/**
 * @param args
 */
public static void main(String[] args) throws Exception {
    System.out.println("Starting demo...");

    String srcTable = tablePrefix + "-src";
    String destTable = tablePrefix + "-dest";
    streamsCredentials = new ProfileCredentialsProvider();
    dynamoDBCredentials = new ProfileCredentialsProvider();
    recordProcessorFactory = new StreamsRecordProcessorFactory(dynamoDBCredentials, dynamodbEndpoint, serviceName, destTable);

    /* ===== REQUIRED =====
     * Users will have to explicitly instantiate and configure the adapter, then pass it to
     * the KCL worker.
     */
    adapterClient = new AmazonDynamoDBStreamsAdapterClient(streamsCredentials, new ClientConfiguration());
    adapterClient.setEndpoint(streamsEndpoint);

    dynamoDBClient = new AmazonDynamoDBClient(dynamoDBCredentials, new ClientConfiguration());
    dynamoDBClient.setEndpoint(dynamodbEndpoint);

    cloudWatchClient = new AmazonCloudWatchClient(dynamoDBCredentials, new ClientConfiguration());

    setUpTables();

    workerConfig = new KinesisClientLibConfiguration("streams-adapter-demo",
            streamArn, streamsCredentials, "streams-demo-worker")
        .withMaxRecords(1)
        .withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON);

    System.out.println("Creating worker for stream: " + streamArn);
    worker = new Worker(recordProcessorFactory, workerConfig, adapterClient, dynamoDBClient, cloudWatchClient);
    System.out.println("Starting worker...");
    Thread t = new Thread(worker);
    t.start();

    Thread.sleep(25000);
    worker.shutdown();
    t.join();

    if(StreamsAdapterDemoHelper.scanTable(dynamoDBClient, srcTable).getItems().equals(StreamsAdapterDemoHelper.scanTable(dynamoDBClient, destTable).getItems())) {
        System.out.println("Scan result is equal.");
    } else {
        System.out.println("Tables are different!");
    }

    System.out.println("Done.");
    cleanupAndExit(0);
}