com.amazonaws.services.dynamodbv2.streamsadapter.AmazonDynamoDBStreamsAdapterClient Java Examples

The following examples show how to use com.amazonaws.services.dynamodbv2.streamsadapter.AmazonDynamoDBStreamsAdapterClient. The examples are taken from open source projects; the source file, project, and license are noted above each one.
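
Most of the examples below follow the same basic pattern: build a standard AmazonDynamoDBStreams client, wrap it in an AmazonDynamoDBStreamsAdapterClient so the stream can be consumed through the Kinesis interface, and hand the adapter to a Kinesis Client Library (KCL 1.x) worker configured with the table's latest stream ARN. The following is a minimal sketch of that pattern, not taken from any one project: it assumes the dynamodb-streams-kinesis-adapter library and the AWS SDK for Java v1 are on the classpath, the KCL "v2" IRecordProcessorFactory interface used by StreamsWorkerFactory, and placeholder values for the table name, application name, and worker id; the record processor factory is whatever implementation the application supplies (each example below defines its own).

import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.cloudwatch.AmazonCloudWatch;
import com.amazonaws.services.cloudwatch.AmazonCloudWatchClientBuilder;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBStreams;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBStreamsClientBuilder;
import com.amazonaws.services.dynamodbv2.streamsadapter.AmazonDynamoDBStreamsAdapterClient;
import com.amazonaws.services.dynamodbv2.streamsadapter.StreamsWorkerFactory;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessorFactory;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.InitialPositionInStream;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker;

public class AdapterClientSketch {

    /**
     * Minimal sketch: wires a DynamoDB Streams adapter client into a KCL worker.
     * "my-table", "my-adapter-app", and "worker-1" are placeholder values.
     */
    public static Worker createWorker(IRecordProcessorFactory recordProcessorFactory) {
        AWSCredentialsProvider credentials = new DefaultAWSCredentialsProviderChain();

        AmazonDynamoDB dynamoDBClient = AmazonDynamoDBClientBuilder.standard()
                .withCredentials(credentials).build();
        AmazonDynamoDBStreams streamsClient = AmazonDynamoDBStreamsClientBuilder.standard()
                .withCredentials(credentials).build();
        AmazonCloudWatch cloudWatchClient = AmazonCloudWatchClientBuilder.standard()
                .withCredentials(credentials).build();

        // The adapter exposes the DynamoDB stream through the Kinesis client
        // interface that the KCL expects.
        AmazonDynamoDBStreamsAdapterClient adapterClient =
                new AmazonDynamoDBStreamsAdapterClient(streamsClient);

        // The "stream name" handed to the KCL is the table's latest stream ARN.
        String streamArn = dynamoDBClient.describeTable("my-table")
                .getTable().getLatestStreamArn();

        KinesisClientLibConfiguration workerConfig = new KinesisClientLibConfiguration(
                "my-adapter-app", streamArn, credentials, "worker-1")
                .withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON);

        // StreamsWorkerFactory builds a KCL worker that reads from the adapter and
        // checkpoints its leases in a DynamoDB table named after the application.
        return StreamsWorkerFactory.createDynamoDbStreamsWorker(
                recordProcessorFactory, workerConfig, adapterClient, dynamoDBClient, cloudWatchClient);
    }
}

The returned Worker is a Runnable, so it is typically started on its own thread (for example, new Thread(worker).start()), as several of the examples below do. The project-specific examples layer endpoint overrides, custom user agents, metrics clients, and shutdown handling on top of this skeleton.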
Example #1
Source File: DynamoDBStreamsProxy.java    From Flink-CEPplus with Apache License 2.0
/**
 * Creates an AmazonDynamoDBStreamsAdapterClient.
 * Uses it as the internal client interacting with the DynamoDB streams.
 *
 * @param configProps configuration properties
 * @return an AWS DynamoDB streams adapter client
 */
@Override
protected AmazonKinesis createKinesisClient(Properties configProps) {
	ClientConfiguration awsClientConfig = new ClientConfigurationFactory().getConfig();
	setAwsClientConfigProperties(awsClientConfig, configProps);

	AWSCredentialsProvider credentials = getCredentialsProvider(configProps);
	awsClientConfig.setUserAgentPrefix(
			String.format(
					USER_AGENT_FORMAT,
					EnvironmentInformation.getVersion(),
					EnvironmentInformation.getRevisionInformation().commitId));

	AmazonDynamoDBStreamsAdapterClient adapterClient =
			new AmazonDynamoDBStreamsAdapterClient(credentials, awsClientConfig);

	if (configProps.containsKey(AWS_ENDPOINT)) {
		adapterClient.setEndpoint(configProps.getProperty(AWS_ENDPOINT));
	} else {
		adapterClient.setRegion(Region.getRegion(
				Regions.fromName(configProps.getProperty(AWS_REGION))));
	}

	return adapterClient;
}
 
Example #2
Source File: DynamoDBStreamsProxy.java    From flink with Apache License 2.0
/**
 * Creates an AmazonDynamoDBStreamsAdapterClient.
 * Uses it as the internal client interacting with the DynamoDB streams.
 *
 * @param configProps configuration properties
 * @return an AWS DynamoDB streams adapter client
 */
@Override
protected AmazonKinesis createKinesisClient(Properties configProps) {
	ClientConfiguration awsClientConfig = new ClientConfigurationFactory().getConfig();
	setAwsClientConfigProperties(awsClientConfig, configProps);

	AWSCredentialsProvider credentials = getCredentialsProvider(configProps);
	awsClientConfig.setUserAgentPrefix(
			String.format(
					USER_AGENT_FORMAT,
					EnvironmentInformation.getVersion(),
					EnvironmentInformation.getRevisionInformation().commitId));

	AmazonDynamoDBStreamsAdapterClient adapterClient =
			new AmazonDynamoDBStreamsAdapterClient(credentials, awsClientConfig);

	if (configProps.containsKey(AWS_ENDPOINT)) {
		adapterClient.setEndpoint(configProps.getProperty(AWS_ENDPOINT));
	} else {
		adapterClient.setRegion(Region.getRegion(
				Regions.fromName(configProps.getProperty(AWS_REGION))));
	}

	return adapterClient;
}
 
Example #3
Source File: KinesisMessageChannelBinder.java    From spring-cloud-stream-binder-aws-kinesis with Apache License 2.0
public KinesisMessageChannelBinder(KinesisBinderConfigurationProperties configurationProperties,
		KinesisStreamProvisioner provisioningProvider, AmazonKinesisAsync amazonKinesis,
		AWSCredentialsProvider awsCredentialsProvider,
		@Nullable AmazonDynamoDB dynamoDBClient,
		@Nullable AmazonDynamoDBStreams dynamoDBStreams,
		@Nullable AmazonCloudWatch cloudWatchClient) {

	super(headersToMap(configurationProperties), provisioningProvider);
	Assert.notNull(amazonKinesis, "'amazonKinesis' must not be null");
	Assert.notNull(awsCredentialsProvider, "'awsCredentialsProvider' must not be null");
	this.configurationProperties = configurationProperties;
	this.amazonKinesis = amazonKinesis;
	this.cloudWatchClient = cloudWatchClient;
	this.dynamoDBClient = dynamoDBClient;
	this.awsCredentialsProvider = awsCredentialsProvider;

	if (dynamoDBStreams != null) {
		this.dynamoDBStreamsAdapter = new AmazonDynamoDBStreamsAdapterClient(dynamoDBStreams);
	}
	else {
		this.dynamoDBStreamsAdapter = null;
	}
}
 
Example #4
Source File: DynamoDBTableReplicator.java    From podyn with Apache License 2.0
public void startReplicatingChanges() throws StreamNotEnabledException {
	if (tableSchema == null) {
		throw new TableExistsException("table %s does not exist in destination", dynamoTableName);
	}

	String tableStreamArn = getStreamArn();

	if (tableStreamArn == null) {
		throw new StreamNotEnabledException("table %s does not have a stream enabled\n", dynamoTableName);
	}

	AmazonDynamoDBStreamsAdapterClient adapterClient = new AmazonDynamoDBStreamsAdapterClient(streamsClient);
	AmazonCloudWatch cloudWatchClient = AmazonCloudWatchClientBuilder.standard().build();

	String workerId = generateWorkerId();

	final KinesisClientLibConfiguration workerConfig = new KinesisClientLibConfiguration(
			APPLICATION_NAME, tableStreamArn, awsCredentialsProvider, workerId).
			withMaxRecords(1000).
			withIdleTimeBetweenReadsInMillis(500).
			withCallProcessRecordsEvenForEmptyRecordList(false).
			withCleanupLeasesUponShardCompletion(false).
			withFailoverTimeMillis(20000).
			withTableName(LEASE_TABLE_PREFIX + dynamoTableName).
			withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON);

	Worker worker = new Worker.Builder().
			recordProcessorFactory(recordProcessorFactory).
			config(workerConfig).
			kinesisClient(adapterClient).
			cloudWatchClient(cloudWatchClient).
			dynamoDBClient(dynamoDBClient).
			execService(executor).
			build();

	executor.execute(worker);
}
 
Example #5
Source File: DynamoStreamsManager.java    From dynamo-cassandra-proxy with Apache License 2.0
public void configure(DCProxyConfiguration config) {

        //TODO make table name dynamic
        String tableName = "test";

        this.dynamodbEndpoint = config.getAwsDynamodbEndpoint();
        this.streamsEndpoint = config.getStreamsEndpoint();
        this.signinRegion = config.getDynamoRegion();
        this.accessKey = config.getDynamoAccessKey();
        this.secretKey = config.getDynamoSecretKey();

        Properties props = System.getProperties();
        props.setProperty("aws.accessKeyId", accessKey);
        props.setProperty("aws.secretKey", secretKey);

        AwsClientBuilder.EndpointConfiguration endpointConfiguration =
                new AwsClientBuilder.EndpointConfiguration(streamsEndpoint, signinRegion);
        SystemPropertiesCredentialsProvider spcp = new SystemPropertiesCredentialsProvider();

        realDDB = AmazonDynamoDBClientBuilder.standard().
                withRegion(Regions.US_EAST_2).
                //withEndpointConfiguration(endpointConfiguration).
                withCredentials(spcp).build();

        DescribeTableResult tableResult = realDDB.describeTable(tableName);
        streamArn = tableResult.getTable().getLatestStreamArn();
        //streamSpec = tableResult.getTable().getStreamSpecification();
        streamsClient = AmazonDynamoDBStreamsClientBuilder.standard().withEndpointConfiguration(endpointConfiguration).build();

        adapterClient = new AmazonDynamoDBStreamsAdapterClient(streamsClient);

        recordProcessorFactory = new StreamsRecordProcessorFactory(ddbProxy, tableName);

        workerConfig = new KinesisClientLibConfiguration("test-app",
                streamArn,
                spcp,
                "streams-worker")
                .withMaxRecords(1000)
                .withIdleTimeBetweenReadsInMillis(500)
                .withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON);
        AmazonCloudWatch cloudWatchClient;
        cloudWatchClient = AmazonCloudWatchClientBuilder.standard()
        .withRegion(signinRegion)
        .build();

        System.out.println("Creating worker for stream: " + streamArn);

        /*
        DescribeStreamRequest request = new DescribeStreamRequest();
        DescribeStreamRequestAdapter describeStreamResult = new DescribeStreamRequestAdapter(request);
        String id = describeStreamResult.getExclusiveStartShardId();
        String id2 = describeStreamResult.withStreamArn(streamArn).getExclusiveStartShardId();
        */

        Worker worker = StreamsWorkerFactory.createDynamoDbStreamsWorker(
                recordProcessorFactory,
                workerConfig,
                adapterClient,
                realDDB,
                cloudWatchClient
        );

        System.out.println("Starting worker...");
        Thread t = new Thread(worker);
        t.start();
    }
 
Example #6
Source File: StreamsAdapterDemo.java    From aws-doc-sdk-examples with Apache License 2.0
/**
 * @param args
 */
public static void main(String[] args) throws Exception {
    System.out.println("Starting demo...");

    dynamoDBClient = AmazonDynamoDBClientBuilder.standard()
                                                .withRegion(awsRegion)
                                                .build();
    cloudWatchClient = AmazonCloudWatchClientBuilder.standard()
                                                    .withRegion(awsRegion)
                                                    .build();
    dynamoDBStreamsClient = AmazonDynamoDBStreamsClientBuilder.standard()
                                                              .withRegion(awsRegion)
                                                              .build();
    adapterClient = new AmazonDynamoDBStreamsAdapterClient(dynamoDBStreamsClient);
    String srcTable = tablePrefix + "-src";
    String destTable = tablePrefix + "-dest";
    recordProcessorFactory = new StreamsRecordProcessorFactory(dynamoDBClient, destTable);

    setUpTables();

    workerConfig = new KinesisClientLibConfiguration("streams-adapter-demo",
                                                     streamArn,
                                                     awsCredentialsProvider,
                                                     "streams-demo-worker")
            .withMaxRecords(1000)
            .withIdleTimeBetweenReadsInMillis(500)
            .withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON);

    System.out.println("Creating worker for stream: " + streamArn);
    worker = StreamsWorkerFactory.createDynamoDbStreamsWorker(recordProcessorFactory, workerConfig, adapterClient, dynamoDBClient, cloudWatchClient);
    System.out.println("Starting worker...");
    Thread t = new Thread(worker);
    t.start();

    Thread.sleep(25000);
    worker.shutdown();
    t.join();

    if (StreamsAdapterDemoHelper.scanTable(dynamoDBClient, srcTable).getItems()
                                .equals(StreamsAdapterDemoHelper.scanTable(dynamoDBClient, destTable).getItems())) {
        System.out.println("Scan result is equal.");
    }
    else {
        System.out.println("Tables are different!");
    }

    System.out.println("Done.");
    cleanupAndExit(0);
}
 
Example #7
Source File: DynamoDBSource.java    From pulsar with Apache License 2.0
@Override
    public void open(Map<String, Object> config, SourceContext sourceContext) throws Exception {
        this.dynamodbSourceConfig = DynamoDBSourceConfig.load(config);
        
        checkArgument(isNotBlank(dynamodbSourceConfig.getAwsDynamodbStreamArn()), "empty dynamo-stream arn");
//       Even if the endpoint is set, it seems to require a region to go with it
        checkArgument(isNotBlank(dynamodbSourceConfig.getAwsRegion()),
                     "The aws-region must be set");
        checkArgument(isNotBlank(dynamodbSourceConfig.getAwsCredentialPluginParam()), "empty aws-credential param");
        
        if (dynamodbSourceConfig.getInitialPositionInStream() == InitialPositionInStream.AT_TIMESTAMP) {
            checkArgument((dynamodbSourceConfig.getStartAtTime() != null),"Timestamp must be specified");
        }
        
        queue = new LinkedBlockingQueue<> (dynamodbSourceConfig.getReceiveQueueSize());
        workerId = InetAddress.getLocalHost().getCanonicalHostName() + ":" + UUID.randomUUID();
        
        AwsCredentialProviderPlugin credentialsProvider = createCredentialProvider(
                dynamodbSourceConfig.getAwsCredentialPluginName(),
                dynamodbSourceConfig.getAwsCredentialPluginParam());

        AmazonDynamoDBStreams dynamoDBStreamsClient = dynamodbSourceConfig.buildDynamoDBStreamsClient(credentialsProvider);
        AmazonDynamoDBStreamsAdapterClient adapterClient = new AmazonDynamoDBStreamsAdapterClient(dynamoDBStreamsClient);
        recordProcessorFactory = new StreamsRecordProcessorFactory(queue, dynamodbSourceConfig);

        kinesisClientLibConfig = new KinesisClientLibConfiguration(dynamodbSourceConfig.getApplicationName(),
                dynamodbSourceConfig.getAwsDynamodbStreamArn(),
                credentialsProvider.getCredentialProvider(),
                workerId)
                .withRegionName(dynamodbSourceConfig.getAwsRegion())
                .withInitialPositionInStream(dynamodbSourceConfig.getInitialPositionInStream());

        if(kinesisClientLibConfig.getInitialPositionInStream() == InitialPositionInStream.AT_TIMESTAMP) {
            kinesisClientLibConfig.withTimestampAtInitialPositionInStream(dynamodbSourceConfig.getStartAtTime());
        }

        worker = StreamsWorkerFactory.createDynamoDbStreamsWorker(recordProcessorFactory,
                kinesisClientLibConfig,
                adapterClient,
                dynamodbSourceConfig.buildDynamoDBClient(credentialsProvider),
                dynamodbSourceConfig.buildCloudwatchClient(credentialsProvider));

        workerThread = new Thread(worker);
        workerThread.setDaemon(true);
        threadEx = null;
        workerThread.setUncaughtExceptionHandler((t, ex) -> {
            threadEx = ex;
            log.error("Worker died with error", ex);
        });
        workerThread.start();
    }
 
Example #8
Source File: CommandLineInterface.java    From dynamodb-cross-region-library with Apache License 2.0
public Worker createWorker() {

        // use default credential provider chain to locate appropriate credentials
        final AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();

        // initialize DynamoDB client and set the endpoint properly for source table / region
        final AmazonDynamoDB dynamodbClient = AmazonDynamoDBClientBuilder.standard()
                .withCredentials(credentialsProvider)
                .withEndpointConfiguration(createEndpointConfiguration(sourceRegion, sourceDynamodbEndpoint, AmazonDynamoDB.ENDPOINT_PREFIX))
                .build();

        // initialize Streams client
        final AwsClientBuilder.EndpointConfiguration streamsEndpointConfiguration = createEndpointConfiguration(sourceRegion,
                sourceDynamodbStreamsEndpoint, AmazonDynamoDBStreams.ENDPOINT_PREFIX);
        final ClientConfiguration streamsClientConfig = new ClientConfiguration().withGzip(false);
        final AmazonDynamoDBStreams streamsClient = AmazonDynamoDBStreamsClientBuilder.standard()
                .withCredentials(credentialsProvider)
                .withEndpointConfiguration(streamsEndpointConfiguration)
                .withClientConfiguration(streamsClientConfig)
                .build();

        // obtain the Stream ID associated with the source table
        final String streamArn = dynamodbClient.describeTable(sourceTable).getTable().getLatestStreamArn();
        final boolean streamEnabled = DynamoDBConnectorUtilities.isStreamsEnabled(streamsClient, streamArn, DynamoDBConnectorConstants.NEW_AND_OLD);
        Preconditions.checkArgument(streamArn != null, DynamoDBConnectorConstants.MSG_NO_STREAMS_FOUND);
        Preconditions.checkState(streamEnabled, DynamoDBConnectorConstants.STREAM_NOT_READY);

        // initialize DynamoDB client for KCL
        final AmazonDynamoDB kclDynamoDBClient = AmazonDynamoDBClientBuilder.standard()
                .withCredentials(credentialsProvider)
                .withEndpointConfiguration(createKclDynamoDbEndpointConfiguration())
                .build();

        // initialize DynamoDB Streams Adapter client and set the Streams endpoint properly
        final AmazonDynamoDBStreamsAdapterClient streamsAdapterClient = new AmazonDynamoDBStreamsAdapterClient(streamsClient);

        // initialize CloudWatch client and set the region to emit metrics to
        final AmazonCloudWatch kclCloudWatchClient;
        if (isPublishCloudWatch) {
            kclCloudWatchClient = AmazonCloudWatchClientBuilder.standard()
                    .withCredentials(credentialsProvider)
                    .withRegion(kclRegion.or(sourceRegion).getName()).build();
        } else {
            kclCloudWatchClient = new NoopCloudWatch();
        }

        // try to get taskname from command line arguments, auto generate one if needed
        final AwsClientBuilder.EndpointConfiguration destinationEndpointConfiguration = createEndpointConfiguration(destinationRegion,
                destinationDynamodbEndpoint, AmazonDynamoDB.ENDPOINT_PREFIX);
        final String actualTaskName = DynamoDBConnectorUtilities.getTaskName(sourceRegion, destinationRegion, taskName, sourceTable, destinationTable);

        // set the appropriate Connector properties for the destination KCL configuration
        final Properties properties = new Properties();
        properties.put(DynamoDBStreamsConnectorConfiguration.PROP_APP_NAME, actualTaskName);
        properties.put(DynamoDBStreamsConnectorConfiguration.PROP_DYNAMODB_ENDPOINT, destinationEndpointConfiguration.getServiceEndpoint());
        properties.put(DynamoDBStreamsConnectorConfiguration.PROP_DYNAMODB_DATA_TABLE_NAME, destinationTable);
        properties.put(DynamoDBStreamsConnectorConfiguration.PROP_REGION_NAME, destinationRegion.getName());

        // create the record processor factory based on given pipeline and connector configurations
        // use the master to replicas pipeline
        final KinesisConnectorRecordProcessorFactory<Record, Record> factory = new KinesisConnectorRecordProcessorFactory<>(
                new DynamoDBMasterToReplicasPipeline(), new DynamoDBStreamsConnectorConfiguration(properties, credentialsProvider));

        // create the KCL configuration with default values
        final KinesisClientLibConfiguration kclConfig = new KinesisClientLibConfiguration(actualTaskName,
                streamArn,
                credentialsProvider,
                DynamoDBConnectorConstants.WORKER_LABEL + actualTaskName + UUID.randomUUID().toString())
                // worker will use checkpoint table if available, otherwise it is safer
                // to start at beginning of the stream
                .withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON)
                // we want the maximum batch size to avoid network transfer latency overhead
                .withMaxRecords(getRecordsLimit.or(DynamoDBConnectorConstants.STREAMS_RECORDS_LIMIT))
                // wait a reasonable amount of time - default 0.5 seconds
                .withIdleTimeBetweenReadsInMillis(DynamoDBConnectorConstants.IDLE_TIME_BETWEEN_READS)
                // Remove calls to GetShardIterator
                .withValidateSequenceNumberBeforeCheckpointing(false)
                // make parent shard poll interval tunable to decrease time to run integration test
                .withParentShardPollIntervalMillis(parentShardPollIntervalMillis.or(DynamoDBConnectorConstants.DEFAULT_PARENT_SHARD_POLL_INTERVAL_MILLIS))
                // avoid losing leases too often - default 60 seconds
                .withFailoverTimeMillis(DynamoDBConnectorConstants.KCL_FAILOVER_TIME);

        // create the KCL worker for this connector
        return new Worker(factory, kclConfig, streamsAdapterClient, kclDynamoDBClient, kclCloudWatchClient);
    }
 
Example #9
Source File: AmazonDynamoDBStreamstoIgnite.java    From aws-big-data-blog with Apache License 2.0
public void run() throws Exception {
	adapterClient = new AmazonDynamoDBStreamsAdapterClient(new ClientConfiguration());
	adapterClient.setEndpoint(streamsEndpoint);
	dynamoDBClient = new AmazonDynamoDBClient(new ClientConfiguration());
	dynamoDBClient.setEndpoint(dynamodbEndpoint);

	cloudWatchClient = new AmazonCloudWatchClient(dynamoDBCredentials, new ClientConfiguration());

	TcpDiscoverySpi spi = new TcpDiscoverySpi();
	TcpDiscoveryVmIpFinder ipFinder = new TcpDiscoveryVmIpFinder();
	List<String> hostList = Arrays.asList(Properties.getString("hostList").split(","));
	ipFinder.setAddresses(hostList);
	spi.setIpFinder(ipFinder);
	IgniteConfiguration cfg = new IgniteConfiguration();
	cfg.setDiscoverySpi(spi);
	cfg.setClientMode(true);
	cfg.setPeerClassLoadingEnabled(true);

	@SuppressWarnings("unused")
	Ignite ignite = Ignition.start(cfg);
	cache = Ignition.ignite().cache(Properties.getString("cacheName"));
	LOG.info(">>> cache acquired");

	recordProcessorFactory = new StreamsRecordProcessorFactory(cache);
	workerConfig = new KinesisClientLibConfiguration(Properties.getString("applicationName"), streamArn,
			streamsCredentials, "ddbstreamsworker")
					.withMaxRecords(Integer.parseInt(Properties.getString("maxRecords")))
					.withInitialPositionInStream(
							InitialPositionInStream.valueOf(Properties.getString("initialPositionInStream")));

	LOG.info("Creating worker for stream: " + streamArn);
	worker = new Worker(recordProcessorFactory, workerConfig, adapterClient, dynamoDBClient, cloudWatchClient);
	LOG.info("Starting worker...");

	int exitCode = 0;
	try {
		worker.run();
	} catch (Throwable t) {
		LOG.error("Caught throwable while processing data.");
		t.printStackTrace();
		exitCode = 1;
	}
	System.exit(exitCode);
}
 
Example #10
Source File: StreamsAdapterDemo.java    From aws-dynamodb-examples with Apache License 2.0
/**
 * @param args
 */
public static void main(String[] args) throws Exception {
    System.out.println("Starting demo...");

    String srcTable = tablePrefix + "-src";
    String destTable = tablePrefix + "-dest";
    streamsCredentials = new ProfileCredentialsProvider();
    dynamoDBCredentials = new ProfileCredentialsProvider();
    recordProcessorFactory = new StreamsRecordProcessorFactory(dynamoDBCredentials, dynamodbEndpoint, serviceName, destTable);


    /* ===== REQUIRED =====
     * Users will have to explicitly instantiate and configure the adapter, then pass it to
     * the KCL worker.
     */
    adapterClient = new AmazonDynamoDBStreamsAdapterClient(streamsCredentials, new ClientConfiguration());
    adapterClient.setEndpoint(streamsEndpoint);

    dynamoDBClient = new AmazonDynamoDBClient(dynamoDBCredentials, new ClientConfiguration());
    dynamoDBClient.setEndpoint(dynamodbEndpoint);

    cloudWatchClient = new AmazonCloudWatchClient(dynamoDBCredentials, new ClientConfiguration());

    setUpTables();

    workerConfig = new KinesisClientLibConfiguration("streams-adapter-demo",
            streamArn, streamsCredentials, "streams-demo-worker")
        .withMaxRecords(1)
        .withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON);

    System.out.println("Creating worker for stream: " + streamArn);
    worker = new Worker(recordProcessorFactory, workerConfig, adapterClient, dynamoDBClient, cloudWatchClient);
    System.out.println("Starting worker...");
    Thread t = new Thread(worker);
    t.start();

    Thread.sleep(25000);
    worker.shutdown();
    t.join();

    if(StreamsAdapterDemoHelper.scanTable(dynamoDBClient, srcTable).getItems().equals(StreamsAdapterDemoHelper.scanTable(dynamoDBClient, destTable).getItems())) {
        System.out.println("Scan result is equal.");
    } else {
        System.out.println("Tables are different!");
    }

    System.out.println("Done.");
    cleanupAndExit(0);
}