Java Code Examples for org.apache.kafka.streams.KafkaStreams#start()

The following examples show how to use org.apache.kafka.streams.KafkaStreams#start(). They are taken from open-source projects; the source file, project, and license are listed above each example.
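Before the individual examples, here is a minimal sketch of the usual start()/close() lifecycle. KafkaStreams#start() is non-blocking: processing runs on internal stream threads, so a long-running application typically registers a shutdown hook and parks the main thread. The application id, broker address, and topic names below are placeholders, not taken from any of the projects that follow.

import java.time.Duration;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;

public class StartLifecycleSketch {
    public static void main(String[] args) throws InterruptedException {
        Properties config = new Properties();
        config.put(StreamsConfig.APPLICATION_ID_CONFIG, "start-lifecycle-sketch"); // placeholder id
        config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");      // assumed broker address
        config.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        // Trivial pass-through topology; topic names are placeholders.
        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("input-topic").to("output-topic");

        KafkaStreams streams = new KafkaStreams(builder.build(), config);
        CountDownLatch latch = new CountDownLatch(1);

        // Close the instance cleanly on Ctrl-C, then release the main thread.
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            streams.close(Duration.ofSeconds(10));
            latch.countDown();
        }));

        streams.start(); // non-blocking: work happens on internal stream threads
        latch.await();   // keep the JVM alive until shutdown is requested
    }
}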
Example 1
Source File: StreamsStarterApp.java    From kafka-streams-machine-learning-examples with Apache License 2.0
public static void main(String[] args) {

		Properties config = new Properties();
		config.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-starter-app");
		config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
		config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
		config.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
		config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

		StreamsBuilder builder = new StreamsBuilder();

		KStream<String, String> kStream = builder.stream("streams-file-input");
		// do stuff
		kStream.to("streams-wordcount-output");

		KafkaStreams streams = new KafkaStreams(builder.build(), config);
		streams.cleanUp(); // only do this in dev - not in prod
		streams.start();

		// print the local thread metadata (thread and task assignments)
		System.out.println(streams.localThreadsMetadata().toString());

		// shutdown hook to correctly close the streams application
		Runtime.getRuntime().addShutdownHook(new Thread(streams::close));

	}
 
Example 2
Source File: StreamDemo.java    From javatech with Creative Commons Attribution Share Alike 4.0 International
public static void main(String[] args) {
	// 1. Specify the stream configuration
	Properties config = new Properties();
	config.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-application");
	config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, HOST);
	config.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
	config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

	// Set up the stream builder
	StreamsBuilder builder = new StreamsBuilder();
	KStream<String, String> textLines = builder.stream("TextLinesTopic");
	KTable<String, Long> wordCounts = textLines
		.flatMapValues(textLine -> Arrays.asList(textLine.toLowerCase().split("\\W+")))
		.groupBy((key, word) -> word)
		.count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("counts-store"));
	wordCounts.toStream().to("WordsWithCountsTopic", Produced.with(Serdes.String(), Serdes.Long()));

	// Initialize Kafka Streams from the builder and the configuration
	KafkaStreams streams = new KafkaStreams(builder.build(), config);
	streams.start();
}
 
Example 3
Source File: KafkaRuntime.java    From jMetalSP with MIT License
@Override
public void startStreamingDataSources(List<StreamingDataSource<?>> streamingDataSourceList) {
    for (StreamingDataSource<?> streamingDataSource : streamingDataSourceList) {
        ((KafkaStreamingDataSource)streamingDataSource).setStreamingBuilder(streamsBuilder);
        ((KafkaStreamingDataSource)streamingDataSource).setTopic(topic);
        streamingDataSource.run();
    }

    //streamingContext.start();
    KafkaStreams streams = new KafkaStreams(streamsBuilder.build(), config);

    try {
        streams.start();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Example 4
Source File: KafkaStreamWordCount.java    From Building-Data-Streaming-Applications-with-Apache-Kafka with MIT License
public static void main(String[] args) throws Exception {
    Properties kafkaStreamProperties = new Properties();
    kafkaStreamProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka-stream-wordCount");
    kafkaStreamProperties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    kafkaStreamProperties.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, "localhost:2181");
    kafkaStreamProperties.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    kafkaStreamProperties.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());

    Serde<String> stringSerde = Serdes.String();
    Serde<Long> longSerde = Serdes.Long();

    KStreamBuilder streamTopology = new KStreamBuilder();
    KStream<String, String> topicRecords = streamTopology.stream(stringSerde, stringSerde, "input");
    KStream<String, Long> wordCounts = topicRecords
            .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
            .map((key, word) -> new KeyValue<>(word, word))
            .countByKey("Count")
            .toStream();
    wordCounts.to(stringSerde, longSerde, "wordCount");

    KafkaStreams streamManager = new KafkaStreams(streamTopology, kafkaStreamProperties);
    streamManager.start();

    Runtime.getRuntime().addShutdownHook(new Thread(streamManager::close));
}
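Note that this example (like Examples 5, 11, and 15 below) targets the pre-1.0 Kafka Streams API: KStreamBuilder, the serde-typed stream()/to()/countByKey() overloads, and the ZOOKEEPER_CONNECT_CONFIG and KEY/VALUE_SERDE_CLASS_CONFIG settings were all removed in later releases. As a rough sketch of the equivalent wiring against the current DSL (assumed, not taken from the book's source, and reusing Example 4's topic names and properties minus the removed settings):

// Imports assumed: org.apache.kafka.streams.StreamsBuilder, org.apache.kafka.streams.kstream.Consumed,
// org.apache.kafka.streams.kstream.Produced; DEFAULT_KEY/VALUE_SERDE_CLASS_CONFIG replace the old serde configs.
StreamsBuilder streamTopology = new StreamsBuilder();
streamTopology.stream("input", Consumed.with(Serdes.String(), Serdes.String()))
        .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
        .groupBy((key, word) -> word)
        .count()
        .toStream()
        .to("wordCount", Produced.with(Serdes.String(), Serdes.Long()));
KafkaStreams streamManager = new KafkaStreams(streamTopology.build(), kafkaStreamProperties);
streamManager.start();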
 
Example 5
Source File: IPFraudKafkaStreamApp.java    From Building-Data-Streaming-Applications-with-Apache-Kafka with MIT License
public static void main(String[] args) throws Exception {
    Properties kafkaStreamProperties = new Properties();
    kafkaStreamProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, "IP-Fraud-Detection");
    kafkaStreamProperties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    kafkaStreamProperties.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, "localhost:2181");
    kafkaStreamProperties.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    kafkaStreamProperties.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());

    Serde<String> stringSerde = Serdes.String();

    KStreamBuilder fraudDetectionTopology = new KStreamBuilder();

    KStream<String, String> ipRecords = fraudDetectionTopology.stream(stringSerde, stringSerde, propertyReader.getPropertyValue("topic"));

    KStream<String, String> fraudIpRecords = ipRecords
            .filter((k, v) -> isFraud(v));

    fraudIpRecords.to(propertyReader.getPropertyValue("output_topic"));

    KafkaStreams streamManager = new KafkaStreams(fraudDetectionTopology, kafkaStreamProperties);
    streamManager.start();

    Runtime.getRuntime().addShutdownHook(new Thread(streamManager::close));
}
 
Example 6
Source File: ProcessStreamService.java    From SkaETL with Apache License 2.0
public void createStreamEs(String inputTopic, ParameterOutput parameterOutput) {

        StreamsBuilder builder = new StreamsBuilder();

        KStream<String, JsonNode> streamToES = builder.stream(inputTopic, Consumed.with(Serdes.String(), GenericSerdes.jsonNodeSerde()));
        streamToES.process(() -> applicationContext.getBean(JsonNodeToElasticSearchProcessor.class, parameterOutput.getElasticsearchRetentionLevel(), parameterOutput.getIndexShape()));

        KafkaStreams streams = new KafkaStreams(builder.build(), KafkaUtils.createKStreamProperties(getProcessConsumer().getIdProcess() + ProcessConstants.ES_PROCESS, getBootstrapServer()));
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
        streams.start();
        addStreams(streams);
    }
 
Example 7
Source File: ReferentialImporter.java    From SkaETL with Apache License 2.0
private void feedStream(String consumerId, ProcessReferential processReferential, String topicMerge) {
    String topicSource = consumerId + TOPIC_PARSED_PROCESS;
    log.info("creating {} Process Merge for topicsource {}", consumerId, topicSource);
    StreamsBuilder builder = new StreamsBuilder();
    KStream<String, JsonNode> streamToMerge = builder.stream(topicSource, Consumed.with(Serdes.String(), GenericSerdes.jsonNodeSerde()));
    streamToMerge.to(topicMerge, Produced.with(Serdes.String(), GenericSerdes.jsonNodeSerde()));
    KafkaStreams streams = new KafkaStreams(builder.build(), KafkaUtils.createKStreamProperties(processReferential.getIdProcess() + "_" + consumerId + "-_merge-topic", kafkaConfiguration.getBootstrapServers()));
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
    runningMergeProcess.get(processReferential).add(streams);
    streams.start();
}
 
Example 8
Source File: PregelGraphAlgorithm.java    From kafka-graphs with Apache License 2.0
@Override
public GraphAlgorithmState<Void> configure(StreamsBuilder builder, Properties streamsConfig) {
    ClientUtils.createTopic(solutionSetTopic, numPartitions, replicationFactor, streamsConfig);
    ClientUtils.createTopic(workSetTopic, numPartitions, replicationFactor, streamsConfig);

    computation.prepare(builder, streamsConfig);

    Topology topology = builder.build();
    log.info("Topology description {}", topology.describe());
    streams = new KafkaStreams(topology, streamsConfig, new PregelClientSupplier());
    streams.start();

    return new GraphAlgorithmState<>(streams, GraphAlgorithmState.State.CREATED, 0,
        0L, Collections.emptyMap(), null);
}
 
Example 9
Source File: KStreamAggDemo.java    From Kafka-Streams-Real-time-Stream-Processing with The Unlicense
public static void main(String[] args) {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, AppConfigs.applicationID);
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, AppConfigs.bootstrapServers);
    props.put(StreamsConfig.STATE_DIR_CONFIG, AppConfigs.stateStoreName);

    StreamsBuilder streamsBuilder = new StreamsBuilder();
    KStream<String, Employee> KS0 = streamsBuilder.stream(AppConfigs.topicName,
        Consumed.with(AppSerdes.String(), AppSerdes.Employee()));

    KGroupedStream<String, Employee> KGS1 = KS0.groupBy(
        (k, v) -> v.getDepartment(),
        Serialized.with(AppSerdes.String(),
            AppSerdes.Employee()));

    KTable<String, DepartmentAggregate> KT2 = KGS1.aggregate(
        //Initializer
        () -> new DepartmentAggregate()
            .withEmployeeCount(0)
            .withTotalSalary(0)
            .withAvgSalary(0D),
        //Aggregator
        (k, v, aggV) -> new DepartmentAggregate()
            .withEmployeeCount(aggV.getEmployeeCount() + 1)
            .withTotalSalary(aggV.getTotalSalary() + v.getSalary())
            .withAvgSalary((aggV.getTotalSalary() + v.getSalary()) / (aggV.getEmployeeCount() + 1D)),
        //Materialized state store (store name and serdes)
        Materialized.<String, DepartmentAggregate, KeyValueStore<Bytes, byte[]>>as("agg-store")
            .withKeySerde(AppSerdes.String())
            .withValueSerde(AppSerdes.DepartmentAggregate())
    );

    KT2.toStream().foreach(
        (k, v) -> System.out.println("Key = " + k + " Value = " + v.toString()));

    KafkaStreams streams = new KafkaStreams(streamsBuilder.build(), props);
    streams.start();
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));

}
 
Example 10
Source File: KafkaStreamsJoinsApp.java    From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {

        StreamsConfig streamsConfig = new StreamsConfig(getProperties());
        StreamsBuilder builder = new StreamsBuilder();


        Serde<Purchase> purchaseSerde = StreamsSerdes.PurchaseSerde();
        Serde<String> stringSerde = Serdes.String();

        KeyValueMapper<String, Purchase, KeyValue<String,Purchase>> custIdCCMasking = (k, v) -> {
            Purchase masked = Purchase.builder(v).maskCreditCard().build();
            return new KeyValue<>(masked.getCustomerId(), masked);
        };


        Predicate<String, Purchase> coffeePurchase = (key, purchase) -> purchase.getDepartment().equalsIgnoreCase("coffee");
        Predicate<String, Purchase> electronicPurchase = (key, purchase) -> purchase.getDepartment().equalsIgnoreCase("electronics");

        int COFFEE_PURCHASE = 0;
        int ELECTRONICS_PURCHASE = 1;

        KStream<String, Purchase> transactionStream = builder.stream( "transactions", Consumed.with(Serdes.String(), purchaseSerde)).map(custIdCCMasking);

        KStream<String, Purchase>[] branchesStream = transactionStream.selectKey((k,v)-> v.getCustomerId()).branch(coffeePurchase, electronicPurchase);

        KStream<String, Purchase> coffeeStream = branchesStream[COFFEE_PURCHASE];
        KStream<String, Purchase> electronicsStream = branchesStream[ELECTRONICS_PURCHASE];

        ValueJoiner<Purchase, Purchase, CorrelatedPurchase> purchaseJoiner = new PurchaseJoiner();
        JoinWindows twentyMinuteWindow =  JoinWindows.of(60 * 1000 * 20);

        KStream<String, CorrelatedPurchase> joinedKStream = coffeeStream.join(electronicsStream,
                                                                              purchaseJoiner,
                                                                              twentyMinuteWindow,
                                                                              Joined.with(stringSerde,
                                                                                          purchaseSerde,
                                                                                          purchaseSerde));

        joinedKStream.print(Printed.<String, CorrelatedPurchase>toSysOut().withLabel("joined KStream"));

        // used only to produce data for this application, not typical usage
        MockDataProducer.producePurchaseData();
        
        LOG.info("Starting Join Examples");
        KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), streamsConfig);
        kafkaStreams.start();
        Thread.sleep(65000);
        LOG.info("Shutting down the Join Examples now");
        kafkaStreams.close();
        MockDataProducer.shutdown();


    }
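A side note on the join window above: JoinWindows.of(long), which takes milliseconds, was later deprecated in favor of a Duration-based overload, so on Kafka Streams 2.1+ the same twenty-minute window would typically be written as in the sketch below (an equivalence note, not from the book's source):

// Duration-based equivalent of JoinWindows.of(60 * 1000 * 20); requires java.time.Duration
JoinWindows twentyMinuteWindow = JoinWindows.of(Duration.ofMinutes(20));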
 
Example 11
Source File: ProcessorKafkaStreamInstrumented.java    From kafka-streams-ex with MIT License
/** Runs the streams program, writing to the "fast-avgs-instrumented", 
    * "medium-avgs-instrumented", and "slow-avgs-instrumented" topics.
    *
    * @param args Not used.
    */
public static void main(String[] args) throws Exception { 
       
       // Configuration for Kafka Streams.
       Properties config = new Properties();

       config.put(StreamsConfig.APPLICATION_ID_CONFIG,
                  "processor-kafka-streams-instrumented");
       config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG,
                  "localhost:9092");
       config.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG,
                  "localhost:2181");
       config.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG,
                  Serdes.String().getClass().getName());
       config.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG,
                  Serdes.Double().getClass().getName());

       // Start at latest message.
       config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");

       // Create the state stores. We need one for each of the
       // MessageProcessors in the topology.
       StateStoreSupplier fastStore = 
           Stores.create("FAST-store")
                 .withStringKeys()
                 .withDoubleValues()
                 .inMemory()
                 .build();

       // Build the topology.
       TopologyBuilder builder = new TopologyBuilder();
       builder.addSource("messages-source",
                         Serdes.String().deserializer(),
                         Serdes.Double().deserializer(),
                         "messages-instrumented")
              .addProcessor("FAST-processor",
                            () -> new MovingAverageProcessor(0.1),
                            "messages-source")
              .addStateStore(fastStore, "FAST-processor")
              .addSink("FAST-sink", 
                       "fast-avgs-instrumented", 
                       Serdes.String().serializer(),
                       Serdes.Double().serializer(),
                       "FAST-processor");

       KafkaStreams streams = new KafkaStreams(builder, config);
       streams.start();

}
 
Example 12
Source File: CoGroupingApplication.java    From kafka-streams-in-action with Apache License 2.0 4 votes vote down vote up
public static void main(String[] args) throws Exception {


        StreamsConfig streamsConfig = new StreamsConfig(getProperties());
        Deserializer<String> stringDeserializer = Serdes.String().deserializer();
        Serializer<String> stringSerializer = Serdes.String().serializer();
        Serde<Tuple<List<ClickEvent>, List<StockTransaction>>> eventPerformanceTuple = StreamsSerdes.EventTransactionTupleSerde();
        Serializer<Tuple<List<ClickEvent>, List<StockTransaction>>> tupleSerializer = eventPerformanceTuple.serializer();
        Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();
        Deserializer<StockTransaction> stockTransactionDeserializer = stockTransactionSerde.deserializer();

        Serde<ClickEvent> clickEventSerde = StreamsSerdes.ClickEventSerde();
        Deserializer<ClickEvent> clickEventDeserializer = clickEventSerde.deserializer();


        Topology topology = new Topology();
        Map<String, String> changeLogConfigs = new HashMap<>();
        changeLogConfigs.put("retention.ms", "120000");
        changeLogConfigs.put("cleanup.policy", "compact,delete");


        KeyValueBytesStoreSupplier storeSupplier = Stores.persistentKeyValueStore(TUPLE_STORE_NAME);
        StoreBuilder<KeyValueStore<String, Tuple<List<ClickEvent>, List<StockTransaction>>>> storeBuilder =
                Stores.keyValueStoreBuilder(storeSupplier,
                        Serdes.String(),
                        eventPerformanceTuple).withLoggingEnabled(changeLogConfigs);

        topology.addSource("Txn-Source", stringDeserializer, stockTransactionDeserializer, "stock-transactions")
                .addSource("Events-Source", stringDeserializer, clickEventDeserializer, "events")
                .addProcessor("Txn-Processor", StockTransactionProcessor::new, "Txn-Source")
                .addProcessor("Events-Processor", ClickEventProcessor::new, "Events-Source")
                .addProcessor("CoGrouping-Processor", CogroupingProcessor::new, "Txn-Processor", "Events-Processor")
                .addStateStore(storeBuilder, "CoGrouping-Processor")
                .addSink("Tuple-Sink", "cogrouped-results", stringSerializer, tupleSerializer, "CoGrouping-Processor");

        topology.addProcessor("Print", new KStreamPrinter("Co-Grouping"), "CoGrouping-Processor");


        MockDataProducer.produceStockTransactionsAndDayTradingClickEvents(50, 100, 100, StockTransaction::getSymbol);

        KafkaStreams kafkaStreams = new KafkaStreams(topology, streamsConfig);
        System.out.println("Co-Grouping App Started");
        kafkaStreams.cleanUp();
        kafkaStreams.start();
        Thread.sleep(70000);
        System.out.println("Shutting down the Co-Grouping App now");
        kafkaStreams.close();
        MockDataProducer.shutdown();
    }
 
Example 13
Source File: CoGroupingListeningExampleApplication.java    From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {

        StreamsConfig streamsConfig = new StreamsConfig(getProperties());
        Deserializer<String> stringDeserializer = Serdes.String().deserializer();
        Serializer<String> stringSerializer = Serdes.String().serializer();
        Serde<Tuple<List<ClickEvent>, List<StockTransaction>>> eventPerformanceTuple = StreamsSerdes.EventTransactionTupleSerde();
        Serializer<Tuple<List<ClickEvent>, List<StockTransaction>>> tupleSerializer = eventPerformanceTuple.serializer();
        Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();
        Deserializer<StockTransaction> stockTransactionDeserializer = stockTransactionSerde.deserializer();

        Serde<ClickEvent> clickEventSerde = StreamsSerdes.ClickEventSerde();
        Deserializer<ClickEvent> clickEventDeserializer = clickEventSerde.deserializer();


        Topology topology = new Topology();
        Map<String, String> changeLogConfigs = new HashMap<>();
        changeLogConfigs.put("retention.ms","120000" );
        changeLogConfigs.put("cleanup.policy", "compact,delete");

        KeyValueBytesStoreSupplier storeSupplier = Stores.persistentKeyValueStore(TUPLE_STORE_NAME);
        StoreBuilder<KeyValueStore<String, Tuple<List<ClickEvent>, List<StockTransaction>>>> builder = Stores.keyValueStoreBuilder(storeSupplier, Serdes.String(), eventPerformanceTuple);



        topology.addSource("Txn-Source", stringDeserializer, stockTransactionDeserializer, "stock-transactions")
                .addSource( "Events-Source", stringDeserializer, clickEventDeserializer, "events")
                .addProcessor("Txn-Processor", StockTransactionProcessor::new, "Txn-Source")
                .addProcessor("Events-Processor", ClickEventProcessor::new, "Events-Source")
                .addProcessor("CoGrouping-Processor", CogroupingProcessor::new, "Txn-Processor", "Events-Processor")
                .addStateStore(builder.withLoggingEnabled(changeLogConfigs), "CoGrouping-Processor")
                .addSink("Tuple-Sink", "cogrouped-results", stringSerializer, tupleSerializer, "CoGrouping-Processor");

        topology.addProcessor("Print", new KStreamPrinter("Co-Grouping"), "CoGrouping-Processor");


        MockDataProducer.produceStockTransactionsAndDayTradingClickEvents(50, 100, 100, StockTransaction::getSymbol);

        KafkaStreams kafkaStreams = new KafkaStreams(topology, streamsConfig);
        kafkaStreams.setGlobalStateRestoreListener(new LoggingStateRestoreListener());

        kafkaStreams.setUncaughtExceptionHandler((thread, exception) ->
            LOG.error("Thread [{}] encountered [{}]", thread.getName(), exception.getMessage())
        );

        kafkaStreams.setStateListener((newState, oldState) -> {
           if (oldState == KafkaStreams.State.REBALANCING && newState== KafkaStreams.State.RUNNING) {
               LOG.info("Topology Layout {}", topology.describe());
               LOG.info("Thread metadata {}", kafkaStreams.localThreadsMetadata());
           }
        });


        LOG.info("Co-Grouping App Started");
        kafkaStreams.cleanUp();
        kafkaStreams.start();

        Thread.sleep(70000);
        LOG.info("Shutting down the Co-Grouping metrics App now");
        kafkaStreams.close();
        MockDataProducer.shutdown();
    }
 
Example 14
Source File: NamingChangelogAndRepartitionTopics.java    From kafka-tutorials with Apache License 2.0
public static void main(String[] args) throws Exception {

    if (args.length < 1) {
      throw new IllegalArgumentException(
          "This program takes one argument: the path to an environment configuration file.");
    }

    final NamingChangelogAndRepartitionTopics instance = new NamingChangelogAndRepartitionTopics();
    final Properties envProps = instance.loadEnvProperties(args[0]);
    if (args.length > 1 ) {
      final String namesAndFilter = args[1];

      if (namesAndFilter.contains("filter")) {
        envProps.put("add.filter", "true");
      }

      if (namesAndFilter.contains("names")) {
        envProps.put("add.names", "true");
      }
    }

    final CountDownLatch latch = new CountDownLatch(1);
    final Properties streamProps = instance.buildStreamsProperties(envProps);
    final Topology topology = instance.buildTopology(envProps);

    instance.createTopics(envProps);

    final KafkaStreams streams = new KafkaStreams(topology, streamProps);

    // Attach shutdown handler to catch Control-C.
    Runtime.getRuntime().addShutdownHook(new Thread("streams-shutdown-hook") {
      @Override
      public void run() {
        streams.close(Duration.ofSeconds(5));
        latch.countDown();
      }
    });

    try {
      streams.start();
      latch.await();
    } catch (Throwable e) {
      System.exit(1);
    }
    System.exit(0);
  }
 
Example 15
Source File: KTableKafkaStream.java    From kafka-streams-ex with MIT License
/** Runs the streams program (which produces its own data), writing
 *  to the "longs-table", "longs-table-out", "longs-stream-out" topics.
 *
 * @param args Not used.
 */
public static void main(String[] args) throws Exception {
    
    Properties config = new Properties();

    config.put(StreamsConfig.APPLICATION_ID_CONFIG,
        "ktable-kafka-stream");
    config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    config.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, "localhost:2181");

    config.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG,
        Serdes.String().getClass().getName());
    config.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG,
        Serdes.Long().getClass().getName());

    KStreamBuilder builder = new KStreamBuilder();
    
    KTable<String, Long> longs_table = builder.table("longs-table");

    longs_table.to("longs-table-out");

    // Convert to a stream and output to see what happens.
    longs_table.toStream().to("longs-stream-out");

    KafkaStreams streams = new KafkaStreams(builder, config);
    streams.start();

    Properties producerConfig = new Properties();
    producerConfig.put("bootstrap.servers", "localhost:9092");
    producerConfig.put("key.serializer", 
        "org.apache.kafka.common.serialization.StringSerializer");
    producerConfig.put("value.serializer",
        "org.apache.kafka.common.serialization.LongSerializer");

    KafkaProducer<String, Long> producer = 
        new KafkaProducer<String, Long>(producerConfig);

    Random rng = new Random(12345L);
    String[] keys = {"A"}; // Can change to make a more complicated example.
    Long[] values = {1L, 2L, 3L};

    try {

        while(true) {
            String key = keys[rng.nextInt(keys.length)];
            Long value = values[rng.nextInt(values.length)];
            producer.send(
                new ProducerRecord<String, Long>("longs-table", 
                                                 key, 
                                                 value));
            Thread.sleep(1000L);
        } // Close while loop for generating the data. 
    
    } catch(InterruptedException e) {
        producer.close();
    } // Close try/catch around data production.
}
 
Example 16
Source File: StreamsBuilderSmokeTest.java    From kafka-junit with BSD 3-Clause "New" or "Revised" License
/**
 * Integration test validates that streams can be used against KafkaTestServer.
 */
@Test
void testStreamConsumer() throws Exception {
    // Define topic to test with.
    final String inputTopic = "stream-input-topic" + System.currentTimeMillis();
    final String outputTopic = "stream-output-topic" + System.currentTimeMillis();

    // Define how many records
    final int numberOfRecords = 25;
    final int partitionId = 0;

    // Tracks how many records the Stream consumer has processed.
    final AtomicInteger recordCounter = new AtomicInteger(0);

    // Create our test server instance.
    try (final KafkaTestServer kafkaTestServer = new KafkaTestServer()) {
        // Start it and create our topic.
        kafkaTestServer.start();

        // Create test utils instance.
        final KafkaTestUtils kafkaTestUtils = new KafkaTestUtils(kafkaTestServer);

        // Create topics
        kafkaTestUtils.createTopic(inputTopic, 1, (short) 1);
        kafkaTestUtils.createTopic(outputTopic, 1, (short) 1);

        // Produce random data into input topic
        kafkaTestUtils.produceRecords(numberOfRecords, inputTopic, partitionId);

        // Define stream consumer properties.
        final Properties config = new Properties();
        config.put(StreamsConfig.APPLICATION_ID_CONFIG, "testStreamProcessor");
        config.setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaTestServer.getKafkaConnectString());
        config.put("group.id", "test-stream-group");
        config.put("auto.offset.reset", "earliest");

        // Build the stream
        final StreamsBuilder streamsBuilder = new StreamsBuilder();
        streamsBuilder
            // Read from input topic.
            .stream(inputTopic)

            // For each record processed, increment our counter
            .map((key, word) -> {
                recordCounter.incrementAndGet();
                return new KeyValue<>(word, word);
            })

            // Write to output topic.
            .to(outputTopic);

        // Create stream
        final KafkaStreams kafkaStreams = new KafkaStreams(streamsBuilder.build(), new StreamsConfig(config));
        try {
            // Start the stream consumer
            kafkaStreams.start();

            // Since stream processing is async, we need to wait for the Stream processor to start, consume messages
            // from the input topic, and process them. We'll wait up to 10 seconds for it to do its thing.
            for (int timeoutCounter = 0; timeoutCounter <= 10; timeoutCounter++) {
                // If we've processed all of our records
                if (recordCounter.get() >= numberOfRecords) {
                    // Break out of sleep loop.
                    break;
                }
                // Otherwise, we need to wait longer, sleep 1 second.
                Thread.sleep(1000L);
            }
        } finally {
            // Close the stream consumer.
            kafkaStreams.close();
        }

        // Validation.
        Assertions.assertEquals(numberOfRecords, recordCounter.get(), "Should have 25 records processed");

        // Consume records from output topic.
        final List<ConsumerRecord<String, String>> outputRecords =
            kafkaTestUtils.consumeAllRecordsFromTopic(outputTopic, StringDeserializer.class, StringDeserializer.class);

        // Validate we got the correct number of records.
        Assertions.assertEquals(numberOfRecords, outputRecords.size());
    }
}
 
Example 17
Source File: KafkaStreamsYellingIntegrationTest.java    From kafka-streams-in-action with Apache License 2.0
@Test
public void shouldYellFromMultipleTopics() throws Exception {

    StreamsBuilder streamsBuilder = new StreamsBuilder();

    streamsBuilder.<String, String>stream(Pattern.compile("yell.*"))
            .mapValues(String::toUpperCase)
            .to(OUT_TOPIC);

    kafkaStreams = new KafkaStreams(streamsBuilder.build(), streamsConfig);
    kafkaStreams.start();

    List<String> valuesToSendList = Arrays.asList("this", "should", "yell", "at", "you");
    List<String> expectedValuesList = valuesToSendList.stream()
                                                      .map(String::toUpperCase)
                                                      .collect(Collectors.toList());

    IntegrationTestUtils.produceValuesSynchronously(YELL_A_TOPIC,
                                                    valuesToSendList,
                                                    producerConfig,
                                                    mockTime);
    int expectedNumberOfRecords = 5;
    List<String> actualValues = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfig,
                                                                                       OUT_TOPIC,
                                                                                       expectedNumberOfRecords);

    assertThat(actualValues, equalTo(expectedValuesList));

    EMBEDDED_KAFKA.createTopic(YELL_B_TOPIC);

    valuesToSendList = Arrays.asList("yell", "at", "you", "too");
    IntegrationTestUtils.produceValuesSynchronously(YELL_B_TOPIC,
                                                    valuesToSendList,
                                                    producerConfig,
                                                    mockTime);

    expectedValuesList = valuesToSendList.stream().map(String::toUpperCase).collect(Collectors.toList());

    expectedNumberOfRecords = 4;
    actualValues = IntegrationTestUtils.waitUntilMinValuesRecordsReceived(consumerConfig,
                                                                          OUT_TOPIC,
                                                                          expectedNumberOfRecords);

    assertThat(actualValues, equalTo(expectedValuesList));

}
 
Example 18
Source File: CountingWindowApp.java    From Kafka-Streams-Real-time-Stream-Processing with The Unlicense
public static void main(String[] args) {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, AppConfigs.applicationID);
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, AppConfigs.bootstrapServers);
    props.put(StreamsConfig.STATE_DIR_CONFIG, AppConfigs.stateStoreName);

    StreamsBuilder streamsBuilder = new StreamsBuilder();
    KStream<String, SimpleInvoice> KS0 = streamsBuilder.stream(AppConfigs.posTopicName,
        Consumed.with(AppSerdes.String(), AppSerdes.SimpleInvoice())
            .withTimestampExtractor(new InvoiceTimeExtractor())
    );

    KGroupedStream<String, SimpleInvoice> KS1 = KS0.groupByKey(
        Grouped.with(AppSerdes.String(),
            AppSerdes.SimpleInvoice()));

    TimeWindowedKStream<String, SimpleInvoice> KS2 = KS1.windowedBy(
        TimeWindows.of(Duration.ofMinutes(5))
        //.grace(Duration.ofMillis(100))
    );

    KTable<Windowed<String>, Long> KT3 = KS2.count(
        //Materialized is not needed if you don't want to override defaults
        Materialized.<String, Long, WindowStore<Bytes, byte[]>>as("invoice-count")
        //.withRetention(Duration.ofHours(6))
    );

    //Suppress is only available in 2.1, Checkout 2.1 branch
    //.suppress(untilWindowCloses(unbounded()));


    KT3.toStream().foreach(
        (kWindowed, v) -> logger.info(
            "StoreID: " + kWindowed.key() +
                " Window start: " +
                Instant.ofEpochMilli(kWindowed.window().start())
                    .atOffset(ZoneOffset.UTC) +
                " Window end: " +
                Instant.ofEpochMilli(kWindowed.window().end())
                    .atOffset(ZoneOffset.UTC) +
                " Count: " + v +
                " Window#: " + kWindowed.window().hashCode()

        ));

    KafkaStreams streams = new KafkaStreams(streamsBuilder.build(), props);
    streams.start();
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
 
Example 19
Source File: CustomSinkApp.java    From Kafka-Streams-Real-time-Stream-Processing with The Unlicense
public static void main(String[] args) {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, AppConfigs.applicationID);
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, AppConfigs.bootstrapServers);
    props.put(StreamsConfig.STATE_DIR_CONFIG, AppConfigs.stateStoreLocation);

    StreamsBuilder streamsBuilder = new StreamsBuilder();

    //Global table for mappings
    GlobalKTable<String, TableMap> topicTableMapGlobalKTable = streamsBuilder.globalTable(
        AppConfigs.topicTableMap,
        Consumed.with(AppSerdes.String(),
            AppSerdes.TableMap()));

    //Stream of Records
    KStream<String, GenericRecord> recordKStream = streamsBuilder.stream(
        AppConfigs.topicPattern,
        Consumed.with(AppSerdes.String(),
            AppSerdes.GenericRecord())
    ).transform(() -> new RecordTransformer());

    //Join to get Target Table Name
    KStream<String, GenericRecord> joinedKStream = recordKStream.join(
        topicTableMapGlobalKTable,
        (keyGenericRecord, valueGenericRecord) -> keyGenericRecord,
        (valueGenericRecord, valueTableMap) -> {
            valueGenericRecord.setAdditionalProperty(
                AppConfigs.targetTableField,
                valueTableMap.getTargetTable());
            return valueGenericRecord;
        }
    );

    //Change key to target table name and cleanup record
    KStream<String, GenericRecord> sinkRecord = joinedKStream.selectKey(
        (k, v) -> {
            String newKey = v.getAdditionalProperties()
                .get(AppConfigs.targetTableField);
            v.getAdditionalProperties().remove(AppConfigs.targetTableField);
            return newKey;
        }).peek((k, v) -> logger.info("Ready to Sink key= " + k + " value= " + v));

    //Sink to Target Database
    sinkRecord.process(() -> new SinkProcessor());

    //Start the stream and add a shutdown hook
    KafkaStreams streams = new KafkaStreams(streamsBuilder.build(), props);
    streams.start();
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
 
Example 20
Source File: KStreamVsKTableExample.java    From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {

        StreamsConfig streamsConfig = new StreamsConfig(getProperties());

        StreamsBuilder builder = new StreamsBuilder();


        KTable<String, StockTickerData> stockTickerTable = builder.table(STOCK_TICKER_TABLE_TOPIC);
        KStream<String, StockTickerData> stockTickerStream = builder.stream(STOCK_TICKER_STREAM_TOPIC);

        stockTickerTable.toStream().print(Printed.<String, StockTickerData>toSysOut().withLabel("Stocks-KTable"));
        stockTickerStream.print(Printed.<String, StockTickerData>toSysOut().withLabel( "Stocks-KStream"));

        int numberCompanies = 3;
        int iterations = 3;

        MockDataProducer.produceStockTickerData(numberCompanies, iterations);

        KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), streamsConfig);
        LOG.info("KTable vs KStream output started");
        kafkaStreams.cleanUp();
        kafkaStreams.start();
        Thread.sleep(15000);
        LOG.info("Shutting down KTable vs KStream Application now");
        kafkaStreams.close();
        MockDataProducer.shutdown();

    }