Java Code Examples for org.apache.kafka.streams.KafkaStreams#setStateListener()

The following examples show how to use org.apache.kafka.streams.KafkaStreams#setStateListener(). Each example is taken from an open-source project; the source file, project, and license are noted above the code.
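Before the examples, here is a minimal, self-contained sketch of the API itself (topic names and config values below are placeholders). KafkaStreams.StateListener is a functional interface with a single onChange(newState, oldState) callback, so a lambda can be used, and the listener must be registered before start() is called, while the client is still in the CREATED state.

import java.util.Properties;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;

public class StateListenerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "state-listener-demo"); // placeholder
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");   // placeholder

        // A trivial pass-through topology; any real topology works the same way.
        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("input-topic").to("output-topic");

        KafkaStreams streams = new KafkaStreams(builder.build(), props);

        // StateListener has one method, onChange(newState, oldState), so a
        // lambda suffices. Register it before start(), otherwise the client
        // throws IllegalStateException.
        streams.setStateListener((newState, oldState) ->
                System.out.printf("Kafka Streams state changed from %s to %s%n", oldState, newState));

        streams.start();
    }
}

A recurring pattern in the examples below is to treat the REBALANCING -> RUNNING transition as the signal that the application is ready to serve queries or traffic.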
Example 1
Source File: KafkaStreamsStarter.java    From football-events with MIT License
private void startStreams(KafkaStreams kafkaStreams) {
    CountDownLatch streamsStartedLatch = new CountDownLatch(1);

    // wait for consistent state
    kafkaStreams.setStateListener((newState, oldState) -> {
        logger.trace("Kafka Streams state has been changed from {} to {}", oldState, newState);

        if (oldState == KafkaStreams.State.REBALANCING && newState == KafkaStreams.State.RUNNING) {
            streamsStartedLatch.countDown();
        }
    });
    kafkaStreams.cleanUp();
    kafkaStreams.start();
    long timeout = System.currentTimeMillis() + streamsStartupTimeout;

    try {
        streamsStartedLatch.await(timeout - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        // Restore the interrupt flag; interrupted() only reads and clears it.
        Thread.currentThread().interrupt();
    }

    KafkaStreams.State state = kafkaStreams.state();

    if (state != KafkaStreams.State.RUNNING) {
        logger.error("Unable to start Kafka Streams in {} ms, the current state is {}",
                streamsStartupTimeout, state);
        System.exit(1);
    }
}
 
Example 2
Source File: KafkaStreamsService.java    From emodb with Apache License 2.0
@Override
protected final void doStart() {
    _streams = new KafkaStreams(topology(), _streamsConfiguration);
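    // Record the first uncaught error, flag the failure, and close the client
    // with a near-zero timeout so the handler thread does not block on shutdown.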
    _streams.setUncaughtExceptionHandler((thread, throwable) -> {
        _uncaughtException.compareAndSet(null, throwable);
        _fatalErrorEncountered.set(true);
        _streamsExceptionMeter.mark();
        _streams.close(Duration.ofMillis(1));
    });
    _streams.setStateListener(this);
    _streams.start();
    notifyStarted();
}
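Note that Example 2 passes this to setStateListener(), so the enclosing service class itself implements the listener interface. A hypothetical minimal shape of such a class (the class name and body below are illustrative, not taken from emodb):

import org.apache.kafka.streams.KafkaStreams;

public class SelfListeningStreamsService implements KafkaStreams.StateListener {

    @Override
    public void onChange(KafkaStreams.State newState, KafkaStreams.State oldState) {
        // React to lifecycle transitions, e.g. mark the service healthy on
        // RUNNING or trigger failure handling on ERROR / NOT_RUNNING.
    }
}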
 
Example 3
Source File: StockPerformanceInteractiveQueryApplication.java    From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) {

        if(args.length < 2){
            LOG.error("Need to specify host, port");
            System.exit(1);
        }

        String host = args[0];
        int port = Integer.parseInt(args[1]);
        final HostInfo hostInfo = new HostInfo(host, port);

        Properties properties = getProperties();
        properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, host+":"+port);

        StreamsConfig streamsConfig = new StreamsConfig(properties);
        Serde<String> stringSerde = Serdes.String();
        Serde<Long> longSerde = Serdes.Long();
        Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();
        WindowedSerializer<String> windowedSerializer = new WindowedSerializer<>(stringSerde.serializer());
        WindowedDeserializer<String> windowedDeserializer = new WindowedDeserializer<>(stringSerde.deserializer());
        Serde<Windowed<String>> windowedSerde = Serdes.serdeFrom(windowedSerializer, windowedDeserializer);
        Serde<CustomerTransactions> customerTransactionsSerde = StreamsSerdes.CustomerTransactionsSerde();

        Aggregator<String, StockTransaction, Integer> sharesAggregator = (k, v, i) -> v.getShares() + i;

        StreamsBuilder builder = new StreamsBuilder();

        // data is already coming in keyed
        KStream<String, StockTransaction> stockTransactionKStream = builder.stream(MockDataProducer.STOCK_TRANSACTIONS_TOPIC, Consumed.with(stringSerde, stockTransactionSerde)
                .withOffsetResetPolicy(Topology.AutoOffsetReset.LATEST));


        stockTransactionKStream.map((k,v) -> KeyValue.pair(v.getSector(), v))
                .groupByKey(Serialized.with(stringSerde, stockTransactionSerde))
                .count(Materialized.as("TransactionsBySector"))
                .toStream()
                .peek((k,v) -> LOG.info("Transaction count for {} {}", k, v))
                .to("sector-transaction-counts", Produced.with(stringSerde, longSerde));
        
        stockTransactionKStream.map((k,v) -> KeyValue.pair(v.getCustomerId(), v))
                .groupByKey(Serialized.with(stringSerde, stockTransactionSerde))
                .windowedBy(SessionWindows.with(TimeUnit.MINUTES.toMillis(60)).until(TimeUnit.MINUTES.toMillis(120)))
                .aggregate(CustomerTransactions::new,(k, v, ct) -> ct.update(v),
                        (k, ct, other)-> ct.merge(other),
                        Materialized.<String, CustomerTransactions, SessionStore<Bytes, byte[]>>as("CustomerPurchaseSessions")
                                .withKeySerde(stringSerde).withValueSerde(customerTransactionsSerde))
                .toStream()
                .peek((k,v) -> LOG.info("Session info for {} {}", k, v))
                .to("session-transactions", Produced.with(windowedSerde, customerTransactionsSerde));


        stockTransactionKStream.groupByKey(Serialized.with(stringSerde, stockTransactionSerde))
                .windowedBy(TimeWindows.of(10000))
                .aggregate(() -> 0, sharesAggregator,
                        Materialized.<String, Integer, WindowStore<Bytes, byte[]>>as("NumberSharesPerPeriod")
                                .withKeySerde(stringSerde)
                                .withValueSerde(Serdes.Integer()))
                .toStream().peek((k,v)->LOG.info("key is {} value is {}", k, v))
                .to("transaction-count", Produced.with(windowedSerde,Serdes.Integer()));


        KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), streamsConfig);
        InteractiveQueryServer queryServer = new InteractiveQueryServer(kafkaStreams, hostInfo);
        StateRestoreHttpReporter restoreReporter = new StateRestoreHttpReporter(queryServer);

        queryServer.init();

        kafkaStreams.setGlobalStateRestoreListener(restoreReporter);

        kafkaStreams.setStateListener((newState, oldState) -> {
            if (newState == KafkaStreams.State.RUNNING && oldState == KafkaStreams.State.REBALANCING) {
                LOG.info("Setting the query server to ready");
                queryServer.setReady(true);
            } else if (newState != KafkaStreams.State.RUNNING) {
                LOG.info("State not RUNNING, disabling the query server");
                queryServer.setReady(false);
            }
        });

        kafkaStreams.setUncaughtExceptionHandler((t, e) -> {
            LOG.error("Thread {} had a fatal error {}", t, e, e);
            shutdown(kafkaStreams, queryServer);
        });


        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            shutdown(kafkaStreams, queryServer);
        }));

        LOG.info("Stock Analysis KStream Interactive Query App Started");
        kafkaStreams.cleanUp();
        kafkaStreams.start();
    }
 
Example 4
Source File: CoGroupingListeningExampleApplication.java    From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {

        StreamsConfig streamsConfig = new StreamsConfig(getProperties());
        Deserializer<String> stringDeserializer = Serdes.String().deserializer();
        Serializer<String> stringSerializer = Serdes.String().serializer();
        Serde<Tuple<List<ClickEvent>, List<StockTransaction>>> eventPerformanceTuple = StreamsSerdes.EventTransactionTupleSerde();
        Serializer<Tuple<List<ClickEvent>, List<StockTransaction>>> tupleSerializer = eventPerformanceTuple.serializer();
        Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();
        Deserializer<StockTransaction> stockTransactionDeserializer = stockTransactionSerde.deserializer();

        Serde<ClickEvent> clickEventSerde = StreamsSerdes.ClickEventSerde();
        Deserializer<ClickEvent> clickEventDeserializer = clickEventSerde.deserializer();


        Topology topology = new Topology();
        Map<String, String> changeLogConfigs = new HashMap<>();
        changeLogConfigs.put("retention.ms","120000" );
        changeLogConfigs.put("cleanup.policy", "compact,delete");

        KeyValueBytesStoreSupplier storeSupplier = Stores.persistentKeyValueStore(TUPLE_STORE_NAME);
        StoreBuilder<KeyValueStore<String, Tuple<List<ClickEvent>, List<StockTransaction>>>> builder = Stores.keyValueStoreBuilder(storeSupplier, Serdes.String(), eventPerformanceTuple);



        topology.addSource("Txn-Source", stringDeserializer, stockTransactionDeserializer, "stock-transactions")
                .addSource( "Events-Source", stringDeserializer, clickEventDeserializer, "events")
                .addProcessor("Txn-Processor", StockTransactionProcessor::new, "Txn-Source")
                .addProcessor("Events-Processor", ClickEventProcessor::new, "Events-Source")
                .addProcessor("CoGrouping-Processor", CogroupingProcessor::new, "Txn-Processor", "Events-Processor")
                .addStateStore(builder.withLoggingEnabled(changeLogConfigs), "CoGrouping-Processor")
                .addSink("Tuple-Sink", "cogrouped-results", stringSerializer, tupleSerializer, "CoGrouping-Processor");

        topology.addProcessor("Print", new KStreamPrinter("Co-Grouping"), "CoGrouping-Processor");


        MockDataProducer.produceStockTransactionsAndDayTradingClickEvents(50, 100, 100, StockTransaction::getSymbol);

        KafkaStreams kafkaStreams = new KafkaStreams(topology, streamsConfig);
        kafkaStreams.setGlobalStateRestoreListener(new LoggingStateRestoreListener());

        kafkaStreams.setUncaughtExceptionHandler((thread, exception) ->
            LOG.error("Thread [{}] encountered [{}]", thread.getName(), exception.getMessage())
        );

        kafkaStreams.setStateListener((newState, oldState) -> {
           if (oldState == KafkaStreams.State.REBALANCING && newState == KafkaStreams.State.RUNNING) {
               LOG.info("Topology Layout {}", topology.describe());
               LOG.info("Thread metadata {}", kafkaStreams.localThreadsMetadata());
           }
        });


        LOG.info("Co-Grouping App Started");
        kafkaStreams.cleanUp();
        kafkaStreams.start();

        Thread.sleep(70000);
        LOG.info("Shutting down the Co-Grouping metrics App now");
        kafkaStreams.close();
        MockDataProducer.shutdown();
    }
 
Example 5
Source File: KTableAggDemo.java    From Kafka-Streams-Real-time-Stream-Processing with The Unlicense
public static void main(String[] args) {

        if (args.length < 2) {
            System.out.println("Please provide command line arguments: hostname port");
            System.exit(-1);
        }
        String hostName = args[0];
        int portNumber = Integer.parseInt(args[1]);

        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, AppConfigs.applicationID);
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, AppConfigs.bootstrapServers);
        props.put(StreamsConfig.STATE_DIR_CONFIG, AppConfigs.stateStoreLocation);

        props.put(StreamsConfig.APPLICATION_SERVER_CONFIG, hostName
            + ":"
            + portNumber);

        StreamsBuilder streamsBuilder = new StreamsBuilder();
        KTable<String, Employee> KS0 = streamsBuilder.table(AppConfigs.topicName,
            Consumed.with(AppSerdes.String(), AppSerdes.Employee()));

        KGroupedTable<String, Employee> KGS1 = KS0.groupBy(
            (k, v) -> KeyValue.pair(v.getDepartment(), v),
            Grouped.with(AppSerdes.String(), AppSerdes.Employee()));

        KTable<String, DepartmentAggregate> KT2 = KGS1.aggregate(
            //Initializer
            () -> new DepartmentAggregate()
                .withEmployeeCount(0)
                .withTotalSalary(0)
                .withAvgSalary(0D),
            //Adder
            (k, v, aggV) -> new DepartmentAggregate()
                .withEmployeeCount(aggV.getEmployeeCount() + 1)
                .withTotalSalary(aggV.getTotalSalary() + v.getSalary())
                .withAvgSalary((aggV.getTotalSalary() + v.getSalary())
                    / (aggV.getEmployeeCount() + 1D)),
            //Subtractor
            (k, v, aggV) -> new DepartmentAggregate()
                .withEmployeeCount(aggV.getEmployeeCount() - 1)
                .withTotalSalary(aggV.getTotalSalary() - v.getSalary())
                .withAvgSalary((aggV.getTotalSalary() - v.getSalary())
                    / (aggV.getEmployeeCount() - 1D)),
            Materialized.<String, DepartmentAggregate, KeyValueStore<Bytes,
                byte[]>>as(AppConfigs.aggStateStoreName)
                .withKeySerde(AppSerdes.String())
                .withValueSerde(AppSerdes.DepartmentAggregate())
        );

        KT2.toStream().foreach(
            (k, v) -> System.out.println("Key = " + k + " Value = " + v.toString()));

        KafkaStreams streams = new KafkaStreams(
            streamsBuilder.build(), props);

        AppRestService queryServer = new AppRestService(
            streams,
            hostName,
            portNumber);

        streams.setStateListener((newState, oldState) -> {
            logger.info("State Changing to " + newState + " from " + oldState);
            queryServer.setActive(
                newState == KafkaStreams.State.RUNNING &&
                    oldState == KafkaStreams.State.REBALANCING
            );
        });

        streams.start();
        queryServer.start();

        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            queryServer.stop();
            streams.close();
        }));
    }
 
Example 6
Source File: RuntimeVerticle.java    From kiqr with Apache License 2.0
protected KafkaStreams createAndStartStream() {
    streams = new KafkaStreams(builder, props);

    streams.setStateListener((newState, oldState) -> {
        vertx.eventBus().publish(Config.CLUSTER_STATE_BROADCAST_ADDRESS, newState.toString());
        LOG.info("State change in KafkaStreams recorded: oldstate=" + oldState + ", newstate=" + newState);
        if (listener != null) listener.onChange(newState, oldState);
    });

    streams.start();
    return streams;
}
 
Example 7
Source File: ZMartKafkaStreamsAdvancedReqsMetricsApp.java    From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {

        StreamsConfig streamsConfig = new StreamsConfig(getProperties());

        Serde<Purchase> purchaseSerde = StreamsSerdes.PurchaseSerde();
        Serde<PurchasePattern> purchasePatternSerde = StreamsSerdes.PurchasePatternSerde();
        Serde<RewardAccumulator> rewardAccumulatorSerde = StreamsSerdes.RewardAccumulatorSerde();
        Serde<String> stringSerde = Serdes.String();

        StreamsBuilder streamsBuilder = new StreamsBuilder();


        /**
         * Previous requirements
         */
        KStream<String,Purchase> purchaseKStream = streamsBuilder.stream("transactions", Consumed.with(stringSerde, purchaseSerde))
                .mapValues(p -> Purchase.builder(p).maskCreditCard().build());

        KStream<String, PurchasePattern> patternKStream = purchaseKStream.mapValues(purchase -> PurchasePattern.builder(purchase).build());

        patternKStream.to("patterns", Produced.with(stringSerde,purchasePatternSerde));


        KStream<String, RewardAccumulator> rewardsKStream = purchaseKStream.mapValues(purchase -> RewardAccumulator.builder(purchase).build());

        rewardsKStream.to("rewards", Produced.with(stringSerde,rewardAccumulatorSerde));


        /**
         *  Selecting a key for storage and filtering out low dollar purchases
         */

        KeyValueMapper<String, Purchase, Long> purchaseDateAsKey = (key, purchase) -> purchase.getPurchaseDate().getTime();

        KStream<Long, Purchase> filteredKStream = purchaseKStream.filter((key, purchase) -> purchase.getPrice() > 5.00).selectKey(purchaseDateAsKey);

        filteredKStream.to("purchases", Produced.with(Serdes.Long(),purchaseSerde));


        /**
         * Branching stream for separating out purchases in new departments to their own topics
         */
        Predicate<String, Purchase> isCoffee = (key, purchase) -> purchase.getDepartment().equalsIgnoreCase("coffee");
        Predicate<String, Purchase> isElectronics = (key, purchase) -> purchase.getDepartment().equalsIgnoreCase("electronics");

        int coffee = 0;
        int electronics = 1;

        KStream<String, Purchase>[] kstreamByDept = purchaseKStream.branch(isCoffee, isElectronics);

        kstreamByDept[coffee].to("coffee", Produced.with(stringSerde, purchaseSerde));

        kstreamByDept[electronics].to("electronics", Produced.with(stringSerde, purchaseSerde));



        /**
         * Security Requirements to record transactions for certain employee
         */
        ForeachAction<String, Purchase> purchaseForeachAction = (key, purchase) -> { };

        
        purchaseKStream.filter((key, purchase) -> purchase.getEmployeeId().equals("000000")).foreach(purchaseForeachAction);

        Topology topology = streamsBuilder.build();


        KafkaStreams kafkaStreams = new KafkaStreams(topology, streamsConfig);

        KafkaStreams.StateListener stateListener = (newState, oldState) -> {
            if (newState == KafkaStreams.State.RUNNING && oldState == KafkaStreams.State.REBALANCING) {
                LOG.info("Application has gone from REBALANCING to RUNNING ");
                LOG.info("Topology Layout {}", streamsBuilder.build().describe());
            }

            if (newState == KafkaStreams.State.REBALANCING) {
                LOG.info("Application is entering REBALANCING phase");
            }
        };

        kafkaStreams.setStateListener(stateListener);
        LOG.info("ZMart Advanced Requirements Metrics Application Started");
        kafkaStreams.cleanUp();
        CountDownLatch stopSignal = new CountDownLatch(1);

        Runtime.getRuntime().addShutdownHook(new Thread(()-> {
            LOG.info("Shutting down the Kafka Streams Application now");
            kafkaStreams.close();
            MockDataProducer.shutdown();
            stopSignal.countDown();
        }));



        MockDataProducer.producePurchaseData(DataGenerator.DEFAULT_NUM_PURCHASES, 250, DataGenerator.NUMBER_UNIQUE_CUSTOMERS);
        kafkaStreams.start();

        stopSignal.await();
        LOG.info("All done now, good-bye");
    }