Java Code Examples for org.apache.kafka.common.serialization.Serde#deserializer()

The following examples show how to use org.apache.kafka.common.serialization.Serde#deserializer(). Each example comes from an open-source project; the source file and license are noted above each listing.
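
In short, Serde#deserializer() hands back the Deserializer half of a Serde, mirroring Serde#serializer(). A minimal round trip with the built-in String serde (topic name here is just a placeholder):

// Serialize a value to bytes, then recover it with the matching deserializer.
Serde<String> serde = Serdes.String();
byte[] bytes = serde.serializer().serialize("my-topic", "hello");
String value = serde.deserializer().deserialize("my-topic", bytes); // "hello"
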
Example 1
Source File: ZMartProcessorApp.java    From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {
    MockDataProducer.producePurchaseData();


    StreamsConfig streamsConfig = new StreamsConfig(getProperties());
    Deserializer<String> stringDeserializer = Serdes.String().deserializer();
    Serializer<String> stringSerializer = Serdes.String().serializer();
    Serde<Purchase> purchaseSerde = StreamsSerdes.PurchaseSerde();
    Deserializer<Purchase> purchaseDeserializer = purchaseSerde.deserializer();
    Serializer<Purchase> purchaseSerializer = purchaseSerde.serializer();
    Serializer<PurchasePattern> patternSerializer = StreamsSerdes.PurchasePatternSerde().serializer();
    Serializer<RewardAccumulator> rewardsSerializer = StreamsSerdes.RewardAccumulatorSerde().serializer();

    Topology topology = new Topology();

    topology.addSource("txn-source", stringDeserializer, purchaseDeserializer, "transactions")
            .addProcessor("masking-processor",
                    () -> new MapValueProcessor<String, Purchase, Purchase>(p -> Purchase.builder(p).maskCreditCard().build()), "txn-source")
            .addProcessor("rewards-processor",
                    () -> new MapValueProcessor<String, Purchase, RewardAccumulator>(purchase -> RewardAccumulator.builder(purchase).build()), "txn-source")
            .addProcessor("patterns-processor",
                    () -> new MapValueProcessor<String, Purchase, PurchasePattern>(purchase -> PurchasePattern.builder(purchase).build()), "txn-source")
            .addSink("purchase-sink", "purchases", stringSerializer, purchaseSerializer, "masking-processor")
            .addSink("rewards-sink", "rewards", stringSerializer, rewardsSerializer, "rewards-processor")
            .addSink("patterns-sink", "patterns", stringSerializer, patternSerializer, "patterns-processor");


    topology.addProcessor("purchase-printer", new KStreamPrinter("purchase"), "masking-processor")
            .addProcessor("rewards-printer", new KStreamPrinter("rewards"), "rewards-processor")
            .addProcessor("patterns-printer", new KStreamPrinter("pattens"), "patterns-processor");

    KafkaStreams kafkaStreams = new KafkaStreams(topology, streamsConfig);
    System.out.println("ZMart Processor App Started");
    kafkaStreams.start();
    Thread.sleep(35000);
    System.out.println("Shutting down the ZMart Processor App now");
    kafkaStreams.close();
    MockDataProducer.shutdown();
}
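
StreamsSerdes.PurchaseSerde() is the book's own factory, not a Kafka API. A plausible sketch of how such a Serde is assembled with Serdes.serdeFrom(), assuming the JSON serializer/deserializer helper classes that appear in the later examples:

public static Serde<Purchase> PurchaseSerde() {
    // Hypothetical wiring; the real implementation lives in StreamsSerdes.
    return Serdes.serdeFrom(new JsonSerializer<>(), new JsonDeserializer<>(Purchase.class));
}
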
 
Example 2
Source File: StockPerformanceApplication.java    From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {


        StreamsConfig streamsConfig = new StreamsConfig(getProperties());
        Deserializer<String> stringDeserializer = Serdes.String().deserializer();
        Serializer<String> stringSerializer = Serdes.String().serializer();
        Serde<StockPerformance> stockPerformanceSerde = StreamsSerdes.StockPerformanceSerde();
        Serializer<StockPerformance> stockPerformanceSerializer = stockPerformanceSerde.serializer();
        Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();
        Deserializer<StockTransaction> stockTransactionDeserializer = stockTransactionSerde.deserializer();


        Topology topology = new Topology();
        String stocksStateStore = "stock-performance-store";
        double differentialThreshold = 0.02;

        KeyValueBytesStoreSupplier storeSupplier = Stores.inMemoryKeyValueStore(stocksStateStore);
        StoreBuilder<KeyValueStore<String, StockPerformance>> storeBuilder = Stores.keyValueStoreBuilder(storeSupplier, Serdes.String(), stockPerformanceSerde);


        topology.addSource("stocks-source", stringDeserializer, stockTransactionDeserializer,"stock-transactions")
                .addProcessor("stocks-processor", () -> new StockPerformanceProcessor(stocksStateStore, differentialThreshold), "stocks-source")
                .addStateStore(storeBuilder,"stocks-processor")
                .addSink("stocks-sink", "stock-performance", stringSerializer, stockPerformanceSerializer, "stocks-processor");


        topology.addProcessor("stocks-printer", new KStreamPrinter("StockPerformance"), "stocks-processor");

        KafkaStreams kafkaStreams = new KafkaStreams(topology, streamsConfig);
        MockDataProducer.produceStockTransactionsWithKeyFunction(50, 50, 25, StockTransaction::getSymbol);
        System.out.println("Stock Analysis App Started");
        kafkaStreams.cleanUp();
        kafkaStreams.start();
        Thread.sleep(70000);
        System.out.println("Shutting down the Stock Analysis App now");
        kafkaStreams.close();
        MockDataProducer.shutdown();
    }
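
Note that Stores.inMemoryKeyValueStore() keeps the store on-heap, so its contents vanish on restart (recoverable only via the changelog topic). If persistence were wanted instead, only the supplier line would need to change; a sketch:

// RocksDB-backed alternative; the keyValueStoreBuilder() wiring above is unchanged.
KeyValueBytesStoreSupplier storeSupplier = Stores.persistentKeyValueStore(stocksStateStore);
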
 
Example 3
Source File: KafkaConsumerRunner.java    From simplesource with Apache License 2.0
RunnableConsumer(
        Properties consumerConfig,
        Serde<R> responseSerde,
        String topicName,
        BiConsumer<KR, R> receiver,
        Function<UUID, KR> idConverter) {
    this.topicName = topicName;
    this.receiver = receiver;
    this.idConverter = idConverter;
    consumer = new KafkaConsumer<>(consumerConfig, Serdes.String().deserializer(), responseSerde.deserializer());
}
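
The constructor pairs a String key deserializer with responseSerde.deserializer() for values, so records come off the wire already typed as R. The run() loop is defined elsewhere in KafkaConsumerRunner.java; a plausible sketch (the closed flag and the Duration-based poll are assumptions):

@Override
public void run() {
    consumer.subscribe(Collections.singletonList(topicName));
    while (!closed.get()) {
        ConsumerRecords<String, R> records = consumer.poll(Duration.ofMillis(200));
        for (ConsumerRecord<String, R> record : records) {
            // record.value() is already an R, courtesy of responseSerde.deserializer()
            receiver.accept(idConverter.apply(UUID.fromString(record.key())), record.value());
        }
    }
    consumer.close();
}
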
 
Example 4
Source File: CodecsTest.java    From vertx-kafka-client with Apache License 2.0
private <T> void testSerializer(Class<T> type, T val) {
  final Serde<T> serde = VertxSerdes.serdeFrom(type);
  final Deserializer<T> deserializer = serde.deserializer();
  final Serializer<T> serializer = serde.serializer();

  assertEquals("Should get the original value after serialization and deserialization",
    val, deserializer.deserialize(topic, serializer.serialize(topic, val)));

  assertEquals("Should support null in serialization and deserialization",
    null, deserializer.deserialize(topic, serializer.serialize(topic, null)));
}
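
Typical invocations might look like the following (hypothetical values; this assumes VertxSerdes.serdeFrom() resolves a serde per class, covering Vert.x types such as Buffer and JsonObject alongside the standard ones):

// Round-trip a few representative types through their resolved serdes.
testSerializer(Buffer.class, Buffer.buffer("hello"));
testSerializer(JsonObject.class, new JsonObject().put("k", "v"));
testSerializer(Integer.class, 42);
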
 
Example 5
Source File: CogroupingStreamsTest.java    From kafka-tutorials with Apache License 2.0
@Test
public void cogroupingTest() throws IOException {
    final CogroupingStreams instance = new CogroupingStreams();
    final Properties envProps = instance.loadEnvProperties(TEST_CONFIG_FILE);

    final Properties streamProps = instance.buildStreamsProperties(envProps);

    final String appOneInputTopicName = envProps.getProperty("app-one.topic.name");
    final String appTwoInputTopicName = envProps.getProperty("app-two.topic.name");
    final String appThreeInputTopicName = envProps.getProperty("app-three.topic.name");
    final String totalResultOutputTopicName = envProps.getProperty("output.topic.name");
  
    final Topology topology = instance.buildTopology(envProps);
    try (final TopologyTestDriver testDriver = new TopologyTestDriver(topology, streamProps)) {

        final Serde<String> stringAvroSerde = CogroupingStreams.getPrimitiveAvroSerde(envProps, true);
        final SpecificAvroSerde<LoginEvent> loginEventSerde = CogroupingStreams.getSpecificAvroSerde(envProps);
        final SpecificAvroSerde<LoginRollup> rollupSerde = CogroupingStreams.getSpecificAvroSerde(envProps);

        final Serializer<String> keySerializer = stringAvroSerde.serializer();
        final Deserializer<String> keyDeserializer = stringAvroSerde.deserializer();
        final Serializer<LoginEvent> loginEventSerializer = loginEventSerde.serializer();


        final TestInputTopic<String, LoginEvent>  appOneInputTopic = testDriver.createInputTopic(appOneInputTopicName, keySerializer, loginEventSerializer);
        final TestInputTopic<String, LoginEvent>  appTwoInputTopic = testDriver.createInputTopic(appTwoInputTopicName, keySerializer, loginEventSerializer);
        final TestInputTopic<String, LoginEvent>  appThreeInputTopic = testDriver.createInputTopic(appThreeInputTopicName, keySerializer, loginEventSerializer);

        final TestOutputTopic<String, LoginRollup> outputTopic = testDriver.createOutputTopic(totalResultOutputTopicName, keyDeserializer, rollupSerde.deserializer());


        final List<LoginEvent> appOneEvents = new ArrayList<>();
        appOneEvents.add(LoginEvent.newBuilder().setAppId("one").setUserId("foo").setTime(5L).build());
        appOneEvents.add(LoginEvent.newBuilder().setAppId("one").setUserId("bar").setTime(6L).build());
        appOneEvents.add(LoginEvent.newBuilder().setAppId("one").setUserId("bar").setTime(7L).build());

        final List<LoginEvent> appTwoEvents = new ArrayList<>();
        appTwoEvents.add(LoginEvent.newBuilder().setAppId("two").setUserId("foo").setTime(5L).build());
        appTwoEvents.add(LoginEvent.newBuilder().setAppId("two").setUserId("foo").setTime(6L).build());
        appTwoEvents.add(LoginEvent.newBuilder().setAppId("two").setUserId("bar").setTime(7L).build());

        final List<LoginEvent> appThreeEvents = new ArrayList<>();
        appThreeEvents.add(LoginEvent.newBuilder().setAppId("three").setUserId("foo").setTime(5L).build());
        appThreeEvents.add(LoginEvent.newBuilder().setAppId("three").setUserId("foo").setTime(6L).build());
        appThreeEvents.add(LoginEvent.newBuilder().setAppId("three").setUserId("bar").setTime(7L).build());
        appThreeEvents.add(LoginEvent.newBuilder().setAppId("three").setUserId("bar").setTime(9L).build());

        final Map<String, Map<String, Long>> expectedEventRollups = new TreeMap<>();
        final Map<String, Long> expectedAppOneRollup = new HashMap<>();
        final LoginRollup expectedLoginRollup = new LoginRollup(expectedEventRollups);
        expectedAppOneRollup.put("foo", 1L);
        expectedAppOneRollup.put("bar", 2L);
        expectedEventRollups.put("one", expectedAppOneRollup);

        final Map<String, Long> expectedAppTwoRollup = new HashMap<>();
        expectedAppTwoRollup.put("foo", 2L);
        expectedAppTwoRollup.put("bar", 1L);
        expectedEventRollups.put("two", expectedAppTwoRollup);

        final Map<String, Long> expectedAppThreeRollup = new HashMap<>();
        expectedAppThreeRollup.put("foo", 2L);
        expectedAppThreeRollup.put("bar", 2L);
        expectedEventRollups.put("three", expectedAppThreeRollup);

        sendEvents(appOneEvents, appOneInputTopic);
        sendEvents(appTwoEvents, appTwoInputTopic);
        sendEvents(appThreeEvents, appThreeInputTopic);

        final List<LoginRollup> actualLoginEventResults = outputTopic.readValuesToList();
        final Map<String, Map<String, Long>> actualRollupMap = new HashMap<>();
        for (LoginRollup actualLoginEventResult : actualLoginEventResults) {
              actualRollupMap.putAll(actualLoginEventResult.getLoginByAppAndUser());
        }
        final LoginRollup actualLoginRollup = new LoginRollup(actualRollupMap);

        assertEquals(expectedLoginRollup, actualLoginRollup);
    }
}
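
sendEvents(...) is a helper defined elsewhere in the test class; a plausible implementation pipes each event into the given TestInputTopic, keyed by application id (the key choice is an assumption):

private void sendEvents(List<LoginEvent> events, TestInputTopic<String, LoginEvent> topic) {
    for (LoginEvent event : events) {
        // TestInputTopic serializes with the key/value serializers passed at creation.
        topic.pipeInput(event.getAppId().toString(), event);
    }
}
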
 
Example 6
Source File: StockPerformanceInteractiveQueryApplication.java    From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) {

        if (args.length < 2) {
            LOG.error("Need to specify host, port");
            System.exit(1);
        }

        String host = args[0];
        int port = Integer.parseInt(args[1]);
        final HostInfo hostInfo = new HostInfo(host, port);

        Properties properties = getProperties();
        properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, host + ":" + port);

        StreamsConfig streamsConfig = new StreamsConfig(properties);
        Serde<String> stringSerde = Serdes.String();
        Serde<Long> longSerde = Serdes.Long();
        Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();
        WindowedSerializer<String> windowedSerializer = new WindowedSerializer<>(stringSerde.serializer());
        WindowedDeserializer<String> windowedDeserializer = new WindowedDeserializer<>(stringSerde.deserializer());
        Serde<Windowed<String>> windowedSerde = Serdes.serdeFrom(windowedSerializer, windowedDeserializer);
        Serde<CustomerTransactions> customerTransactionsSerde = StreamsSerdes.CustomerTransactionsSerde();

        Aggregator<String, StockTransaction, Integer> sharesAggregator = (k, v, i) -> v.getShares() + i;

        StreamsBuilder builder = new StreamsBuilder();

        // data is already coming in keyed
        KStream<String, StockTransaction> stockTransactionKStream = builder.stream(MockDataProducer.STOCK_TRANSACTIONS_TOPIC, Consumed.with(stringSerde, stockTransactionSerde)
                .withOffsetResetPolicy(Topology.AutoOffsetReset.LATEST));


        stockTransactionKStream.map((k,v) -> KeyValue.pair(v.getSector(), v))
                .groupByKey(Serialized.with(stringSerde, stockTransactionSerde))
                .count(Materialized.as("TransactionsBySector"))
                .toStream()
                .peek((k,v) -> LOG.info("Transaction count for {} {}", k, v))
                .to("sector-transaction-counts", Produced.with(stringSerde, longSerde));
        
        stockTransactionKStream.map((k,v) -> KeyValue.pair(v.getCustomerId(), v))
                .groupByKey(Serialized.with(stringSerde, stockTransactionSerde))
                .windowedBy(SessionWindows.with(TimeUnit.MINUTES.toMillis(60)).until(TimeUnit.MINUTES.toMillis(120)))
                .aggregate(CustomerTransactions::new,(k, v, ct) -> ct.update(v),
                        (k, ct, other)-> ct.merge(other),
                        Materialized.<String, CustomerTransactions, SessionStore<Bytes, byte[]>>as("CustomerPurchaseSessions")
                                .withKeySerde(stringSerde).withValueSerde(customerTransactionsSerde))
                .toStream()
                .peek((k,v) -> LOG.info("Session info for {} {}", k, v))
                .to("session-transactions", Produced.with(windowedSerde, customerTransactionsSerde));


        stockTransactionKStream.groupByKey(Serialized.with(stringSerde, stockTransactionSerde))
                .windowedBy(TimeWindows.of(10000))
                .aggregate(() -> 0, sharesAggregator,
                        Materialized.<String, Integer, WindowStore<Bytes, byte[]>>as("NumberSharesPerPeriod")
                                .withKeySerde(stringSerde)
                                .withValueSerde(Serdes.Integer()))
                .toStream().peek((k,v)->LOG.info("key is {} value is {}", k, v))
                .to("transaction-count", Produced.with(windowedSerde,Serdes.Integer()));


        KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), streamsConfig);
        InteractiveQueryServer queryServer = new InteractiveQueryServer(kafkaStreams, hostInfo);
        StateRestoreHttpReporter restoreReporter = new StateRestoreHttpReporter(queryServer);

        queryServer.init();

        kafkaStreams.setGlobalStateRestoreListener(restoreReporter);

        kafkaStreams.setStateListener(((newState, oldState) -> {
            if (newState == KafkaStreams.State.RUNNING && oldState == KafkaStreams.State.REBALANCING) {
                LOG.info("Setting the query server to ready");
                queryServer.setReady(true);
            } else if (newState != KafkaStreams.State.RUNNING) {
                LOG.info("State not RUNNING, disabling the query server");
                queryServer.setReady(false);
            }
        }));

        kafkaStreams.setUncaughtExceptionHandler((t, e) -> {
            LOG.error("Thread {} had a fatal error {}", t, e, e);
            shutdown(kafkaStreams, queryServer);
        });


        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            shutdown(kafkaStreams, queryServer);
        }));

        LOG.info("Stock Analysis KStream Interactive Query App Started");
        kafkaStreams.cleanUp();
        kafkaStreams.start();
    }
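
shutdown(kafkaStreams, queryServer) is project code; a plausible shape simply closes both resources (the InteractiveQueryServer method name is an assumption):

private static void shutdown(KafkaStreams kafkaStreams, InteractiveQueryServer queryServer) {
    LOG.info("Shutting down the Stock Analysis Interactive Query App now");
    kafkaStreams.close();
    queryServer.stop(); // hypothetical; whatever tears down the embedded HTTP server
}
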
 
Example 7
Source File: PopsHopsApplication.java    From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {

        StreamsConfig streamsConfig = new StreamsConfig(getProperties());
        Deserializer<BeerPurchase> beerPurchaseDeserializer = new JsonDeserializer<>(BeerPurchase.class);
        Serde<String> stringSerde = Serdes.String();
        Deserializer<String> stringDeserializer = stringSerde.deserializer();
        Serializer<String> stringSerializer = stringSerde.serializer();
        Serializer<BeerPurchase> beerPurchaseSerializer = new JsonSerializer<>();

        Topology topology = new Topology();

        String domesticSalesSink = "domestic-beer-sales";
        String internationalSalesSink = "international-beer-sales";
        String purchaseSourceNodeName = "beer-purchase-source";
        String purchaseProcessor = "purchase-processor";


        BeerPurchaseProcessor beerProcessor = new BeerPurchaseProcessor(domesticSalesSink, internationalSalesSink);

        topology.addSource(LATEST,
                           purchaseSourceNodeName,
                           new UsePreviousTimeOnInvalidTimestamp(),
                           stringDeserializer,
                           beerPurchaseDeserializer,
                           Topics.POPS_HOPS_PURCHASES.topicName())
                .addProcessor(purchaseProcessor,
                              () -> beerProcessor,
                              purchaseSourceNodeName);

                //Uncomment these two lines and comment out the printer lines for writing to topics
               // .addSink(internationalSalesSink,"international-sales", stringSerializer, beerPurchaseSerializer, purchaseProcessor)
               // .addSink(domesticSalesSink,"domestic-sales", stringSerializer, beerPurchaseSerializer, purchaseProcessor);

        //You'll have to comment these lines out if you want to write to topics as they have the same node names
        topology.addProcessor(domesticSalesSink,
                              new KStreamPrinter("domestic-sales"),
                              purchaseProcessor);

        topology.addProcessor(internationalSalesSink,
                              new KStreamPrinter("international-sales"),
                              purchaseProcessor);

        KafkaStreams kafkaStreams = new KafkaStreams(topology, streamsConfig);
        MockDataProducer.produceBeerPurchases(5);
        System.out.println("Starting Pops-Hops Application now");
        kafkaStreams.cleanUp();
        kafkaStreams.start();
        Thread.sleep(70000);
        System.out.println("Shutting down Pops-Hops Application  now");
        kafkaStreams.close();
        MockDataProducer.shutdown();
    }
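
JsonSerializer and JsonDeserializer here are the book's own helper classes, not part of the Kafka API. A minimal Gson-based sketch of the deserializer side (Gson is an assumption about the underlying JSON library; configure/close are omitted since they default to no-ops in recent client versions):

public class JsonDeserializer<T> implements Deserializer<T> {
    private final Gson gson = new Gson();
    private final Class<T> type;

    public JsonDeserializer(Class<T> type) {
        this.type = type;
    }

    @Override
    public T deserialize(String topic, byte[] bytes) {
        if (bytes == null) {
            return null; // Kafka convention: null bytes map to a null value
        }
        return gson.fromJson(new String(bytes, StandardCharsets.UTF_8), type);
    }
}
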
 
Example 8
Source File: CoGroupingApplication.java    From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {


        StreamsConfig streamsConfig = new StreamsConfig(getProperties());
        Deserializer<String> stringDeserializer = Serdes.String().deserializer();
        Serializer<String> stringSerializer = Serdes.String().serializer();
        Serde<Tuple<List<ClickEvent>, List<StockTransaction>>> eventPerformanceTuple = StreamsSerdes.EventTransactionTupleSerde();
        Serializer<Tuple<List<ClickEvent>, List<StockTransaction>>> tupleSerializer = eventPerformanceTuple.serializer();
        Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();
        Deserializer<StockTransaction> stockTransactionDeserializer = stockTransactionSerde.deserializer();

        Serde<ClickEvent> clickEventSerde = StreamsSerdes.ClickEventSerde();
        Deserializer<ClickEvent> clickEventDeserializer = clickEventSerde.deserializer();


        Topology topology = new Topology();
        Map<String, String> changeLogConfigs = new HashMap<>();
        changeLogConfigs.put("retention.ms", "120000");
        changeLogConfigs.put("cleanup.policy", "compact,delete");


        KeyValueBytesStoreSupplier storeSupplier = Stores.persistentKeyValueStore(TUPLE_STORE_NAME);
        StoreBuilder<KeyValueStore<String, Tuple<List<ClickEvent>, List<StockTransaction>>>> storeBuilder =
                Stores.keyValueStoreBuilder(storeSupplier,
                        Serdes.String(),
                        eventPerformanceTuple).withLoggingEnabled(changeLogConfigs);

        topology.addSource("Txn-Source", stringDeserializer, stockTransactionDeserializer, "stock-transactions")
                .addSource("Events-Source", stringDeserializer, clickEventDeserializer, "events")
                .addProcessor("Txn-Processor", StockTransactionProcessor::new, "Txn-Source")
                .addProcessor("Events-Processor", ClickEventProcessor::new, "Events-Source")
                .addProcessor("CoGrouping-Processor", CogroupingProcessor::new, "Txn-Processor", "Events-Processor")
                .addStateStore(storeBuilder, "CoGrouping-Processor")
                .addSink("Tuple-Sink", "cogrouped-results", stringSerializer, tupleSerializer, "CoGrouping-Processor");

        topology.addProcessor("Print", new KStreamPrinter("Co-Grouping"), "CoGrouping-Processor");


        MockDataProducer.produceStockTransactionsAndDayTradingClickEvents(50, 100, 100, StockTransaction::getSymbol);

        KafkaStreams kafkaStreams = new KafkaStreams(topology, streamsConfig);
        System.out.println("Co-Grouping App Started");
        kafkaStreams.cleanUp();
        kafkaStreams.start();
        Thread.sleep(70000);
        System.out.println("Shutting down the Co-Grouping App now");
        kafkaStreams.close();
        MockDataProducer.shutdown();
    }
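
The store built by storeBuilder is attached to "CoGrouping-Processor", which would fetch it by name in init(); a minimal sketch of that lookup inside the processor (the surrounding CogroupingProcessor class is project code):

@Override
@SuppressWarnings("unchecked")
public void init(ProcessorContext context) {
    super.init(context);
    // The cast is safe because the store was registered with these serdes above.
    tupleStore = (KeyValueStore<String, Tuple<List<ClickEvent>, List<StockTransaction>>>)
            context.getStateStore(TUPLE_STORE_NAME);
}
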
 
Example 9
Source File: CoGroupingListeningExampleApplication.java    From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {

        StreamsConfig streamsConfig = new StreamsConfig(getProperties());
        Deserializer<String> stringDeserializer = Serdes.String().deserializer();
        Serializer<String> stringSerializer = Serdes.String().serializer();
        Serde<Tuple<List<ClickEvent>, List<StockTransaction>>> eventPerformanceTuple = StreamsSerdes.EventTransactionTupleSerde();
        Serializer<Tuple<List<ClickEvent>, List<StockTransaction>>> tupleSerializer = eventPerformanceTuple.serializer();
        Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();
        Deserializer<StockTransaction> stockTransactionDeserializer = stockTransactionSerde.deserializer();

        Serde<ClickEvent> clickEventSerde = StreamsSerdes.ClickEventSerde();
        Deserializer<ClickEvent> clickEventDeserializer = clickEventSerde.deserializer();


        Topology topology = new Topology();
        Map<String, String> changeLogConfigs = new HashMap<>();
        changeLogConfigs.put("retention.ms","120000" );
        changeLogConfigs.put("cleanup.policy", "compact,delete");

        KeyValueBytesStoreSupplier storeSupplier = Stores.persistentKeyValueStore(TUPLE_STORE_NAME);
        StoreBuilder<KeyValueStore<String, Tuple<List<ClickEvent>, List<StockTransaction>>>> builder = Stores.keyValueStoreBuilder(storeSupplier, Serdes.String(), eventPerformanceTuple);



        topology.addSource("Txn-Source", stringDeserializer, stockTransactionDeserializer, "stock-transactions")
                .addSource( "Events-Source", stringDeserializer, clickEventDeserializer, "events")
                .addProcessor("Txn-Processor", StockTransactionProcessor::new, "Txn-Source")
                .addProcessor("Events-Processor", ClickEventProcessor::new, "Events-Source")
                .addProcessor("CoGrouping-Processor", CogroupingProcessor::new, "Txn-Processor", "Events-Processor")
                .addStateStore(builder.withLoggingEnabled(changeLogConfigs), "CoGrouping-Processor")
                .addSink("Tuple-Sink", "cogrouped-results", stringSerializer, tupleSerializer, "CoGrouping-Processor");

        topology.addProcessor("Print", new KStreamPrinter("Co-Grouping"), "CoGrouping-Processor");


        MockDataProducer.produceStockTransactionsAndDayTradingClickEvents(50, 100, 100, StockTransaction::getSymbol);

        KafkaStreams kafkaStreams = new KafkaStreams(topology, streamsConfig);
        kafkaStreams.setGlobalStateRestoreListener(new LoggingStateRestoreListener());

        kafkaStreams.setUncaughtExceptionHandler((thread, exception) ->
            LOG.error("Thread [{}] encountered [{}]", thread.getName(), exception.getMessage())
        );

        kafkaStreams.setStateListener((newState, oldState) -> {
           if (oldState == KafkaStreams.State.REBALANCING && newState == KafkaStreams.State.RUNNING) {
               LOG.info("Topology Layout {}", topology.describe());
               LOG.info("Thread metadata {}", kafkaStreams.localThreadsMetadata());
           }
        });


        LOG.info("Co-Grouping App Started");
        kafkaStreams.cleanUp();
        kafkaStreams.start();

        Thread.sleep(70000);
        LOG.info("Shutting down the Co-Grouping metrics App now");
        kafkaStreams.close();
        MockDataProducer.shutdown();
    }
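
LoggingStateRestoreListener is project code; a plausible sketch implements Kafka's StateRestoreListener callbacks with simple logging (the log messages here are illustrative):

public class LoggingStateRestoreListener implements StateRestoreListener {

    @Override
    public void onRestoreStart(TopicPartition topicPartition, String storeName, long startingOffset, long endingOffset) {
        LOG.info("Restore started for store {} on {}: offsets {} to {}", storeName, topicPartition, startingOffset, endingOffset);
    }

    @Override
    public void onBatchRestored(TopicPartition topicPartition, String storeName, long batchEndOffset, long numRestored) {
        LOG.info("Restored batch of {} records for store {} up to offset {}", numRestored, storeName, batchEndOffset);
    }

    @Override
    public void onRestoreEnd(TopicPartition topicPartition, String storeName, long totalRestored) {
        LOG.info("Restore complete for store {} on {}: {} records", storeName, topicPartition, totalRestored);
    }
}
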