org.apache.kafka.streams.state.StoreBuilder Java Examples

The following examples show how to use org.apache.kafka.streams.state.StoreBuilder. Each example is drawn from an open-source project; the source file and license are noted above it.
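Before the individual examples, here is a minimal sketch of the pattern they all share: describe a physical store with a supplier from Stores, wrap it in a StoreBuilder together with the key and value serdes, and register it with the topology. The store name, serde choices, and class name below are illustrative only, not taken from any of the projects.

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;

public class StoreBuilderSketch {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();

        // A supplier describes the physical store (a persistent RocksDB store here);
        // the StoreBuilder adds serdes and options such as caching or changelogging.
        StoreBuilder<KeyValueStore<String, Long>> storeBuilder = Stores.keyValueStoreBuilder(
                Stores.persistentKeyValueStore("example-store"),   // hypothetical store name
                Serdes.String(),
                Serdes.Long());

        // Registering the builder makes "example-store" available to any processor
        // or transformer that lists it by name, via ProcessorContext#getStateStore.
        builder.addStateStore(storeBuilder);
    }
}

Processors and transformers that declare the store name (as the examples below do) can then read and write it as part of stateful processing.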
Example #1
Source File: KafkaStreamsStreamListenerSetupMethodOrchestrator.java    From spring-cloud-stream-binder-kafka with Apache License 2.0
private KStream<?, ?> getkStream(String inboundName,
								KafkaStreamsStateStoreProperties storeSpec,
								BindingProperties bindingProperties,
								KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties, StreamsBuilder streamsBuilder,
								Serde<?> keySerde, Serde<?> valueSerde,
								Topology.AutoOffsetReset autoOffsetReset, boolean firstBuild) {
	if (storeSpec != null) {
		StoreBuilder storeBuilder = buildStateStore(storeSpec);
		streamsBuilder.addStateStore(storeBuilder);
		if (LOG.isInfoEnabled()) {
			LOG.info("state store " + storeBuilder.name() + " added to topology");
		}
	}
	return getKStream(inboundName, bindingProperties, kafkaStreamsConsumerProperties, streamsBuilder,
			keySerde, valueSerde, autoOffsetReset, firstBuild);
}
 
Example #2
Source File: AbstractKafkaStreamsBinderProcessor.java    From spring-cloud-stream-binder-kafka with Apache License 2.0
@SuppressWarnings("rawtypes")
private void addStateStoreBeans(StreamsBuilder streamsBuilder) {
	try {
		final Map<String, StoreBuilder> storeBuilders = applicationContext.getBeansOfType(StoreBuilder.class);
		if (!CollectionUtils.isEmpty(storeBuilders)) {
			storeBuilders.values().forEach(storeBuilder -> {
				streamsBuilder.addStateStore(storeBuilder);
				if (LOG.isInfoEnabled()) {
					LOG.info("state store " + storeBuilder.name() + " added to topology");
				}
			});
		}
	}
	catch (Exception e) {
		// Pass through.
	}
}
 
Example #3
Source File: FindDistinctEvents.java    From kafka-tutorials with Apache License 2.0
public Topology buildTopology(Properties envProps,
                              final SpecificAvroSerde<Click> clicksSerde) {
    final StreamsBuilder builder = new StreamsBuilder();

    final String inputTopic = envProps.getProperty("input.topic.name");
    final String outputTopic = envProps.getProperty("output.topic.name");

    // How long we "remember" an event.  During this time, any incoming duplicates of the event
    // will be, well, dropped, thereby de-duplicating the input data.
    //
    // The actual value depends on your use case.  To reduce memory and disk usage, you could
    // decrease the size to purge old windows more frequently at the cost of potentially missing out
    // on de-duplicating late-arriving records.
    final Duration windowSize = Duration.ofMinutes(2);

    // retention period must be at least window size -- for this use case, we don't need a longer retention period
    // and thus just use the window size as retention time
    final Duration retentionPeriod = windowSize;

    final StoreBuilder<WindowStore<String, Long>> dedupStoreBuilder = Stores.windowStoreBuilder(
            Stores.persistentWindowStore(storeName,
                    retentionPeriod,
                    windowSize,
                    false
            ),
            Serdes.String(),
            Serdes.Long());

    builder.addStateStore(dedupStoreBuilder);

    builder
            .stream(inputTopic, Consumed.with(Serdes.String(), clicksSerde))
            .transformValues(() -> new DeduplicationTransformer<>(windowSize.toMillis(), (key, value) -> value.getIp()), storeName)
            .filter((k, v) -> v != null)
            .to(outputTopic, Produced.with(Serdes.String(), clicksSerde));

    return builder.build();
}
 
Example #4
Source File: KStreamsTopologyDescriptionParserTest.java    From netbeans-mmd-plugin with Apache License 2.0
@Test
public void testKsDsl2() {

  final String storeName = "stateStore";
  final String globalStoreName = "glob-stateStore";
  final StreamsBuilder builder = new StreamsBuilder();
  final StoreBuilder<KeyValueStore<String, String>> storeBuilder = Stores.keyValueStoreBuilder(
      Stores.persistentKeyValueStore(storeName),
      Serdes.String(),
      Serdes.String());
  final StoreBuilder<KeyValueStore<String, String>> globalStoreBuilder = Stores.keyValueStoreBuilder(
      Stores.persistentKeyValueStore(globalStoreName),
      Serdes.String(),
      Serdes.String());
  builder.addGlobalStore(globalStoreBuilder, "some-global-topic", Consumed.with(Serdes.Short(), Serdes.String(), new WallclockTimestampExtractor(), Topology.AutoOffsetReset.EARLIEST), FakeProcessor::new);
  builder.addStateStore(storeBuilder);
  builder.<String, String>stream("input")
      .filter((k, v) -> v.endsWith("FOO"))
      .through("some-through-topic")
      .transformValues(() -> new SimpleValueTransformer(storeName), storeName)
      .to("output");

  final Topology topology = builder.build();
  final String text = topology.describe().toString();
  System.out.println(text);

  final KStreamsTopologyDescriptionParser parsed = new KStreamsTopologyDescriptionParser(text);
  assertEquals(8, parsed.size());
}
 
Example #5
Source File: KafkaStreamsFunctionStateStoreTests.java    From spring-cloud-stream-binder-kafka with Apache License 2.0
@Bean
public StoreBuilder otherStore() {
	return Stores.windowStoreBuilder(
			Stores.persistentWindowStore("other-store",
					Duration.ofSeconds(3), Duration.ofSeconds(3), false), Serdes.Long(),
			Serdes.Long());
}
 
Example #6
Source File: KafkaStreamsStateStoreIntegrationTests.java    From spring-cloud-stream-binder-kafka with Apache License 2.0
@Bean
public StoreBuilder mystore() {
	return Stores.windowStoreBuilder(
			Stores.persistentWindowStore("mystate",
					3L, 3, 3L, false), Serdes.String(),
			Serdes.String());
}
 
Example #7
Source File: StreamsUtils.java    From football-events with MIT License
public static <D, E extends Event> void addStore(Topology topology, Class<D> domainType, String store,
        Class<E>... eventTypes) {
    StoreBuilder<KeyValueStore<String, D>> matchStoreBuilder = Stores.keyValueStoreBuilder(
            Stores.persistentKeyValueStore(store), Serdes.String(), new JsonPojoSerde<D>(domainType))
            .withLoggingDisabled();

    String[] processorNames = Stream.of(eventTypes)
        .map(event -> event.getSimpleName() + "Process")
        .collect(Collectors.toList()).toArray(new String[eventTypes.length]);

    topology.addStateStore(matchStoreBuilder, processorNames);
}
 
Example #8
Source File: StockPerformanceApplication.java    From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {


        StreamsConfig streamsConfig = new StreamsConfig(getProperties());
        Deserializer<String> stringDeserializer = Serdes.String().deserializer();
        Serializer<String> stringSerializer = Serdes.String().serializer();
        Serde<StockPerformance> stockPerformanceSerde = StreamsSerdes.StockPerformanceSerde();
        Serializer<StockPerformance> stockPerformanceSerializer = stockPerformanceSerde.serializer();
        Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();
        Deserializer<StockTransaction> stockTransactionDeserializer = stockTransactionSerde.deserializer();


        Topology topology = new Topology();
        String stocksStateStore = "stock-performance-store";
        double differentialThreshold = 0.02;

        KeyValueBytesStoreSupplier storeSupplier = Stores.inMemoryKeyValueStore(stocksStateStore);
        StoreBuilder<KeyValueStore<String, StockPerformance>> storeBuilder = Stores.keyValueStoreBuilder(storeSupplier, Serdes.String(), stockPerformanceSerde);


        topology.addSource("stocks-source", stringDeserializer, stockTransactionDeserializer,"stock-transactions")
                .addProcessor("stocks-processor", () -> new StockPerformanceProcessor(stocksStateStore, differentialThreshold), "stocks-source")
                .addStateStore(storeBuilder,"stocks-processor")
                .addSink("stocks-sink", "stock-performance", stringSerializer, stockPerformanceSerializer, "stocks-processor");


        topology.addProcessor("stocks-printer", new KStreamPrinter("StockPerformance"), "stocks-processor");

        KafkaStreams kafkaStreams = new KafkaStreams(topology, streamsConfig);
        MockDataProducer.produceStockTransactionsWithKeyFunction(50, 50, 25, StockTransaction::getSymbol);
        System.out.println("Stock Analysis App Started");
        kafkaStreams.cleanUp();
        kafkaStreams.start();
        Thread.sleep(70000);
        System.out.println("Shutting down the Stock Analysis App now");
        kafkaStreams.close();
        MockDataProducer.shutdown();
    }
 
Example #9
Source File: StockPerformanceStreamsAndProcessorApplication.java    From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {


        StreamsConfig streamsConfig = new StreamsConfig(getProperties());
        Serde<String> stringSerde = Serdes.String();
        Serde<StockPerformance> stockPerformanceSerde = StreamsSerdes.StockPerformanceSerde();
        Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();


        StreamsBuilder builder = new StreamsBuilder();

        String stocksStateStore = "stock-performance-store";
        double differentialThreshold = 0.02;

        KeyValueBytesStoreSupplier storeSupplier = Stores.lruMap(stocksStateStore, 100);
        StoreBuilder<KeyValueStore<String, StockPerformance>> storeBuilder = Stores.keyValueStoreBuilder(storeSupplier, Serdes.String(), stockPerformanceSerde);

        builder.addStateStore(storeBuilder);

        builder.stream("stock-transactions", Consumed.with(stringSerde, stockTransactionSerde))
                .transform(() -> new StockPerformanceTransformer(stocksStateStore, differentialThreshold), stocksStateStore)
                .print(Printed.<String, StockPerformance>toSysOut().withLabel("StockPerformance"));

        //Uncomment this line and comment out the line above for writing to a topic
        //.to(stringSerde, stockPerformanceSerde, "stock-performance");


        KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), streamsConfig);
        MockDataProducer.produceStockTransactionsWithKeyFunction(50, 50, 25, StockTransaction::getSymbol);
        System.out.println("Stock Analysis KStream/Process API App Started");
        kafkaStreams.cleanUp();
        kafkaStreams.start();
        Thread.sleep(70000);
        System.out.println("Shutting down the Stock KStream/Process API Analysis App now");
        kafkaStreams.close();
        MockDataProducer.shutdown();
    }
 
Example #10
Source File: StockPerformanceStreamsAndProcessorMultipleValuesApplication.java    From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {


        StreamsConfig streamsConfig = new StreamsConfig(getProperties());
        Serde<String> stringSerde = Serdes.String();
        Serde<StockPerformance> stockPerformanceSerde = StreamsSerdes.StockPerformanceSerde();
        Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();


        StreamsBuilder builder = new StreamsBuilder();

        String stocksStateStore = "stock-performance-store";
        double differentialThreshold = 0.05;

        TransformerSupplier<String, StockTransaction, KeyValue<String, List<KeyValue<String, StockPerformance>>>> transformerSupplier =
                () -> new StockPerformanceMultipleValuesTransformer(stocksStateStore, differentialThreshold);

        KeyValueBytesStoreSupplier storeSupplier = Stores.lruMap(stocksStateStore, 100);
        StoreBuilder<KeyValueStore<String, StockPerformance>> storeBuilder = Stores.keyValueStoreBuilder(storeSupplier, Serdes.String(), stockPerformanceSerde);

        builder.addStateStore(storeBuilder);

        builder.stream("stock-transactions", Consumed.with(stringSerde, stockTransactionSerde))
                .transform(transformerSupplier, stocksStateStore).flatMap((dummyKey, valueList) -> valueList)
                .print(Printed.<String, StockPerformance>toSysOut().withLabel("StockPerformance"));
                //.to(stringSerde, stockPerformanceSerde, "stock-performance");


        KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), streamsConfig);
        MockDataProducer.produceStockTransactionsWithKeyFunction(50, 50, 25, StockTransaction::getSymbol);
        System.out.println("Stock Analysis KStream/Process API App Started");
        kafkaStreams.cleanUp();
        kafkaStreams.start();
        Thread.sleep(70000);
        System.out.println("Shutting down the Stock KStream/Process API Analysis App now");
        kafkaStreams.close();
        MockDataProducer.shutdown();
    }
 
Example #11
Source File: KafkaAnomalyDetectorMapper.java    From adaptive-alerting with Apache License 2.0
@Override
protected Topology buildTopology() {
    val config = getConfig();
    val inputTopic = config.getInputTopic();
    val defaultOutputTopic = config.getOutputTopic();
    log.info("Initializing: inputTopic={}, defaultOutputTopic={}", inputTopic, defaultOutputTopic);

    val builder = new StreamsBuilder();

    // create store
    StoreBuilder<KeyValueStore<String, MetricData>> keyValueStoreBuilder =
            Stores.keyValueStoreBuilder(Stores.inMemoryKeyValueStore(STATE_STORE_NAME),
                    Serdes.String(),
                    new MetricDataJsonSerde())
                    .withLoggingDisabled();
    // register store
    builder.addStateStore(keyValueStoreBuilder);

    // Dynamically choose the Kafka topic depending on the consumer id.
    final TopicNameExtractor<String, MappedMetricData> kafkaTopicNameExtractor = (key, mappedMetricData, recordContext) -> {
        final String consumerId = mappedMetricData.getConsumerId();
        if (DEFAULT_CONSUMER_ID.equals(consumerId)) {
            return defaultOutputTopic;
        }
        return defaultOutputTopic + "-" + consumerId;
    };

    final KStream<String, MetricData> stream = builder.stream(inputTopic);
    stream
            .filter((key, md) -> md != null)
            .transform(new MetricDataTransformerSupplier(mapper, STATE_STORE_NAME), STATE_STORE_NAME)
            .flatMap(this::metricsByDetector)
            .to(kafkaTopicNameExtractor, Produced.with(outputKeySerde, outputValueSerde));
    return builder.build();
}
 
Example #12
Source File: CoGroupingListeningExampleApplication.java    From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {

        StreamsConfig streamsConfig = new StreamsConfig(getProperties());
        Deserializer<String> stringDeserializer = Serdes.String().deserializer();
        Serializer<String> stringSerializer = Serdes.String().serializer();
        Serde<Tuple<List<ClickEvent>, List<StockTransaction>>> eventPerformanceTuple = StreamsSerdes.EventTransactionTupleSerde();
        Serializer<Tuple<List<ClickEvent>, List<StockTransaction>>> tupleSerializer = eventPerformanceTuple.serializer();
        Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();
        Deserializer<StockTransaction> stockTransactionDeserializer = stockTransactionSerde.deserializer();

        Serde<ClickEvent> clickEventSerde = StreamsSerdes.ClickEventSerde();
        Deserializer<ClickEvent> clickEventDeserializer = clickEventSerde.deserializer();


        Topology topology = new Topology();
        Map<String, String> changeLogConfigs = new HashMap<>();
        changeLogConfigs.put("retention.ms","120000" );
        changeLogConfigs.put("cleanup.policy", "compact,delete");

        KeyValueBytesStoreSupplier storeSupplier = Stores.persistentKeyValueStore(TUPLE_STORE_NAME);
        StoreBuilder<KeyValueStore<String, Tuple<List<ClickEvent>, List<StockTransaction>>>> builder = Stores.keyValueStoreBuilder(storeSupplier, Serdes.String(), eventPerformanceTuple);



        topology.addSource("Txn-Source", stringDeserializer, stockTransactionDeserializer, "stock-transactions")
                .addSource( "Events-Source", stringDeserializer, clickEventDeserializer, "events")
                .addProcessor("Txn-Processor", StockTransactionProcessor::new, "Txn-Source")
                .addProcessor("Events-Processor", ClickEventProcessor::new, "Events-Source")
                .addProcessor("CoGrouping-Processor", CogroupingProcessor::new, "Txn-Processor", "Events-Processor")
                .addStateStore(builder.withLoggingEnabled(changeLogConfigs), "CoGrouping-Processor")
                .addSink("Tuple-Sink", "cogrouped-results", stringSerializer, tupleSerializer, "CoGrouping-Processor");

        topology.addProcessor("Print", new KStreamPrinter("Co-Grouping"), "CoGrouping-Processor");


        MockDataProducer.produceStockTransactionsAndDayTradingClickEvents(50, 100, 100, StockTransaction::getSymbol);

        KafkaStreams kafkaStreams = new KafkaStreams(topology, streamsConfig);
        kafkaStreams.setGlobalStateRestoreListener(new LoggingStateRestoreListener());

        kafkaStreams.setUncaughtExceptionHandler((thread, exception) ->
            LOG.error("Thread [{}] encountered [{}]", thread.getName(), exception.getMessage())
        );

        kafkaStreams.setStateListener((newState, oldState) -> {
           if (oldState == KafkaStreams.State.REBALANCING && newState == KafkaStreams.State.RUNNING) {
               LOG.info("Topology Layout {}", topology.describe());
               LOG.info("Thread metadata {}", kafkaStreams.localThreadsMetadata());
           }
        });


        LOG.info("Co-Grouping App Started");
        kafkaStreams.cleanUp();
        kafkaStreams.start();

        Thread.sleep(70000);
        LOG.info("Shutting down the Co-Grouping metrics App now");
        kafkaStreams.close();
        MockDataProducer.shutdown();
    }
 
Example #13
Source File: RewardsApp.java    From Kafka-Streams-Real-time-Stream-Processing with The Unlicense
public static void main(String[] args) {

        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, AppConfigs.applicationID);
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, AppConfigs.bootstrapServers);

        StreamsBuilder streamsBuilder = new StreamsBuilder();

        KStream<String, PosInvoice> KS0 = streamsBuilder.stream(
            AppConfigs.posTopicName,
            Consumed.with(PosSerdes.String(),
                PosSerdes.PosInvoice()));

        KStream<String, PosInvoice> KS1 = KS0.filter((key, value) ->
            value.getCustomerType().equalsIgnoreCase(AppConfigs.CUSTOMER_TYPE_PRIME));

        KStream<String, PosInvoice> KS2 = KS1.through("rewards-intermediate",
            Produced.with(PosSerdes.String(),
                PosSerdes.PosInvoice(),
                new RewardsPartitioner()));

        StoreBuilder kvStoreBuilder = Stores.keyValueStoreBuilder(
            Stores.inMemoryKeyValueStore(AppConfigs.REWARDS_STORE_NAME),
            Serdes.String(),
            Serdes.Double()
        );

        streamsBuilder.addStateStore(kvStoreBuilder);

        KStream<String, Notification> KS3 = KS2.transformValues(
            RewardsTransformer::new,
            AppConfigs.REWARDS_STORE_NAME);

        KS3.to(AppConfigs.notificationTopic,
            Produced.with(PosSerdes.String(), PosSerdes.Notification()));

        logger.info("Starting Kafka Streams");
        KafkaStreams myStream = new KafkaStreams(streamsBuilder.build(), props);
        myStream.start();

        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            logger.info("Stopping Stream");
            myStream.close();
        }));
    }
 
Example #14
Source File: PregelComputation.java    From kafka-graphs with Apache License 2.0
public void prepare(StreamsBuilder builder, Properties streamsConfig) {
    Properties producerConfig = ClientUtils.producerConfig(
        bootstrapServers, serialized.keySerde().serializer().getClass(), KryoSerializer.class,
        streamsConfig != null ? streamsConfig : new Properties()
    );
    producerConfig.setProperty(ProducerConfig.CLIENT_ID_CONFIG, applicationId + "-producer");
    this.producer = new KafkaProducer<>(producerConfig);

    final StoreBuilder<KeyValueStore<Integer, Map<K, Map<K, List<Message>>>>> workSetStoreBuilder =
        Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore(localworkSetStoreName),
            Serdes.Integer(), new KryoSerde<>()
        );
    builder.addStateStore(workSetStoreBuilder);

    final StoreBuilder<KeyValueStore<K, Tuple4<Integer, VV, Integer, VV>>> solutionSetStoreBuilder =
        Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore(localSolutionSetStoreName),
            serialized.keySerde(), new KryoSerde<>()
        );
    builder.addStateStore(solutionSetStoreBuilder);

    this.vertices = builder
        .table(
            verticesTopic,
            Materialized.<K, VV, KeyValueStore<Bytes, byte[]>>as(verticesStoreName)
                .withKeySerde(serialized.keySerde()).withValueSerde(serialized.vertexValueSerde())
        );

    this.edgesGroupedBySource = builder
        .table(
            edgesGroupedBySourceTopic,
            Materialized.<K, Map<K, EV>, KeyValueStore<Bytes, byte[]>>as(edgesStoreName)
                .withKeySerde(serialized.keySerde()).withValueSerde(new KryoSerde<>())
        );

    this.solutionSet = builder
        .table(solutionSetTopic, Consumed.<K, Tuple4<Integer, VV, Integer, VV>>with(serialized.keySerde(), new KryoSerde<>()))
        .mapValues(v -> v._4, Materialized.as(solutionSetStore));

    // Initialize solution set
    this.vertices
        .toStream()
        .mapValues(v -> new Tuple4<>(-1, v, 0, v))
        .to(solutionSetTopic, Produced.with(serialized.keySerde(), new KryoSerde<>()));

    // Initialize workset
    this.vertices
        .toStream()
        .peek((k, v) -> {
            try {
                int partition = PregelComputation.vertexToPartition(k, serialized.keySerde().serializer(), numPartitions);
                ZKUtils.addChild(curator, applicationId, new PregelState(State.CREATED, 0, Stage.SEND), childPath(partition));
            } catch (Exception e) {
                throw toRuntimeException(e);
            }

        })
        .mapValues((k, v) -> new Tuple3<>(0, k, initialMessage.map(Collections::singletonList).orElse(Collections.emptyList())))
        .peek((k, v) -> log.trace("workset 0 before topic: (" + k + ", " + v + ")"))
        .<K, Tuple3<Integer, K, List<Message>>>to(workSetTopic, Produced.with(serialized.keySerde(), new KryoSerde<>()));

    this.workSet = builder
        .stream(workSetTopic, Consumed.with(serialized.keySerde(), new KryoSerde<Tuple3<Integer, K, List<Message>>>()))
        .peek((k, v) -> log.trace("workset 1 after topic: (" + k + ", " + v + ")"));

    KStream<K, Tuple2<Integer, Map<K, List<Message>>>> syncedWorkSet = workSet
        .transform(BarrierSync::new, localworkSetStoreName)
        .peek((k, v) -> log.trace("workset 2 after join: (" + k + ", " + v + ")"));

    KStream<K, Tuple3<Integer, Tuple4<Integer, VV, Integer, VV>, Map<K, List<Message>>>> superstepComputation =
        syncedWorkSet
            .transformValues(VertexComputeUdf::new, localSolutionSetStoreName, vertices.queryableStoreName(),
                edgesGroupedBySource.queryableStoreName());

    // Compute the solution set delta
    KStream<K, Tuple4<Integer, VV, Integer, VV>> solutionSetDelta = superstepComputation
        .flatMapValues(v -> v._2 != null ? Collections.singletonList(v._2) : Collections.emptyList())
        .peek((k, v) -> log.trace("solution set: (" + k + ", " + v + ")"));

    solutionSetDelta
        .to(solutionSetTopic, Produced.with(serialized.keySerde(), new KryoSerde<>()));

    // Compute the inbox of each vertex for the next step (new workset)
    KStream<K, Tuple2<Integer, Map<K, List<Message>>>> newworkSet = superstepComputation
        .mapValues(v -> new Tuple2<>(v._1, v._3))
        .peek((k, v) -> log.trace("workset new: (" + k + ", " + v + ")"));

    newworkSet.process(() -> new SendMessages(producer));
}
 
Example #15
Source File: CoGroupingApplication.java    From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {


        StreamsConfig streamsConfig = new StreamsConfig(getProperties());
        Deserializer<String> stringDeserializer = Serdes.String().deserializer();
        Serializer<String> stringSerializer = Serdes.String().serializer();
        Serde<Tuple<List<ClickEvent>, List<StockTransaction>>> eventPerformanceTuple = StreamsSerdes.EventTransactionTupleSerde();
        Serializer<Tuple<List<ClickEvent>, List<StockTransaction>>> tupleSerializer = eventPerformanceTuple.serializer();
        Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();
        Deserializer<StockTransaction> stockTransactionDeserializer = stockTransactionSerde.deserializer();

        Serde<ClickEvent> clickEventSerde = StreamsSerdes.ClickEventSerde();
        Deserializer<ClickEvent> clickEventDeserializer = clickEventSerde.deserializer();


        Topology topology = new Topology();
        Map<String, String> changeLogConfigs = new HashMap<>();
        changeLogConfigs.put("retention.ms", "120000");
        changeLogConfigs.put("cleanup.policy", "compact,delete");


        KeyValueBytesStoreSupplier storeSupplier = Stores.persistentKeyValueStore(TUPLE_STORE_NAME);
        StoreBuilder<KeyValueStore<String, Tuple<List<ClickEvent>, List<StockTransaction>>>> storeBuilder =
                Stores.keyValueStoreBuilder(storeSupplier,
                        Serdes.String(),
                        eventPerformanceTuple).withLoggingEnabled(changeLogConfigs);

        topology.addSource("Txn-Source", stringDeserializer, stockTransactionDeserializer, "stock-transactions")
                .addSource("Events-Source", stringDeserializer, clickEventDeserializer, "events")
                .addProcessor("Txn-Processor", StockTransactionProcessor::new, "Txn-Source")
                .addProcessor("Events-Processor", ClickEventProcessor::new, "Events-Source")
                .addProcessor("CoGrouping-Processor", CogroupingProcessor::new, "Txn-Processor", "Events-Processor")
                .addStateStore(storeBuilder, "CoGrouping-Processor")
                .addSink("Tuple-Sink", "cogrouped-results", stringSerializer, tupleSerializer, "CoGrouping-Processor");

        topology.addProcessor("Print", new KStreamPrinter("Co-Grouping"), "CoGrouping-Processor");


        MockDataProducer.produceStockTransactionsAndDayTradingClickEvents(50, 100, 100, StockTransaction::getSymbol);

        KafkaStreams kafkaStreams = new KafkaStreams(topology, streamsConfig);
        System.out.println("Co-Grouping App Started");
        kafkaStreams.cleanUp();
        kafkaStreams.start();
        Thread.sleep(70000);
        System.out.println("Shutting down the Co-Grouping App now");
        kafkaStreams.close();
        MockDataProducer.shutdown();
    }
 
Example #16
Source File: KafkaStreamsStreamListenerSetupMethodOrchestrator.java    From spring-cloud-stream-binder-kafka with Apache License 2.0
private StoreBuilder buildStateStore(KafkaStreamsStateStoreProperties spec) {
	try {

		Serde<?> keySerde = this.keyValueSerdeResolver
				.getStateStoreKeySerde(spec.getKeySerdeString());
		Serde<?> valueSerde = this.keyValueSerdeResolver
				.getStateStoreValueSerde(spec.getValueSerdeString());
		StoreBuilder builder;
		switch (spec.getType()) {
			case KEYVALUE:
				builder = Stores.keyValueStoreBuilder(
						Stores.persistentKeyValueStore(spec.getName()), keySerde,
						valueSerde);
				break;
			case WINDOW:
				builder = Stores
						.windowStoreBuilder(
								Stores.persistentWindowStore(spec.getName(),
										spec.getRetention(), 3, spec.getLength(), false),
								keySerde, valueSerde);
				break;
			case SESSION:
				builder = Stores.sessionStoreBuilder(Stores.persistentSessionStore(
						spec.getName(), spec.getRetention()), keySerde, valueSerde);
				break;
			default:
				throw new UnsupportedOperationException(
						"state store type (" + spec.getType() + ") is not supported!");
		}
		if (spec.isCacheEnabled()) {
			builder = builder.withCachingEnabled();
		}
		if (spec.isLoggingDisabled()) {
			builder = builder.withLoggingDisabled();
		}
		return builder;
	}
	catch (Exception ex) {
		LOG.error("failed to build state store exception : " + ex);
		throw ex;
	}
}
 
Example #17
Source File: StockPerformanceStreamsProcessorTopology.java    From kafka-streams-in-action with Apache License 2.0
public static Topology build() {
    
    Serde<String> stringSerde = Serdes.String();
    Serde<StockPerformance> stockPerformanceSerde = StreamsSerdes.StockPerformanceSerde();
    Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();


    StreamsBuilder builder = new StreamsBuilder();

    String stocksStateStore = "stock-performance-store";
    double differentialThreshold = 0.02;

    KeyValueBytesStoreSupplier storeSupplier = Stores.lruMap(stocksStateStore, 100);
    StoreBuilder<KeyValueStore<String, StockPerformance>> storeBuilder = Stores.keyValueStoreBuilder(storeSupplier, Serdes.String(), stockPerformanceSerde);

    builder.addStateStore(storeBuilder);

    builder.stream("stock-transactions", Consumed.with(stringSerde, stockTransactionSerde))
            .transform(() -> new StockPerformanceTransformer(stocksStateStore, differentialThreshold), stocksStateStore)
            .to("stock-performance", Produced.with(stringSerde, stockPerformanceSerde));

    return builder.build();
}
 
Example #18
Source File: KafkaStreamsFunctionStateStoreTests.java    From spring-cloud-stream-binder-kafka with Apache License 2.0
@Bean
public StoreBuilder myStore() {
	return Stores.keyValueStoreBuilder(
			Stores.persistentKeyValueStore("my-store"), Serdes.Long(),
			Serdes.Long());
}
 
Example #19
Source File: StreamsTopologyProvider.java    From apicurio-registry with Apache License 2.0
@Override
public Topology get() {
    StreamsBuilder builder = new StreamsBuilder();

    // Simple defaults
    ImmutableMap<String, String> configuration = ImmutableMap.of(
        TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT,
        TopicConfig.MIN_COMPACTION_LAG_MS_CONFIG, "0",
        TopicConfig.SEGMENT_BYTES_CONFIG, String.valueOf(64 * 1024 * 1024)
    );

    // Input topic -- storage topic
    // This is where we handle "http" requests
    // Key is artifactId -- which is also used for KeyValue store key
    KStream<String, Str.StorageValue> storageRequest = builder.stream(
        properties.getStorageTopic(),
        Consumed.with(Serdes.String(), ProtoSerde.parsedWith(Str.StorageValue.parser()))
    );

    // Data structure holds all artifact information
    // Global rules are Data as well, with constant artifactId (GLOBAL_RULES variable)
    String storageStoreName = properties.getStorageStoreName();
    StoreBuilder<KeyValueStore<String /* artifactId */, Str.Data>> storageStoreBuilder =
        Stores
            .keyValueStoreBuilder(
                Stores.inMemoryKeyValueStore(storageStoreName),
                Serdes.String(), ProtoSerde.parsedWith(Str.Data.parser())
            )
            .withCachingEnabled()
            .withLoggingEnabled(configuration);

    builder.addStateStore(storageStoreBuilder);

    // We transform <artifactId, Data> into simple mapping <globalId, <artifactId, version>>
    KStream<Long, Str.TupleValue> globalRequest =
        storageRequest.transform(
            () -> new StorageTransformer(properties, dataDispatcher, factory),
            storageStoreName
        ).through(
            properties.getGlobalIdTopic(),
            Produced.with(Serdes.Long(), ProtoSerde.parsedWith(Str.TupleValue.parser()))
        );

    String globalIdStoreName = properties.getGlobalIdStoreName();
    StoreBuilder<KeyValueStore<Long /* globalId */, Str.TupleValue>> globalIdStoreBuilder =
        Stores
            .keyValueStoreBuilder(
                Stores.inMemoryKeyValueStore(globalIdStoreName),
                Serdes.Long(), ProtoSerde.parsedWith(Str.TupleValue.parser())
            )
            .withCachingEnabled()
            .withLoggingEnabled(configuration);

    builder.addStateStore(globalIdStoreBuilder);

    // Just handle globalId mapping -- put or delete
    globalRequest.process(() -> new GlobalIdProcessor(globalIdStoreName), globalIdStoreName);

    return builder.build(properties.getProperties());
}
 
Example #20
Source File: StockPerformanceStreamsAndProcessorMetricsApplication.java    From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {


        StreamsConfig streamsConfig = new StreamsConfig(getProperties());
        Serde<String> stringSerde = Serdes.String();
        Serde<StockPerformance> stockPerformanceSerde = StreamsSerdes.StockPerformanceSerde();
        Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();


        StreamsBuilder builder = new StreamsBuilder();

        String stocksStateStore = "stock-performance-store";
        double differentialThreshold = 0.05;

        KeyValueBytesStoreSupplier storeSupplier = Stores.lruMap(stocksStateStore, 100);
        StoreBuilder<KeyValueStore<String, StockPerformance>> storeBuilder = Stores.keyValueStoreBuilder(storeSupplier, Serdes.String(), stockPerformanceSerde);

        builder.addStateStore(storeBuilder);

        builder.stream("stock-transactions", Consumed.with(stringSerde, stockTransactionSerde))
                .transform(() -> new StockPerformanceMetricsTransformer(stocksStateStore, differentialThreshold), stocksStateStore)
                .peek((k, v) -> LOG.info("[stock-performance] key: {} value: {}", k, v))
                .to("stock-performance", Produced.with(stringSerde, stockPerformanceSerde));


        KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), streamsConfig);
        MockDataProducer.produceStockTransactionsWithKeyFunction(50, 50, 25, StockTransaction::getSymbol);
        LOG.info("Stock Analysis KStream/Process API Metrics App Started");
        kafkaStreams.cleanUp();
        kafkaStreams.start();

        Thread.sleep(70000);

        LOG.info("Shutting down the Stock KStream/Process API Analysis Metrics App now");
        for (Map.Entry<MetricName, ? extends Metric> metricNameEntry : kafkaStreams.metrics().entrySet()) {
            Metric metric = metricNameEntry.getValue();
            MetricName metricName = metricNameEntry.getKey();
            if (!metric.metricValue().equals(0.0) && !metric.metricValue().equals(Double.NEGATIVE_INFINITY)) {
                LOG.info("MetricName {}", metricName.name());
                LOG.info(" = {}", metric.metricValue());
            }
        }
        kafkaStreams.close();
        MockDataProducer.shutdown();
    }
 
Example #21
Source File: ZMartKafkaStreamsAddStateApp.java    From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {
    
    StreamsConfig streamsConfig = new StreamsConfig(getProperties());

    Serde<Purchase> purchaseSerde = StreamsSerdes.PurchaseSerde();
    Serde<PurchasePattern> purchasePatternSerde = StreamsSerdes.PurchasePatternSerde();
    Serde<RewardAccumulator> rewardAccumulatorSerde = StreamsSerdes.RewardAccumulatorSerde();
    Serde<String> stringSerde = Serdes.String();

    StreamsBuilder builder = new StreamsBuilder();

    KStream<String, Purchase> purchaseKStream = builder.stream("transactions", Consumed.with(stringSerde, purchaseSerde))
            .mapValues(p -> Purchase.builder(p).maskCreditCard().build());

    KStream<String, PurchasePattern> patternKStream = purchaseKStream.mapValues(purchase -> PurchasePattern.builder(purchase).build());

    patternKStream.print(Printed.<String, PurchasePattern>toSysOut().withLabel("patterns"));
    patternKStream.to("patterns", Produced.with(stringSerde, purchasePatternSerde));



    // Adding state to the processor
    String rewardsStateStoreName = "rewardsPointsStore";
    RewardsStreamPartitioner streamPartitioner = new RewardsStreamPartitioner();

    KeyValueBytesStoreSupplier storeSupplier = Stores.inMemoryKeyValueStore(rewardsStateStoreName);
    StoreBuilder<KeyValueStore<String, Integer>> storeBuilder = Stores.keyValueStoreBuilder(storeSupplier, Serdes.String(), Serdes.Integer());

    builder.addStateStore(storeBuilder);

    KStream<String, Purchase> transByCustomerStream = purchaseKStream.through("customer_transactions", Produced.with(stringSerde, purchaseSerde, streamPartitioner));


    KStream<String, RewardAccumulator> statefulRewardAccumulator = transByCustomerStream.transformValues(() -> new PurchaseRewardTransformer(rewardsStateStoreName),
            rewardsStateStoreName);

    statefulRewardAccumulator.print(Printed.<String, RewardAccumulator>toSysOut().withLabel("rewards"));
    statefulRewardAccumulator.to("rewards", Produced.with(stringSerde, rewardAccumulatorSerde));



    // used only to produce data for this application, not typical usage
    MockDataProducer.producePurchaseData();

    
    LOG.info("Starting Adding State Example");
    KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), streamsConfig);
    LOG.info("ZMart Adding State Application Started");
    kafkaStreams.cleanUp();
    kafkaStreams.start();
    Thread.sleep(65000);
    LOG.info("Shutting down the Add State Application now");
    kafkaStreams.close();
    MockDataProducer.shutdown();
}