org.apache.kafka.streams.kstream.Serialized Java Examples
The following examples show how to use org.apache.kafka.streams.kstream.Serialized.
Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
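Serialized is a small holder for the key and value serdes that the grouping operations (KStream#groupBy, KStream#groupByKey, KTable#groupBy) use for the repartition topic backing the grouping. Note that Serialized was deprecated in Apache Kafka 2.1 in favor of Grouped, which additionally lets you name the repartition topic. As a quick orientation before the examples, here is a minimal sketch; the "words" topic, its String/String contents, and the surrounding setup are illustrative assumptions, not taken from any project below:

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Serialized;

public class SerializedSketch {

    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();

        // hypothetical topic of String keys and String values
        KStream<String, String> words = builder.stream("words");

        // Serialized.with(...) overrides the application's default serdes for
        // the repartitioning that backs the grouping
        KTable<String, Long> counts = words
                .groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
                .count();

        // Since Kafka 2.1 the equivalent, non-deprecated call uses Grouped,
        // which can also name the repartition topic:
        // words.groupByKey(Grouped.with("word-group", Serdes.String(), Serdes.String()));
    }
}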
Example #1
Source File: StreamToTableJoinIntegrationTests.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@StreamListener
@SendTo("output")
public KStream<String, Long> process(
        @Input("input") KStream<String, Long> userClicksStream,
        @Input("input-x") KTable<String, String> userRegionsTable) {

    return userClicksStream
            .leftJoin(userRegionsTable,
                    (clicks, region) -> new RegionWithClicks(
                            region == null ? "UNKNOWN" : region, clicks),
                    Joined.with(Serdes.String(), Serdes.Long(), null))
            .map((user, regionWithClicks) -> new KeyValue<>(
                    regionWithClicks.getRegion(), regionWithClicks.getClicks()))
            .groupByKey(Serialized.with(Serdes.String(), Serdes.Long()))
            .reduce(Long::sum)
            .toStream();
}
Example #2
Source File: KafkaStreamsBinderMultipleInputTopicsTest.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@StreamListener
@SendTo("output")
public KStream<?, WordCount> process(
        @Input("input") KStream<Object, String> input) {

    input.map((k, v) -> {
        System.out.println(k);
        System.out.println(v);
        return new KeyValue<>(k, v);
    });
    return input
            .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
            .map((key, value) -> new KeyValue<>(value, value))
            .groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
            .count(Materialized.as("WordCounts"))
            .toStream()
            .map((key, value) -> new KeyValue<>(null, new WordCount(key, value)));
}
Example #3
Source File: StatisticsBuilder.java From football-events with MIT License
private void buildPlayerStatistics(KStream<String, GoalScored> goalStream) {
    KTable<String, PlayerStartedCareer> playerTable = builder
            .table(PLAYER_STARTED_TOPIC, with(String(), playerSerde));

    KTable<String, PlayerGoals> playerGoalsTable = goalStream
            .selectKey((matchId, goal) -> goal.getScorerId())
            .leftJoin(playerTable, (goal, player) -> new PlayerGoals(player).goal(goal),
                    with(String(), goalScoredSerde, playerSerde))
            .groupByKey(Serialized.with(String(), playerGoalsSerde))
            .reduce(PlayerGoals::aggregate, materialized(PLAYER_GOALS_STORE, playerGoalsSerde));

    KTable<String, PlayerCards> playerCardsTable = builder
            .stream(CARD_RECEIVED_TOPIC, with(String(), cardReceivedSerde))
            .selectKey((matchId, card) -> card.getReceiverId())
            .leftJoin(playerTable, (card, player) -> new PlayerCards(player).card(card),
                    with(String(), cardReceivedSerde, playerSerde))
            .groupByKey(Serialized.with(String(), playerCardsSerde))
            .reduce(PlayerCards::aggregate, materialized(PLAYER_CARDS_STORE, playerCardsSerde));

    // publish changes to a view topic
    playerCardsTable.toStream().to(PLAYER_CARDS_TOPIC, Produced.with(String(), playerCardsSerde));

    KStream<String, PlayerGoals> playerGoalsStream = playerGoalsTable.toStream();
    playerGoalsStream.to(PLAYER_GOALS_TOPIC, Produced.with(String(), playerGoalsSerde));
}
Example #4
Source File: EventSourcedStreams.java From simplesource with Apache License 2.0
static <K, C, E, A> Tuple2<KStream<K, CommandRequest<K, C>>, KStream<K, CommandResponse<K>>> getProcessedCommands(
        TopologyContext<K, C, E, A> ctx,
        final KStream<K, CommandRequest<K, C>> commandRequestStream,
        final KStream<K, CommandResponse<K>> commandResponseStream) {

    final KTable<CommandId, CommandResponse<K>> commandResponseById = commandResponseStream
            .selectKey((key, response) -> response.commandId())
            .groupByKey(Serialized.with(ctx.serdes().commandId(), ctx.serdes().commandResponse()))
            .reduce((r1, r2) -> getResponseSequence(r1) > getResponseSequence(r2) ? r1 : r2);

    final KStream<K, Tuple2<CommandRequest<K, C>, CommandResponse<K>>> reqResp = commandRequestStream
            .selectKey((k, v) -> v.commandId())
            .leftJoin(commandResponseById, Tuple2::new,
                    Joined.with(ctx.serdes().commandId(), ctx.serdes().commandRequest(), ctx.serdes().commandResponse()))
            .selectKey((k, v) -> v.v1().aggregateKey());

    KStream<K, Tuple2<CommandRequest<K, C>, CommandResponse<K>>>[] branches = reqResp.branch(
            (k, tuple) -> tuple.v2() == null,
            (k, tuple) -> tuple.v2() != null);

    KStream<K, CommandRequest<K, C>> unProcessed = branches[0].mapValues((k, tuple) -> tuple.v1());
    KStream<K, CommandResponse<K>> processed = branches[1].mapValues((k, tuple) -> tuple.v2())
            .peek((k, r) -> logger.info("Preprocessed: {}=CommandId:{}", k, r.commandId()));

    return new Tuple2<>(unProcessed, processed);
}
Example #5
Source File: StructuredDataSourceNode.java From ksql-fork-with-deep-learning-function with Apache License 2.0
private <K> KTable table(
        final KStream<K, GenericRow> stream,
        final Serde<K> keySerde,
        final Serde<GenericRow> valueSerde
) {
    return stream.groupByKey(Serialized.with(keySerde, valueSerde))
            .reduce((genericRow, newValue) -> newValue);
}
Example #6
Source File: KafkaStreamsInteractiveQueryIntegrationTests.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@StreamListener("input") @SendTo("output") public KStream<?, String> process(KStream<Object, Product> input) { return input.filter((key, product) -> product.getId() == 123) .map((key, value) -> new KeyValue<>(value.id, value)) .groupByKey(Serialized.with(new Serdes.IntegerSerde(), new JsonSerde<>(Product.class))) .count(Materialized.as("prod-id-count-store")).toStream() .map((key, value) -> new KeyValue<>(null, "Count for product with ID 123: " + value)); }
Example #7
Source File: KafkaStreamsNativeEncodingDecodingTests.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@StreamListener("input") @SendTo("output") public KStream<?, String> process(KStream<Object, String> input) { return input .flatMapValues( value -> Arrays.asList(value.toLowerCase().split("\\W+"))) .map((key, value) -> new KeyValue<>(value, value)) .groupByKey(Serialized.with(Serdes.String(), Serdes.String())) .windowedBy(TimeWindows.of(Duration.ofSeconds(5))).count(Materialized.as("foo-WordCounts-x")) .toStream().map((key, value) -> new KeyValue<>(null, "Count for " + key.key() + " : " + value)); }
Example #8
Source File: OutboundValueNullSkippedConversionTest.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@StreamListener
@SendTo("output")
public KStream<?, KafkaStreamsBinderWordCountIntegrationTests.WordCount> process(
        @Input("input") KStream<Object, String> input) {
    return input
            .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
            .map((key, value) -> new KeyValue<>(value, value))
            .groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
            .windowedBy(TimeWindows.of(Duration.ofSeconds(5)))
            .count(Materialized.as("foo-WordCounts"))
            .toStream()
            .map((key, value) -> new KeyValue<>(null, null));
}
Example #9
Source File: DeserializationErrorHandlerByKafkaTests.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@StreamListener("input") @SendTo("output") public KStream<?, String> process(KStream<Object, String> input) { return input .flatMapValues( value -> Arrays.asList(value.toLowerCase().split("\\W+"))) .map((key, value) -> new KeyValue<>(value, value)) .groupByKey(Serialized.with(Serdes.String(), Serdes.String())) .windowedBy(TimeWindows.of(5000)).count(Materialized.as("foo-WordCounts-x")) .toStream().map((key, value) -> new KeyValue<>(null, "Count for " + key.key() + " : " + value)); }
Example #10
Source File: KafkastreamsBinderPojoInputStringOutputIntegrationTests.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@StreamListener("input") @SendTo("output") public KStream<Integer, String> process(KStream<Object, Product> input) { return input.filter((key, product) -> product.getId() == 123) .map((key, value) -> new KeyValue<>(value, value)) .groupByKey(Serialized.with(new JsonSerde<>(Product.class), new JsonSerde<>(Product.class))) .windowedBy(TimeWindows.of(5000)) .count(Materialized.as("id-count-store")).toStream() .map((key, value) -> new KeyValue<>(key.key().id, "Count for product with ID 123: " + value)); }
Example #11
Source File: KafkaStreamsBinderPojoInputAndPrimitiveTypeOutputTests.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@StreamListener("input") @SendTo("output") public KStream<Integer, Long> process(KStream<Object, Product> input) { return input.filter((key, product) -> product.getId() == 123) .map((key, value) -> new KeyValue<>(value, value)) .groupByKey(Serialized.with(new JsonSerde<>(Product.class), new JsonSerde<>(Product.class))) .windowedBy(TimeWindows.of(5000)) .count(Materialized.as("id-count-store-x")).toStream() .map((key, value) -> { return new KeyValue<>(key.key().id, value); }); }
Example #12
Source File: VehicleStatusCountProcessor.java From microservice-patterns with Apache License 2.0
@Bean
public KStream<String, Long> statusCountStreamProcessor(StreamsBuilder streamsBuilder) {
    KStream<Integer, VehicleLocation> stream = streamsBuilder.stream("gpslocation", // read from topic
            Consumed.with(Serdes.Integer(), new JsonSerde<>(VehicleLocation.class))); // using Integer and JSON serdes
    return stream
            .map((k, v) -> { // map the key to "Online"/"Offline" based on status
                String online = v.isOnline() ? "Online" : "Offline";
                return new KeyValue<>(online, v);
            })
            .groupByKey(Serialized.with( // group by the key mapped in the previous step
                    Serdes.String(), new JsonSerde<>(VehicleLocation.class)))
            .count(Materialized.as("statusCount")) // materialize the count to a state store
            .toStream();
}
Example #13
Source File: TopologyContext.java From simplesource with Apache License 2.0
public TopologyContext(AggregateSpec<K, C, E, A> aggregateSpec) {
    this.aggregateSpec = aggregateSpec;
    this.commandResponseRetentionInSeconds = aggregateSpec.generation().stateStoreSpec().retentionInSeconds();
    serdes = aggregateSpec.serialization().serdes();
    commandRequestConsumed = Consumed.with(serdes().aggregateKey(), serdes().commandRequest());
    commandResponseConsumed = Consumed.with(serdes().aggregateKey(), serdes().commandResponse());
    eventsConsumedProduced = Produced.with(serdes().aggregateKey(), serdes().valueWithSequence());
    aggregatedUpdateProduced = Produced.with(serdes().aggregateKey(), serdes().aggregateUpdate());
    commandResponseProduced = Produced.with(serdes().aggregateKey(), serdes().commandResponse());
    serializedCommandResponse = Serialized.with(serdes().commandId(), serdes().commandResponse());
    aggregator = aggregateSpec.generation().aggregator();
    initialValue = aggregateSpec.generation().initialValue();
}
Example #14
Source File: StatisticsBuilder.java From football-events with MIT License
private void buildMatchStatistics(KStream<String, GoalScored> goalStream) {
    KStream<String, MatchStarted> matchStartedStream = builder
            .stream(MATCH_STARTED_TOPIC, with(String(), matchStartedSerde));
    KStream<String, MatchFinished> matchFinishedStream = builder
            .stream(MATCH_FINISHED_TOPIC, with(String(), matchFinishedSerde));

    KStream<String, MatchScore> scoreStream = matchStartedStream
            .leftJoin(goalStream, (match, goal) -> new MatchScore(match).goal(goal),
                    JoinWindows.of(maxMatchDuration),
                    with(String(), matchStartedSerde, goalScoredSerde));

    KTable<String, MatchScore> scoreTable = scoreStream
            .groupByKey()
            .reduce(MatchScore::aggregate, materialized(MATCH_SCORES_STORE, matchScoreSerde));
    scoreTable.toStream().to(MATCH_SCORES_TOPIC, Produced.with(String(), matchScoreSerde));

    KStream<String, MatchScore> finalScoreStream = matchFinishedStream
            .leftJoin(scoreTable, (matchFinished, matchScore) -> matchScore,
                    with(String(), matchFinishedSerde, matchScoreSerde));

    // new key: clubId
    KStream<String, TeamRanking> rankingStream = finalScoreStream
            .flatMap((clubId, matchScore) -> {
                Collection<KeyValue<String, TeamRanking>> result = new ArrayList<>(2);
                result.add(pair(matchScore.getHomeClubId(), matchScore.homeRanking()));
                result.add(pair(matchScore.getAwayClubId(), matchScore.awayRanking()));
                return result;
            });

    KTable<String, TeamRanking> rankingTable = rankingStream
            .groupByKey(Serialized.with(String(), rankingSerde))
            .reduce(TeamRanking::aggregate, materialized(TEAM_RANKING_STORE, rankingSerde));

    // publish changes to a view topic
    rankingTable.toStream().to(TEAM_RANKING_TOPIC, Produced.with(String(), rankingSerde));
}
Example #15
Source File: TopScorersBuilder.java From football-events with MIT License
public void build() {
    KTable<String, TopPlayers> top10Table = builder
            .stream(PLAYER_GOALS_TOPIC, Consumed.with(Serdes.String(), playerGoalsSerde))
            // create a single record that includes the top scorers
            .groupBy((playerId, playerGoals) -> "topPlayers",
                    Serialized.with(Serdes.String(), playerGoalsSerde))
            .aggregate(() -> new TopPlayers(10),
                    (playerId, playerStat, top10) -> top10.aggregate(playerStat),
                    materialized(TOP_SCORERS_STORE, topSerde));

    top10Table.toStream().to(TOP_SCORERS_TOPIC, Produced.with(String(), topSerde));
}
Example #16
Source File: DomainEventSinkImpl.java From event-store-demo with GNU General Public License v3.0
@StreamListener( "input" ) public void process( KStream<Object, byte[]> input ) { log.debug( "process : enter" ); input .map( (key, value) -> { try { DomainEvent domainEvent = mapper.readValue( value, DomainEvent.class ); log.debug( "process : domainEvent=" + domainEvent ); return new KeyValue<>( domainEvent.getBoardUuid().toString(), domainEvent ); } catch( IOException e ) { log.error( "process : error converting json to DomainEvent", e ); } return null; }) .groupBy( (s, domainEvent) -> s, Serialized.with( Serdes.String(), domainEventSerde ) ) .aggregate( Board::new, (key, domainEvent, board) -> board.handleEvent( domainEvent ), Materialized.<String, Board, KeyValueStore<Bytes, byte[]>>as( BOARD_EVENTS_SNAPSHOTS ) .withKeySerde( Serdes.String() ) .withValueSerde( boardSerde ) ); log.debug( "process : exit" ); }
Example #17
Source File: AgeCountDemo.java From Kafka-Streams-Real-time-Stream-Processing with The Unlicense
public static void main(String[] args) {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, AppConfigs.applicationID);
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, AppConfigs.bootstrapServers);
    props.put(StreamsConfig.STATE_DIR_CONFIG, AppConfigs.stateStoreLocation);

    StreamsBuilder streamsBuilder = new StreamsBuilder();
    KTable<String, String> KT0 = streamsBuilder.table(
            AppConfigs.topicName,
            Consumed.with(Serdes.String(), Serdes.String()));

    KGroupedTable<String, String> KGT1 = KT0.groupBy(
            (person, age) -> KeyValue.pair(age, "1"),
            Serialized.with(Serdes.String(), Serdes.String()));

    KGT1.count()
            .toStream()
            .peek((k, v) -> logger.info("Age=" + k + " Count=" + v));

    KafkaStreams myStream = new KafkaStreams(streamsBuilder.build(), props);
    myStream.start();

    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
        logger.info("Stopping Stream");
        myStream.close();
    }));
}
Example #18
Source File: CountingWindowingAndKtableJoinExample.java From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamsConfig streamsConfig = new StreamsConfig(getProperties());

    Serde<String> stringSerde = Serdes.String();
    Serde<StockTransaction> transactionSerde = StreamsSerdes.StockTransactionSerde();
    Serde<TransactionSummary> transactionKeySerde = StreamsSerdes.TransactionSummarySerde();

    StreamsBuilder builder = new StreamsBuilder();
    long twentySeconds = 1000 * 20;
    long fifteenMinutes = 1000 * 60 * 15;
    long fiveSeconds = 1000 * 5;

    KTable<Windowed<TransactionSummary>, Long> customerTransactionCounts =
            builder.stream(STOCK_TRANSACTIONS_TOPIC,
                    Consumed.with(stringSerde, transactionSerde).withOffsetResetPolicy(LATEST))
                    .groupBy((noKey, transaction) -> TransactionSummary.from(transaction),
                            Serialized.with(transactionKeySerde, transactionSerde))
                    // session window; comment out the line below and uncomment one of the
                    // windowedBy lines further down for a different window example
                    .windowedBy(SessionWindows.with(twentySeconds).until(fifteenMinutes)).count();
                    // tumbling window with timeout of 15 minutes
                    //.windowedBy(TimeWindows.of(twentySeconds).until(fifteenMinutes)).count();
                    // tumbling window with default timeout of 24 hours
                    //.windowedBy(TimeWindows.of(twentySeconds)).count();
                    // hopping window
                    //.windowedBy(TimeWindows.of(twentySeconds).advanceBy(fiveSeconds).until(fifteenMinutes)).count();

    customerTransactionCounts.toStream()
            .print(Printed.<Windowed<TransactionSummary>, Long>toSysOut().withLabel("Customer Transactions Counts"));

    KStream<String, TransactionSummary> countStream = customerTransactionCounts.toStream()
            .map((window, count) -> {
                TransactionSummary transactionSummary = window.key();
                String newKey = transactionSummary.getIndustry();
                transactionSummary.setSummaryCount(count);
                return KeyValue.pair(newKey, transactionSummary);
            });

    KTable<String, String> financialNews = builder.table("financial-news", Consumed.with(EARLIEST));

    ValueJoiner<TransactionSummary, String, String> valueJoiner = (txnct, news) ->
            String.format("%d shares purchased %s related news [%s]",
                    txnct.getSummaryCount(), txnct.getStockTicker(), news);

    KStream<String, String> joined = countStream.leftJoin(financialNews, valueJoiner,
            Joined.with(stringSerde, transactionKeySerde, stringSerde));

    joined.print(Printed.<String, String>toSysOut().withLabel("Transactions and News"));

    KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), streamsConfig);
    kafkaStreams.cleanUp();
    kafkaStreams.setUncaughtExceptionHandler((t, e) -> LOG.error("had exception ", e));

    CustomDateGenerator dateGenerator = CustomDateGenerator.withTimestampsIncreasingBy(Duration.ofMillis(750));
    DataGenerator.setTimestampGenerator(dateGenerator::get);
    MockDataProducer.produceStockTransactions(2, 5, 3, false);

    LOG.info("Starting CountingWindowing and KTableJoins Example");
    kafkaStreams.cleanUp();
    kafkaStreams.start();
    Thread.sleep(65000);
    LOG.info("Shutting down the CountingWindowing and KTableJoins Example Application now");
    kafkaStreams.close();
    MockDataProducer.shutdown();
}
Example #19
Source File: StockPerformanceInteractiveQueryApplication.java From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) {
    if (args.length < 2) {
        LOG.error("Need to specify host, port");
        System.exit(1);
    }
    String host = args[0];
    int port = Integer.parseInt(args[1]);
    final HostInfo hostInfo = new HostInfo(host, port);

    Properties properties = getProperties();
    properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, host + ":" + port);
    StreamsConfig streamsConfig = new StreamsConfig(properties);

    Serde<String> stringSerde = Serdes.String();
    Serde<Long> longSerde = Serdes.Long();
    Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();
    WindowedSerializer<String> windowedSerializer = new WindowedSerializer<>(stringSerde.serializer());
    WindowedDeserializer<String> windowedDeserializer = new WindowedDeserializer<>(stringSerde.deserializer());
    Serde<Windowed<String>> windowedSerde = Serdes.serdeFrom(windowedSerializer, windowedDeserializer);
    Serde<CustomerTransactions> customerTransactionsSerde = StreamsSerdes.CustomerTransactionsSerde();

    Aggregator<String, StockTransaction, Integer> sharesAggregator = (k, v, i) -> v.getShares() + i;

    StreamsBuilder builder = new StreamsBuilder();

    // data is already coming in keyed
    KStream<String, StockTransaction> stockTransactionKStream =
            builder.stream(MockDataProducer.STOCK_TRANSACTIONS_TOPIC,
                    Consumed.with(stringSerde, stockTransactionSerde)
                            .withOffsetResetPolicy(Topology.AutoOffsetReset.LATEST));

    stockTransactionKStream.map((k, v) -> KeyValue.pair(v.getSector(), v))
            .groupByKey(Serialized.with(stringSerde, stockTransactionSerde))
            .count(Materialized.as("TransactionsBySector"))
            .toStream()
            .peek((k, v) -> LOG.info("Transaction count for {} {}", k, v))
            .to("sector-transaction-counts", Produced.with(stringSerde, longSerde));

    stockTransactionKStream.map((k, v) -> KeyValue.pair(v.getCustomerId(), v))
            .groupByKey(Serialized.with(stringSerde, stockTransactionSerde))
            .windowedBy(SessionWindows.with(TimeUnit.MINUTES.toMillis(60)).until(TimeUnit.MINUTES.toMillis(120)))
            .aggregate(CustomerTransactions::new,
                    (k, v, ct) -> ct.update(v),
                    (k, ct, other) -> ct.merge(other),
                    Materialized.<String, CustomerTransactions, SessionStore<Bytes, byte[]>>as("CustomerPurchaseSessions")
                            .withKeySerde(stringSerde)
                            .withValueSerde(customerTransactionsSerde))
            .toStream()
            .peek((k, v) -> LOG.info("Session info for {} {}", k, v))
            .to("session-transactions", Produced.with(windowedSerde, customerTransactionsSerde));

    stockTransactionKStream.groupByKey(Serialized.with(stringSerde, stockTransactionSerde))
            .windowedBy(TimeWindows.of(10000))
            .aggregate(() -> 0, sharesAggregator,
                    Materialized.<String, Integer, WindowStore<Bytes, byte[]>>as("NumberSharesPerPeriod")
                            .withKeySerde(stringSerde)
                            .withValueSerde(Serdes.Integer()))
            .toStream()
            .peek((k, v) -> LOG.info("key is {} value is {}", k, v))
            .to("transaction-count", Produced.with(windowedSerde, Serdes.Integer()));

    KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), streamsConfig);
    InteractiveQueryServer queryServer = new InteractiveQueryServer(kafkaStreams, hostInfo);
    StateRestoreHttpReporter restoreReporter = new StateRestoreHttpReporter(queryServer);
    queryServer.init();

    kafkaStreams.setGlobalStateRestoreListener(restoreReporter);

    kafkaStreams.setStateListener((newState, oldState) -> {
        if (newState == KafkaStreams.State.RUNNING && oldState == KafkaStreams.State.REBALANCING) {
            LOG.info("Setting the query server to ready");
            queryServer.setReady(true);
        } else if (newState != KafkaStreams.State.RUNNING) {
            LOG.info("State not RUNNING, disabling the query server");
            queryServer.setReady(false);
        }
    });

    kafkaStreams.setUncaughtExceptionHandler((t, e) -> {
        LOG.error("Thread {} had a fatal error {}", t, e, e);
        shutdown(kafkaStreams, queryServer);
    });

    Runtime.getRuntime().addShutdownHook(new Thread(() -> shutdown(kafkaStreams, queryServer)));

    LOG.info("Stock Analysis KStream Interactive Query App Started");
    kafkaStreams.cleanUp();
    kafkaStreams.start();
}
Example #20
Source File: KafkaStreamsInteractiveQuerySample.java From spring-cloud-stream-samples with Apache License 2.0
@Bean
public BiConsumer<KStream<String, PlayEvent>, KTable<Long, Song>> process() {
    return (s, t) -> {
        // create and configure the SpecificAvroSerdes required in this example
        final Map<String, String> serdeConfig = Collections.singletonMap(
                AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, "http://localhost:8081");
        final SpecificAvroSerde<PlayEvent> playEventSerde = new SpecificAvroSerde<>();
        playEventSerde.configure(serdeConfig, false);
        final SpecificAvroSerde<Song> keySongSerde = new SpecificAvroSerde<>();
        keySongSerde.configure(serdeConfig, true);
        final SpecificAvroSerde<Song> valueSongSerde = new SpecificAvroSerde<>();
        valueSongSerde.configure(serdeConfig, false);
        final SpecificAvroSerde<SongPlayCount> songPlayCountSerde = new SpecificAvroSerde<>();
        songPlayCountSerde.configure(serdeConfig, false);

        // accept play events that have a duration >= the minimum
        final KStream<Long, PlayEvent> playsBySongId =
                s.filter((region, event) -> event.getDuration() >= MIN_CHARTABLE_DURATION)
                        // repartition based on song id
                        .map((key, value) -> KeyValue.pair(value.getSongId(), value));

        // join the plays with song as we will use it later for charting
        final KStream<Long, Song> songPlays = playsBySongId.leftJoin(t,
                (value1, song) -> song,
                Joined.with(Serdes.Long(), playEventSerde, valueSongSerde));

        // create a state store to track song play counts
        final KTable<Song, Long> songPlayCounts = songPlays.groupBy(
                (songId, song) -> song,
                Serialized.with(keySongSerde, valueSongSerde))
                .count(Materialized.<Song, Long, KeyValueStore<Bytes, byte[]>>as(SONG_PLAY_COUNT_STORE)
                        .withKeySerde(valueSongSerde)
                        .withValueSerde(Serdes.Long()));

        final TopFiveSerde topFiveSerde = new TopFiveSerde();

        // Compute the top five charts for each genre. The results of this computation
        // continuously update the state store "top-five-songs-by-genre", which can then
        // be queried interactively via a REST API (cf. MusicPlaysRestService) for the
        // latest charts per genre.
        songPlayCounts.groupBy((song, plays) ->
                        KeyValue.pair(song.getGenre().toLowerCase(), new SongPlayCount(song.getId(), plays)),
                Serialized.with(Serdes.String(), songPlayCountSerde))
                // aggregate into a TopFiveSongs instance that keeps track of the current
                // top five for each genre; the data will be available in the
                // top-five-songs-genre store
                .aggregate(TopFiveSongs::new,
                        (aggKey, value, aggregate) -> {
                            aggregate.add(value);
                            return aggregate;
                        },
                        (aggKey, value, aggregate) -> {
                            aggregate.remove(value);
                            return aggregate;
                        },
                        Materialized.<String, TopFiveSongs, KeyValueStore<Bytes, byte[]>>as(TOP_FIVE_SONGS_BY_GENRE_STORE)
                                .withKeySerde(Serdes.String())
                                .withValueSerde(topFiveSerde));

        // Compute the overall top five chart. The results of this computation
        // continuously update the state store "top-five-songs", which can likewise be
        // queried interactively via a REST API (cf. MusicPlaysRestService).
        songPlayCounts.groupBy((song, plays) ->
                        KeyValue.pair(TOP_FIVE_KEY, new SongPlayCount(song.getId(), plays)),
                Serialized.with(Serdes.String(), songPlayCountSerde))
                .aggregate(TopFiveSongs::new,
                        (aggKey, value, aggregate) -> {
                            aggregate.add(value);
                            return aggregate;
                        },
                        (aggKey, value, aggregate) -> {
                            aggregate.remove(value);
                            return aggregate;
                        },
                        Materialized.<String, TopFiveSongs, KeyValueStore<Bytes, byte[]>>as(TOP_FIVE_SONGS_STORE)
                                .withKeySerde(Serdes.String())
                                .withValueSerde(topFiveSerde));
    };
}
Example #21
Source File: StockCountsStreamsConnectIntegrationApplication.java From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamsConfig streamsConfig = new StreamsConfig(getProperties());

    Serde<String> stringSerde = Serdes.String();
    Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();
    Serde<Long> longSerde = Serdes.Long();

    StreamsBuilder builder = new StreamsBuilder();

    builder.stream("dbTxnTRANSACTIONS", Consumed.with(stringSerde, stockTransactionSerde))
            .peek((k, v) -> LOG.info("transactions from database key {} value {}", k, v))
            .groupByKey(Serialized.with(stringSerde, stockTransactionSerde))
            .aggregate(() -> 0L,
                    (symb, stockTxn, numShares) -> numShares + stockTxn.getShares(),
                    Materialized.with(stringSerde, longSerde))
            .toStream()
            .peek((k, v) -> LOG.info("Aggregated stock sales for {} {}", k, v))
            .to("stock-counts", Produced.with(stringSerde, longSerde));

    KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), streamsConfig);
    CountDownLatch doneSignal = new CountDownLatch(1);

    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
        doneSignal.countDown();
        LOG.info("Shutting down the Stock Analysis KStream Connect App now");
        kafkaStreams.close();
    }));

    LOG.info("Stock Analysis KStream Connect App Started");
    kafkaStreams.cleanUp();
    kafkaStreams.start();
    doneSignal.await();
}
Example #22
Source File: GlobalKTableExample.java From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamsConfig streamsConfig = new StreamsConfig(getProperties());

    Serde<String> stringSerde = Serdes.String();
    Serde<StockTransaction> transactionSerde = StreamsSerdes.StockTransactionSerde();
    Serde<TransactionSummary> transactionSummarySerde = StreamsSerdes.TransactionSummarySerde();

    StreamsBuilder builder = new StreamsBuilder();
    long twentySeconds = 1000 * 20;

    KeyValueMapper<Windowed<TransactionSummary>, Long, KeyValue<String, TransactionSummary>> transactionMapper =
            (window, count) -> {
                TransactionSummary transactionSummary = window.key();
                String newKey = transactionSummary.getIndustry();
                transactionSummary.setSummaryCount(count);
                return KeyValue.pair(newKey, transactionSummary);
            };

    KStream<String, TransactionSummary> countStream =
            builder.stream(STOCK_TRANSACTIONS_TOPIC,
                    Consumed.with(stringSerde, transactionSerde).withOffsetResetPolicy(LATEST))
                    .groupBy((noKey, transaction) -> TransactionSummary.from(transaction),
                            Serialized.with(transactionSummarySerde, transactionSerde))
                    .windowedBy(SessionWindows.with(twentySeconds))
                    .count()
                    .toStream()
                    .map(transactionMapper);

    GlobalKTable<String, String> publicCompanies = builder.globalTable(COMPANIES.topicName());
    GlobalKTable<String, String> clients = builder.globalTable(CLIENTS.topicName());

    countStream.leftJoin(publicCompanies, (key, txn) -> txn.getStockTicker(), TransactionSummary::withCompanyName)
            .leftJoin(clients, (key, txn) -> txn.getCustomerId(), TransactionSummary::withCustomerName)
            .print(Printed.<String, TransactionSummary>toSysOut().withLabel("Resolved Transaction Summaries"));

    KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), streamsConfig);
    kafkaStreams.cleanUp();
    kafkaStreams.setUncaughtExceptionHandler((t, e) -> LOG.error("had exception ", e));

    CustomDateGenerator dateGenerator = CustomDateGenerator.withTimestampsIncreasingBy(Duration.ofMillis(750));
    DataGenerator.setTimestampGenerator(dateGenerator::get);
    MockDataProducer.produceStockTransactions(2, 5, 3, true);

    LOG.info("Starting GlobalKTable Example");
    kafkaStreams.cleanUp();
    kafkaStreams.start();
    Thread.sleep(65000);
    LOG.info("Shutting down the GlobalKTable Example Application now");
    kafkaStreams.close();
    MockDataProducer.shutdown();
}
Example #23
Source File: SerdesPair.java From kafka-encryption with Apache License 2.0
/**
 * Build a {@link Serialized} using the keySerde and valueSerde of the pair.
 *
 * @return a {@link Serialized} carrying this pair's key and value serdes
 */
public Serialized<K, V> toSerialized() {
    return Serialized.with(keySerde, valueSerde);
}
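A hypothetical call site for this helper, assuming a SerdesPair<String, Long> named pair and a KStream<String, Long> named stream (neither appears in the project above):

// group using the pair's serdes instead of spelling out Serialized.with(...)
KTable<String, Long> totals = stream
        .groupByKey(pair.toSerialized())
        .reduce(Long::sum);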