org.apache.kafka.streams.KafkaStreams Java Examples
The following examples show how to use org.apache.kafka.streams.KafkaStreams.
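Every example below follows the same lifecycle: build a Topology (directly or through a StreamsBuilder), construct a KafkaStreams instance from that topology plus a Properties configuration, call start(), and eventually close(). As a reference point for reading the examples, here is a minimal self-contained sketch of that skeleton; the broker address, application id, and topic names are placeholders, not values taken from any project below.

import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;

public class MinimalStreamsApp {

    public static void main(String[] args) {
        // Minimal required configuration: an application id (used for the consumer
        // group and state directories) and the broker bootstrap list.
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "minimal-app");       // placeholder
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        // Simplest possible topology: copy records from one topic to another.
        StreamsBuilder builder = new StreamsBuilder();
        builder.<String, String>stream("input-topic").to("output-topic");    // placeholder topics
        Topology topology = builder.build();

        KafkaStreams streams = new KafkaStreams(topology, props);
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));    // close cleanly on exit
        streams.start();
    }
}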
Example #1
Source File: StreamDemo.java From javatech with Creative Commons Attribution Share Alike 4.0 International
public static void main(String[] args) {
    // 1. Specify the stream configuration
    Properties config = new Properties();
    config.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-application");
    config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, HOST);
    config.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

    // 2. Set up the stream builder
    StreamsBuilder builder = new StreamsBuilder();
    KStream<String, String> textLines = builder.stream("TextLinesTopic");
    KTable<String, Long> wordCounts = textLines
        .flatMapValues(textLine -> Arrays.asList(textLine.toLowerCase().split("\\W+")))
        .groupBy((key, word) -> word)
        .count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("counts-store"));
    wordCounts.toStream().to("WordsWithCountsTopic", Produced.with(Serdes.String(), Serdes.Long()));

    // 3. Initialize the Kafka Streams instance from the builder and the configuration
    KafkaStreams streams = new KafkaStreams(builder.build(), config);
    streams.start();
}
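Note that this example starts the topology but never closes it; state stores are only flushed cleanly when close() runs. A common remedy, which several later examples use, is a JVM shutdown hook (a one-line addition, not part of the original source):

    // Hypothetical addition to the example above: close the topology on JVM exit.
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));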
Example #2
Source File: StreamsRegistryConfiguration.java From apicurio-registry with Apache License 2.0
@Produces
@ApplicationScoped
public ExtReadOnlyKeyValueStore<String, Str.Data> storageKeyValueStore(
    KafkaStreams streams,
    HostInfo storageLocalHost,
    StreamsProperties properties,
    FilterPredicate<String, Str.Data> filterPredicate
) {
    return new DistributedReadOnlyKeyValueStore<>(
        streams,
        storageLocalHost,
        properties.getStorageStoreName(),
        Serdes.String(),
        ProtoSerde.parsedWith(Str.Data.parser()),
        new DefaultGrpcChannelProvider(),
        true,
        filterPredicate
    );
}
Example #3
Source File: StreamsRegistryConfiguration.java From apicurio-registry with Apache License 2.0
@Produces
@ApplicationScoped
public ReadOnlyKeyValueStore<Long, Str.TupleValue> globalIdKeyValueStore(
    KafkaStreams streams,
    HostInfo storageLocalHost,
    StreamsProperties properties
) {
    return new DistributedReadOnlyKeyValueStore<>(
        streams,
        storageLocalHost,
        properties.getGlobalIdStoreName(),
        Serdes.Long(),
        ProtoSerde.parsedWith(Str.TupleValue.parser()),
        new DefaultGrpcChannelProvider(),
        true,
        (filter, over, id, tuple) -> true
    );
}
Example #4
Source File: KafkaRuntime.java From jMetalSP with MIT License
@Override
public void startStreamingDataSources(List<StreamingDataSource<?>> streamingDataSourceList) {
    for (StreamingDataSource<?> streamingDataSource : streamingDataSourceList) {
        ((KafkaStreamingDataSource) streamingDataSource).setStreamingBuilder(streamsBuilder);
        ((KafkaStreamingDataSource) streamingDataSource).setTopic(topic);
        streamingDataSource.run();
    }
    //streamingContext.start();

    KafkaStreams streams = new KafkaStreams(streamsBuilder.build(), config);
    try {
        streams.start();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example #5
Source File: DistributedService.java From apicurio-registry with Apache License 2.0
/**
 * @param streams The {@link KafkaStreams} application
 * @param localApplicationServer The {@link HostInfo} derived from the
 *                               {@link StreamsConfig#APPLICATION_SERVER_CONFIG application.server}
 *                               configuration property of local kafka streams node for the streams application.
 *                               This is used to identify requests for local service, bypassing gRPC calls
 * @param storeName The name of the store registered in the streams application and used for distribution
 *                  of keys among kafka streams processing nodes.
 * @param keySerde the {@link Serde} for keys of the service which are also the distribution keys of the
 *                 corresponding store.
 * @param grpcChannelProvider A function that establishes gRPC {@link Channel} to a remote service
 *                            for the given {@link HostInfo} parameter
 * @param parallel {@code true} if service calls that need to dispatch to many local services in
 *                 the cluster are to be performed in parallel
 */
public DistributedService(
    KafkaStreams streams,
    HostInfo localApplicationServer,
    String storeName,
    Serde<K> keySerde,
    Function<? super HostInfo, ? extends Channel> grpcChannelProvider,
    boolean parallel
) {
    this.streams = Objects.requireNonNull(streams, "streams");
    this.localApplicationServer = Objects.requireNonNull(localApplicationServer, "localApplicationServer");
    this.storeName = Objects.requireNonNull(storeName, "storeName");
    this.keySerde = Objects.requireNonNull(keySerde, "keySerde");
    this.grpcChannelProvider = Objects.requireNonNull(grpcChannelProvider, "grpcChannelProvider");
    this.parallel = parallel;
}
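The Javadoc spells out the routing contract: the distribution key determines which Kafka Streams node owns an entry, and calls for locally owned keys bypass gRPC. A plausible sketch of how that owner lookup is typically done with the KafkaStreams metadata API (this helper is illustrative, not code from apicurio-registry; metadataForKey was the lookup method through Kafka 2.4 and was later superseded by queryMetadataForKey):

    // Illustrative helper (not from the apicurio-registry source): find the node
    // that hosts the store partition owning the given key.
    private HostInfo ownerOf(K key) {
        StreamsMetadata metadata = streams.metadataForKey(storeName, key, keySerde.serializer());
        // If this equals localApplicationServer, the call can be served locally
        // without going through grpcChannelProvider.
        return metadata.hostInfo();
    }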
Example #6
Source File: DistributedReadOnlyKeyValueStore.java From apicurio-registry with Apache License 2.0
/**
 * @param streams The {@link KafkaStreams} application
 * @param localApplicationServer The {@link HostInfo} derived from the
 *                               {@link StreamsConfig#APPLICATION_SERVER_CONFIG application.server}
 *                               configuration property of local kafka streams node for the streams application.
 *                               This is used to identify requests for local store, bypassing gRPC calls
 * @param storeName The name of the {@link ReadOnlyKeyValueStore} registered in the streams application
 * @param keySerde The {@link Serde} for keys of the store
 * @param valSerde The {@link Serde} for values of the store
 * @param grpcChannelProvider A function that establishes gRPC {@link Channel} to a remote store service
 *                            for the given {@link HostInfo} parameter
 * @param parallel {@code true} if lookups that need to query many stores in the cluster are
 *                 to be performed in parallel
 * @param filterPredicate filter predicate to filter out keys and values
 */
public DistributedReadOnlyKeyValueStore(
    KafkaStreams streams,
    HostInfo localApplicationServer,
    String storeName,
    Serde<K> keySerde,
    Serde<V> valSerde,
    Function<? super HostInfo, ? extends Channel> grpcChannelProvider,
    boolean parallel,
    FilterPredicate<K, V> filterPredicate
) {
    super(streams, localApplicationServer, storeName, keySerde, valSerde, grpcChannelProvider, parallel);
    this.filterPredicate = filterPredicate;
}
Example #7
Source File: StreamsRegistryConfiguration.java From apicurio-registry with Apache License 2.0
@Produces
@ApplicationScoped
@Current
public AsyncBiFunctionService<Void, Void, KafkaStreams.State> stateService(
    KafkaStreams streams,
    HostInfo storageLocalHost,
    LocalService<AsyncBiFunctionService.WithSerdes<Void, Void, KafkaStreams.State>> localStateService
) {
    return new DistributedAsyncBiFunctionService<>(
        streams, storageLocalHost, "stateStore", localStateService, new DefaultGrpcChannelProvider()
    );
}
Example #8
Source File: KafkaStreamWordCount.java From Building-Data-Streaming-Applications-with-Apache-Kafka with MIT License
public static void main(String[] args) throws Exception {
    Properties kafkaStreamProperties = new Properties();
    kafkaStreamProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, "kafka-stream-wordCount");
    kafkaStreamProperties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    kafkaStreamProperties.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, "localhost:2181");
    kafkaStreamProperties.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    kafkaStreamProperties.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());

    Serde<String> stringSerde = Serdes.String();
    Serde<Long> longSerde = Serdes.Long();

    KStreamBuilder streamTopology = new KStreamBuilder();
    KStream<String, String> topicRecords = streamTopology.stream(stringSerde, stringSerde, "input");
    KStream<String, Long> wordCounts = topicRecords
        .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
        .map((key, word) -> new KeyValue<>(word, word))
        .countByKey("Count")
        .toStream();
    wordCounts.to(stringSerde, longSerde, "wordCount");

    KafkaStreams streamManager = new KafkaStreams(streamTopology, kafkaStreamProperties);
    streamManager.start();
    Runtime.getRuntime().addShutdownHook(new Thread(streamManager::close));
}
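This example is written against a pre-1.0 Kafka Streams API: KStreamBuilder, countByKey, and StreamsConfig.ZOOKEEPER_CONNECT_CONFIG were all removed in later releases (Streams stopped needing ZooKeeper as of 0.10.1). For comparison, a rough equivalent of the same word count on the post-1.0 DSL might look like the following sketch, reusing the topic names from the example above:

    // Sketch of the same word count on the modern (1.0+) Streams DSL.
    StreamsBuilder builder = new StreamsBuilder();
    KStream<String, String> topicRecords =
            builder.stream("input", Consumed.with(Serdes.String(), Serdes.String()));
    KTable<String, Long> wordCounts = topicRecords
            .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
            .groupBy((key, word) -> word)             // re-key each record by the word itself
            .count(Materialized.as("Count"));         // named state store, as in the original
    wordCounts.toStream().to("wordCount", Produced.with(Serdes.String(), Serdes.Long()));
    KafkaStreams streamManager = new KafkaStreams(builder.build(), kafkaStreamProperties);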
Example #9
Source File: RangeKeyValuesQueryVerticleTest.java From kiqr with Apache License 2.0
@Test
public void notFoundWithNoResult(TestContext context) {
    KafkaStreams streamMock = mock(KafkaStreams.class);
    ReadOnlyKeyValueStore<Object, Object> storeMock = mock(ReadOnlyKeyValueStore.class);
    KeyValueIterator<Object, Object> iteratorMock = mock(KeyValueIterator.class);
    when(streamMock.store(eq("store"), any(QueryableStoreType.class))).thenReturn(storeMock);
    SimpleKeyValueIterator iterator = new SimpleKeyValueIterator();
    when(storeMock.range(any(), any())).thenReturn(iterator);

    rule.vertx().deployVerticle(new RangeKeyValueQueryVerticle("host", streamMock), context.asyncAssertSuccess(deployment -> {
        RangeKeyValueQuery query = new RangeKeyValueQuery("store", Serdes.String().getClass().getName(),
                Serdes.String().getClass().getName(), "key".getBytes(), "key".getBytes());

        rule.vertx().eventBus().send(Config.RANGE_KEY_VALUE_QUERY_ADDRESS_PREFIX + "host", query, context.asyncAssertSuccess(reply -> {
            context.assertTrue(reply.body() instanceof MultiValuedKeyValueQueryResponse);
            MultiValuedKeyValueQueryResponse response = (MultiValuedKeyValueQueryResponse) reply.body();
            context.assertEquals(0, response.getResults().size());
            context.assertTrue(iterator.closed);
        }));
    }));
}
Example #10
Source File: MetricsResource.java From kafka-streams-example with Apache License 2.0
/**
 * Query local state store to extract metrics
 *
 * @return local Metrics
 */
private Metrics getLocalMetrics() {
    HostInfo thisInstance = GlobalAppState.getInstance().getHostPortInfo();
    KafkaStreams ks = GlobalAppState.getInstance().getKafkaStreams();

    String source = thisInstance.host() + ":" + thisInstance.port();
    Metrics localMetrics = new Metrics();

    ReadOnlyKeyValueStore<String, Double> averageStore = ks
            .store(storeName, QueryableStoreTypes.<String, Double>keyValueStore());

    LOGGER.log(Level.INFO, "Entries in store {0}", averageStore.approximateNumEntries());
    KeyValueIterator<String, Double> storeIterator = averageStore.all();

    while (storeIterator.hasNext()) {
        KeyValue<String, Double> kv = storeIterator.next();
        localMetrics.add(source, kv.key, String.valueOf(kv.value));
    }
    LOGGER.log(Level.INFO, "Local store state {0}", localMetrics);
    return localMetrics;
}
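One caveat with this example: KeyValueIterator holds store resources (for RocksDB-backed stores, a native iterator) and should be closed when done. A safer version of the loop, assuming the rest of the method stays the same:

    // Try-with-resources ensures the store iterator is released even if the loop throws.
    try (KeyValueIterator<String, Double> storeIterator = averageStore.all()) {
        while (storeIterator.hasNext()) {
            KeyValue<String, Double> kv = storeIterator.next();
            localMetrics.add(source, kv.key, String.valueOf(kv.value));
        }
    }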
Example #11
Source File: StockPerformanceApplication.java From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamsConfig streamsConfig = new StreamsConfig(getProperties());
    Deserializer<String> stringDeserializer = Serdes.String().deserializer();
    Serializer<String> stringSerializer = Serdes.String().serializer();
    Serde<StockPerformance> stockPerformanceSerde = StreamsSerdes.StockPerformanceSerde();
    Serializer<StockPerformance> stockPerformanceSerializer = stockPerformanceSerde.serializer();
    Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();
    Deserializer<StockTransaction> stockTransactionDeserializer = stockTransactionSerde.deserializer();

    Topology topology = new Topology();
    String stocksStateStore = "stock-performance-store";
    double differentialThreshold = 0.02;

    KeyValueBytesStoreSupplier storeSupplier = Stores.inMemoryKeyValueStore(stocksStateStore);
    StoreBuilder<KeyValueStore<String, StockPerformance>> storeBuilder =
            Stores.keyValueStoreBuilder(storeSupplier, Serdes.String(), stockPerformanceSerde);

    topology.addSource("stocks-source", stringDeserializer, stockTransactionDeserializer, "stock-transactions")
            .addProcessor("stocks-processor", () -> new StockPerformanceProcessor(stocksStateStore, differentialThreshold), "stocks-source")
            .addStateStore(storeBuilder, "stocks-processor")
            .addSink("stocks-sink", "stock-performance", stringSerializer, stockPerformanceSerializer, "stocks-processor");

    topology.addProcessor("stocks-printer", new KStreamPrinter("StockPerformance"), "stocks-processor");

    KafkaStreams kafkaStreams = new KafkaStreams(topology, streamsConfig);
    MockDataProducer.produceStockTransactionsWithKeyFunction(50, 50, 25, StockTransaction::getSymbol);
    System.out.println("Stock Analysis App Started");
    kafkaStreams.cleanUp();
    kafkaStreams.start();
    Thread.sleep(70000);
    System.out.println("Shutting down the Stock Analysis App now");
    kafkaStreams.close();
    MockDataProducer.shutdown();
}
Example #12
Source File: KafkaSpanStore.java From zipkin-storage-kafka with Apache License 2.0
GetTraceManyCall(KafkaStreams traceStoreStream, BiFunction<String, Integer, String> httpBaseUrl,
        String traceIds) {
    super(traceStoreStream, TRACES_STORE_NAME, httpBaseUrl, "/traceMany?traceIds=" + traceIds);
    this.traceStoreStream = traceStoreStream;
    this.httpBaseUrl = httpBaseUrl;
    this.traceIds = traceIds;
}
Example #13
Source File: FilterEvents.java From kafka-tutorials with Apache License 2.0
private void runRecipe(final String configPath) throws IOException {
    Properties envProps = this.loadEnvProperties(configPath);
    Properties streamProps = this.buildStreamsProperties(envProps);

    Topology topology = this.buildTopology(envProps, this.publicationSerde(envProps));
    this.createTopics(envProps);

    final KafkaStreams streams = new KafkaStreams(topology, streamProps);
    final CountDownLatch latch = new CountDownLatch(1);

    // Attach shutdown handler to catch Control-C.
    Runtime.getRuntime().addShutdownHook(new Thread("streams-shutdown-hook") {
        @Override
        public void run() {
            streams.close();
            latch.countDown();
        }
    });

    try {
        streams.start();
        latch.await();
    } catch (Throwable e) {
        System.exit(1);
    }
    System.exit(0);
}
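The CountDownLatch idiom here, which recurs in Examples #20, #27, and #28, exists because streams.start() returns immediately: the latch keeps the main thread parked in await() while the stream threads do the work, until the shutdown hook calls close() and counts the latch down.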
Example #14
Source File: KafkaStreamsPropertiesTest.java From quarkus with Apache License 2.0
@Test
public void testProperties() throws Exception {
    // reflection hack ... no other way to get raw props ...
    Field configField = KafkaStreams.class.getDeclaredField("config");
    configField.setAccessible(true);
    StreamsConfig config = (StreamsConfig) configField.get(streams);

    Map<String, Object> originals = config.originals();
    Assertions.assertEquals("20", originals.get(SaslConfigs.SASL_LOGIN_REFRESH_BUFFER_SECONDS));
    Assertions.assertEquals("http://localhost:8080", originals.get("apicurio.registry.url"));
    Assertions.assertEquals("dummy", originals.get("some-property"));
}
Example #15
Source File: PhysicalPlanBuilder.java From ksql-fork-with-deep-learning-function with Apache License 2.0
private KafkaStreams buildStreams(
    final OutputNode outputNode,
    final StreamsBuilder builder,
    final String applicationId,
    final KsqlConfig ksqlConfig,
    final Map<String, Object> overriddenProperties
) {
    Map<String, Object> newStreamsProperties = ksqlConfig.getKsqlStreamConfigProps();
    newStreamsProperties.putAll(overriddenProperties);
    newStreamsProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
    newStreamsProperties.put(
        ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,
        ksqlConfig.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)
    );
    newStreamsProperties.put(
        StreamsConfig.COMMIT_INTERVAL_MS_CONFIG,
        ksqlConfig.get(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG)
    );
    newStreamsProperties.put(
        StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG,
        ksqlConfig.get(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG)
    );

    final Integer timestampIndex = (Integer) ksqlConfig.get(KsqlConfig.KSQL_TIMESTAMP_COLUMN_INDEX);
    if (timestampIndex != null && timestampIndex >= 0) {
        outputNode.getSourceTimestampExtractionPolicy().applyTo(ksqlConfig, newStreamsProperties);
    }

    updateListProperty(
        newStreamsProperties,
        StreamsConfig.consumerPrefix(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG),
        ConsumerCollector.class.getCanonicalName()
    );
    updateListProperty(
        newStreamsProperties,
        StreamsConfig.producerPrefix(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG),
        ProducerCollector.class.getCanonicalName()
    );

    return kafkaStreamsBuilder.buildKafkaStreams(builder, new StreamsConfig(newStreamsProperties));
}
Example #16
Source File: KafkaStreamsLiveTest.java From tutorials with MIT License
@Test
@Ignore("needs a Kafka broker running locally")
public void shouldTestKafkaStreams() throws InterruptedException {
    // given
    String inputTopic = "inputTopic";

    Properties streamsConfiguration = new Properties();
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-live-test");
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000);
    streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    // Use a temporary directory for storing state, which will be automatically removed after the test.
    streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath());

    // when
    KStreamBuilder builder = new KStreamBuilder();
    KStream<String, String> textLines = builder.stream(inputTopic);
    Pattern pattern = Pattern.compile("\\W+", Pattern.UNICODE_CHARACTER_CLASS);

    KTable<String, Long> wordCounts = textLines
        .flatMapValues(value -> Arrays.asList(pattern.split(value.toLowerCase())))
        .groupBy((key, word) -> word)
        .count();

    wordCounts.foreach((word, count) -> System.out.println("word: " + word + " -> " + count));

    String outputTopic = "outputTopic";
    final Serde<String> stringSerde = Serdes.String();
    final Serde<Long> longSerde = Serdes.Long();
    wordCounts.to(stringSerde, longSerde, outputTopic);

    KafkaStreams streams = new KafkaStreams(builder, streamsConfiguration);
    streams.start();

    // then
    Thread.sleep(30000);
    streams.close();
}
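Like Example #8, this test targets the pre-1.0 API: KStreamBuilder, the KafkaStreams(KStreamBuilder, Properties) constructor, and KTable.to(keySerde, valSerde, topic) were all removed in later releases; the sketch after Example #8 shows the modern counterparts of those calls.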
Example #17
Source File: KafkaStreamsService.java From emodb with Apache License 2.0
@Override
protected final void doStart() {
    _streams = new KafkaStreams(topology(), _streamsConfiguration);
    _streams.setUncaughtExceptionHandler((thread, throwable) -> {
        _uncaughtException.compareAndSet(null, throwable);
        _fatalErrorEncountered.set(true);
        _streamsExceptionMeter.mark();
        _streams.close(Duration.ofMillis(1));
    });
    _streams.setStateListener(this);
    _streams.start();
    notifyStarted();
}
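This wires the Thread.UncaughtExceptionHandler overload, which was the only option before Kafka Streams 2.8. Newer releases add a dedicated handler that returns a recovery action, so the application no longer has to track fatal state itself; a sketch of that form, assuming Kafka Streams 2.8+ on the classpath:

    // Kafka Streams 2.8+ style: return a recovery action rather than setting flags.
    // SHUTDOWN_CLIENT stops this instance; REPLACE_THREAD would restart the failed thread.
    streams.setUncaughtExceptionHandler(exception ->
            StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.SHUTDOWN_CLIENT);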
Example #18
Source File: CommandProcessor.java From cqrs-manager-for-distributed-reactive-services with Apache License 2.0
public void start() {
    KStreamBuilder builder = new KStreamBuilder();

    Serde<UUID> keySerde = new FressianSerde();
    Serde<Map> valSerde = new FressianSerde();

    KStream<UUID, Map> commands = builder.stream(keySerde, valSerde, commandsTopic);
    KStream<UUID, Map> customerEvents = commands
        .filter((id, command) -> command.get(new Keyword("action")).equals(new Keyword("create-customer")))
        .map((id, command) -> {
            logger.debug("Command received");
            Map userEvent = new HashMap(command);
            userEvent.put(new Keyword("action"), new Keyword("customer-created"));
            userEvent.put(new Keyword("parent"), id);
            Map userValue = (Map) userEvent.get(new Keyword("data"));
            userValue.put(new Keyword("id"), UUID.randomUUID());
            return new KeyValue<>(UUID.randomUUID(), userEvent);
        }).through(keySerde, valSerde, eventsTopic);

    KStream<UUID, Map> customers = customerEvents
        .map((id, event) -> {
            Map customer = (Map) event.get(new Keyword("data"));
            UUID customerId = (UUID) customer.get(new Keyword("id"));
            return new KeyValue<UUID, Map>(customerId, customer);
        });

    customers.through(keySerde, valSerde, customersTopic);

    StateStoreSupplier store = Stores.create("Customers")
        .withKeys(keySerde)
        .withValues(valSerde)
        .persistent()
        .build();
    builder.addStateStore(store);
    customers.process(customerStore, "Customers");

    this.kafkaStreams = new KafkaStreams(builder, kafkaStreamsConfig);
    this.kafkaStreams.start();
}
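This is another pre-1.0 topology: KStreamBuilder, the serde-argument overloads of stream()/through(), and the Stores.create(...).withKeys(...).withValues(...).persistent().build() supplier were replaced after 1.0 by StreamsBuilder, the Consumed/Produced parameter objects, and the Stores.keyValueStoreBuilder(...) API used in Examples #11 and #22.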
Example #19
Source File: KafkaSpanStore.java From zipkin-storage-kafka with Apache License 2.0
GetRemoteServiceNamesCall(KafkaStreams traceStoreStream, String serviceName,
        BiFunction<String, Integer, String> httpBaseUrl) {
    super(traceStoreStream, REMOTE_SERVICE_NAMES_STORE_NAME, httpBaseUrl,
        "/serviceNames/" + serviceName + "/remoteServiceNames", serviceName);
    this.traceStoreStream = traceStoreStream;
    this.serviceName = serviceName;
    this.httpBaseUrl = httpBaseUrl;
}
Example #20
Source File: DynamicOutputTopic.java From kafka-tutorials with Apache License 2.0
public static void main(String[] args) throws Exception {
    if (args.length < 1) {
        throw new IllegalArgumentException("This program takes one argument: the path to an environment configuration file.");
    }

    final DynamicOutputTopic instance = new DynamicOutputTopic();
    final Properties envProps = instance.loadEnvProperties(args[0]);
    final Properties streamProps = instance.buildStreamsProperties(envProps);
    final Topology topology = instance.buildTopology(envProps);

    instance.createTopics(envProps);

    final KafkaStreams streams = new KafkaStreams(topology, streamProps);
    final CountDownLatch latch = new CountDownLatch(1);

    // Attach shutdown handler to catch Control-C.
    Runtime.getRuntime().addShutdownHook(new Thread("streams-shutdown-hook") {
        @Override
        public void run() {
            streams.close(Duration.ofSeconds(5));
            latch.countDown();
        }
    });

    try {
        streams.start();
        latch.await();
    } catch (Throwable e) {
        System.exit(1);
    }
    System.exit(0);
}
Example #21
Source File: KafkaSpanStore.java From zipkin-storage-kafka with Apache License 2.0
GetTraceCall(KafkaStreams traceStoreStream, BiFunction<String, Integer, String> httpBaseUrl,
        String traceId) {
    super(traceStoreStream, TRACES_STORE_NAME, httpBaseUrl, String.format("/traces/%s", traceId),
        traceId);
    this.traceStoreStream = traceStoreStream;
    this.httpBaseUrl = httpBaseUrl;
    this.traceId = traceId;
}
Example #22
Source File: StockPerformanceStreamsAndProcessorApplication.java From kafka-streams-in-action with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamsConfig streamsConfig = new StreamsConfig(getProperties());
    Serde<String> stringSerde = Serdes.String();
    Serde<StockPerformance> stockPerformanceSerde = StreamsSerdes.StockPerformanceSerde();
    Serde<StockTransaction> stockTransactionSerde = StreamsSerdes.StockTransactionSerde();

    StreamsBuilder builder = new StreamsBuilder();
    String stocksStateStore = "stock-performance-store";
    double differentialThreshold = 0.02;

    KeyValueBytesStoreSupplier storeSupplier = Stores.lruMap(stocksStateStore, 100);
    StoreBuilder<KeyValueStore<String, StockPerformance>> storeBuilder =
            Stores.keyValueStoreBuilder(storeSupplier, Serdes.String(), stockPerformanceSerde);

    builder.addStateStore(storeBuilder);

    builder.stream("stock-transactions", Consumed.with(stringSerde, stockTransactionSerde))
           .transform(() -> new StockPerformanceTransformer(stocksStateStore, differentialThreshold), stocksStateStore)
           .print(Printed.<String, StockPerformance>toSysOut().withLabel("StockPerformance"));
           // Uncomment the line below and comment out the print() line above for writing to a topic
           //.to(stringSerde, stockPerformanceSerde, "stock-performance");

    KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), streamsConfig);
    MockDataProducer.produceStockTransactionsWithKeyFunction(50, 50, 25, StockTransaction::getSymbol);
    System.out.println("Stock Analysis KStream/Process API App Started");
    kafkaStreams.cleanUp();
    kafkaStreams.start();
    Thread.sleep(70000);
    System.out.println("Shutting down the Stock KStream/Process API Analysis App now");
    kafkaStreams.close();
    MockDataProducer.shutdown();
}
Example #23
Source File: ITKafkaStreamsTracing.java From brave with Apache License 2.0
@Test
public void should_create_spans_from_stream_with_tracing_mapValues_withKey() {
    String inputTopic = testName.getMethodName() + "-input";
    String outputTopic = testName.getMethodName() + "-output";

    StreamsBuilder builder = new StreamsBuilder();
    builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()))
        .transformValues(kafkaStreamsTracing.mapValues("mapValue-1", (key, value) -> {
            try {
                Thread.sleep(100L);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            return value;
        }))
        .to(outputTopic, Produced.with(Serdes.String(), Serdes.String()));
    Topology topology = builder.build();

    KafkaStreams streams = buildKafkaStreams(topology);

    send(new ProducerRecord<>(inputTopic, TEST_KEY, TEST_VALUE));

    waitForStreamToRun(streams);

    MutableSpan spanInput = testSpanHandler.takeRemoteSpan(CONSUMER);
    assertThat(spanInput.tags()).containsEntry("kafka.topic", inputTopic);

    MutableSpan spanProcessor = testSpanHandler.takeLocalSpan();
    assertChildOf(spanProcessor, spanInput);

    MutableSpan spanOutput = testSpanHandler.takeRemoteSpan(PRODUCER);
    assertThat(spanOutput.tags()).containsEntry("kafka.topic", outputTopic);
    assertChildOf(spanOutput, spanProcessor);

    streams.close();
    streams.cleanUp();
}
Example #24
Source File: PurchaseProcessorDriver.java From kafka-streams with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamsConfig streamingConfig = new StreamsConfig(getProperties());

    JsonDeserializer<Purchase> purchaseJsonDeserializer = new JsonDeserializer<>(Purchase.class);
    JsonSerializer<Purchase> purchaseJsonSerializer = new JsonSerializer<>();
    JsonSerializer<RewardAccumulator> rewardAccumulatorJsonSerializer = new JsonSerializer<>();
    JsonSerializer<PurchasePattern> purchasePatternJsonSerializer = new JsonSerializer<>();
    StringDeserializer stringDeserializer = new StringDeserializer();
    StringSerializer stringSerializer = new StringSerializer();

    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.addSource("SOURCE", stringDeserializer, purchaseJsonDeserializer, "src-topic")
        .addProcessor("PROCESS", CreditCardAnonymizer::new, "SOURCE")
        .addProcessor("PROCESS2", PurchasePatterns::new, "PROCESS")
        .addProcessor("PROCESS3", CustomerRewards::new, "PROCESS")
        .addSink("SINK", "patterns", stringSerializer, purchasePatternJsonSerializer, "PROCESS2")
        .addSink("SINK2", "rewards", stringSerializer, rewardAccumulatorJsonSerializer, "PROCESS3")
        .addSink("SINK3", "purchases", stringSerializer, purchaseJsonSerializer, "PROCESS");

    System.out.println("Starting PurchaseProcessor Example");
    KafkaStreams streaming = new KafkaStreams(topologyBuilder, streamingConfig);
    streaming.start();
    System.out.println("Now started PurchaseProcessor Example");
}
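TopologyBuilder is the pre-1.0 Processor API entry point; Examples #11 and #22 show the same addSource/addProcessor/addSink wiring against its replacement, Topology. Note also that this main() starts the topology and simply returns, leaving shutdown to the JVM.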
Example #25
Source File: OrderService.java From qcon-microservices with Apache License 2.0
private KafkaStreams startKStreams(String configFile, String stateDir) throws IOException {
    KafkaStreams streams = new KafkaStreams(
        createOrdersMaterializedView().build(),
        configStreams(configFile, stateDir, SERVICE_APP_ID));
    streams.start();
    return streams;
}
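One subtlety this helper glosses over: start() returns before the instance reaches the RUNNING state, and interactive queries against the materialized view fail with InvalidStateStoreException until then. A sketch of one way to block until the instance is ready (illustrative, not part of the qcon-microservices source):

    // Illustrative: register a state listener before start(), then wait for RUNNING.
    CountDownLatch running = new CountDownLatch(1);
    streams.setStateListener((newState, oldState) -> {
        if (newState == KafkaStreams.State.RUNNING) {
            running.countDown();
        }
    });
    streams.start();
    running.await();  // declares/handles InterruptedException as appropriate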
Example #26
Source File: ITKafkaStreamsTracing.java From brave with Apache License 2.0
@Test
public void should_create_spans_from_stream_with_tracing_peek() {
    String inputTopic = testName.getMethodName() + "-input";
    String outputTopic = testName.getMethodName() + "-output";

    long now = System.currentTimeMillis();

    StreamsBuilder builder = new StreamsBuilder();
    builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()))
        .transformValues(kafkaStreamsTracing.peek("peek-1", (key, value) -> {
            try {
                Thread.sleep(100L);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            tracing.tracer().currentSpan().annotate(now, "test");
        }))
        .to(outputTopic, Produced.with(Serdes.String(), Serdes.String()));
    Topology topology = builder.build();

    KafkaStreams streams = buildKafkaStreams(topology);

    send(new ProducerRecord<>(inputTopic, TEST_KEY, TEST_VALUE));

    waitForStreamToRun(streams);

    MutableSpan spanInput = testSpanHandler.takeRemoteSpan(CONSUMER);
    assertThat(spanInput.tags()).containsEntry("kafka.topic", inputTopic);

    MutableSpan spanProcessor = testSpanHandler.takeLocalSpan();
    assertChildOf(spanProcessor, spanInput);
    assertThat(spanProcessor.annotations()).contains(entry(now, "test"));

    MutableSpan spanOutput = testSpanHandler.takeRemoteSpan(PRODUCER);
    assertThat(spanOutput.tags()).containsEntry("kafka.topic", outputTopic);
    assertChildOf(spanOutput, spanProcessor);

    streams.close();
    streams.cleanUp();
}
Example #27
Source File: RunningAverage.java From kafka-tutorials with Apache License 2.0
private void run() {
    Properties envProps = this.loadEnvProperties();
    Properties streamProps = this.buildStreamsProperties(envProps);
    Topology topology = this.buildTopology(new StreamsBuilder(), envProps);

    this.createTopics(envProps);

    final KafkaStreams streams = new KafkaStreams(topology, streamProps);
    final CountDownLatch latch = new CountDownLatch(1);

    // Attach shutdown handler to catch Control-C.
    Runtime.getRuntime().addShutdownHook(new Thread("streams-shutdown-hook") {
        @Override
        public void run() {
            streams.close(Duration.ofSeconds(5));
            latch.countDown();
        }
    });

    try {
        streams.cleanUp();
        streams.start();
        latch.await();
    } catch (Throwable e) {
        System.exit(1);
    }
    System.exit(0);
}
Example #28
Source File: AggregatingSum.java From kafka-tutorials with Apache License 2.0
private void runRecipe(final String configPath) throws IOException {
    Properties envProps = this.loadEnvProperties(configPath);
    Properties streamProps = this.buildStreamsProperties(envProps);

    Topology topology = this.buildTopology(envProps, this.ticketSaleSerde(envProps));
    this.createTopics(envProps);

    final KafkaStreams streams = new KafkaStreams(topology, streamProps);
    final CountDownLatch latch = new CountDownLatch(1);

    // Attach shutdown handler to catch Control-C.
    Runtime.getRuntime().addShutdownHook(new Thread("streams-shutdown-hook") {
        @Override
        public void run() {
            streams.close();
            latch.countDown();
        }
    });

    try {
        streams.start();
        latch.await();
    } catch (Throwable e) {
        System.exit(1);
    }
    System.exit(0);
}
Example #29
Source File: ProcessStreamService.java From SkaETL with Apache License 2.0
public void createStreamSystemOut(String inputTopic) {
    StreamsBuilder builder = new StreamsBuilder();
    builder.stream(inputTopic, Consumed.with(Serdes.String(), GenericSerdes.jsonNodeSerde()))
           .process(() -> new LoggingProcessor<>());

    KafkaStreams streams = new KafkaStreams(builder.build(),
        KafkaUtils.createKStreamProperties(getProcessConsumer().getIdProcess() + ProcessConstants.SYSOUT_PROCESS, getBootstrapServer()));
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
    streams.start();
    addStreams(streams);
}
Example #30
Source File: KafkaStreamsMetricsTest.java From micrometer with Apache License 2.0
@Test
void shouldCreateMetersWithTags() {
    try (KafkaStreams kafkaStreams = createStreams()) {
        metrics = new KafkaStreamsMetrics(kafkaStreams, tags);
        MeterRegistry registry = new SimpleMeterRegistry();

        metrics.bindTo(registry);

        assertThat(registry.getMeters())
            .hasSizeGreaterThan(0)
            .extracting(meter -> meter.getId().getTag("app"))
            .allMatch(s -> s.equals("myapp"));
    }
}