org.apache.kafka.streams.Topology Java Examples

The following examples show how to use org.apache.kafka.streams.Topology. Each example is taken from an open-source project; the Source File line above each snippet names the file, the project it comes from, and its license.
Example #1
Source File: UserClicksPerMinute.java    From fluent-kafka-streams-tests with MIT License
public Topology getTopology() {
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<Integer, ClickEvent> clickEvents = builder.stream(this.inputTopic);

    final KTable<Windowed<Integer>, Long> counts = clickEvents
            .groupByKey()
            .windowedBy(TimeWindows.of(Duration.ofMinutes(1)))
            .count();

    counts.toStream()
            .map((key, value) -> KeyValue.pair(
                    key.key(),
                    new ClickOutput(key.key(), value, key.window().start())))
            .to(this.outputTopic, Produced.with(Serdes.Integer(), new JsonSerde<>(ClickOutput.class)));

    return builder.build();
}
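Because builder.stream(this.inputTopic) names no serdes, this topology falls back on the application's default serdes for the Integer keys and JSON click events. A minimal wiring sketch, assuming the project's JsonSerde can serve as the default value serde (the project's actual setup may differ):

Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "user-clicks-per-minute");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
// Both default serde classes are assumptions; the project may configure them elsewhere.
props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.IntegerSerde.class.getName());
props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, JsonSerde.class.getName());

// app is a configured UserClicksPerMinute instance
KafkaStreams streams = new KafkaStreams(app.getTopology(), props);
streams.start();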
 
Example #2
Source File: FkJoinTableToTable.java    From kafka-tutorials with Apache License 2.0
public Topology buildTopology(Properties envProps) {
    final StreamsBuilder builder = new StreamsBuilder();
    final String albumTopic = envProps.getProperty("album.topic.name");
    final String userTrackPurchaseTopic = envProps.getProperty("tracks.purchase.topic.name");
    final String musicInterestTopic = envProps.getProperty("music.interest.topic.name");

    final Serde<Long> longSerde = getPrimitiveAvroSerde(envProps, true);
    final Serde<MusicInterest> musicInterestSerde = getSpecificAvroSerde(envProps);
    final Serde<Album> albumSerde = getSpecificAvroSerde(envProps);
    final Serde<TrackPurchase> trackPurchaseSerde = getSpecificAvroSerde(envProps);

    final KTable<Long, Album> albums = builder.table(albumTopic, Consumed.with(longSerde, albumSerde));

    final KTable<Long, TrackPurchase> trackPurchases = builder.table(userTrackPurchaseTopic, Consumed.with(longSerde, trackPurchaseSerde));
    final MusicInterestJoiner trackJoiner = new MusicInterestJoiner();

    final KTable<Long, MusicInterest> musicInterestTable = trackPurchases.join(albums,
                                                                         TrackPurchase::getAlbumId,
                                                                         trackJoiner);

    musicInterestTable.toStream().to(musicInterestTopic, Produced.with(longSerde, musicInterestSerde));

    return builder.build();
}
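The getPrimitiveAvroSerde and getSpecificAvroSerde helpers are defined elsewhere in the tutorial class; in essence they configure a Confluent serde against the Schema Registry. A rough sketch of the specific-record variant, assuming the tutorial's schema.registry.url property key:

static <T extends SpecificRecord> SpecificAvroSerde<T> getSpecificAvroSerde(Properties envProps) {
    final SpecificAvroSerde<T> serde = new SpecificAvroSerde<>();
    final Map<String, String> config = new HashMap<>();
    // Property key mirrors the tutorial's configuration files (an assumption).
    config.put("schema.registry.url", envProps.getProperty("schema.registry.url"));
    serde.configure(config, false); // false: the serde is used for record values, not keys
    return serde;
}

The boolean argument passed to getPrimitiveAvroSerde above presumably plays the isKey role in the same configure call.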
 
Example #3
Source File: KStreamsTopologyDescriptionParserTest.java    From netbeans-mmd-plugin with Apache License 2.0
@Test
public void testFromTopology() {
  final Topology topology = new Topology();
  topology.addSource("SomeSource", "topic1", "topic2");
  topology.addProcessor("Processor1", () -> new FakeProcessor(), "SomeSource");
  topology.addProcessor("Processor2", () -> new FakeProcessor(), "Processor1");
  topology.addProcessor("Processor3", () -> new FakeProcessor(), "Processor2");
  topology.addSink("TheSink", "FinalTopic", "Processor3");

  final String src = topology.describe().toString();
  System.out.println(src);

  final KStreamsTopologyDescriptionParser graph = new KStreamsTopologyDescriptionParser(src);
  System.out.println(graph);

  assertEquals(5, graph.size());
}
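FakeProcessor is a fixture from the test's package and is not shown; for the topology shape being asserted, any pass-through processor works. A minimal sketch against the same (classic) Processor API:

public class FakeProcessor extends AbstractProcessor<String, String> {
    @Override
    public void process(String key, String value) {
        // Pass records through unchanged; only the topology structure matters here.
        context().forward(key, value);
    }
}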
 
Example #4
Source File: WordCount.java    From fluent-kafka-streams-tests with MIT License
public Topology getTopology() {
    final Serde<String> stringSerde = Serdes.String();
    final Serde<Long> longSerde = Serdes.Long();

    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> textLines = builder.stream(this.inputTopic);

    final Pattern pattern = Pattern.compile("\\W+", Pattern.UNICODE_CHARACTER_CLASS);
    final KTable<String, Long> wordCounts = textLines
            .flatMapValues(value -> Arrays.asList(pattern.split(value.toLowerCase())))
            .groupBy((key, word) -> word)
            .count();

    wordCounts.toStream().to(this.outputTopic, Produced.with(stringSerde, longSerde));
    return builder.build();
}
 
Example #5
Source File: StockPerformanceStreamsProcessorTopologyTest.java    From kafka-streams-in-action with Apache License 2.0
@BeforeEach
public void setUp() {
    Properties props = new Properties();
    props.put(StreamsConfig.CLIENT_ID_CONFIG, "ks-papi-stock-analysis-client");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "ks-papi-stock-analysis-group");
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "ks-stock-analysis-appid");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(StreamsConfig.REPLICATION_FACTOR_CONFIG, 1);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());


    StreamsConfig streamsConfig = new StreamsConfig(props);

    Topology topology = StockPerformanceStreamsProcessorTopology.build();

    topologyTestDriver = new ProcessorTopologyTestDriver(streamsConfig, topology);
}
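Note that ProcessorTopologyTestDriver is an internal Kafka test class and has since been superseded; with the public kafka-streams-test-utils artifact the last two steps collapse into a single call, roughly:

// Sketch: the public test driver takes the raw Properties directly,
// so the StreamsConfig wrapper above is not needed.
Topology topology = StockPerformanceStreamsProcessorTopology.build();
TopologyTestDriver topologyTestDriver = new TopologyTestDriver(topology, props);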
 
Example #6
Source File: CampaignPerformanceApp.java    From Kafka-Streams-Real-time-Stream-Processing with The Unlicense
public static void main(String[] args) {
    Properties properties = new Properties();
    properties.put(StreamsConfig.APPLICATION_ID_CONFIG,
        AppConfigs.applicationID);
    properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG,
        AppConfigs.bootstrapServers);
    properties.put(StreamsConfig.STATE_DIR_CONFIG,
        AppConfigs.stateStoreLocation);

    StreamsBuilder builder = new StreamsBuilder();
    AppTopology.withBuilder(builder);

    builder.stream(
        AppConfigs.outputTopic,
        Consumed.with(AppSerdes.String(), AppSerdes.CampaignPerformance())
    ).foreach((k, v) -> logger.info("outside = " + v));

    Topology topology = builder.build();

    KafkaStreams streams = new KafkaStreams(topology, properties);
    streams.start();
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
 
Example #7
Source File: AggregatingSum.java    From kafka-tutorials with Apache License 2.0
public Topology buildTopology(Properties envProps,
                              final SpecificAvroSerde<TicketSale> ticketSaleSerde) {
  final StreamsBuilder builder = new StreamsBuilder();

  final String inputTopic = envProps.getProperty("input.topic.name");
  final String outputTopic = envProps.getProperty("output.topic.name");

  builder.stream(inputTopic, Consumed.with(Serdes.String(), ticketSaleSerde))
      // Set key to title and value to ticket value
      .map((k, v) -> new KeyValue<>((String) v.getTitle(), (Integer) v.getTicketTotalValue()))
      // Group by title
      .groupByKey(Grouped.with(Serdes.String(), Serdes.Integer()))
      // Apply SUM aggregation
      .reduce(Integer::sum)
      // Write to stream specified by outputTopic
      .toStream().to(outputTopic, Produced.with(Serdes.String(), Serdes.Integer()));

  return builder.build();
}
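Here reduce(Integer::sum) suffices because the input and result types are both Integer; aggregate expresses the same sum with an explicit initializer and additionally permits a result type different from the input. A sketch of the equivalent call chain:

builder.stream(inputTopic, Consumed.with(Serdes.String(), ticketSaleSerde))
    .map((k, v) -> new KeyValue<>((String) v.getTitle(), (Integer) v.getTicketTotalValue()))
    .groupByKey(Grouped.with(Serdes.String(), Serdes.Integer()))
    // The initializer seeds the running total; the aggregator adds each ticket value.
    .aggregate(() -> 0,
               (title, ticketValue, runningTotal) -> runningTotal + ticketValue,
               Materialized.with(Serdes.String(), Serdes.Integer()))
    .toStream().to(outputTopic, Produced.with(Serdes.String(), Serdes.Integer()));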
 
Example #8
Source File: KafkaStreamsStreamListenerSetupMethodOrchestrator.java    From spring-cloud-stream-binder-kafka with Apache License 2.0
private KStream<?, ?> getkStream(String inboundName,
								KafkaStreamsStateStoreProperties storeSpec,
								BindingProperties bindingProperties,
								KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties, StreamsBuilder streamsBuilder,
								Serde<?> keySerde, Serde<?> valueSerde,
								Topology.AutoOffsetReset autoOffsetReset, boolean firstBuild) {
	if (storeSpec != null) {
		StoreBuilder storeBuilder = buildStateStore(storeSpec);
		streamsBuilder.addStateStore(storeBuilder);
		if (LOG.isInfoEnabled()) {
			LOG.info("state store " + storeBuilder.name() + " added to topology");
		}
	}
	return getKStream(inboundName, bindingProperties, kafkaStreamsConsumerProperties, streamsBuilder,
			keySerde, valueSerde, autoOffsetReset, firstBuild);
}
 
Example #9
Source File: AbstractKafkaStreamsBinderProcessor.java    From spring-cloud-stream-binder-kafka with Apache License 2.0
protected Topology.AutoOffsetReset getAutoOffsetReset(String inboundName, KafkaStreamsConsumerProperties extendedConsumerProperties) {
	final KafkaConsumerProperties.StartOffset startOffset = extendedConsumerProperties
			.getStartOffset();
	Topology.AutoOffsetReset autoOffsetReset = null;
	if (startOffset != null) {
		switch (startOffset) {
			case earliest:
				autoOffsetReset = Topology.AutoOffsetReset.EARLIEST;
				break;
			case latest:
				autoOffsetReset = Topology.AutoOffsetReset.LATEST;
				break;
			default:
				break;
		}
	}
	if (extendedConsumerProperties.isResetOffsets()) {
		AbstractKafkaStreamsBinderProcessor.LOG.warn("Detected resetOffsets configured on binding "
				+ inboundName + ". "
				+ "Setting resetOffsets in Kafka Streams binder does not have any effect.");
	}
	return autoOffsetReset;
}
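The binder hands the resolved enum to the consuming side of the topology when it wires up the input bindings; outside the binder, the same value plugs straight into Consumed, for example (topic name and serdes hypothetical):

// Sketch: applying the resolved reset policy when building an input stream.
Topology.AutoOffsetReset reset = getAutoOffsetReset("input", extendedConsumerProperties);
KStream<String, String> stream = streamsBuilder.stream("input-topic",
        Consumed.with(Serdes.String(), Serdes.String())
                .withOffsetResetPolicy(reset)); // takes effect only when no committed offset exists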
 
Example #10
Source File: ITKafkaStreamsTracing.java    From brave with Apache License 2.0
@Test
public void should_create_span_from_stream_input_topic_using_kafka_client_supplier() {
  String inputTopic = testName.getMethodName() + "-input";

  StreamsBuilder builder = new StreamsBuilder();
  builder.stream(inputTopic).foreach((k, v) -> {
  });
  Topology topology = builder.build();

  KafkaStreams streams =
    new KafkaStreams(topology, streamsProperties(), kafkaStreamsTracing.kafkaClientSupplier());

  send(new ProducerRecord<>(inputTopic, TEST_KEY, TEST_VALUE));

  waitForStreamToRun(streams);

  MutableSpan spanInput = testSpanHandler.takeRemoteSpan(CONSUMER);
  assertThat(spanInput.tags()).containsEntry("kafka.topic", inputTopic);

  streams.close();
  streams.cleanUp();
}
 
Example #11
Source File: SerializationTutorialTest.java    From kafka-tutorials with Apache License 2.0
@Test
public void shouldChangeSerializationFormat() throws IOException {
  SerializationTutorial tutorial = new SerializationTutorial();
  final Properties envProps = tutorial.loadEnvProperties(TEST_CONFIG_FILE);
  final Properties streamsProps = tutorial.buildStreamsProperties(envProps);

  String inputTopicName = envProps.getProperty("input.avro.movies.topic.name");
  String outputTopicName = envProps.getProperty("output.proto.movies.topic.name");

  final SpecificAvroSerde<Movie> avroSerde = tutorial.movieAvroSerde(envProps);
  final KafkaProtobufSerde<MovieProtos.Movie> protobufSerde = tutorial.movieProtobufSerde(envProps);

  Topology topology = tutorial.buildTopology(envProps, avroSerde, protobufSerde);
  TopologyTestDriver testDriver = new TopologyTestDriver(topology, streamsProps);

  testDriver
      .createInputTopic(inputTopicName, Long().serializer(), avroSerde.serializer())
      .pipeValueList(this.prepareInputFixture());

  final List<MovieProtos.Movie> moviesProto =
      testDriver.createOutputTopic(outputTopicName, Long().deserializer(), protobufSerde.deserializer())
          .readValuesToList();

  assertThat(moviesProto, equalTo(expectedMovies()));
}
 
Example #12
Source File: KafkaStreamsTopologyManager.java    From quarkus with Apache License 2.0
@Inject
public KafkaStreamsTopologyManager(Instance<Topology> topology, Instance<KafkaClientSupplier> kafkaClientSupplier,
        Instance<StateListener> stateListener, Instance<StateRestoreListener> globalStateRestoreListener) {
    // No producer for Topology -> nothing to do
    if (topology.isUnsatisfied()) {
        LOGGER.debug("No Topology producer; Kafka Streams will not be started");
        this.executor = null;
        return;
    }

    this.executor = Executors.newSingleThreadExecutor();
    this.topology = topology;
    this.kafkaClientSupplier = kafkaClientSupplier;
    this.stateListener = stateListener;
    this.globalStateRestoreListener = globalStateRestoreListener;
}
 
Example #13
Source File: MergeStreams.java    From kafka-tutorials with Apache License 2.0
public static void main(String[] args) throws Exception {
    if (args.length < 1) {
        throw new IllegalArgumentException("This program takes one argument: the path to an environment configuration file.");
    }

    MergeStreams ms = new MergeStreams();
    Properties envProps = ms.loadEnvProperties(args[0]);
    Properties streamProps = ms.buildStreamsProperties(envProps);
    Topology topology = ms.buildTopology(envProps);

    ms.createTopics(envProps);

    final KafkaStreams streams = new KafkaStreams(topology, streamProps);
    final CountDownLatch latch = new CountDownLatch(1);

    // Attach shutdown handler to catch Control-C.
    Runtime.getRuntime().addShutdownHook(new Thread("streams-shutdown-hook") {
        @Override
        public void run() {
            streams.close();
            latch.countDown();
        }
    });

    try {
        streams.start();
        latch.await();
    } catch (Throwable e) {
        System.exit(1);
    }
    System.exit(0);
}
 
Example #14
Source File: MergeStreams.java    From kafka-tutorials with Apache License 2.0
public Topology buildTopology(Properties envProps) {
    final StreamsBuilder builder = new StreamsBuilder();

    final String rockTopic = envProps.getProperty("input.rock.topic.name");
    final String classicalTopic = envProps.getProperty("input.classical.topic.name");
    final String allGenresTopic = envProps.getProperty("output.topic.name");

    KStream<String, SongEvent> rockSongs = builder.stream(rockTopic);
    KStream<String, SongEvent> classicalSongs = builder.stream(classicalTopic);
    KStream<String, SongEvent> allSongs = rockSongs.merge(classicalSongs);

    allSongs.to(allGenresTopic);
    return builder.build();
}
 
Example #15
Source File: KafkaStreamsLiveTest.java    From tutorials with MIT License
@Test
@Ignore("it needs to have kafka broker running on local")
public void shouldTestKafkaStreams() throws InterruptedException {
    // given
    String inputTopic = "inputTopic";

    Properties streamsConfiguration = new Properties();
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-live-test");
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000);
    streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    // Use a temporary directory for storing state, which will be automatically removed after the test.
    streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath());

    // when
    StreamsBuilder builder = new StreamsBuilder();
    KStream<String, String> textLines = builder.stream(inputTopic);
    Pattern pattern = Pattern.compile("\\W+", Pattern.UNICODE_CHARACTER_CLASS);

    KTable<String, Long> wordCounts = textLines.flatMapValues(value -> Arrays.asList(pattern.split(value.toLowerCase()))).groupBy((key, word) -> word).count();

    wordCounts.toStream().foreach((word, count) -> System.out.println("word: " + word + " -> " + count));

    String outputTopic = "outputTopic";
    final Serde<String> stringSerde = Serdes.String();
    final Serde<Long> longSerde = Serdes.Long();
    wordCounts.toStream().to(outputTopic, Produced.with(stringSerde, longSerde));

    KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration);
    streams.start();

    // then
    Thread.sleep(30000);
    streams.close();
}
 
Example #16
Source File: DynamicOutputTopic.java    From kafka-tutorials with Apache License 2.0
public Topology buildTopology(Properties envProps) {
    final StreamsBuilder builder = new StreamsBuilder();
    final String orderInputTopic = envProps.getProperty("input.topic.name");
    final String orderOutputTopic = envProps.getProperty("output.topic.name");
    final String specialOrderOutput = envProps.getProperty("special.order.topic.name");

    final Serde<Long> longSerde = getPrimitiveAvroSerde(envProps, true);
    final Serde<Order> orderSerde = getSpecificAvroSerde(envProps);
    final Serde<CompletedOrder> completedOrderSerde = getSpecificAvroSerde(envProps);

    final ValueMapper<Order, CompletedOrder> orderProcessingSimulator = v -> {
       double amount = v.getQuantity() * FAKE_PRICE;
       return CompletedOrder.newBuilder().setAmount(amount).setId(v.getId() + "-" + v.getSku()).setName(v.getName()).build();
    };

    final TopicNameExtractor<Long, CompletedOrder> orderTopicNameExtractor = (key, completedOrder, recordContext) -> {
          final String compositeId = completedOrder.getId();
          final String skuPart = compositeId.substring(compositeId.indexOf('-') + 1, 5);
          final String outTopic;
          if (skuPart.equals("QUA")) {
              outTopic = specialOrderOutput;
          } else {
              outTopic = orderOutputTopic;
          }
          return outTopic;
    };

    final KStream<Long, Order> exampleStream = builder.stream(orderInputTopic, Consumed.with(longSerde, orderSerde));

    exampleStream.mapValues(orderProcessingSimulator).to(orderTopicNameExtractor, Produced.with(longSerde, completedOrderSerde));

    return builder.build();
}
 
Example #17
Source File: DomainUpdater.java    From football-events with MIT License
public void init(Topology topology) {
    addProcessor(topology, PlayerStartedCareer.class, (eventId, event, store) -> {
        Player player = new Player(event.getPlayerId(), event.getName());
        store.put(player.getId(), player);
    }, PLAYER_STORE);

    addStore(topology, Player.class, PLAYER_STORE, PlayerStartedCareer.class);
}
 
Example #18
Source File: TopViewApplication.java    From football-events with MIT License
@Bean
public KafkaStreams kafkaStreams() {
    StreamsBuilder streamsBuilder = new StreamsBuilder();
    new TopScorersBuilder(streamsBuilder).build();
    Topology topology = streamsBuilder.build();
    KafkaStreamsStarter starter = new KafkaStreamsStarter(kafkaBootstrapAddress, topology, APP_ID);
    starter.setKafkaTimeout(kafkaTimeout);
    starter.setStreamsStartupTimeout(streamsStartupTimeout);
    return starter.start();
}
 
Example #19
Source File: NamingChangelogAndRepartitionTopicsTest.java    From kafka-tutorials with Apache License 2.0
@Test
public void shouldUpdateNamesOfStoresAndRepartitionTopics() {

    envProps.put("add.filter", "false");
    Topology topology = namingChangelogAndRepartitionTopics.buildTopology(envProps);

    final String firstTopologyNoFilter = topology.describe().toString();

    // Names of auto-generated state store and repartition topic in original topology
    final String initialStateStoreName = "KSTREAM-AGGREGATE-STATE-STORE-0000000002";
    final String initialAggregationRepartition = "KSTREAM-AGGREGATE-STATE-STORE-0000000002-repartition";

    // Names of auto-generated state store and repartition topic after adding an operator upstream
    // notice that the number in the names is incremented by 1 reflecting the addition of a
    // new operation
    final String stateStoreNameWithFilterAdded = "KSTREAM-AGGREGATE-STATE-STORE-0000000003";
    final String aggregationRepartitionWithFilterAdded = "KSTREAM-AGGREGATE-STATE-STORE-0000000003-repartition";

    assertThat(firstTopologyNoFilter.indexOf(initialStateStoreName), greaterThan(0));
    assertThat(firstTopologyNoFilter.indexOf(initialAggregationRepartition), greaterThan(0));
    assertThat(firstTopologyNoFilter.indexOf(stateStoreNameWithFilterAdded), is(-1));
    assertThat(firstTopologyNoFilter.indexOf(aggregationRepartitionWithFilterAdded), is(-1));

    envProps.put("add.filter", "true");
    topology = namingChangelogAndRepartitionTopics.buildTopology(envProps);
    final String topologyWithFilter = topology.describe().toString();

    assertThat(topologyWithFilter.indexOf(initialStateStoreName), is(-1));
    assertThat(topologyWithFilter.indexOf(initialAggregationRepartition), is(-1));
    assertThat(topologyWithFilter.indexOf(stateStoreNameWithFilterAdded), greaterThan(0));
    assertThat(topologyWithFilter.indexOf(aggregationRepartitionWithFilterAdded), greaterThan(0));
}
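The remedy this tutorial drives at is to name the store and grouping explicitly, so that adding or removing an upstream operator no longer shifts the generated numbers; a sketch of the key calls (topic and store names hypothetical):

// Explicit names keep the changelog and repartition topics stable across topology changes.
final KStream<String, Long> stream = builder.stream("input", Consumed.with(Serdes.String(), Serdes.Long()));
stream.selectKey((k, v) -> k.toUpperCase())
      .groupByKey(Grouped.as("grouping"))             // repartition topic: <appId>-grouping-repartition
      .count(Materialized.as("the-counting-store"))   // changelog: <appId>-the-counting-store-changelog
      .toStream()
      .to("output", Produced.with(Serdes.String(), Serdes.Long()));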
 
Example #20
Source File: PlayerCommandConnectorTest.java    From football-events with MIT License
@Before
public void setUp() {
    tester = new StreamsTester(getClass().getName());

    StreamsBuilder streamsBuilder = new StreamsBuilder();

    producer = new PlayerCommandConnector(new EventPublisher(null, getClass().getSimpleName(), 1));
    producer.build(streamsBuilder);

    Topology topology = streamsBuilder.build();
    tester.setUp(topology);
}
 
Example #21
Source File: FilterEvents.java    From kafka-tutorials with Apache License 2.0
public Topology buildTopology(Properties envProps,
                              final SpecificAvroSerde<Publication> publicationSerde) {
  final StreamsBuilder builder = new StreamsBuilder();

  final String inputTopic = envProps.getProperty("input.topic.name");
  final String outputTopic = envProps.getProperty("output.topic.name");

  builder.stream(inputTopic, Consumed.with(Serdes.String(), publicationSerde))
      .filter((name, publication) -> "George R. R. Martin".equals(publication.getName()))
      .to(outputTopic, Produced.with(Serdes.String(), publicationSerde));

  return builder.build();
}
 
Example #22
Source File: ITKafkaStreamsTracing.java    From brave with Apache License 2.0
@Test
public void should_create_spans_from_stream_with_tracing_processor() {
  ProcessorSupplier<String, String> processorSupplier =
    kafkaStreamsTracing.processor(
      "forward-1", () ->
        new AbstractProcessor<String, String>() {
          @Override
          public void process(String key, String value) {
            try {
              Thread.sleep(100L);
            } catch (InterruptedException e) {
              e.printStackTrace();
            }
          }
        });

  String inputTopic = testName.getMethodName() + "-input";

  StreamsBuilder builder = new StreamsBuilder();
  builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()))
    .process(processorSupplier);
  Topology topology = builder.build();

  KafkaStreams streams = buildKafkaStreams(topology);

  send(new ProducerRecord<>(inputTopic, TEST_KEY, TEST_VALUE));

  waitForStreamToRun(streams);

  MutableSpan spanInput = testSpanHandler.takeRemoteSpan(CONSUMER);
  assertThat(spanInput.tags()).containsEntry("kafka.topic", inputTopic);

  MutableSpan spanProcessor = testSpanHandler.takeLocalSpan();
  assertChildOf(spanProcessor, spanInput);

  streams.close();
  streams.cleanUp();
}
 
Example #23
Source File: ITKafkaStreamsTracing.java    From brave with Apache License 2.0
@Test
public void should_create_spans_from_stream_with_tracing_mark_as_not_filtered_predicate_true() {
  String inputTopic = testName.getMethodName() + "-input";
  String outputTopic = testName.getMethodName() + "-output";

  StreamsBuilder builder = new StreamsBuilder();
  builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()))
    .transformValues(kafkaStreamsTracing.markAsNotFiltered("filterNot-1", (key, value) -> true))
    .filterNot((k, v) -> Objects.isNull(v))
    .to(outputTopic, Produced.with(Serdes.String(), Serdes.String()));
  Topology topology = builder.build();

  KafkaStreams streams = buildKafkaStreams(topology);

  send(new ProducerRecord<>(inputTopic, TEST_KEY, TEST_VALUE));

  waitForStreamToRun(streams);

  MutableSpan spanInput = testSpanHandler.takeRemoteSpan(CONSUMER);
  assertThat(spanInput.tags()).containsEntry("kafka.topic", inputTopic);

  MutableSpan spanProcessor = testSpanHandler.takeLocalSpan();
  assertChildOf(spanProcessor, spanInput);
  assertThat(spanProcessor.tags()).containsEntry(KAFKA_STREAMS_FILTERED_TAG, "true");

  // the filterNot transformer returns true so record is dropped

  streams.close();
  streams.cleanUp();
}
 
Example #24
Source File: AggregatingCount.java    From kafka-tutorials with Apache License 2.0
private void runRecipe(final String configPath) throws IOException {
  Properties envProps = this.loadEnvProperties(configPath);
  Properties streamProps = this.buildStreamsProperties(envProps);

  Topology topology = this.buildTopology(envProps, this.ticketSaleSerde(envProps));
  this.createTopics(envProps);

  final KafkaStreams streams = new KafkaStreams(topology, streamProps);
  final CountDownLatch latch = new CountDownLatch(1);

  // Attach shutdown handler to catch Control-C.
  Runtime.getRuntime().addShutdownHook(new Thread("streams-shutdown-hook") {
    @Override
    public void run() {
      streams.close();
      latch.countDown();
    }
  });

  try {
    streams.start();
    latch.await();
  } catch (Throwable e) {
    System.exit(1);
  }
  System.exit(0);

}
 
Example #25
Source File: MegabusRefResolver.java    From emodb with Apache License 2.0
@Override
protected Topology topology() {
    StreamsBuilder streamsBuilder = new StreamsBuilder();

    // merge the ref stream with the ref-retry stream. They must be merged into a single stream for ordering purposes
    final KStream<String, List<MegabusRef>> refStream = streamsBuilder.stream(_megabusRefTopic.getName(), Consumed.with(Serdes.String(), new JsonPOJOSerde<>(new TypeReference<List<MegabusRef>>() {})))
            .merge(streamsBuilder.stream(_retryRefTopic.getName(), Consumed.with(Serdes.String(), new JsonPOJOSerde<>(new TypeReference<List<MegabusRef>>() {}))));

    // resolve refs into documents
    KStream<String, ResolutionResult> resolutionResults = refStream.mapValues(value -> {
        try {
            return resolveRefs(value);
        } catch (Throwable t) {
            _errorProcessingMeter.mark();
            throw t;
        }
    });

    resolutionResults
            // extract the resolved documents
            .flatMap((key, value) -> value.getKeyedResolvedDocs())
            // convert deleted documents to null
            .mapValues(doc -> Optional.ofNullable(doc).map(Intrinsic::isDeleted).orElse(true) ? null : doc)
            // send to megabus
            .to(_megabusResolvedTopic.getName(), Produced.with(Serdes.String(), new JsonPOJOSerde<>(new TypeReference<Map<String, Object>>() {})));

    resolutionResults
            // filter out all resolution results without missing refs
            .filterNot((key, result) -> result.getMissingRefs().isEmpty())
            // add timestamp for missing refs
            .mapValues(result -> new MissingRefCollection(result.getMissingRefs(), Date.from(_clock.instant())))
            // send to missing topic
            .to(_missingRefTopic.getName(), Produced.with(Serdes.String(), new JsonPOJOSerde<>(MissingRefCollection.class)));
    return streamsBuilder.build();
}
 
Example #26
Source File: RunningAverage.java    From kafka-tutorials with Apache License 2.0
private void run() {

    Properties envProps = this.loadEnvProperties();
    Properties streamProps = this.buildStreamsProperties(envProps);
    Topology topology = this.buildTopology(new StreamsBuilder(), envProps);

    this.createTopics(envProps);

    final KafkaStreams streams = new KafkaStreams(topology, streamProps);
    final CountDownLatch latch = new CountDownLatch(1);

    // Attach shutdown handler to catch Control-C.
    Runtime.getRuntime().addShutdownHook(new Thread("streams-shutdown-hook") {
      @Override
      public void run() {
        streams.close(Duration.ofSeconds(5));
        latch.countDown();
      }
    });

    try {
      streams.cleanUp();
      streams.start();
      latch.await();
    } catch (Throwable e) {
      System.exit(1);
    }
    System.exit(0);
  }
 
Example #27
Source File: ITKafkaStreamsTracing.java    From brave with Apache License 2.0
@Test
public void should_create_spans_from_stream_without_tracing_and_tracing_processor() {
  ProcessorSupplier<String, String> processorSupplier =
    kafkaStreamsTracing.processor(
      "forward-1", () ->
        new AbstractProcessor<String, String>() {
          @Override
          public void process(String key, String value) {
            try {
              Thread.sleep(100L);
            } catch (InterruptedException e) {
              e.printStackTrace();
            }
          }
        });

  String inputTopic = testName.getMethodName() + "-input";

  StreamsBuilder builder = new StreamsBuilder();
  builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()))
    .process(processorSupplier);
  Topology topology = builder.build();

  KafkaStreams streams = buildKafkaStreamsWithoutTracing(topology);

  send(new ProducerRecord<>(inputTopic, TEST_KEY, TEST_VALUE));

  waitForStreamToRun(streams);

  assertThat(testSpanHandler.takeLocalSpan().tags())
    .containsOnlyKeys("kafka.streams.application.id", "kafka.streams.task.id");

  streams.close();
  streams.cleanUp();
}
 
Example #28
Source File: MatchApplication.java    From football-events with MIT License
@Bean
public KafkaStreams kafkaStreams() {
    StreamsBuilder streamsBuilder = new StreamsBuilder();
    DomainUpdater snapshotBuilder = new DomainUpdater(leagueRepository());
    Topology topology = streamsBuilder.build();
    snapshotBuilder.init(topology);
    KafkaStreamsStarter starter = new KafkaStreamsStarter(kafkaBootstrapAddress, topology, APP_ID);
    starter.setKafkaTimeout(kafkaTimeout);
    starter.setStreamsStartupTimeout(streamsStartupTimeout);
    return starter.start();
}
 
Example #29
Source File: BasicViewApplication.java    From football-events with MIT License
@Bean
public KafkaStreams kafkaStreams() {
    StreamsBuilder streamsBuilder = new StreamsBuilder();
    new StatisticsBuilder(streamsBuilder).build();
    Topology topology = streamsBuilder.build();
    KafkaStreamsStarter starter = new KafkaStreamsStarter(kafkaBootstrapAddress, topology, APP_ID);
    starter.setKafkaTimeout(kafkaTimeout);
    starter.setStreamsStartupTimeout(streamsStartupTimeout);
    return starter.start();
}
 
Example #30
Source File: ITKafkaStreamsTracing.java    From brave with Apache License 2.0
Properties streamsProperties() {
  Properties properties = new Properties();
  properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG,
    kafka.helper().consumerConfig().getProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG));
  properties.put(StreamsConfig.STATE_DIR_CONFIG, "target/kafka-streams");
  properties.put(StreamsConfig.APPLICATION_ID_CONFIG, testName.getMethodName());
  properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG),
    Topology.AutoOffsetReset.EARLIEST.name().toLowerCase());
  return properties;
}