Java Code Examples for org.apache.kafka.streams.StreamsBuilder

The following examples show how to use org.apache.kafka.streams.StreamsBuilder. They are extracted from open source projects; the source project, source file, and license are listed above each example.
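Before the project examples, here is a minimal, self-contained sketch of the typical StreamsBuilder workflow: create a builder, declare the processing topology against it, call build() to obtain a Topology, and pass that Topology to KafkaStreams. The application id, bootstrap servers, and topic names ("input-topic", "output-topic") below are placeholders for illustration only; they are not taken from any of the projects listed.

import java.util.Properties;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Produced;

public class StreamsBuilderExample {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-builder-example");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        // Declare the topology: read from "input-topic", upper-case each value,
        // and write the result to "output-topic".
        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("input-topic", Consumed.with(Serdes.String(), Serdes.String()))
               .mapValues(value -> value.toUpperCase())
               .to("output-topic", Produced.with(Serdes.String(), Serdes.String()));

        // build() turns the fluent description into an immutable Topology.
        Topology topology = builder.build();

        KafkaStreams streams = new KafkaStreams(topology, props);
        streams.start();
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
    }
}

The same pattern recurs throughout the examples below; they differ mainly in how the topology is declared (tables, joins, aggregations, branching) and in how the resulting Topology is run (KafkaStreams in application code, TopologyTestDriver in tests).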
Example 1
Source Project: kafka-tutorials   Source File: FkJoinTableToTable.java    License: Apache License 2.0
public Topology buildTopology(Properties envProps) {
    final StreamsBuilder builder = new StreamsBuilder();
    final String albumTopic = envProps.getProperty("album.topic.name");
    final String userTrackPurchaseTopic = envProps.getProperty("tracks.purchase.topic.name");
    final String musicInterestTopic = envProps.getProperty("music.interest.topic.name");

    final Serde<Long> longSerde = getPrimitiveAvroSerde(envProps, true);
    final Serde<MusicInterest> musicInterestSerde = getSpecificAvroSerde(envProps);
    final Serde<Album> albumSerde = getSpecificAvroSerde(envProps);
    final Serde<TrackPurchase> trackPurchaseSerde = getSpecificAvroSerde(envProps);

    final KTable<Long, Album> albums = builder.table(albumTopic, Consumed.with(longSerde, albumSerde));

    final KTable<Long, TrackPurchase> trackPurchases = builder.table(userTrackPurchaseTopic, Consumed.with(longSerde, trackPurchaseSerde));
    final MusicInterestJoiner trackJoiner = new MusicInterestJoiner();

    final KTable<Long, MusicInterest> musicInterestTable = trackPurchases.join(albums,
                                                                         TrackPurchase::getAlbumId,
                                                                         trackJoiner);

    musicInterestTable.toStream().to(musicInterestTopic, Produced.with(longSerde, musicInterestSerde));

    return builder.build();
}
 
Example 2
Source Project: fluent-kafka-streams-tests   Source File: NameJoinGlobalKTable.java    License: MIT License
public Topology getTopology() {
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<Long, Long> inputStream =
            builder.stream(INPUT_TOPIC, Consumed.with(Serdes.Long(), Serdes.Long()));

    final GlobalKTable<Long, String> joinTable = builder.globalTable(NAME_INPUT);

    inputStream
            .join(joinTable,
                    (id, valueId) -> valueId,
                    (id, name) -> name)
            .to(OUTPUT_TOPIC, Produced.with(Serdes.Long(), Serdes.String()));

    return builder.build();
}
 
Example 3
Source Project: kafka-tutorials   Source File: AggregatingCount.java    License: Apache License 2.0
public Topology buildTopology(Properties envProps,
                              final SpecificAvroSerde<TicketSale> ticketSaleSerde) {
  final StreamsBuilder builder = new StreamsBuilder();

  final String inputTopic = envProps.getProperty("input.topic.name");
  final String outputTopic = envProps.getProperty("output.topic.name");

  builder.stream(inputTopic, Consumed.with(Serdes.String(), ticketSaleSerde))
      // Set key to title and value to ticket value
      .map((k, v) -> new KeyValue<>((String) v.getTitle(), (Integer) v.getTicketTotalValue()))
      // Group by title
      .groupByKey(Grouped.with(Serdes.String(), Serdes.Integer()))
      // Apply COUNT method
      .count()
      // Write to stream specified by outputTopic
      .toStream().to(outputTopic, Produced.with(Serdes.String(), Serdes.Long()));

  return builder.build();
}
 
Example 4
private KStream<?, ?> getkStream(String inboundName,
								KafkaStreamsStateStoreProperties storeSpec,
								BindingProperties bindingProperties,
								KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties, StreamsBuilder streamsBuilder,
								Serde<?> keySerde, Serde<?> valueSerde,
								Topology.AutoOffsetReset autoOffsetReset, boolean firstBuild) {
	if (storeSpec != null) {
		StoreBuilder storeBuilder = buildStateStore(storeSpec);
		streamsBuilder.addStateStore(storeBuilder);
		if (LOG.isInfoEnabled()) {
			LOG.info("state store " + storeBuilder.name() + " added to topology");
		}
	}
	return getKStream(inboundName, bindingProperties, kafkaStreamsConsumerProperties, streamsBuilder,
			keySerde, valueSerde, autoOffsetReset, firstBuild);
}
 
Example 5
@Override
public void start() throws Exception {
    Predicate<String, EventEnvelope> inventoryItemCreated = (k, v) -> k.equals(InventoryItemCreated.class.getSimpleName());
    Predicate<String, EventEnvelope> inventoryItemRenamed =  (k, v) -> k.equals(InventoryItemRenamed.class.getSimpleName());
    Predicate<String, EventEnvelope> inventoryItemDeactivated = (k, v) -> k.equals(InventoryItemDeactivated.class.getSimpleName());

    StreamsBuilder builder = new StreamsBuilder();

    KStream<String, EventEnvelope>[] filteredStreams = builder
            .stream(INVENTORY_ITEM_TOPIC, Consumed.with(Serdes.String(), initializeEnvelopeSerde()))
            .selectKey((k, v) -> v.eventType)
            .branch(inventoryItemCreated, inventoryItemRenamed, inventoryItemDeactivated);

    filteredStreams[0].process(InventoryItemCreatedHandler::new);
    filteredStreams[1].process(InventoryItemRenamedHandler::new);
    filteredStreams[2].process(InventoryItemDeactivatedHandler::new);

    kafkaStreams = new KafkaStreams(builder.build(), getProperties());
    kafkaStreams.cleanUp(); // -- only because we are using in-memory
    kafkaStreams.start();
}
 
Example 6
private KTable createKTable(
    StreamsBuilder builder, final Topology.AutoOffsetReset autoOffsetReset,
    final KsqlTable ksqlTable,
    final Serde<GenericRow> genericRowSerde,
    final Serde<GenericRow> genericRowSerdeAfterRead
) {
  if (ksqlTable.isWindowed()) {
    return table(
        builder.stream(
            ksqlTable.getKsqlTopic().getKafkaTopicName(),
            Consumed.with(windowedSerde, genericRowSerde).withOffsetResetPolicy(autoOffsetReset)
        ).mapValues(windowedMapper).transformValues(new AddTimestampColumn()),
        windowedSerde,
        genericRowSerdeAfterRead
    );
  } else {
    return table(
        builder.stream(
            ksqlTable.getKsqlTopic().getKafkaTopicName(),
            Consumed.with(Serdes.String(), genericRowSerde).withOffsetResetPolicy(autoOffsetReset)
        ).mapValues(nonWindowedValueMapper).transformValues(new AddTimestampColumn()),
        Serdes.String(),
        genericRowSerdeAfterRead
    );
  }
}
 
Example 7
Source Project: kafka-graphs   Source File: GraphAlgorithmHandler.java    License: Apache License 2.0
public Mono<ServerResponse> configure(ServerRequest request) {
    List<String> appIdHeaders = request.headers().header(X_KGRAPH_APPID);
    String appId = appIdHeaders.isEmpty() ? ClientUtils.generateRandomHexString(8) : appIdHeaders.iterator().next();
    return request.bodyToMono(GraphAlgorithmCreateRequest.class)
        .doOnNext(input -> {
            PregelGraphAlgorithm<?, ?, ?, ?> algorithm = getAlgorithm(appId, input);
            StreamsBuilder builder = new StreamsBuilder();
            Properties streamsConfig = streamsConfig(
                appId, props.getBootstrapServers(),
                algorithm.serialized().keySerde(), algorithm.serialized().vertexValueSerde()
            );
            algorithm.configure(builder, streamsConfig);
            algorithms.put(appId, algorithm);
        })
        .flatMapMany(input -> proxyConfigure(appIdHeaders.isEmpty()
            ? group.getCurrentMembers().keySet() : Collections.emptySet(), appId, input))
        .then(ServerResponse.ok()
            .contentType(MediaType.APPLICATION_JSON)
            .body(Mono.just(new GraphAlgorithmId(appId)), GraphAlgorithmId.class));
}
 
Example 8
public PhysicalPlanBuilder(
    final StreamsBuilder builder,
    final KsqlConfig ksqlConfig,
    final KafkaTopicClient kafkaTopicClient,
    final FunctionRegistry functionRegistry,
    final Map<String, Object> overriddenStreamsProperties,
    final boolean updateMetastore,
    final MetaStore metaStore,
    final SchemaRegistryClient schemaRegistryClient,
    final KafkaStreamsBuilder kafkaStreamsBuilder
) {
  this.builder = builder;
  this.ksqlConfig = ksqlConfig;
  this.kafkaTopicClient = kafkaTopicClient;
  this.functionRegistry = functionRegistry;
  this.overriddenStreamsProperties = overriddenStreamsProperties;
  this.metaStore = metaStore;
  this.updateMetastore = updateMetastore;
  this.schemaRegistryClient = schemaRegistryClient;
  this.kafkaStreamsBuilder = kafkaStreamsBuilder;
}
 
Example 9
public PhysicalPlanBuilder(
    final StreamsBuilder builder,
    final KsqlConfig ksqlConfig,
    final KafkaTopicClient kafkaTopicClient,
    final FunctionRegistry functionRegistry,
    final Map<String, Object> overriddenStreamsProperties,
    final boolean updateMetastore,
    final MetaStore metaStore,
    final SchemaRegistryClient schemaRegistryClient
) {
  this(
      builder,
      ksqlConfig,
      kafkaTopicClient,
      functionRegistry,
      overriddenStreamsProperties,
      updateMetastore,
      metaStore,
      schemaRegistryClient,
      new KafkaStreamsBuilderImpl()
  );
}
 
Example 10
@Test
public void shouldCreateSinkWithCorrectCleanupPolicyWindowedTable() {
  KafkaTopicClient topicClientForWindowTable = EasyMock.mock(KafkaTopicClient.class);
  KsqlStructuredDataOutputNode outputNode = getKsqlStructuredDataOutputNode(true);

  StreamsBuilder streamsBuilder = new StreamsBuilder();
  topicClientForWindowTable.createTopic("output", 4, (short) 3, Collections.emptyMap());
  EasyMock.replay(topicClientForWindowTable);
  SchemaKStream schemaKStream = outputNode.buildStream(
      streamsBuilder,
      ksqlConfig,
      topicClientForWindowTable,
      new FunctionRegistry(),
      new HashMap<>(),
      new MockSchemaRegistryClient());
  assertThat(schemaKStream, instanceOf(SchemaKTable.class));
  EasyMock.verify(topicClientForWindowTable);
}
 
Example 11
@Test
public void shouldCreateSinkWithCorrectCleanupPolicyStream() {
  KafkaTopicClient topicClientForWindowTable = EasyMock.mock(KafkaTopicClient.class);

  StreamsBuilder streamsBuilder = new StreamsBuilder();
  topicClientForWindowTable.createTopic("output", 4, (short) 3, Collections.emptyMap());
  EasyMock.replay(topicClientForWindowTable);
  SchemaKStream schemaKStream = outputNode.buildStream(
      streamsBuilder,
      ksqlConfig,
      topicClientForWindowTable,
      new FunctionRegistry(),
      new HashMap<>(),
      new MockSchemaRegistryClient());
  assertThat(schemaKStream, instanceOf(SchemaKStream.class));
  EasyMock.verify(topicClientForWindowTable);
}
 
Example 12
Source Project: kafka-graphs   Source File: StreamUtils.java    License: Apache License 2.0
public static <K, V> KStream<K, V> streamFromCollection(
    StreamsBuilder builder,
    Properties props,
    String topic,
    int numPartitions,
    short replicationFactor,
    Serde<K> keySerde,
    Serde<V> valueSerde,
    Collection<KeyValue<K, V>> values) {

    ClientUtils.createTopic(topic, numPartitions, replicationFactor, props);
    try (Producer<K, V> producer = new KafkaProducer<>(props, keySerde.serializer(), valueSerde.serializer())) {
        for (KeyValue<K, V> value : values) {
            ProducerRecord<K, V> producerRecord = new ProducerRecord<>(topic, value.key, value.value);
            producer.send(producerRecord);
        }
        producer.flush();
    }
    return builder.stream(topic, Consumed.with(keySerde, valueSerde));
}
 
Example 13
public static void main(String[] args) {
    Properties properties = new Properties();
    properties.put(StreamsConfig.APPLICATION_ID_CONFIG,
        AppConfigs.applicationID);
    properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG,
        AppConfigs.bootstrapServers);
    properties.put(StreamsConfig.STATE_DIR_CONFIG,
        AppConfigs.stateStoreLocation);

    StreamsBuilder builder = new StreamsBuilder();
    AppTopology.withBuilder(builder);

    builder.stream(
        AppConfigs.outputTopic,
        Consumed.with(AppSerdes.String(), AppSerdes.CampaignPerformance())
    ).foreach((k, v) -> logger.info("outside = " + v));

    Topology topology = builder.build();

    KafkaStreams streams = new KafkaStreams(topology, properties);
    streams.start();
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
 
Example 14
protected KStream<?, ?> getKStream(String inboundName, BindingProperties bindingProperties, KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties,
		StreamsBuilder streamsBuilder, Serde<?> keySerde, Serde<?> valueSerde, Topology.AutoOffsetReset autoOffsetReset, boolean firstBuild) {
	if (firstBuild) {
		addStateStoreBeans(streamsBuilder);
	}

	KStream<?, ?> stream;
	if (this.kafkaStreamsExtendedBindingProperties
			.getExtendedConsumerProperties(inboundName).isDestinationIsPattern()) {
		final Pattern pattern = Pattern.compile(this.bindingServiceProperties.getBindingDestination(inboundName));
		stream = streamsBuilder.stream(pattern);
	}
	else {
		String[] bindingTargets = StringUtils.commaDelimitedListToStringArray(
				this.bindingServiceProperties.getBindingDestination(inboundName));
		final Consumed<?, ?> consumed = getConsumed(kafkaStreamsConsumerProperties, keySerde, valueSerde, autoOffsetReset);
		stream = streamsBuilder.stream(Arrays.asList(bindingTargets),
				consumed);
	}
	final boolean nativeDecoding = this.bindingServiceProperties
			.getConsumerProperties(inboundName).isUseNativeDecoding();
	if (nativeDecoding) {
		LOG.info("Native decoding is enabled for " + inboundName
				+ ". Inbound deserialization done at the broker.");
	}
	else {
		LOG.info("Native decoding is disabled for " + inboundName
				+ ". Inbound message conversion done by Spring Cloud Stream.");
	}

	return getkStream(bindingProperties, stream, nativeDecoding);
}
 
Example 15
@Override public Topology get() {
  StreamsBuilder builder = new StreamsBuilder();
  if (aggregationEnabled) {
    // Aggregate Spans to Traces
    KStream<String, List<Span>> tracesStream =
        builder.stream(spansTopic, Consumed.with(Serdes.String(), spansSerde))
            .groupByKey()
            // how long to wait for another span
            .windowedBy(SessionWindows.with(traceTimeout).grace(Duration.ZERO))
            .aggregate(ArrayList::new, aggregateSpans(), joinAggregates(),
                Materialized
                    .<String, List<Span>>as(
                        Stores.persistentSessionStore(TRACE_AGGREGATION_STORE,
                            Duration.ofDays(1)))
                    .withKeySerde(Serdes.String())
                    .withValueSerde(spansSerde)
                    .withLoggingDisabled()
                    .withCachingEnabled())
            // hold until a new record tells that a window is closed and we can process it further
            .suppress(untilWindowCloses(unbounded()))
            .toStream()
            .selectKey((windowed, spans) -> windowed.key());
    // Downstream to traces topic
    tracesStream.to(traceTopic, Produced.with(Serdes.String(), spansSerde));
    // Map to dependency links
    tracesStream.flatMapValues(spansToDependencyLinks())
        .selectKey((key, value) -> linkKey(value))
        .to(dependencyTopic, Produced.with(Serdes.String(), dependencyLinkSerde));
  }
  return builder.build();
}
 
Example 16
@Test
public void testSumOfOutNeighbors() throws Exception {
    Properties producerConfig = ClientUtils.producerConfig(CLUSTER.bootstrapServers(), LongSerializer.class,
        LongSerializer.class, new Properties()
    );
    StreamsBuilder builder = new StreamsBuilder();

    KTable<Long, Long> vertices =
        StreamUtils.tableFromCollection(builder, producerConfig, Serdes.Long(), Serdes.Long(),
            TestGraphUtils.getLongLongVertices());

    KTable<Edge<Long>, Long> edges =
        StreamUtils.tableFromCollection(builder, producerConfig, new KryoSerde<>(), Serdes.Long(),
            TestGraphUtils.getLongLongEdges());

    KGraph<Long, Long, Long> graph = new KGraph<>(
        vertices, edges, GraphSerialized.with(Serdes.Long(), Serdes.Long(), Serdes.Long()));

    KTable<Long, Long> verticesWithSumOfOutNeighborValues =
        graph.groupReduceOnNeighbors(new SumOutNeighbors(), EdgeDirection.OUT);

    startStreams(builder, Serdes.Long(), Serdes.Long());

    Thread.sleep(5000);

    List<KeyValue<Long, Long>> result = StreamUtils.listFromTable(streams, verticesWithSumOfOutNeighborValues);

    expectedResult = "1,5\n" +
        "2,3\n" +
        "3,9\n" +
        "4,5\n" +
        "5,1\n";

    compareResultAsTuples(result, expectedResult);
}
 
Example 17
Source Project: brave   Source File: ITKafkaStreamsTracing.java    License: Apache License 2.0
@Test
public void should_create_spans_from_stream_with_tracing_filter_not_predicate_true() {
  String inputTopic = testName.getMethodName() + "-input";
  String outputTopic = testName.getMethodName() + "-output";

  StreamsBuilder builder = new StreamsBuilder();
  builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()))
    .transform(kafkaStreamsTracing.filterNot("filterNot-1", (key, value) -> true))
    .to(outputTopic, Produced.with(Serdes.String(), Serdes.String()));
  Topology topology = builder.build();

  KafkaStreams streams = buildKafkaStreams(topology);

  send(new ProducerRecord<>(inputTopic, TEST_KEY, TEST_VALUE));

  waitForStreamToRun(streams);

  MutableSpan spanInput = testSpanHandler.takeRemoteSpan(CONSUMER);
  assertThat(spanInput.tags()).containsEntry("kafka.topic", inputTopic);

  MutableSpan spanProcessor = testSpanHandler.takeLocalSpan();
  assertChildOf(spanProcessor, spanInput);
  assertThat(spanProcessor.tags()).containsEntry(KAFKA_STREAMS_FILTERED_TAG, "true");

  // the filterNot predicate returns true, so the record is dropped and no output span is produced

  streams.close();
  streams.cleanUp();
}
 
Example 18
Source Project: kafka-graphs   Source File: ReduceOnEdgesMethodsITCase.java    License: Apache License 2.0
@Test
public void testLowestWeightOutNeighborNoValue() throws Exception {
    Properties producerConfig = ClientUtils.producerConfig(CLUSTER.bootstrapServers(), LongSerializer.class,
        LongSerializer.class, new Properties()
    );
    StreamsBuilder builder = new StreamsBuilder();

    KTable<Long, Long> vertices =
        StreamUtils.tableFromCollection(builder, producerConfig, Serdes.Long(), Serdes.Long(),
            TestGraphUtils.getLongLongVertices());

    KTable<Edge<Long>, Long> edges =
        StreamUtils.tableFromCollection(builder, producerConfig, new KryoSerde<>(), Serdes.Long(),
            TestGraphUtils.getLongLongEdges());

    KGraph<Long, Long, Long> graph = new KGraph<>(
        vertices, edges, GraphSerialized.with(Serdes.Long(), Serdes.Long(), Serdes.Long()));

    KTable<Long, Long> verticesWithLowestOutNeighbor =
        graph.reduceOnEdges(new SelectMinWeightNeighborNoValue(), EdgeDirection.OUT);

    startStreams(builder, Serdes.Long(), Serdes.Long());

    Thread.sleep(5000);

    List<KeyValue<Long, Long>> result = StreamUtils.listFromTable(streams, verticesWithLowestOutNeighbor);

    expectedResult = "1,12\n" +
        "2,23\n" +
        "3,34\n" +
        "4,45\n" +
        "5,51\n";

    TestUtils.compareResultAsTuples(result, expectedResult);
}
 
Example 19
Source Project: kafka-tutorials   Source File: RunningAverage.java    License: Apache License 2.0
private void run() {

    Properties envProps = this.loadEnvProperties();
    Properties streamProps = this.buildStreamsProperties(envProps);
    Topology topology = this.buildTopology(new StreamsBuilder(), envProps);

    this.createTopics(envProps);

    final KafkaStreams streams = new KafkaStreams(topology, streamProps);
    final CountDownLatch latch = new CountDownLatch(1);

    // Attach shutdown handler to catch Control-C.
    Runtime.getRuntime().addShutdownHook(new Thread("streams-shutdown-hook") {
      @Override
      public void run() {
        streams.close(Duration.ofSeconds(5));
        latch.countDown();
      }
    });

    try {
      streams.cleanUp();
      streams.start();
      latch.await();
    } catch (Throwable e) {
      System.exit(1);
    }
    System.exit(0);
}
 
Example 20
Source Project: kafka-tutorials   Source File: RunningAverage.java    License: Apache License 2.0
private Topology buildTopology(StreamsBuilder bldr,
                               Properties envProps) {

  final String ratingTopicName = envProps.getProperty("input.ratings.topic.name");
  final String avgRatingsTopicName = envProps.getProperty("output.rating-averages.topic.name");

  KStream<Long, Rating> ratingStream = bldr.stream(ratingTopicName,
                                                   Consumed.with(Serdes.Long(), getRatingSerde(envProps)));

  getRatingAverageTable(ratingStream, avgRatingsTopicName, getCountAndSumSerde(envProps));

  // finish the topology
  return bldr.build();
}
 
Example 21
Source Project: simplesource   Source File: TestDriverInitializer.java    License: Apache License 2.0
TopologyTestDriver build(Consumer<StreamsBuilder> builderConsumer) {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "test");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
    props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 0);
    props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);

    builderConsumer.accept(streamsBuilder);
    Topology topology = streamsBuilder.build();
    return new TopologyTestDriver(topology, props);
}
 
Example 22
/**
 * Setup Stream topology
 * Add KStream based on @StreamListener annotation
 * Add to(topic) based on @SendTo annotation
 */
@Before
public void setup() {
    final StreamsBuilder builder = new StreamsBuilder();
    KStream<Bytes, String> input = builder.stream(INPUT_TOPIC, Consumed.with(nullSerde, stringSerde));
    KafkaStreamsWordCountApplication.WordCountProcessorApplication app = new KafkaStreamsWordCountApplication.WordCountProcessorApplication();
    final Function<KStream<Bytes, String>, KStream<Bytes, KafkaStreamsWordCountApplication.WordCount>> process = app.process();

    final KStream<Bytes, KafkaStreamsWordCountApplication.WordCount> output = process.apply(input);

    output.to(OUTPUT_TOPIC, Produced.with(nullSerde, countSerde));

    testDriver = new TopologyTestDriver(builder.build(), getStreamsConfiguration());
}
 
Example 23
Source Project: kafka-tutorials   Source File: DynamicOutputTopic.java    License: Apache License 2.0
public Topology buildTopology(Properties envProps) {
    final StreamsBuilder builder = new StreamsBuilder();
    final String orderInputTopic = envProps.getProperty("input.topic.name");
    final String orderOutputTopic = envProps.getProperty("output.topic.name");
    final String specialOrderOutput = envProps.getProperty("special.order.topic.name");

    final Serde<Long> longSerde = getPrimitiveAvroSerde(envProps, true);
    final Serde<Order> orderSerde = getSpecificAvroSerde(envProps);
    final Serde<CompletedOrder> completedOrderSerde = getSpecificAvroSerde(envProps);

    final ValueMapper<Order, CompletedOrder> orderProcessingSimulator = v -> {
       double amount = v.getQuantity() * FAKE_PRICE;
       return CompletedOrder.newBuilder().setAmount(amount).setId(v.getId() + "-" + v.getSku()).setName(v.getName()).build();
    };

    final TopicNameExtractor<Long, CompletedOrder> orderTopicNameExtractor = (key, completedOrder, recordContext) -> {
          final String compositeId = completedOrder.getId();
          final String skuPart = compositeId.substring(compositeId.indexOf('-') + 1, 5);
          final String outTopic;
          if (skuPart.equals("QUA")) {
              outTopic = specialOrderOutput;
          } else {
              outTopic = orderOutputTopic;
          }
          return outTopic;
    };

    final KStream<Long, Order> exampleStream = builder.stream(orderInputTopic, Consumed.with(longSerde, orderSerde));

    exampleStream.mapValues(orderProcessingSimulator).to(orderTopicNameExtractor, Produced.with(longSerde, completedOrderSerde));

    return builder.build();
}
 
Example 24
private GlobalKTable<?, ?> getGlobalKTable(KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties,
		StreamsBuilder streamsBuilder,
		Serde<?> keySerde, Serde<?> valueSerde, String materializedAs,
		String bindingDestination, Topology.AutoOffsetReset autoOffsetReset) {
	final Consumed<?, ?> consumed = getConsumed(kafkaStreamsConsumerProperties, keySerde, valueSerde, autoOffsetReset);
	return materializedAs != null
			? materializedAsGlobalKTable(streamsBuilder, bindingDestination,
			materializedAs, keySerde, valueSerde, autoOffsetReset, kafkaStreamsConsumerProperties)
			: streamsBuilder.globalTable(bindingDestination,
			consumed);
}
 
Example 25
Source Project: kafka-graphs   Source File: ReduceOnEdgesMethodsITCase.java    License: Apache License 2.0
@Test
public void testLowestWeightOutNeighbor() throws Exception {
    Properties producerConfig = ClientUtils.producerConfig(CLUSTER.bootstrapServers(), LongSerializer.class,
        LongSerializer.class, new Properties()
    );
    StreamsBuilder builder = new StreamsBuilder();

    KTable<Long, Long> vertices =
        StreamUtils.tableFromCollection(builder, producerConfig, Serdes.Long(), Serdes.Long(),
            TestGraphUtils.getLongLongVertices());

    KTable<Edge<Long>, Long> edges =
        StreamUtils.tableFromCollection(builder, producerConfig, new KryoSerde<>(), Serdes.Long(),
            TestGraphUtils.getLongLongEdges());

    KGraph<Long, Long, Long> graph = new KGraph<>(
        vertices, edges, GraphSerialized.with(Serdes.Long(), Serdes.Long(), Serdes.Long()));

    KTable<Long, Long> verticesWithLowestOutNeighbor =
        graph.groupReduceOnEdges(new SelectMinWeightNeighbor(), EdgeDirection.OUT);

    startStreams(builder, Serdes.Long(), Serdes.Long());

    Thread.sleep(5000);

    List<KeyValue<Long, Long>> result = StreamUtils.listFromTable(streams, verticesWithLowestOutNeighbor);

    expectedResult = "1,2\n" +
        "2,3\n" +
        "3,4\n" +
        "4,5\n" +
        "5,1\n";

    TestUtils.compareResultAsTuples(result, expectedResult);
}
 
Example 26
Source Project: brave   Source File: ITKafkaStreamsTracing.java    License: Apache License 2.0
@Test
public void should_create_spans_from_stream_with_tracing_filter_not_predicate_false() {
  String inputTopic = testName.getMethodName() + "-input";
  String outputTopic = testName.getMethodName() + "-output";

  StreamsBuilder builder = new StreamsBuilder();
  builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()))
    .transform(kafkaStreamsTracing.filterNot("filterNot-2", (key, value) -> false))
    .to(outputTopic, Produced.with(Serdes.String(), Serdes.String()));
  Topology topology = builder.build();

  KafkaStreams streams = buildKafkaStreams(topology);

  send(new ProducerRecord<>(inputTopic, TEST_KEY, TEST_VALUE));

  waitForStreamToRun(streams);

  MutableSpan spanInput = testSpanHandler.takeRemoteSpan(CONSUMER);
  assertThat(spanInput.tags()).containsEntry("kafka.topic", inputTopic);

  MutableSpan spanProcessor = testSpanHandler.takeLocalSpan();
  assertChildOf(spanProcessor, spanInput);
  assertThat(spanProcessor.tags()).containsEntry(KAFKA_STREAMS_FILTERED_TAG, "false");

  // the filterNot predicate returns false, so the record is not dropped and is forwarded to the output topic

  MutableSpan spanOutput = testSpanHandler.takeRemoteSpan(PRODUCER);
  assertThat(spanOutput.tags()).containsEntry("kafka.topic", outputTopic);
  assertChildOf(spanOutput, spanProcessor);

  streams.close();
  streams.cleanUp();
}
 
Example 27
Source Project: kafka-graphs   Source File: GraphGenerators.java    License: Apache License 2.0
public static KGraph<Long, Long, Long> completeGraph(
    StreamsBuilder builder, Properties producerConfig, int numVertices) {
    List<KeyValue<Edge<Long>, Long>> edgeList = new ArrayList<>();
    for (long i = 0; i < numVertices; i++) {
        for (long j = 0; j < numVertices; j++) {
            if (i != j) edgeList.add(new KeyValue<>(new Edge<>(i, j), 1L));
        }
    }
    KTable<Edge<Long>, Long> edges = StreamUtils.tableFromCollection(
        builder, producerConfig, new KryoSerde<>(), Serdes.Long(), edgeList);

    return KGraph.fromEdges(edges, v -> 1L,
        GraphSerialized.with(Serdes.Long(), Serdes.Long(), Serdes.Long()));
}
 
Example 28
Source Project: SkaETL   Source File: ProcessStreamService.java    License: Apache License 2.0
private void createStreamValidAndTransformAndFilter(String inputTopic, String outputTopic) {
    StreamsBuilder builder = new StreamsBuilder();
    KStream<String, JsonNode> streamInput = builder.stream(inputTopic, Consumed.with(Serdes.String(), GenericSerdes.jsonNodeSerde()));
    String applicationId = getProcessConsumer().getIdProcess() + ProcessConstants.VALIDATE_PROCESS;
    Counter counter = Metrics.counter("skaetl_nb_transformation_validation_count", Lists.newArrayList(Tag.of("processConsumerName", getProcessConsumer().getName())));
    KStream<String, ValidateData> streamValidation = streamInput.mapValues((value) -> {
        ObjectNode resultTransformer = getGenericTransformator().apply(value, getProcessConsumer());
        ValidateData item = getGenericValidator().process(resultTransformer, getProcessConsumer());
        counter.increment();
        return item;
    }).filter((key, value) -> {
        //Validation
        if (!value.success) {
            //produce to errorTopic
            esErrorRetryWriter.sendToErrorTopic(applicationId, value);
            return false;
        }
        //FILTER
        return processFilter(value);
    });

    KStream<String, JsonNode> streamOfJsonNode = streamValidation.mapValues(value -> value.getJsonValue());
    streamOfJsonNode.to(outputTopic, Produced.with(Serdes.String(), GenericSerdes.jsonNodeSerde()));

    KafkaStreams streams = new KafkaStreams(builder.build(), KafkaUtils.createKStreamProperties(applicationId, getBootstrapServer()));
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
    streams.start();
    addStreams(streams);
}
 
Example 29
Source Project: brave   Source File: ITKafkaStreamsTracing.java    License: Apache License 2.0
@Test
public void should_create_spans_from_stream_with_tracing_processor() {
  ProcessorSupplier<String, String> processorSupplier =
    kafkaStreamsTracing.processor(
      "forward-1", () ->
        new AbstractProcessor<String, String>() {
          @Override
          public void process(String key, String value) {
            try {
              Thread.sleep(100L);
            } catch (InterruptedException e) {
              e.printStackTrace();
            }
          }
        });

  String inputTopic = testName.getMethodName() + "-input";

  StreamsBuilder builder = new StreamsBuilder();
  builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()))
    .process(processorSupplier);
  Topology topology = builder.build();

  KafkaStreams streams = buildKafkaStreams(topology);

  send(new ProducerRecord<>(inputTopic, TEST_KEY, TEST_VALUE));

  waitForStreamToRun(streams);

  MutableSpan spanInput = testSpanHandler.takeRemoteSpan(CONSUMER);
  assertThat(spanInput.tags()).containsEntry("kafka.topic", inputTopic);

  MutableSpan spanProcessor = testSpanHandler.takeLocalSpan();
  assertChildOf(spanProcessor, spanInput);

  streams.close();
  streams.cleanUp();
}
 
Example 30
Source Project: SkaETL   Source File: ProcessStreamService.java    License: Apache License 2.0
public void createStreamSystemOut(String inputTopic) {

    StreamsBuilder builder = new StreamsBuilder();

    builder.stream(inputTopic, Consumed.with(Serdes.String(), GenericSerdes.jsonNodeSerde())).process(() -> new LoggingProcessor<>());

    KafkaStreams streams = new KafkaStreams(builder.build(), KafkaUtils.createKStreamProperties(getProcessConsumer().getIdProcess() + ProcessConstants.SYSOUT_PROCESS, getBootstrapServer()));
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
    streams.start();
    addStreams(streams);
}