Java Code Examples for org.apache.kafka.common.utils.Bytes

The following examples show how to use org.apache.kafka.common.utils.Bytes. They are extracted from open source projects; where available, the originating project and source file are listed above each example.
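Before the project examples, here is a minimal standalone sketch of the class itself (the class name BytesDemo is ours, for illustration only): Bytes wraps a byte[] and supplies content-based equals, hashCode, and compareTo, which is why Kafka Streams state stores appear as KeyValueStore<Bytes, byte[]> throughout the examples below.

import org.apache.kafka.common.utils.Bytes;

public class BytesDemo {
  public static void main(String[] args) {
    // Bytes.wrap() and the constructor both wrap the given array without copying it.
    Bytes a = Bytes.wrap("test-data".getBytes());
    Bytes b = new Bytes("test-data".getBytes());

    System.out.println(a.equals(b));    // true -- equality compares contents, not references
    System.out.println(a.compareTo(b)); // 0    -- lexicographic byte-by-byte comparison
    System.out.println(a.get().length); // 9    -- get() returns the wrapped byte[]
  }
}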
Example 1
Source Project: spring_io_2019   Source File: ScsApplication.java    License: Apache License 2.0
@StreamListener
@SendTo(Bindings.RATED_MOVIES)
KStream<Long, RatedMovie> rateMoviesFor(@Input(Bindings.AVG_TABLE) KTable<Long, Double> ratings,
                                        @Input(Bindings.MOVIES) KTable<Long, Movie> movies) {

  ValueJoiner<Movie, Double, RatedMovie> joiner = (movie, rating) ->
      new RatedMovie(movie.getMovieId(), movie.getReleaseYear(), movie.getTitle(), rating);

  movies
      .join(ratings, joiner, Materialized
          .<Long, RatedMovie, KeyValueStore<Bytes, byte[]>>as(Bindings.RATED_MOVIES_STORE)
          .withKeySerde(Serdes.Long())
          .withValueSerde(new JsonSerde<>(RatedMovie.class)));

  return movies.join(ratings, joiner).toStream();
}
 
Example 2
@Test
public void shouldNotMatchAvroFormatter() throws Exception {

  // Set up expectations
  SchemaRegistryClient schemaRegistryClient = mock(SchemaRegistryClient.class);
  replay(schemaRegistryClient);

  // Test data
  ConsumerRecord<String, Bytes> record =
      new ConsumerRecord<>("topic", 1, 1, "key", new Bytes("test-data".getBytes()));

  // Assert
  assertFalse(TopicStreamWriter.Format.AVRO.isFormat("topic", record, schemaRegistryClient));
}
 
Example 3
@Test
public void shouldMatchJsonFormatter() throws Exception {

  SchemaRegistryClient schemaRegistryClient = mock(SchemaRegistryClient.class);
  replay(schemaRegistryClient);

  // Test data
  String json = "{    \"name\": \"myrecord\"," +
          "    \"type\": \"record\"" +
          "}";

  ConsumerRecord<String, Bytes> record =
      new ConsumerRecord<>("topic", 1, 1, "key", new Bytes(json.getBytes()));

  assertTrue(TopicStreamWriter.Format.JSON.isFormat("topic", record, schemaRegistryClient));
}
 
Example 4
@Test
public void shouldNotMatchJsonFormatter() throws Exception {

  SchemaRegistryClient schemaRegistryClient = mock(SchemaRegistryClient.class);
  replay(schemaRegistryClient);

  // Test data (deliberately malformed JSON)
  String json = "{  BAD DATA  \"name\": \"myrecord\"," +
          "    \"type\": \"record\"" +
          "}";

  ConsumerRecord<String, Bytes> record =
      new ConsumerRecord<>("topic", 1, 1, "key", new Bytes(json.getBytes()));

  assertFalse(TopicStreamWriter.Format.JSON.isFormat("topic", record, schemaRegistryClient));
}
 
Example 5
@Test
public void shouldCreateTumblingWindowAggregate() {
  final KGroupedStream stream = EasyMock.createNiceMock(KGroupedStream.class);
  final TimeWindowedKStream windowedKStream = EasyMock.createNiceMock(TimeWindowedKStream.class);
  final UdafAggregator aggregator = EasyMock.createNiceMock(UdafAggregator.class);
  final TumblingWindowExpression windowExpression = new TumblingWindowExpression(10, TimeUnit.SECONDS);
  final Initializer initializer = () -> 0;
  final Materialized<String, GenericRow, WindowStore<Bytes, byte[]>> store = Materialized.as("store");

  EasyMock.expect(stream.windowedBy(TimeWindows.of(10000L))).andReturn(windowedKStream);
  EasyMock.expect(windowedKStream.aggregate(same(initializer), same(aggregator), same(store))).andReturn(null);
  EasyMock.replay(stream, windowedKStream);

  windowExpression.applyAggregate(stream, initializer, aggregator, store);
  EasyMock.verify(stream, windowedKStream);
}
 
Example 6
@Test
public void shouldCreateHoppingWindowAggregate() {
  final KGroupedStream stream = EasyMock.createNiceMock(KGroupedStream.class);
  final TimeWindowedKStream windowedKStream = EasyMock.createNiceMock(TimeWindowedKStream.class);
  final UdafAggregator aggregator = EasyMock.createNiceMock(UdafAggregator.class);
  final HoppingWindowExpression windowExpression = new HoppingWindowExpression(10, TimeUnit.SECONDS, 4, TimeUnit.MILLISECONDS);
  final Initializer initializer = () -> 0;
  final Materialized<String, GenericRow, WindowStore<Bytes, byte[]>> store = Materialized.as("store");

  EasyMock.expect(stream.windowedBy(TimeWindows.of(10000L).advanceBy(4L))).andReturn(windowedKStream);
  EasyMock.expect(windowedKStream.aggregate(same(initializer), same(aggregator), same(store))).andReturn(null);
  EasyMock.replay(stream, windowedKStream);

  windowExpression.applyAggregate(stream, initializer, aggregator, store);
  EasyMock.verify(stream, windowedKStream);
}
 
Example 7
Source Project: kafka-graphs   Source File: KGraph.java    License: Apache License 2.0
public <T> KGraph<K, VV, EV> joinWithEdgesOnSource(KTable<K, T> inputDataSet,
                                                   final EdgeJoinFunction<EV, T> edgeJoinFunction) {

    KTable<Edge<K>, EV> resultedEdges = edgesGroupedBySource()
        .leftJoin(inputDataSet,
            new ApplyLeftJoinToEdgeValuesOnEitherSourceOrTarget<>(edgeJoinFunction),
            Materialized.with(keySerde(), new KryoSerde<>()))
        .toStream()
        .flatMap((k, edgeWithValues) -> {
            List<KeyValue<Edge<K>, EV>> edges = new ArrayList<>();
            for (EdgeWithValue<K, EV> edge : edgeWithValues) {
                edges.add(new KeyValue<>(new Edge<>(edge.source(), edge.target()), edge.value()));
            }
            return edges;
        })
        .groupByKey(Grouped.with(new KryoSerde<>(), edgeValueSerde()))
        .<EV>reduce((v1, v2) -> v2, Materialized.<Edge<K>, EV, KeyValueStore<Bytes, byte[]>>as(
            generateStoreName()).withKeySerde(new KryoSerde<>()).withValueSerde(edgeValueSerde()));

    return new KGraph<>(this.vertices, resultedEdges, serialized);
}
 
Example 8
Source Project: kafka-graphs   Source File: KGraph.java    License: Apache License 2.0
public <T> KGraph<K, VV, EV> joinWithEdgesOnTarget(KTable<K, T> inputDataSet,
                                                   final EdgeJoinFunction<EV, T> edgeJoinFunction) {

    KTable<Edge<K>, EV> resultedEdges = edgesGroupedByTarget()
        .leftJoin(inputDataSet,
            new ApplyLeftJoinToEdgeValuesOnEitherSourceOrTarget<>(edgeJoinFunction),
            Materialized.with(keySerde(), new KryoSerde<>()))
        .toStream()
        .flatMap((k, edgeWithValues) -> {
            List<KeyValue<Edge<K>, EV>> edges = new ArrayList<>();
            for (EdgeWithValue<K, EV> edge : edgeWithValues) {
                edges.add(new KeyValue<>(new Edge<>(edge.source(), edge.target()), edge.value()));
            }
            return edges;
        })
        .groupByKey(Grouped.with(new KryoSerde<>(), edgeValueSerde()))
        .<EV>reduce((v1, v2) -> v2, Materialized.<Edge<K>, EV, KeyValueStore<Bytes, byte[]>>as(
            generateStoreName()).withKeySerde(new KryoSerde<>()).withValueSerde(edgeValueSerde()));

    return new KGraph<>(vertices, resultedEdges, serialized);
}
 
Example 9
Source Project: kafka-graphs   Source File: KGraph.java    License: Apache License 2.0
public KGraph<K, VV, EV> subgraph(Predicate<K, VV> vertexFilter, Predicate<Edge<K>, EV> edgeFilter) {
    KTable<K, VV> filteredVertices = vertices.filter(vertexFilter);

    KTable<Edge<K>, EV> remainingEdges = edgesBySource()
        .join(filteredVertices, (e, v) -> e, Joined.with(keySerde(), new KryoSerde<>(), vertexValueSerde()))
        .map((k, edge) -> new KeyValue<>(edge.target(), edge))
        .join(filteredVertices, (e, v) -> e, Joined.with(keySerde(), new KryoSerde<>(), vertexValueSerde()))
        .map((k, edge) -> new KeyValue<>(new Edge<>(edge.source(), edge.target()), edge.value()))
        .groupByKey(Grouped.with(new KryoSerde<>(), edgeValueSerde()))
        .reduce((v1, v2) -> v2, Materialized.with(new KryoSerde<>(), edgeValueSerde()));

    KTable<Edge<K>, EV> filteredEdges = remainingEdges
        .filter(edgeFilter, Materialized.<Edge<K>, EV, KeyValueStore<Bytes, byte[]>>as(generateStoreName()).withKeySerde(new KryoSerde<>()).withValueSerde(edgeValueSerde()));

    return new KGraph<>(filteredVertices, filteredEdges, serialized);
}
 
Example 10
Source Project: super-cloudops   Source File: KafkaMetricReceiver.java    License: Apache License 2.0
/**
 * Receiving consumer messages on multiple topics
 *
 * @param records
 * @param ack
 */
@KafkaListener(topicPattern = TOPIC_KAFKA_RECEIVE_PATTERN, containerFactory = BEAN_KAFKA_BATCH_FACTORY)
public void onMetricReceive(List<ConsumerRecord<byte[], Bytes>> records, Acknowledgment ack) {
	try {
		if (log.isDebugEnabled()) {
			log.debug("Receive metric records - {}", records);
		}
		if (log.isInfoEnabled()) {
			log.info("Receive metric records size - {}", records.size());
		}

		doProcess(records, new MultiAcknowledgmentState(ack));
	} catch (Exception e) {
		log.error(String.format("Failed to process received records (size: %d)", records.size()), e);
	}
}
 
Example 11
Source Project: super-cloudops   Source File: KafkaMetricReceiver.java    License: Apache License 2.0
/**
 * UMC agent metric processing.
 *
 * @param records
 * @param state
 */
private void doProcess(List<ConsumerRecord<byte[], Bytes>> records, MultiAcknowledgmentState state) {
	for (ConsumerRecord<byte[], Bytes> record : records) {
		try {
			MetricAggregate aggregate = MetricAggregate.parseFrom(record.value().get());
			if (log.isDebugEnabled()) {
				log.debug("Put metric aggregate for - {}", aggregate);
			}

			// Storage metrics.
			putMetrics(aggregate);

			// Metrics alarm.
			alarm(aggregate);
		} catch (InvalidProtocolBufferException e) {
			log.error("Failed to parse metric message.", e);
		}
	}
	state.completed();
}
 
Example 12
Source Project: java-11-examples   Source File: SingleRecordConsumerJob.java    License: Apache License 2.0
@Override
public ServiceResponse call() throws Exception {
    LOG.info("Consumer thread started.");
    while (true) {
        ConsumerRecords<String, Bytes> records = consumer.poll(Duration.ofMillis(10));
        if (!records.isEmpty()) {
            for (ConsumerRecord<String, Bytes> record: records) {
                if (key.equals(record.key())) {
                    LOG.info("Record: {}", record.key());
                    LOG.info("received response");
                    return dataMapper.deserialize(record.value(), ServiceResponse.class);
                }
            }
        }
    }
}
 
Example 13
Source Project: java-11-examples   Source File: ProcessingServiceBackend.java    License: Apache License 2.0
public void start() {
    Collection<String> topics = Collections.singletonList(TOPIC_SERVICE_REQUESTS);
    this.consumer.subscribe(topics);
    LOG.info("Waiting for requests {} ...", serviceId);
    this.running = true;
    while (running) {
        ConsumerRecords<String, Bytes> records = consumer.poll(Duration.ofMillis(10));
        if (!records.isEmpty()) {
            for (ConsumerRecord<String, Bytes> record: records) {
                try {
                    ServiceRequest request = dataMapper.deserialize(record.value(), ServiceRequest.class);
                    LOG.info("Received Request: {}:{}:{}", record.key(), request.getClientId(), request.getTaskId());
                    ServiceResponse response =
                            new ServiceResponse(request.getTaskId(), request.getClientId(), request.getData(), "response:" + request.getData());
                    Bytes bytes = dataMapper.serialize(response);
                    ProducerRecord<String, Bytes> recordReply = new ProducerRecord<>(TOPIC_SERVICE_RESPONSES, response.getTaskId(), bytes);
                    producer.send(recordReply);
                    LOG.info("Response has been send !");
                } catch (IOException e) {
                    LOG.error("Exception: ", e);
                }
            }
        }
    }
    LOG.info("done {}.", serviceId);
}
 
Example 14
public static void main(String[] args) {
	// 1. Specify the stream configuration
	Properties config = new Properties();
	config.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-application");
	config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, HOST);
	config.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
	config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

	// Set up the stream builder
	StreamsBuilder builder = new StreamsBuilder();
	KStream<String, String> textLines = builder.stream("TextLinesTopic");
	KTable<String, Long> wordCounts = textLines
		.flatMapValues(textLine -> Arrays.asList(textLine.toLowerCase().split("\\W+")))
		.groupBy((key, word) -> word)
		.count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("counts-store"));
	wordCounts.toStream().to("WordsWithCountsTopic", Produced.with(Serdes.String(), Serdes.Long()));

	// Initialize the Kafka Streams instance from the builder and the configuration
	KafkaStreams streams = new KafkaStreams(builder.build(), config);
	streams.start();
}
 
Example 15
@Bean
public Consumer<KStream<String, DomainEvent>> aggregate() {

	ObjectMapper mapper = new ObjectMapper();
	Serde<DomainEvent> domainEventSerde = new JsonSerde<>( DomainEvent.class, mapper );

	return input -> input
			.groupBy(
					(s, domainEvent) -> domainEvent.boardUuid,
					Grouped.with(null, domainEventSerde))
			.aggregate(
					String::new,
					(s, domainEvent, board) -> board.concat(domainEvent.eventType),
					Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("test-events-snapshots")
							.withKeySerde(Serdes.String())
							.withValueSerde(Serdes.String())
			);
}
 
Example 16
Source Project: mongo-kafka   Source File: MongoKafkaTestCase.java    License: Apache License 2.0
public List<Bytes> getProduced(final String topicName, final int expectedCount) {
  if (expectedCount != Integer.MAX_VALUE) {
    LOGGER.info("Subscribing to {} expecting to see #{}", topicName, expectedCount);
  } else {
    LOGGER.info("Subscribing to {} getting all messages", topicName);
  }

  try (KafkaConsumer<?, ?> consumer = createConsumer()) {
    consumer.subscribe(singletonList(topicName));
    List<Bytes> data = new ArrayList<>();
    int counter = 0;
    int retryCount = 0;
    int previousDataSize;
    while (data.size() < expectedCount && retryCount < 30) {
      counter++;
      LOGGER.info("Polling {} ({}) seen: #{}", topicName, counter, data.size());
      previousDataSize = data.size();

      consumer
          .poll(Duration.ofSeconds(2))
          .records(topicName)
          .forEach((r) -> data.add((Bytes) r.value()));

      // Wait at least 3 minutes for the first set of data to arrive
      if (data.size() > 0 || counter > 90) {
        retryCount = data.size() == previousDataSize ? retryCount + 1 : 0;
      }
    }
    return data;
  }
}
 
Example 17
@SuppressWarnings("unchecked")
public SchemaKTable aggregate(
    final Initializer initializer,
    final UdafAggregator aggregator,
    final WindowExpression windowExpression,
    final Serde<GenericRow> topicValueSerDe) {
  final KTable aggKtable;
  if (windowExpression != null) {
    final Materialized<String, GenericRow, ?> materialized
        = Materialized.<String, GenericRow, WindowStore<Bytes, byte[]>>with(
            Serdes.String(), topicValueSerDe);

    final KsqlWindowExpression ksqlWindowExpression = windowExpression.getKsqlWindowExpression();
    aggKtable = ksqlWindowExpression.applyAggregate(
        kgroupedStream,
        initializer,
        aggregator,
        materialized
    );
  } else {
    aggKtable = kgroupedStream.aggregate(
        initializer,
        aggregator,
        Materialized.with(Serdes.String(), topicValueSerDe)
    );
  }
  return new SchemaKTable(
      schema,
      aggKtable,
      keyField,
      sourceSchemaKStreams,
      windowExpression != null,
      SchemaKStream.Type.AGGREGATE,
      functionRegistry,
      schemaRegistryClient
  );
}
 
Example 18
@Override
public boolean isFormat(
    String topicName, ConsumerRecord<String, Bytes> record,
    SchemaRegistryClient schemaRegistryClient
) {
  this.topicName = topicName;
  try {
    avroDeserializer = new KafkaAvroDeserializer(schemaRegistryClient);
    avroDeserializer.deserialize(topicName, record.value().get());
    return true;
  } catch (Throwable t) {
    return false;
  }
}
 
Example 19
@Override
String print(ConsumerRecord<String, Bytes> consumerRecord) {
  String time = dateFormat.format(new Date(consumerRecord.timestamp()));
  GenericRecord record = (GenericRecord) avroDeserializer.deserialize(
      topicName,
      consumerRecord
          .value()
          .get()
  );
  String key = consumerRecord.key() != null ? consumerRecord.key() : "null";
  return time + ", " + key + ", " + record.toString() + "\n";
}
 
Example 20
@Override
public boolean isFormat(
    String topicName, ConsumerRecord<String, Bytes> record,
    SchemaRegistryClient schemaRegistryClient
) {
  try {
    objectMapper.readTree(record.value().toString());
    return true;
  } catch (Throwable t) {
    return false;
  }
}
 
Example 21
@Override
String print(ConsumerRecord<String, Bytes> record) throws IOException {
  JsonNode jsonNode = objectMapper.readTree(record.value().toString());
  ObjectNode objectNode = objectMapper.createObjectNode();
  objectNode.put(SchemaUtil.ROWTIME_NAME, record.timestamp());
  objectNode.put(SchemaUtil.ROWKEY_NAME, (record.key() != null) ? record.key() : "null");
  objectNode.setAll((ObjectNode) jsonNode);
  StringWriter stringWriter = new StringWriter();
  objectMapper.writeValue(stringWriter, objectNode);
  return stringWriter.toString() + "\n";
}
 
Example 22
@Override
public boolean isFormat(
    String topicName,
    ConsumerRecord<String, Bytes> record,
    SchemaRegistryClient schemaRegistryClient
) {
  // STRING always returns true because it's last in the enum list
  return true;
}
 
Example 23
static Format getFormatter(
    String topicName,
    ConsumerRecord<String, Bytes> record,
    SchemaRegistryClient schemaRegistryClient
) {
  Format result = Format.UNDEFINED;
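  // Walk the Format values in ordinal order until one claims the record;
  // this relies on STRING, which always matches, being declared last.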
  while (!(result.isFormat(topicName, record, schemaRegistryClient))) {
    result = Format.values()[result.ordinal() + 1];
  }
  return result;
}
 
Example 24
boolean isFormat(
    String topicName,
    ConsumerRecord<String, Bytes> record,
    SchemaRegistryClient schemaRegistryClient
) {
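  // Default used by UNDEFINED; AVRO, JSON, and STRING override this (see the examples above).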
  return false;
}
 
Example 25
public static void main(String[] args) {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, AppConfigs.applicationID);
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, AppConfigs.bootstrapServers);
    props.put(StreamsConfig.STATE_DIR_CONFIG, AppConfigs.stateStoreName);

    StreamsBuilder streamsBuilder = new StreamsBuilder();

    KStream<String, UserClicks> KS0 = streamsBuilder.stream(
        AppConfigs.posTopicName,
        Consumed.with(AppSerdes.String(), AppSerdes.UserClicks())
            .withTimestampExtractor(new AppTimestampExtractor())
    );

    KGroupedStream<String, UserClicks> KS1 = KS0.groupByKey(
        Grouped.with(AppSerdes.String(),
            AppSerdes.UserClicks()));

    SessionWindowedKStream<String, UserClicks> KS2 = KS1.windowedBy(
        SessionWindows.with(Duration.ofMinutes(5))
            .grace(Duration.ofMinutes(1))
    );

    KTable<Windowed<String>, Long> KT3 = KS2.count(
        //Materialized is not needed if you don't want to override defaults
        Materialized.<String, Long, SessionStore<Bytes, byte[]>>as("clicks-by-user-session")
    );

    KT3.toStream().foreach(
        (kWindowed, v) -> logger.info(
            "UserID: " + kWindowed.key() +
                " Window Start: " + utcTimeString(kWindowed.window().start()) +
                " Window End: " + utcTimeString(kWindowed.window().end()) +
                " Count: " + v
        ));

    KafkaStreams streams = new KafkaStreams(streamsBuilder.build(), props);
    streams.start();
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
}
 
Example 26
public static void main(String[] args) {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, AppConfigs.applicationID);
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, AppConfigs.bootstrapServers);
    props.put(StreamsConfig.STATE_DIR_CONFIG, AppConfigs.stateStoreName);

    StreamsBuilder streamsBuilder = new StreamsBuilder();
    KStream<String, Employee> KS0 = streamsBuilder.stream(AppConfigs.topicName,
        Consumed.with(AppSerdes.String(), AppSerdes.Employee()));

    KGroupedStream<String, Employee> KGS1 = KS0.groupBy(
        (k, v) -> v.getDepartment(),
        Serialized.with(AppSerdes.String(),
            AppSerdes.Employee()));

    KTable<String, DepartmentAggregate> KT2 = KGS1.aggregate(
        //Initializer
        () -> new DepartmentAggregate()
            .withEmployeeCount(0)
            .withTotalSalary(0)
            .withAvgSalary(0D),
        //Aggregator
        (k, v, aggV) -> new DepartmentAggregate()
            .withEmployeeCount(aggV.getEmployeeCount() + 1)
            .withTotalSalary(aggV.getTotalSalary() + v.getSalary())
            .withAvgSalary((aggV.getTotalSalary() + v.getSalary()) / (aggV.getEmployeeCount() + 1D)),
        //Materialized (state store and serdes)
        Materialized.<String, DepartmentAggregate, KeyValueStore<Bytes, byte[]>>as("agg-store")
            .withKeySerde(AppSerdes.String())
            .withValueSerde(AppSerdes.DepartmentAggregate())
    );

    KT2.toStream().foreach(
        (k, v) -> System.out.println("Key = " + k + " Value = " + v.toString()));

    KafkaStreams streams = new KafkaStreams(streamsBuilder.build(), props);
    streams.start();
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));

}
 
Example 27
@StreamListener( "input" )
public void process( KStream<Object, byte[]> input ) {
    log.debug( "process : enter" );

    input
            .flatMap( (key, value) -> {

                try {

                    DomainEvent domainEvent = mapper.readValue( value, DomainEvent.class );
                    log.debug( "process : domainEvent=" + domainEvent );

                    return Collections.singletonList( new KeyValue<>( domainEvent.getBoardUuid().toString(), domainEvent ) );

                } catch( IOException e ) {
                    log.error( "process : error converting json to DomainEvent", e );
                }

                // skip unparseable records; a map() mapper must not return null
                return Collections.emptyList();
            })
            .groupBy( (s, domainEvent) -> s, Serialized.with( Serdes.String(), domainEventSerde ) )
            .aggregate(
                    Board::new,
                    (key, domainEvent, board) -> board.handleEvent( domainEvent ),
                    Materialized.<String, Board, KeyValueStore<Bytes, byte[]>>as( BOARD_EVENTS_SNAPSHOTS )
                        .withKeySerde( Serdes.String() )
                        .withValueSerde( boardSerde )
            );

    log.debug( "process : exit" );
}
 
Example 28
Source Project: kafka-graphs   Source File: KGraph.java    License: Apache License 2.0
public KGraph<K, VV, EV> filterOnVertices(Predicate<K, VV> vertexFilter) {
    KTable<K, VV> filteredVertices = vertices.filter(vertexFilter);

    KTable<Edge<K>, EV> remainingEdges = edgesBySource()
        .join(filteredVertices, (e, v) -> e, Joined.with(keySerde(), new KryoSerde<>(), vertexValueSerde()))
        .map((k, edge) -> new KeyValue<>(edge.target(), edge))
        .join(filteredVertices, (e, v) -> e, Joined.with(keySerde(), new KryoSerde<>(), vertexValueSerde()))
        .map((k, edge) -> new KeyValue<>(new Edge<>(edge.source(), edge.target()), edge.value()))
        .groupByKey(Grouped.with(new KryoSerde<>(), edgeValueSerde()))
        .reduce((v1, v2) -> v2, Materialized.<Edge<K>, EV, KeyValueStore<Bytes, byte[]>>as(generateStoreName()).withKeySerde(new KryoSerde<>()).withValueSerde(edgeValueSerde()));

    return new KGraph<>(filteredVertices, remainingEdges, serialized);
}
 
Example 29
Source Project: kafka-graphs   Source File: KGraph.java    License: Apache License 2.0
public KGraph<K, VV, EV> undirected() {

    KTable<Edge<K>, EV> undirectedEdges = edges
        .toStream()
        .flatMap(new UndirectEdges<>())
        .groupByKey(Grouped.with(new KryoSerde<>(), serialized.edgeValueSerde()))
        .reduce((v1, v2) -> v2, Materialized.<Edge<K>, EV, KeyValueStore<Bytes, byte[]>>as(generateStoreName())
            .withKeySerde(new KryoSerde<>()).withValueSerde(serialized.edgeValueSerde()));

    return new KGraph<>(vertices, undirectedEdges, serialized);
}