org.apache.kafka.common.serialization.LongSerializer Java Examples

The following examples show how to use org.apache.kafka.common.serialization.LongSerializer. Each example notes its source file, the open-source project it comes from, and that project's license.
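Before the project-specific examples, here is a minimal, self-contained sketch of the serializer's round-trip behavior (the topic name is arbitrary; the built-in serializers ignore it):

import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.LongSerializer;

public class LongSerializerRoundTrip {
    public static void main(String[] args) {
        LongSerializer serializer = new LongSerializer();
        LongDeserializer deserializer = new LongDeserializer();

        // serialize() encodes the Long as its 8-byte big-endian representation
        byte[] bytes = serializer.serialize("any-topic", 42L);
        Long roundTripped = deserializer.deserialize("any-topic", bytes);

        System.out.println(roundTripped); // prints 42
    }
}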
Example #1
Source File: KafkaProducerInterceptorWrapperTest.java    From pulsar with Apache License 2.0
@DataProvider(name = "serializers")
public Object[][] serializers() {
    return new Object[][] {
        {
            new StringSerializer(), StringDeserializer.class
        },
        {
            new LongSerializer(), LongDeserializer.class
        },
        {
            new IntegerSerializer(), IntegerDeserializer.class
        },
        {
            new DoubleSerializer(), DoubleDeserializer.class
        },
        {
            new BytesSerializer(), BytesDeserializer.class
        },
        {
            new ByteBufferSerializer(), ByteBufferDeserializer.class
        },
        {
            new ByteArraySerializer(), ByteArrayDeserializer.class
        }
    };
}
 
Example #2
Source File: KafkaProducerInterceptorWrapper.java    From pulsar with Apache License 2.0
static Deserializer getDeserializer(Serializer serializer) {
    if (serializer instanceof StringSerializer) {
        return new StringDeserializer();
    } else if (serializer instanceof LongSerializer) {
        return new LongDeserializer();
    } else if (serializer instanceof IntegerSerializer) {
        return new IntegerDeserializer();
    } else if (serializer instanceof DoubleSerializer) {
        return new DoubleDeserializer();
    } else if (serializer instanceof BytesSerializer) {
        return new BytesDeserializer();
    } else if (serializer instanceof ByteBufferSerializer) {
        return new ByteBufferDeserializer();
    } else if (serializer instanceof ByteArraySerializer) {
        return new ByteArrayDeserializer();
    } else {
        throw new IllegalArgumentException(serializer.getClass().getName() + " is not a valid or supported subclass of org.apache.kafka.common.serialization.Serializer.");
    }
}
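A hypothetical call site for this helper; the variable names are illustrative, but the result follows directly from the instanceof chain above:

Serializer<Long> keySerializer = new LongSerializer();
Deserializer keyDeserializer = KafkaProducerInterceptorWrapper.getDeserializer(keySerializer);
// keyDeserializer is an instance of LongDeserializer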
 
Example #3
Source File: MessageProducerFactory.java    From alcor with Apache License 2.0
public Producer Create() {
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, this.kafkaAddress);
    props.put(ProducerConfig.CLIENT_ID_CONFIG, IKafkaConfiguration.PRODUCER_CLIENT_ID);

    // Key is serialized as a long; the value serializer is supplied by the concrete implementation
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, LongSerializer.class.getName());

    Serializer serializer = getSerializer();
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, serializer.getClass().getName());

    // TODO: optimize partitioning
    // props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, CustomPartitioner.class.getName());

    return new KafkaProducer<>(props);
}
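Once created, the producer sends records keyed by Long, matching the LongSerializer configured above. A sketch, assuming a hypothetical factory instance whose concrete subclass supplies a StringSerializer for values:

Producer<Long, String> producer = factory.Create();
// The Long key is encoded by the LongSerializer configured in Create()
producer.send(new ProducerRecord<>("example-topic", 1L, "example payload"));
producer.close();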
 
Example #4
Source File: KafkaIOTest.java    From beam with Apache License 2.0
@Test
public void testSinkDisplayData() {
  try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
    KafkaIO.Write<Integer, Long> write =
        KafkaIO.<Integer, Long>write()
            .withBootstrapServers("myServerA:9092,myServerB:9092")
            .withTopic("myTopic")
            .withValueSerializer(LongSerializer.class)
            .withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey))
            .withProducerConfigUpdates(ImmutableMap.of("retry.backoff.ms", 100));

    DisplayData displayData = DisplayData.from(write);

    assertThat(displayData, hasDisplayItem("topic", "myTopic"));
    assertThat(displayData, hasDisplayItem("bootstrap.servers", "myServerA:9092,myServerB:9092"));
    assertThat(displayData, hasDisplayItem("retries", 3));
    assertThat(displayData, hasDisplayItem("retry.backoff.ms", 100));
  }
}
 
Example #5
Source File: KafkaMsgProducer.java    From dapeng-soa with Apache License 2.0
public void init() {
    KafkaConfigBuilder.ProducerConfiguration builder = KafkaConfigBuilder.defaultProducer();

    final Properties properties = builder.withKeySerializer(LongSerializer.class)
            .withValueSerializer(ByteArraySerializer.class)
            .bootstrapServers(kafkaConnect)
            .build();

    producer = new KafkaProducer<>(properties);
}
 
Example #6
Source File: ConnectedComponentsTest.java    From kafka-graphs with Apache License 2.0
@Test
public void testGridConnectedComponents() throws Exception {
    String suffix = "grid";
    StreamsBuilder builder = new StreamsBuilder();

    Properties producerConfig = ClientUtils.producerConfig(CLUSTER.bootstrapServers(), LongSerializer.class,
        LongSerializer.class, new Properties()
    );
    KGraph<Long, Tuple2<Long, Long>, Long> gridGraph = GraphGenerators.gridGraph(builder, producerConfig, 10, 10);
    KTable<Long, Long> initialVertices = gridGraph.vertices().mapValues((id, v) -> id);
    KGraph<Long, Long, Long> graph = new KGraph<>(initialVertices, gridGraph.edges(),
        GraphSerialized.with(Serdes.Long(), Serdes.Long(), Serdes.Long()));

    Properties props = ClientUtils.streamsConfig("prepare-" + suffix, "prepare-client-" + suffix,
        CLUSTER.bootstrapServers(), graph.keySerde().getClass(), graph.vertexValueSerde().getClass());
    CompletableFuture<Map<TopicPartition, Long>> state = GraphUtils.groupEdgesBySourceAndRepartition(builder, props, graph, "vertices-" + suffix, "edgesGroupedBySource-" + suffix, 2, (short) 1);
    Map<TopicPartition, Long> offsets = state.get();

    algorithm =
        new PregelGraphAlgorithm<>(null, "run-" + suffix, CLUSTER.bootstrapServers(),
            CLUSTER.zKConnectString(), "vertices-" + suffix, "edgesGroupedBySource-" + suffix, offsets, graph.serialized(),
            "solutionSet-" + suffix, "solutionSetStore-" + suffix, "workSet-" + suffix, 2, (short) 1,
            Collections.emptyMap(), Optional.empty(), new ConnectedComponents<>());
    props = ClientUtils.streamsConfig("run-" + suffix, "run-client-" + suffix, CLUSTER.bootstrapServers(),
        graph.keySerde().getClass(), KryoSerde.class);
    KafkaStreams streams = algorithm.configure(new StreamsBuilder(), props).streams();
    GraphAlgorithmState<KTable<Long, Long>> paths = algorithm.run();
    paths.result().get();

    Thread.sleep(2000);

    Map<Long, Long> map = StreamUtils.mapFromStore(paths.streams(), "solutionSetStore-" + suffix);
    log.debug("result: {}", map);

    for (long i = 0; i < 100; i++) {
        assertEquals(0L, map.get(i).longValue());
    }
}
 
Example #7
Source File: ReduceOnNeighborMethodsITCase.java    From kafka-graphs with Apache License 2.0
@Test
public void testSumOfOutNeighborsNoValue() throws Exception {
    Properties producerConfig = ClientUtils.producerConfig(CLUSTER.bootstrapServers(), LongSerializer.class,
        LongSerializer.class, new Properties()
    );
    StreamsBuilder builder = new StreamsBuilder();

    KTable<Long, Long> vertices =
        StreamUtils.tableFromCollection(builder, producerConfig, Serdes.Long(), Serdes.Long(),
            TestGraphUtils.getLongLongVertices());

    KTable<Edge<Long>, Long> edges =
        StreamUtils.tableFromCollection(builder, producerConfig, new KryoSerde<>(), Serdes.Long(),
            TestGraphUtils.getLongLongEdges());

    KGraph<Long, Long, Long> graph = new KGraph<>(
        vertices, edges, GraphSerialized.with(Serdes.Long(), Serdes.Long(), Serdes.Long()));

    KTable<Long, Long> verticesWithSumOfOutNeighborValues =
        graph.reduceOnNeighbors((v1, v2) -> v1 + v2, EdgeDirection.OUT);

    startStreams(builder, Serdes.Long(), Serdes.Long());

    Thread.sleep(5000);

    List<KeyValue<Long, Long>> result = StreamUtils.listFromTable(streams, verticesWithSumOfOutNeighborValues);

    expectedResult = "1,5\n" +
        "2,3\n" +
        "3,9\n" +
        "4,5\n" +
        "5,1\n";

    compareResultAsTuples(result, expectedResult);
}
 
Example #8
Source File: ReduceOnNeighborMethodsITCase.java    From kafka-graphs with Apache License 2.0
@Test
public void testSumOfOutNeighbors() throws Exception {
    Properties producerConfig = ClientUtils.producerConfig(CLUSTER.bootstrapServers(), LongSerializer.class,
        LongSerializer.class, new Properties()
    );
    StreamsBuilder builder = new StreamsBuilder();

    KTable<Long, Long> vertices =
        StreamUtils.tableFromCollection(builder, producerConfig, Serdes.Long(), Serdes.Long(),
            TestGraphUtils.getLongLongVertices());

    KTable<Edge<Long>, Long> edges =
        StreamUtils.tableFromCollection(builder, producerConfig, new KryoSerde<>(), Serdes.Long(),
            TestGraphUtils.getLongLongEdges());

    KGraph<Long, Long, Long> graph = new KGraph<>(
        vertices, edges, GraphSerialized.with(Serdes.Long(), Serdes.Long(), Serdes.Long()));

    KTable<Long, Long> verticesWithSumOfOutNeighborValues =
        graph.groupReduceOnNeighbors(new SumOutNeighbors(), EdgeDirection.OUT);

    startStreams(builder, Serdes.Long(), Serdes.Long());

    Thread.sleep(5000);

    List<KeyValue<Long, Long>> result = StreamUtils.listFromTable(streams, verticesWithSumOfOutNeighborValues);

    expectedResult = "1,5\n" +
        "2,3\n" +
        "3,9\n" +
        "4,5\n" +
        "5,1\n";

    compareResultAsTuples(result, expectedResult);
}
 
Example #9
Source File: SpannerTest.java    From kafka-graphs with Apache License 2.0
@Test
public void test() throws Exception {

    Properties producerConfig = ClientUtils.producerConfig(CLUSTER.bootstrapServers(), LongSerializer.class,
        LongSerializer.class, new Properties()
    );
    StreamsBuilder builder = new StreamsBuilder();

    // Use 1 partition for deterministic ordering
    int numPartitions = 1;
    KStream<Edge<Long>, Void> edges = StreamUtils.streamFromCollection(builder, producerConfig,
        "temp-" + UUID.randomUUID(), numPartitions, (short) 1, new KryoSerde<>(), new KryoSerde<>(), getEdges()
    );
    KGraphStream<Long, Void, Void> graph =
        new EdgeStream<>(edges, GraphSerialized.with(new KryoSerde<>(), new KryoSerde<>(), new KryoSerde<>()));

    KTable<Windowed<Short>, AdjacencyListGraph<Long>> sets = graph.aggregate(new Spanner<>(mergeWindowTime, k));

    startStreams(builder, new KryoSerde<>(), new KryoSerde<>());

    Thread.sleep(10000);

    List<String> values = StreamUtils.listFromTable(streams, sets).stream()
        .map(kv -> kv.value.toString())
        .collect(Collectors.toList());

    // This result will vary depending on the number of partitions
    assertEquals(
        "[{1=[4], 2=[3], 3=[2, 4], 4=[1, 3, 5, 7], 5=[4, 6], 6=[5, 8], 7=[4, 8], 8=[6, 7, 9], 9=[8]}]",
        values.toString()
    );

    streams.close();
}
 
Example #10
Source File: BipartitenessCheckTest.java    From kafka-graphs with Apache License 2.0
@Test
public void testNonBipartite() throws Exception {

    Properties producerConfig = ClientUtils.producerConfig(CLUSTER.bootstrapServers(), LongSerializer.class,
        LongSerializer.class, new Properties()
    );
    StreamsBuilder builder = new StreamsBuilder();

    KStream<Edge<Long>, Void> edges = StreamUtils.streamFromCollection(builder, producerConfig, new KryoSerde<>(),
        new KryoSerde<>(), getNonBipartiteEdges()
    );
    KGraphStream<Long, Void, Void> graph =
        new EdgeStream<>(edges, GraphSerialized.with(new KryoSerde<>(), new KryoSerde<>(), new KryoSerde<>()));

    KTable<Windowed<Short>, Candidates> candidates = graph.aggregate(new BipartitenessCheck<>(500L));

    startStreams(builder, new KryoSerde<>(), new KryoSerde<>());

    Thread.sleep(10000);

    List<String> result = StreamUtils.listFromTable(streams, candidates).stream()
        .map(kv -> kv.value.toString())
        .collect(Collectors.toList());

    // verify the results
    assertEquals(
        Lists.newArrayList(
            "(false,{})"),
        result
    );

    streams.close();
}
 
Example #11
Source File: BipartitenessCheckTest.java    From kafka-graphs with Apache License 2.0
@Test
public void testBipartite() throws Exception {

    Properties producerConfig = ClientUtils.producerConfig(CLUSTER.bootstrapServers(), LongSerializer.class,
        LongSerializer.class, new Properties()
    );
    StreamsBuilder builder = new StreamsBuilder();

    KStream<Edge<Long>, Void> edges = StreamUtils.streamFromCollection(builder, producerConfig, new KryoSerde<>(),
        new KryoSerde<>(), getBipartiteEdges()
    );
    KGraphStream<Long, Void, Void> graph =
        new EdgeStream<>(edges, GraphSerialized.with(new KryoSerde<>(), new KryoSerde<>(), new KryoSerde<>()));

    KTable<Windowed<Short>, Candidates> candidates = graph.aggregate(new BipartitenessCheck<>(500L));

    startStreams(builder, new KryoSerde<>(), new KryoSerde<>());

    Thread.sleep(10000);

    List<String> result = StreamUtils.listFromTable(streams, candidates).stream()
        .map(kv -> kv.value.toString())
        .collect(Collectors.toList());

    // verify the results
    assertEquals(
        Lists.newArrayList(
            "(true,{1={1=(1,true), 2=(2,false), 3=(3,false), 4=(4,false), 5=(5,true), 7=(7,true), 9=(9,true)}})"),
        result
    );

    streams.close();
}
 
Example #12
Source File: GraphOperationsITCase.java    From kafka-graphs with Apache License 2.0
@Test
public void testInDegrees() throws Exception {
    Properties producerConfig = ClientUtils.producerConfig(CLUSTER.bootstrapServers(), LongSerializer.class,
        LongSerializer.class, new Properties()
    );
    StreamsBuilder builder = new StreamsBuilder();

    KTable<Long, Long> vertices =
        StreamUtils.tableFromCollection(builder, producerConfig, Serdes.Long(), Serdes.Long(),
            TestGraphUtils.getLongLongVertices());

    KTable<Edge<Long>, Long> edges =
        StreamUtils.tableFromCollection(builder, producerConfig, new KryoSerde<>(), Serdes.Long(),
            TestGraphUtils.getLongLongEdges());

    KGraph<Long, Long, Long> graph = new KGraph<>(
        vertices, edges, GraphSerialized.with(Serdes.Long(), Serdes.Long(), Serdes.Long()));

    KTable<Long, Long> inDegrees = graph.inDegrees();

    startStreams(builder, Serdes.Long(), Serdes.Long());

    Thread.sleep(5000);

    List<KeyValue<Long, Long>> result = StreamUtils.listFromTable(streams, inDegrees);

    expectedResult = "1,1\n" +
        "2,1\n" +
        "3,2\n" +
        "4,1\n" +
        "5,2\n";

    compareResultAsTuples(result, expectedResult);
}
 
Example #13
Source File: SampleProducer.java    From kafka-encryption with Apache License 2.0
@Override
public void run() {

    // tag::produce[]

    Encryptor encryptor = new DefaultEncryptor(keyProvider, cryptoAlgorithm);

    // Wrap base LongSerializer and StringSerializer with encrypted wrappers
    CryptoSerializerPairFactory cryptoSerializerPairFactory = new CryptoSerializerPairFactory(encryptor, keyReferenceExtractor);
    SerializerPair<Long, String> serializerPair = cryptoSerializerPairFactory.build(new LongSerializer(), new StringSerializer());

    Properties producerProperties = new Properties();
    producerProperties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

    try (KafkaProducer<Long, String> producer =
                 new KafkaProducer<>(producerProperties, serializerPair.getKeySerializer(), serializerPair.getValueSerializer())) {

        for (long i = 0L; i < Long.MAX_VALUE; i++) {
            producer.send(new ProducerRecord<>("sampletopic", i, "test number " + i));
            try {
                Thread.sleep(1000L);
            }
            catch (InterruptedException e) {
                return;
            }
        }
    }
    // end::produce[]
}
 
Example #14
Source File: TestKafkaPublishTask.java    From conductor with Apache License 2.0
@Test
public void longSerializer_longObject() {
	KafkaPublishTask kPublishTask = new KafkaPublishTask(new SystemPropertiesConfiguration(), new KafkaProducerManager(new SystemPropertiesConfiguration()), objectMapper);
	KafkaPublishTask.Input input = new KafkaPublishTask.Input();
	input.setKeySerializer(LongSerializer.class.getCanonicalName());
	input.setKey(String.valueOf(Long.MAX_VALUE));
	Assert.assertEquals(Long.valueOf(Long.MAX_VALUE), kPublishTask.getKey(input));
}
 
Example #15
Source File: KafkaMsgProducer.java    From dapeng-soa with Apache License 2.0
/**
 * Creates a producer with Kafka transaction support.
 *
 * @return a transactional producer
 */
protected Producer<Long, byte[]> createTransactionalProducer() {
    KafkaConfigBuilder.ProducerConfiguration builder = KafkaConfigBuilder.defaultProducer();
    final Properties properties = builder.withKeySerializer(LongSerializer.class)
            .withValueSerializer(ByteArraySerializer.class)
            .bootstrapServers(kafkaConnect)
            .withTransactions("event")
            .build();

    producer = new KafkaProducer<>(properties);
    producer.initTransactions();
    return producer;
}
 
Example #16
Source File: GraphOperationsITCase.java    From kafka-graphs with Apache License 2.0
@Test
public void testOutDegrees() throws Exception {
    Properties producerConfig = ClientUtils.producerConfig(CLUSTER.bootstrapServers(), LongSerializer.class,
        LongSerializer.class, new Properties()
    );
    StreamsBuilder builder = new StreamsBuilder();

    KTable<Long, Long> vertices =
        StreamUtils.tableFromCollection(builder, producerConfig, Serdes.Long(), Serdes.Long(),
            TestGraphUtils.getLongLongVertices());

    KTable<Edge<Long>, Long> edges =
        StreamUtils.tableFromCollection(builder, producerConfig, new KryoSerde<>(), Serdes.Long(),
            TestGraphUtils.getLongLongEdges());

    KGraph<Long, Long, Long> graph = new KGraph<>(
        vertices, edges, GraphSerialized.with(Serdes.Long(), Serdes.Long(), Serdes.Long()));

    KTable<Long, Long> outDegrees = graph.outDegrees();

    expectedResult = "1,2\n" +
        "2,1\n" +
        "3,2\n" +
        "4,1\n" +
        "5,1\n";

    startStreams(builder, Serdes.Long(), Serdes.Long());

    Thread.sleep(5000);

    List<KeyValue<Long, Long>> result = StreamUtils.listFromTable(streams, outDegrees);

    compareResultAsTuples(result, expectedResult);
}
 
Example #17
Source File: GraphOperationsITCase.java    From kafka-graphs with Apache License 2.0
@Test
public void testSubGraph() throws Exception {
    Properties producerConfig = ClientUtils.producerConfig(CLUSTER.bootstrapServers(), LongSerializer.class,
        LongSerializer.class, new Properties()
    );
    StreamsBuilder builder = new StreamsBuilder();

    KTable<Long, Long> vertices =
        StreamUtils.tableFromCollection(builder, producerConfig, Serdes.Long(), Serdes.Long(),
            TestGraphUtils.getLongLongVertices());

    KTable<Edge<Long>, Long> edges =
        StreamUtils.tableFromCollection(builder, producerConfig, new KryoSerde<>(), Serdes.Long(),
            TestGraphUtils.getLongLongEdges());

    KGraph<Long, Long, Long> graph = new KGraph<>(
        vertices, edges, GraphSerialized.with(Serdes.Long(), Serdes.Long(), Serdes.Long()));

    KTable<Edge<Long>, Long> data = graph.subgraph((k, v) -> v > 2, (k, e) -> e > 34).edges();

    startStreams(builder, Serdes.Long(), Serdes.Long());

    Thread.sleep(5000);

    List<KeyValue<Edge<Long>, Long>> result = StreamUtils.listFromTable(streams, data);

    expectedResult = "3,5,35\n" +
        "4,5,45\n";

    compareResultAsTuples(result, expectedResult);
}
 
Example #18
Source File: GraphIntegrationTest.java    From kafka-graphs with Apache License 2.0
private MultiValueMap<String, HttpEntity<?>> generateCCBody() {
    MultipartBodyBuilder builder = new MultipartBodyBuilder();
    builder.part("verticesTopic", "initial-cc-vertices");
    builder.part("edgesTopic", "initial-cc-edges");
    builder.part("vertexFile", new ClassPathResource("vertices_simple.txt"));
    builder.part("edgeFile", new ClassPathResource("edges_simple.txt"));
    builder.part("vertexParser", VertexLongIdLongValueParser.class.getName());
    builder.part("edgeParser", EdgeLongIdLongValueParser.class.getName());
    builder.part("keySerializer", LongSerializer.class.getName());
    builder.part("vertexValueSerializer", LongSerializer.class.getName());
    builder.part("edgeValueSerializer", LongSerializer.class.getName());
    builder.part("numPartitions", "50");
    builder.part("replicationFactor", "1");
    return builder.build();
}
 
Example #19
Source File: KafkaPublishTask.java    From conductor with Apache License 2.0
@VisibleForTesting
Object getKey(Input input) {

	String keySerializer = input.getKeySerializer();

	if (LongSerializer.class.getCanonicalName().equals(keySerializer)) {
		return Long.parseLong(String.valueOf(input.getKey()));
	} else if (IntegerSerializer.class.getCanonicalName().equals(keySerializer)) {
		return Integer.parseInt(String.valueOf(input.getKey()));
	} else {
		return String.valueOf(input.getKey());
	}

}
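Given that chain, passing the canonical name of LongSerializer causes the key to be parsed as a Long, as the test in Example #14 verifies. A sketch, assuming a hypothetical kafkaPublishTask instance:

KafkaPublishTask.Input input = new KafkaPublishTask.Input();
input.setKeySerializer(LongSerializer.class.getCanonicalName());
input.setKey("123");
Object key = kafkaPublishTask.getKey(input); // returns Long.valueOf(123L)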
 
Example #20
Source File: TestingKafka.java    From presto with Apache License 2.0
public KafkaProducer<Long, Object> createProducer()
{
    Map<String, String> properties = ImmutableMap.<String, String>builder()
            .put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, getConnectString())
            .put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, LongSerializer.class.getName())
            .put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class.getName())
            .put(ProducerConfig.PARTITIONER_CLASS_CONFIG, NumberPartitioner.class.getName())
            .put(ProducerConfig.ACKS_CONFIG, "1")
            .build();

    return new KafkaProducer<>(toProperties(properties));
}
 
Example #21
Source File: TestKafkaProducerManager.java    From conductor with Apache License 2.0
private KafkaPublishTask.Input getInput() {
	KafkaPublishTask.Input input = new KafkaPublishTask.Input();
	input.setTopic("testTopic");
	input.setValue("TestMessage");
	input.setKeySerializer(LongSerializer.class.getCanonicalName());
	input.setBootStrapServers("servers");
	return input;
}
 
Example #22
Source File: KafkaAdapter.java    From mdw with Apache License 2.0
private static Producer<Object, Object> createProducer() {
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
            "localhost:9092,localhost:9093,localhost:9094");
    props.put(ProducerConfig.CLIENT_ID_CONFIG, "KafkaMDWProducer");
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
            LongSerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
            StringSerializer.class.getName());
    return new KafkaProducer<>(props);
}
 
Example #23
Source File: KafkaIOTest.java    From beam with Apache License 2.0
@Test
public void testSink() throws Exception {
  // Simply read from kafka source and write to kafka sink. Then verify the records
  // are correctly published to mock kafka producer.

  int numElements = 1000;

  try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {

    ProducerSendCompletionThread completionThread =
        new ProducerSendCompletionThread(producerWrapper.mockProducer).start();

    String topic = "test";

    p.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn()).withoutMetadata())
        .apply(
            KafkaIO.<Integer, Long>write()
                .withBootstrapServers("none")
                .withTopic(topic)
                .withKeySerializer(IntegerSerializer.class)
                .withValueSerializer(LongSerializer.class)
                .withInputTimestamp()
                .withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)));

    p.run();

    completionThread.shutdown();

    verifyProducerRecords(producerWrapper.mockProducer, topic, numElements, false, true);
  }
}
 
Example #24
Source File: KafkaIOTest.java    From beam with Apache License 2.0
@Test
public void testValuesSink() throws Exception {
  // Similar to testSink(), but uses the values() interface.

  int numElements = 1000;

  try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {

    ProducerSendCompletionThread completionThread =
        new ProducerSendCompletionThread(producerWrapper.mockProducer).start();

    String topic = "test";

    p.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn()).withoutMetadata())
        .apply(Values.create()) // there are no keys
        .apply(
            KafkaIO.<Integer, Long>write()
                .withBootstrapServers("none")
                .withTopic(topic)
                .withValueSerializer(LongSerializer.class)
                .withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey))
                .values());

    p.run();

    completionThread.shutdown();

    verifyProducerRecords(producerWrapper.mockProducer, topic, numElements, true, false);
  }
}
 
Example #25
Source File: KafkaIOTest.java    From beam with Apache License 2.0
@Test
public void testRecordsSink() throws Exception {
  // Simply read from kafka source and write to kafka sink using ProducerRecord transform. Then
  // verify the records are correctly published to mock kafka producer.

  int numElements = 1000;

  try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {

    ProducerSendCompletionThread completionThread =
        new ProducerSendCompletionThread(producerWrapper.mockProducer).start();

    String topic = "test";

    p.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn()).withoutMetadata())
        .apply(ParDo.of(new KV2ProducerRecord(topic)))
        .setCoder(ProducerRecordCoder.of(VarIntCoder.of(), VarLongCoder.of()))
        .apply(
            KafkaIO.<Integer, Long>writeRecords()
                .withBootstrapServers("none")
                .withTopic(topic)
                .withKeySerializer(IntegerSerializer.class)
                .withValueSerializer(LongSerializer.class)
                .withInputTimestamp()
                .withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)));

    p.run();

    completionThread.shutdown();

    verifyProducerRecords(producerWrapper.mockProducer, topic, numElements, false, true);
  }
}
 
Example #26
Source File: KafkaIOTest.java    From beam with Apache License 2.0
@Test
public void testSinkProducerRecordsWithCustomTS() throws Exception {
  int numElements = 1000;

  try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {

    ProducerSendCompletionThread completionThread =
        new ProducerSendCompletionThread(producerWrapper.mockProducer).start();

    final String defaultTopic = "test";
    final Long ts = System.currentTimeMillis();

    p.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn()).withoutMetadata())
        .apply(ParDo.of(new KV2ProducerRecord(defaultTopic, ts)))
        .setCoder(ProducerRecordCoder.of(VarIntCoder.of(), VarLongCoder.of()))
        .apply(
            KafkaIO.<Integer, Long>writeRecords()
                .withBootstrapServers("none")
                .withKeySerializer(IntegerSerializer.class)
                .withValueSerializer(LongSerializer.class)
                .withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)));

    p.run();

    completionThread.shutdown();

    // Verify that messages are written with user-defined timestamp
    List<ProducerRecord<Integer, Long>> sent = producerWrapper.mockProducer.history();

    for (int i = 0; i < numElements; i++) {
      ProducerRecord<Integer, Long> record = sent.get(i);
      assertEquals(defaultTopic, record.topic());
      assertEquals(i, record.key().intValue());
      assertEquals(i, record.value().longValue());
      assertEquals(ts, record.timestamp());
    }
  }
}
 
Example #27
Source File: KafkaIOTest.java    From beam with Apache License 2.0
@Test
public void testSinkWithSendErrors() throws Throwable {
  // similar to testSink(), except that up to 10 of the send calls to producer will fail
  // asynchronously.

  // TODO: Ideally we want the pipeline to run to completion by retrying bundles that fail.
  // We limit the number of errors injected to 10 below. This would reflect a real streaming
  // pipeline. But I am not sure how to achieve that. For now, expect an exception:

  thrown.expect(InjectedErrorException.class);
  thrown.expectMessage("Injected Error #1");

  int numElements = 1000;

  try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {

    ProducerSendCompletionThread completionThreadWithErrors =
        new ProducerSendCompletionThread(producerWrapper.mockProducer, 10, 100).start();

    String topic = "test";

    p.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn()).withoutMetadata())
        .apply(
            KafkaIO.<Integer, Long>write()
                .withBootstrapServers("none")
                .withTopic(topic)
                .withKeySerializer(IntegerSerializer.class)
                .withValueSerializer(LongSerializer.class)
                .withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)));

    try {
      p.run();
    } catch (PipelineExecutionException e) {
      // throwing inner exception helps assert that first exception is thrown from the Sink
      throw e.getCause().getCause();
    } finally {
      completionThreadWithErrors.shutdown();
    }
  }
}
 
Example #28
Source File: KafkaIOTest.java    From beam with Apache License 2.0
MockProducerWrapper() {
  producerKey = String.valueOf(ThreadLocalRandom.current().nextLong());
  mockProducer =
      new MockProducer<Integer, Long>(
          false, // disable synchronous completion of send. see ProducerSendCompletionThread
          // below.
          new IntegerSerializer(),
          new LongSerializer()) {

        // Override flush() so that it does not complete all the waiting sends,
        // giving ProducerSendCompletionThread a chance to inject errors.

        @Override
        public synchronized void flush() {
          while (completeNext()) {
            // there are some uncompleted records. let the completion thread handle them.
            try {
              Thread.sleep(10);
            } catch (InterruptedException e) {
              // ok to retry.
            }
          }
        }
      };

  // Add the producer to the global map so that the producer factory function can access it.
  assertNull(MOCK_PRODUCER_MAP.putIfAbsent(producerKey, mockProducer));
}
 
Example #29
Source File: KafkaPluginIT.java    From glowroot with Apache License 2.0
private static Producer<Long, String> createProducer() {
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(ProducerConfig.CLIENT_ID_CONFIG, "client1");
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, LongSerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
            StringSerializer.class.getName());
    return new KafkaProducer<Long, String>(props);
}
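For completeness, a consumer reading records produced with a LongSerializer key would configure the matching LongDeserializer. This is a generic sketch with placeholder bootstrap address and group id, not code from any of the projects above:

Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");
// Key deserializer must mirror the producer's LongSerializer
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
KafkaConsumer<Long, String> consumer = new KafkaConsumer<>(props);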