kafka.serializer.StringEncoder Java Examples

The following examples show how to use kafka.serializer.StringEncoder. Each example is taken from an open-source project; the source file and originating project are noted above it.
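
Before the project examples, a minimal sketch of the encoder itself may help. This assumes the classic kafka.serializer API from Kafka 0.8.x, where StringEncoder takes a VerifiableProperties argument; the class name below is illustrative, not from any of the projects.

import kafka.serializer.StringEncoder;
import kafka.utils.VerifiableProperties;

public class StringEncoderSketch {
    public static void main(String[] args) {
        // StringEncoder converts a String to bytes using the encoding given by
        // the "serializer.encoding" property (UTF-8 by default).
        StringEncoder encoder = new StringEncoder(new VerifiableProperties());
        byte[] bytes = encoder.toBytes("hello"); // the UTF-8 bytes of "hello"
        System.out.println(bytes.length);        // 5
    }
}
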
Example #1
Source File: ProducerDemo.java    From KafkaExample with Apache License 2.0
private static Producer<String, String> initProducer() {
    Properties props = new Properties();
    props.put("metadata.broker.list", BROKER_LIST);
    // Equivalent string form: props.put("serializer.class", "kafka.serializer.StringEncoder");
    props.put("serializer.class", StringEncoder.class.getName());
    props.put("partitioner.class", HashPartitioner.class.getName());
    // props.put("compression.codec", "0"); // 0 = no compression
    props.put("producer.type", "sync");                    // send synchronously
    props.put("batch.num.messages", "1");                  // messages per batch (async mode only)
    props.put("queue.buffering.max.messages", "1000000");  // async buffer capacity
    props.put("queue.enqueue.timeout.ms", "20000000");     // how long send() may block when the queue is full

    ProducerConfig config = new ProducerConfig(props);
    return new Producer<String, String>(config);
}
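
A hypothetical caller might use this producer as follows; the topic name, key, and value below are placeholders, not part of the original example.

Producer<String, String> producer = initProducer();
// KeyedMessage carries topic, key, and value; the key is routed through
// HashPartitioner, the value through StringEncoder.
producer.send(new KeyedMessage<String, String>("my-topic", "key-0", "hello"));
producer.close();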
 
Example #2
Source File: PulsarKafkaProducer.java    From pulsar with Apache License 2.0
private String getKey(String topic, K key) {
    // If the key is a String, use it as-is; otherwise serialize it to byte[] and Base64-encode it
    if (keySerializer != null && keySerializer instanceof StringEncoder) {
        return (String) key;
    } else {
        byte[] keyBytes = keySerializer.toBytes(key);
        return Base64.getEncoder().encodeToString(keyBytes);
    }
}
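
To illustrate the else branch: assuming a hypothetical Encoder<Integer> whose toBytes(42) produced the four bytes {0, 0, 0, 42} (this encoder is an assumption for illustration, not part of the project), getKey would return their Base64 form:

// Hypothetical illustration of the non-String branch:
byte[] keyBytes = new byte[] { 0, 0, 0, 42 };
String encoded = Base64.getEncoder().encodeToString(keyBytes); // "AAAAKg=="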
 
Example #3
Source File: ProducerExample.java    From pulsar with Apache License 2.0
private static void publishMessage(Arguments arguments) {
    // Create the producer configuration
    Properties properties = new Properties();
    properties.put(BROKER_URL, arguments.serviceUrl);
    properties.put(PRODUCER_TYPE, "sync");
    properties.put(SERIALIZER_CLASS, TestEncoder.class.getName());
    properties.put(KEY_SERIALIZER_CLASS, StringEncoder.class.getName());
    properties.put(PARTITIONER_CLASS, TestPartitioner.class.getName());
    properties.put(COMPRESSION_CODEC, "gzip");             // compression codec
    properties.put(QUEUE_ENQUEUE_TIMEOUT_MS, "-1");        // -1 = block indefinitely when the queue is full
    properties.put(QUEUE_BUFFERING_MAX_MESSAGES, "6000");  // max messages buffered
    properties.put(QUEUE_BUFFERING_MAX_MS, "100");         // batch delay in ms
    properties.put(BATCH_NUM_MESSAGES, "500");             // messages per batch
    properties.put(CLIENT_ID, "test");
    ProducerConfig config = new ProducerConfig(properties);
    Producer<String, Tweet> producer = new Producer<>(config);

    String name = "user";
    String msg = arguments.messageValue;
    for (int i = 0; i < arguments.totalMessages; i++) {
        String sendMessage = msg + i;
        Tweet tweet = new Tweet(name, sendMessage);
        KeyedMessage<String, Tweet> message = new KeyedMessage<>(arguments.topicName, name, tweet);
        producer.send(message);
    }

    producer.close();
    log.info("Successfully published {} messages", arguments.totalMessages);
}
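
TestEncoder and Tweet are project-specific classes not shown here. A value encoder along these lines would satisfy the classic kafka.serializer.Encoder contract; the field names are assumptions for illustration, not the project's actual Tweet definition.

import java.nio.charset.StandardCharsets;
import kafka.serializer.Encoder;
import kafka.utils.VerifiableProperties;

public class TweetEncoderSketch implements Encoder<Tweet> {
    // The classic producer instantiates serializers reflectively and passes a
    // VerifiableProperties argument, so this constructor is required.
    public TweetEncoderSketch(VerifiableProperties props) {
    }

    @Override
    public byte[] toBytes(Tweet tweet) {
        // Hypothetical fields; the project's Tweet class may differ.
        return (tweet.userName + "," + tweet.message).getBytes(StandardCharsets.UTF_8);
    }
}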
 
Example #4
Source File: KafkaConnectionInformation.java    From nd4j with Apache License 2.0
/**
 * Returns a Kafka connection URI representing this connection information.
 *
 * @return a Kafka connection URI
 */
public String kafkaUri() {
    return String.format(
                    "kafka://%s?topic=%s&groupId=%s&zookeeperHost=%s&zookeeperPort=%d&serializerClass=%s&keySerializerClass=%s",
                    kafkaBrokerList, topicName, groupId, zookeeperHost, zookeeperPort,
                    StringEncoder.class.getName(), StringEncoder.class.getName());
}
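
With hypothetical field values, the resulting URI would look like this:

// Assuming kafkaBrokerList = "localhost:9092", topicName = "test",
// groupId = "g1", zookeeperHost = "localhost", zookeeperPort = 2181:
// kafka://localhost:9092?topic=test&groupId=g1&zookeeperHost=localhost&zookeeperPort=2181&serializerClass=kafka.serializer.StringEncoder&keySerializerClass=kafka.serializer.StringEncoder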
 
Example #5
Source File: KafkaProducerConsumerTest.java    From pulsar with Apache License 2.0
@Test
public void testPulsarKafkaProducerWithSerializer() throws Exception {
    final String serviceUrl = lookupUrl.toString();
    final String topicName = "persistent://my-property/my-ns/my-topic1";

    // (1) Create consumer
    Properties properties = new Properties();
    properties.put("zookeeper.connect", serviceUrl);
    properties.put("group.id", "group1");
    properties.put("consumer.id", "cons1");
    properties.put("auto.commit.enable", "true");
    properties.put("auto.commit.interval.ms", "100");
    properties.put("queued.max.message.chunks", "100");

    ConsumerConfig consumerConfig = new ConsumerConfig(properties);
    ConsumerConnector connector = new ConsumerConnector(consumerConfig);
    Map<String, Integer> topicCountMap = Collections.singletonMap(topicName, 2);
    Map<String, List<PulsarKafkaStream<String, Tweet>>> streams = connector.createMessageStreams(topicCountMap,
            new StringDecoder(null), new TestDecoder());

    // (2) Create producer
    Properties properties2 = new Properties();
    properties2.put(BROKER_URL, serviceUrl);
    properties2.put(PRODUCER_TYPE, "sync");
    properties2.put(SERIALIZER_CLASS, TestEncoder.class.getName());
    properties2.put(KEY_SERIALIZER_CLASS, StringEncoder.class.getName());
    properties2.put(PARTITIONER_CLASS, TestPartitioner.class.getName());
    properties2.put(COMPRESSION_CODEC, "gzip");             // compression codec
    properties2.put(QUEUE_ENQUEUE_TIMEOUT_MS, "-1");        // -1 = block indefinitely when the queue is full
    properties2.put(QUEUE_BUFFERING_MAX_MESSAGES, "6000");  // max messages buffered
    properties2.put(QUEUE_BUFFERING_MAX_MS, "100");         // batch delay in ms
    properties2.put(BATCH_NUM_MESSAGES, "500");             // messages per batch
    properties2.put(CLIENT_ID, "test");
    ProducerConfig config = new ProducerConfig(properties2);
    PulsarKafkaProducer<String, Tweet> producer = new PulsarKafkaProducer<>(config);

    String name = "user";
    String msg = "Hello World!";
    Set<Tweet> published = Sets.newHashSet();
    Set<Tweet> received = Sets.newHashSet();
    int total = 10;
    for (int i = 0; i < total; i++) {
        String sendMessage = msg + i;
        Tweet tweet = new Tweet(name, sendMessage);
        KeyedMessage<String, Tweet> message = new KeyedMessage<>(topicName, name, tweet);
        published.add(tweet);
        producer.send(message);
    }
    // Consume until every published message has been received
    while (received.size() < total) {
        for (int i = 0; i < streams.size(); i++) {
            List<PulsarKafkaStream<String, Tweet>> kafkaStreams = streams.get(topicName);
            assertEquals(kafkaStreams.size(), 2);
            for (PulsarKafkaStream<String, Tweet> kafkaStream : kafkaStreams) {
                for (PulsarMessageAndMetadata<String, Tweet> record : kafkaStream) {
                    received.add(record.message());
                    assertEquals(record.key(), name);
                }
            }
        }
    }
    assertEquals(published.size(), received.size());
    published.removeAll(received);
    assertTrue(published.isEmpty());
}
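
The consumer side pairs StringEncoder with its decoder counterpart. In the classic kafka.serializer API, StringDecoder also accepts a VerifiableProperties argument, and passing null (as in the test above) falls back to the default UTF-8 encoding. A minimal sketch:

StringDecoder decoder = new StringDecoder(null); // null props => default UTF-8 encoding
String value = decoder.fromBytes("hello".getBytes(java.nio.charset.StandardCharsets.UTF_8)); // "hello"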