kafka.serializer.DefaultDecoder Java Examples

The following examples show how to use kafka.serializer.DefaultDecoder. Each example is taken from an open source project; the source file and license are noted above the code.
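DefaultDecoder is the identity decoder from the legacy Scala client: fromBytes returns the payload bytes unchanged, and the VerifiableProperties constructor argument may be null because the decoder ignores it. A minimal sketch of that behavior:

import kafka.serializer.DefaultDecoder;

// DefaultDecoder hands back exactly the bytes it is given.
DefaultDecoder decoder = new DefaultDecoder(null); // VerifiableProperties may be null
byte[] payload = "hello".getBytes();
byte[] decoded = decoder.fromBytes(payload);       // same bytes as payload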
Example #1
Source File: KafkaStreamFactory.java    From zipkin-sparkstreaming with Apache License 2.0
@Override public JavaDStream<byte[]> create(JavaStreamingContext jsc) {
  return KafkaUtils.createDirectStream(
      jsc,
      byte[].class,
      byte[].class,
      DefaultDecoder.class,
      DefaultDecoder.class,
      kafkaParams(),
      Collections.singleton(topic()))
      .map(m -> m._2); // get value
}
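The kafkaParams() helper is not shown in this example. A hedged sketch of what it might return for the Kafka 0.8 direct-stream API follows; the broker address and offset policy are placeholder assumptions, not the project's real values:

// Hypothetical sketch of kafkaParams(); the actual implementation lives elsewhere
// in zipkin-sparkstreaming and may differ.
Map<String, String> kafkaParams() {
  Map<String, String> params = new HashMap<>();
  params.put("metadata.broker.list", "localhost:9092"); // assumed broker address
  params.put("auto.offset.reset", "largest");           // assumed: start from the latest offsets
  return params;
}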
 
Example #2
Source File: AbstractSparkLayer.java    From spark-streaming-direct-kafka with Apache License 2.0
public JavaInputDStream<MessageAndMetadata<String,byte[]>> buildInputDStream(
        JavaStreamingContext streamingContext) {

    HashMap<String, String> kafkaParams = config.getKafkaParams();

    // Ugly compiler-pleasing acrobatics:
    @SuppressWarnings("unchecked")
    Class<MessageAndMetadata<String, byte[]>> streamClass =
            (Class<MessageAndMetadata<String, byte[]>>) (Class<?>) MessageAndMetadata.class;

    if (!KafkaManager.topicExists(config.getZkKafka(), config.getTopic())) {
        throw new RuntimeException("Topic does not exist on server");
    }

    Map<TopicAndPartition, Long> seedOffsetsMap = KafkaManager.getOffsets(config.getZkKafka(),
            config.getZkOffsetManager(), config.getKafkaGroupId(), config.getTopic(), config.getKafkaParams());

    // TODO: try generics, instead of hardcoded values
    JavaInputDStream<MessageAndMetadata<String, byte[]>> dStream = org.apache.spark.streaming.kafka.KafkaUtils.createDirectStream(
            streamingContext,
            String.class,  // change as necessary
            byte[].class,  // change as necessary
            StringDecoder.class,
            DefaultDecoder.class,
            streamClass,
            kafkaParams,
            seedOffsetsMap,
            Functions.<MessageAndMetadata<String, byte[]>>identity());
    return dStream;
}
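A hedged usage sketch: because the element type is the full MessageAndMetadata, downstream code can still read topic, partition, and offset, or simply project out the value that DefaultDecoder produced (assumes a configured JavaStreamingContext named streamingContext):

// message() returns the value as decoded by DefaultDecoder, i.e. the raw byte[].
JavaDStream<byte[]> values = buildInputDStream(streamingContext)
        .map(MessageAndMetadata::message);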
 
Example #3
Source File: KafkaSource08.java    From sylph with Apache License 2.0
public JavaDStream<Row> createSource(JavaStreamingContext ssc, KafkaSourceConfig08 config, SourceContext context)
{
    String topics = requireNonNull(config.getTopics(), "topics is not set");
    String brokers = requireNonNull(config.getBrokers(), "brokers is not set"); // the cluster's broker hosts must be resolvable from the machine running this program
    String groupId = requireNonNull(config.getGroupid(), "group.id is not set"); // consumer group name
    String offsetMode = requireNonNull(config.getOffsetMode(), "offsetMode is not set");

    Map<String, String> otherConfig = config.getOtherConfig().entrySet()
            .stream()
            .filter(x -> x.getValue() != null)
            .collect(Collectors.toMap(Map.Entry::getKey, v -> v.getValue().toString()));

    Map<String, String> kafkaParams = new HashMap<>(otherConfig);
    kafkaParams.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
    //kafkaParams.put("auto.commit.enable", true); //不自动提交偏移量
    //      "fetch.message.max.bytes" ->
    //      "session.timeout.ms" -> "30000", //session默认是30秒
    //      "heartbeat.interval.ms" -> "5000", //10秒提交一次 心跳周期
    kafkaParams.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); //注意不同的流 group.id必须要不同 否则会出现offect commit提交失败的错误
    kafkaParams.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, offsetMode); //largest   smallest

    //----get fromOffsets
    @SuppressWarnings("unchecked")
    scala.collection.immutable.Map<String, String> map = (scala.collection.immutable.Map<String, String>) Map$.MODULE$.apply(JavaConverters.mapAsScalaMapConverter(kafkaParams).asScala().toSeq());
    final KafkaCluster kafkaCluster = new KafkaCluster(map);
    Map<TopicAndPartition, Long> fromOffsets = getFromOffset(kafkaCluster, topics, groupId);

    //--- createDirectStream  DirectKafkaInputDStream.class
    org.apache.spark.api.java.function.Function<MessageAndMetadata<byte[], byte[]>, ConsumerRecord<byte[], byte[]>> messageHandler =
            mmd -> new ConsumerRecord<>(mmd.topic(), mmd.partition(), mmd.key(), mmd.message(), mmd.offset());
    @SuppressWarnings("unchecked")
    Class<ConsumerRecord<byte[], byte[]>> recordClass = (Class<ConsumerRecord<byte[], byte[]>>) ClassTag$.MODULE$.<ConsumerRecord<byte[], byte[]>>apply(ConsumerRecord.class).runtimeClass();
    JavaInputDStream<ConsumerRecord<byte[], byte[]>> inputStream = KafkaUtils.createDirectStream(ssc,
            byte[].class, byte[].class, DefaultDecoder.class, DefaultDecoder.class, recordClass,
            kafkaParams, fromOffsets,
            messageHandler
    );
    JavaDStream<ConsumerRecord<byte[], byte[]>> dStream = settingCommit(inputStream, kafkaParams, kafkaCluster, groupId);

    if ("json".equalsIgnoreCase(config.getValueType())) {
        JsonSchema jsonParser = new JsonSchema(context.getSchema());
        return dStream
                .map(record -> {
                    return jsonParser.deserialize(record.key(), record.value(), record.topic(), record.partition(), record.offset());
                });
    }
    else {
        StructType structType = schemaToSparkType(context.getSchema());
        return dStream
                .map(record -> {
                    String[] names = structType.names();
                    Object[] values = new Object[names.length];
                    for (int i = 0; i < names.length; i++) {
                        switch (names[i]) {
                            case "_topic":
                                values[i] = record.topic();
                                continue;
                            case "_message":
                                values[i] = new String(record.value(), UTF_8);
                                continue;
                            case "_key":
                                values[i] = new String(record.key(), UTF_8);
                                continue;
                            case "_partition":
                                values[i] = record.partition();
                                continue;
                            case "_offset":
                                values[i] = record.offset();
                                continue;
                            default:
                                values[i] = null;
                        }
                    }
                    return (Row) new GenericRowWithSchema(values, structType);
                });  //.window(Duration(10 * 1000))
    }
}
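In this example the messageHandler repackages each MessageAndMetadata as a ConsumerRecord so that the project's settingCommit helper can commit the consumed offsets back through KafkaCluster. Because DefaultDecoder leaves key and value as raw byte[], the JSON branch deserializes them with JsonSchema, while the generic branch copies them into a Row as UTF-8 strings.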
 
Example #4
Source File: ITKafkaSender.java    From zipkin-reporter-java with Apache License 2.0
private List<byte[]> readMessages(String topic) throws TimeoutException {
  return kafka.readMessages(topic, 1, new DefaultDecoder(kafka.consumerConfig().props()));
}
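The consumer config's VerifiableProperties are passed to DefaultDecoder's constructor here; the decoder ignores them and returns each message's payload unchanged, so readMessages yields the raw bytes exactly as they were published.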
 
Example #5
Source File: KafkaConsumerTest.java    From pentaho-kafka-consumer with Apache License 2.0
private static MessageAndMetadata<byte[], byte[]> generateKafkaMessage() {
    byte[] message = "aMessage".getBytes();

    return new MessageAndMetadata<byte[], byte[]>("topic", 0, new Message(message),
            0, new DefaultDecoder(null), new DefaultDecoder(null));
}
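A hedged follow-up sketch: calling message() on the generated object applies the value DefaultDecoder to the Message payload, so the original bytes come back unchanged (StandardCharsets is from java.nio.charset):

MessageAndMetadata<byte[], byte[]> mam = generateKafkaMessage();
// DefaultDecoder returns the payload as-is, so this prints "aMessage".
System.out.println(new String(mam.message(), StandardCharsets.UTF_8));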
 
Example #6
Source File: KafkaMessageReceiverPoolTest.java    From message-queue-client-framework with Apache License 2.0
@Test
public void test0() throws Exception {

    final KafkaMessageReceiverPool<byte[], byte[]> pool = new KafkaMessageReceiverPool<byte[], byte[]>();

    pool.destroy();
    Assert.assertFalse(pool.isRunning());

    Assert.assertNull(pool.getThreadFactory());
    pool.setThreadFactory(new KafkaPoolThreadFactory());

    Assert.assertNotNull(pool.getProps());
    pool.setProps(new Properties());

    Assert.assertEquals(0, pool.getPoolSize());
    pool.setPoolSize(1);

    Assert.assertNull(pool.getZookeeperStr());
    pool.setZookeeperStr("");

    Assert.assertNull(pool.getClientId());
    pool.setClientId("test");

    Assert.assertTrue(pool.getAutoCommit());
    pool.setAutoCommit(true);

    Assert.assertEquals(pool.getRetryCount(), 3);

    Assert.assertNull(pool.getConfig());
    pool.setConfig(new DefaultResourceLoader()
            .getResource("kafka/consumer1.properties"));
    pool.setConfig(new DefaultResourceLoader()
            .getResource("kafka/consumer.properties"));

    pool.setProps(TestUtils.createConsumerProperties(zkConnect, "group_1",
            "consumer_id", 1000));

    Assert.assertSame(DefaultDecoder.class, pool.getKeyDecoderClass());
    pool.setKeyDecoderClass(DefaultDecoder.class);

    Assert.assertSame(DefaultDecoder.class, pool.getValDecoderClass());
    pool.setValDecoderClass(DefaultDecoder.class);

    Assert.assertNull(pool.getMessageAdapter());
    pool.setMessageAdapter(getAdapter());

    Assert.assertNotNull(pool.getReceiver());

    pool.init();

    Assert.assertTrue(pool.isRunning());

    Thread.sleep(5000);

    Thread thread = new Thread(new Runnable() {

        @Override
        public void run() {
            pool.destroy();
        }
    });

    thread.start();

    pool.destroy();

    pool.returnReceiver(null);

    Assert.assertFalse(pool.isRunning());
}