Java Code Examples for org.apache.kafka.clients.consumer.KafkaConsumer#close()

The following examples show how to use org.apache.kafka.clients.consumer.KafkaConsumer#close(). You can go to the original project or source file by following the links above each example.
Example 1
Source File: CompositeTransactionManagerKafkaImpl.java    From microservices-transactions-tcc with Apache License 2.0
@SuppressWarnings("unchecked")
@Override
public List<EntityCommand<?>> fetch(String txId) {
	List<EntityCommand<?>> transactionOperations = new ArrayList<EntityCommand<?>>();

	Map<String, Object> consumerConfigs = (Map<String, Object>)configuration.get("kafkaConsumerConfiguration");
	consumerConfigs.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
	
	KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<String, String>(consumerConfigs);
	kafkaConsumer.subscribe(Arrays.asList(txId));
	
	ConsumerRecords<String, String> records = kafkaConsumer.poll(kafkaConsumerPollTimeout);
	for (ConsumerRecord<String, String> record : records){
		LOG.info("offset = {}, key = {}, value = {}", record.offset(), record.key(), record.value());
		try {
			transactionOperations.add(serializer.readFromString(record.value()));
		} catch (SerializationFailedException e) {
			LOG.error("Unable to deserialize [{}] because of: {}", record.value(), e.getMessage());
		}
	}
	
	kafkaConsumer.close();
		
	return transactionOperations;
}
 
Example 2
Source File: ConsumerThread.java    From kafka-topic-exporter with Apache License 2.0
@Override
public void run() {

    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props.getProperties());
    consumer.subscribe(props.getTopicsPattern(), new KteConsumerRebalanceListener(consumer));
    try {
        while (!Thread.interrupted()) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            if (records.count() > 0)
            	LOG.info("Got records count: " + String.valueOf(records.count()));
            for (ConsumerRecord<String, String> record : records) {
                String topic = record.topic();
                if(props.get(PropertyConfig.Constants.KAKFA_CONSUMER_REMOVEPREFIX.key,null) != null) {
                    topic = topic.replaceFirst("^" + props.get(PropertyConfig.Constants.KAKFA_CONSUMER_REMOVEPREFIX.key), "");
                }
                collector.add(topic, record.value());
            }
        }
    }
    finally {
        LOG.info("Shutting down consumer");
        consumer.close();
    }
}
 
Example 3
Source File: TestKafkaServiceImpl.java    From kafka-eagle with Apache License 2.0
public Map<TopicPartition, Long> getKafkaLogSize(String topic, Set<Integer> partitionids) {
	Properties props = new Properties();
	props.put(ConsumerConfig.GROUP_ID_CONFIG, Kafka.KAFKA_EAGLE_SYSTEM_GROUP);
	props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092");
	props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getCanonicalName());
	props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getCanonicalName());
	KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
	Set<TopicPartition> tps = new HashSet<>();
	for (int partitionid : partitionids) {
		tps.add(new TopicPartition(topic, partitionid));
	}
	// Partitions must be assigned before the consumer can report positions for them.
	consumer.assign(tps);
	consumer.seekToEnd(tps);
	Map<TopicPartition, Long> partitionOffset = new HashMap<>();
	for (TopicPartition tp : tps) {
		partitionOffset.put(tp, consumer.position(tp));
	}
	consumer.close();
	return partitionOffset;
}
 
Example 4
Source File: KafkaSource.java    From kylin with Apache License 2.0
@Override
public StreamingTableSourceInfo load(String cubeName) {
    KylinConfig kylinConf = KylinConfig.getInstanceFromEnv();
    CubeInstance cube = CubeManager.getInstance(kylinConf).getCube(cubeName);
    String streamingTableName = cube.getRootFactTable();
    StreamingSourceConfig streamingSourceConfig = StreamingSourceConfigManager.getInstance(kylinConf)
            .getConfig(streamingTableName);

    String topicName = getTopicName(streamingSourceConfig.getProperties());
    Map<String, Object> conf = getKafkaConf(streamingSourceConfig.getProperties(), cube.getConfig());

    KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<String, String>(conf);
    try {
        List<PartitionInfo> partitionInfos = kafkaConsumer.partitionsFor(topicName);
        List<Partition> kafkaPartitions = Lists.transform(partitionInfos, new Function<PartitionInfo, Partition>() {
            @Nullable
            @Override
            public Partition apply(@Nullable PartitionInfo input) {
                return new Partition(input.partition());
            }
        });
        return new StreamingTableSourceInfo(kafkaPartitions);
    } finally {
        kafkaConsumer.close();
    }
}
 
Example 5
Source File: SimulateResultService.java    From SkaETL with Apache License 2.0
public List<SimulateData> readOutPut(String bootStrapServers, String maxRecords, String windowTime) {
    KafkaConsumer kafkaConsumer = kafkaUtils.kafkaConsumer("latest", bootStrapServers, "simulate");
    log.info("Subscribe Topic for {}", SIMULATE_OUTPUT);
    kafkaConsumer.subscribe(Arrays.asList(SIMULATE_OUTPUT), new Rebalancer());
    List<SimulateData> res = new ArrayList<>();
    long start = System.currentTimeMillis();
    try {
        while (checkWindow(start, Long.valueOf(windowTime), res.size(), Long.valueOf(maxRecords))) {
            ConsumerRecords<String, SimulateData> records = kafkaConsumer.poll(100);
            for (ConsumerRecord<String, SimulateData> record : records) {
                res.add(record.value());
            }
            log.info("Number item for read OutPut {}", res.size());
            kafkaConsumer.commitSync();
        }
    } catch (WakeupException e) {
        // Ignore exception if closing
        throw e;
    } catch (RuntimeException re) {
        log.error("RuntimeException {}", re);
    } finally {
        kafkaConsumer.close();
    }
    return res;
}
 
Example 6
Source File: KafkaServiceImpl.java    From kafka-eagle with Apache License 2.0
/**
 * Get kafka 0.10.x topic history logsize.
 */
public Map<TopicPartition, Long> getKafkaLogSize(String clusterAlias, String topic, Set<Integer> partitionids) {
	Properties props = new Properties();
	props.put(ConsumerConfig.GROUP_ID_CONFIG, Kafka.KAFKA_EAGLE_SYSTEM_GROUP);
	props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, getKafkaBrokerServer(clusterAlias));
	props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getCanonicalName());
	props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getCanonicalName());
	if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.sasl.enable")) {
		sasl(props, clusterAlias);
	}
	if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.ssl.enable")) {
		ssl(props, clusterAlias);
	}
	KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
	Set<TopicPartition> tps = new HashSet<>();
	for (int partitionid : partitionids) {
		TopicPartition tp = new TopicPartition(topic, partitionid);
		tps.add(tp);
	}

	consumer.assign(tps);
	java.util.Map<TopicPartition, Long> endLogSize = consumer.endOffsets(tps);
	if (consumer != null) {
		consumer.close();
	}
	return endLogSize;
}
 
Example 7
Source File: TopicVerification.java    From ja-micro with Apache License 2.0
public boolean verifyTopicsExist(String kafkaBrokers, Set<String> requiredTopics,
                                 boolean checkPartitionCounts) {
    Properties props = new Properties();
    props.put("bootstrap.servers", kafkaBrokers);
    props.put("group.id", UUID.randomUUID().toString());
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());
    if (serviceProperties != null) {
        SaslConfigurator configurator = new SaslConfigurator();
        configurator.configureSasl(props, serviceProperties.getServiceName(), serviceProperties.getKafkaPassword());
    } else {
        logger.warn("TopicVerification was not initialized, SASL will not be supported for this connection");
    }
    KafkaConsumer consumer = new KafkaConsumer(props);
    try {
        @SuppressWarnings("unchecked")
        Map<String, List<PartitionInfo>> topics = consumer.listTopics();

        Set<Integer> partitionCount = new HashSet<>();
        for (String requiredTopic : requiredTopics) {
            List<PartitionInfo> partitions = topics.get(requiredTopic);
            if (partitions == null) {
                logger.info("Required kafka topic {} not present", requiredTopic);
                return false;
            }
            partitionCount.add(partitions.size());
        }
        if (checkPartitionCounts && partitionCount.size() > 1) {
            logger.warn("Partition count mismatch in topics {}",
                    Arrays.toString(requiredTopics.toArray()));
            return false;
        }
        return true;
    } finally {
        consumer.close();
    }
}
 
Example 8
Source File: SchemaRegistryConsumer.java    From blog with MIT License
public static void main(String[] args) {

    /** TODO: Set up the Consumer properties */
    Properties properties = new Properties();
    /** TODO: Kafka broker addresses */
    properties.put(
        ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "node-160:9092,node-161:9092,node-162:9092");
    /** TODO: Key deserializer class */
    properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    /** TODO: Value deserializer class */
    properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class);
    /** TODO: Consumer group */
    properties.put(ConsumerConfig.GROUP_ID_CONFIG, "consumer_group_schema");

    /** TODO: Configure schema.registry */
    properties.put("schema.registry.url", "http://node-160:8081");

    /** TODO: Create the Consumer */
    KafkaConsumer<String, GenericRecord> consumer = new KafkaConsumer<>(properties);

    /** TODO: Subscribe to topics; a Pattern.compile("") regular expression can also be used */
    consumer.subscribe(Arrays.asList("topic01"));

    /** TODO: Iterate over the message queue */
    try {
      while (true) {
        /** TODO: Set how long poll() waits for messages */
        ConsumerRecords<String, GenericRecord> consumerRecords =
            consumer.poll(Duration.ofSeconds(1));
        consumerRecords.forEach(
            r ->
                System.out.printf(
                    "partition = %d, offset = %d, key = %s, value = %s%n",
                    r.partition(), r.offset(), r.key(), r.value()));
      }
    } finally {
      /** TODO: Close the Consumer */
      consumer.close();
    }
  }
 
Example 9
Source File: KafkaDispatcherImpl.java    From arcusplatform with Apache License 2.0
private void stop(KafkaConsumer<?, ?> consumer) {
	try {
		// note: this will commit offsets if offset commit is enabled
		consumer.close();
	}
	catch(Exception e) {
		logger.warn("Error cleanly disconnecting from topic [{}]", topic, e);
	}
}
 
Example 10
Source File: CaseController.java    From skywalking with Apache License 2.0
@Override
public void run() {
    Properties consumerProperties = new Properties();
    consumerProperties.put("bootstrap.servers", bootstrapServers);
    consumerProperties.put("group.id", "testGroup");
    consumerProperties.put("enable.auto.commit", "true");
    consumerProperties.put("auto.commit.interval.ms", "1000");
    consumerProperties.put("auto.offset.reset", "earliest");
    consumerProperties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    consumerProperties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProperties);
    consumer.subscribe(Arrays.asList(topicName));
    int i = 0;
    while (i++ <= 10) {
        try {
            Thread.sleep(1 * 1000);
        } catch (InterruptedException e) {
        }

        ConsumerRecords<String, String> records = consumer.poll(100);

        if (!records.isEmpty()) {
            for (ConsumerRecord<String, String> record : records) {
                logger.info("header: {}", new String(record.headers()
                                                           .headers("TEST")
                                                           .iterator()
                                                           .next()
                                                           .value()));
                logger.info("offset = {}, key = {}, value = {}", record.offset(), record.key(), record.value());
            }
            break;
        }
    }

    consumer.close();
}
 
Example 11
Source File: KafkaConsumerAnalysis.java    From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) {
        Properties props = initConfig();
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // Subscribe to the topic
//        consumer.subscribe(Arrays.asList(topic));
        // A regular expression can also be used
        // consumer.subscribe(Pattern.compile("topic.test.*"));

        // Assign partitions directly
        List<TopicPartition> partitions = new ArrayList<>();
        // Query metadata for the given topic
        List<PartitionInfo> infos = consumer.partitionsFor(topic);
        if (infos != null) {
            for (PartitionInfo info : infos) {
                // Assign all partitions (a subset could also be assigned)
                partitions.add(new TopicPartition(info.topic(), info.partition()));
            }
            consumer.assign(partitions);
        }
        try {
            while (isRunning.get()) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(String.format("topic = %s, partition = %s, offset = %s",
                            record.topic(), record.partition(), record.offset()));
                    System.out.println(String.format("key = %s, value = %s", record.key(), record.value()));
                }
            }
        } catch (Exception e) {
            log.error("发生异常 ", e);
            // Unsubscribe
            consumer.unsubscribe();
        } finally {
            consumer.close();
        }
    }
 
Example 12
Source File: KafkaAvroSerDesWithKafkaServerTest.java    From registry with Apache License 2.0
private ConsumerRecords<String, Object> consumeMessage(String topicName, String bootstrapServers, String consumerGroup) {
    Map<String, Object> props = new HashMap<>();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    props.putAll(SCHEMA_REGISTRY_TEST_SERVER_CLIENT_WRAPPER.exportClientConf(true));
    props.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroup);
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
    props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
    props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class.getName());

    KafkaConsumer<String, Object> consumer = new KafkaConsumer<>(props);

    List<PartitionInfo> partitionInfos = consumer.partitionsFor(topicName);
    Collection<TopicPartition> partitions = new ArrayList<>();
    for (PartitionInfo partitionInfo : partitionInfos) {
        partitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
    }
    LOG.info("partitions [{}]", partitions);
    LOG.info("subscribed topis: [{}] ", consumer.listTopics());

    consumer.assign(partitions);
    consumer.seekToBeginning(partitions);

    ConsumerRecords<String, Object> consumerRecords = null;
    int ct = 0;
    while (ct++ < 100 && (consumerRecords == null || consumerRecords.isEmpty())) {
        LOG.info("Polling for consuming messages");
        consumerRecords = consumer.poll(Duration.ofMillis(500));
    }
    consumer.commitSync();
    consumer.close();

    return consumerRecords;
}
 
Example 13
Source File: Consumer.java    From blog with MIT License
public static void main(String[] args) {
  /** TODO: Set up the Consumer properties */
  Properties properties = new Properties();
  /** TODO: Kafka broker addresses */
  properties.put(
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "node-160:9092,node-161:9092,node-162:9092");
  /** TODO: Key deserializer class */
  properties.put(
      ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
  /** TODO: Value deserializer class */
  properties.put(
      ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
  /** TODO: Consumer group */
  properties.put(ConsumerConfig.GROUP_ID_CONFIG, "consumer_group_2");

  /** TODO: Create the Consumer */
  KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);

  /** TODO: Subscribe to topics; a Pattern.compile("") regular expression can also be used */
  consumer.subscribe(Arrays.asList("topic01"));

  /** TODO: Iterate over the message queue */
  try {
    while (true) {
      /** TODO: Set how long poll() waits for messages */
      ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(1));
      consumerRecords.forEach(
          r ->
              System.out.printf(
                  "partition = %d, offset = %d, key = %s, value = %s%n",
                  r.partition(), r.offset(), r.key(), r.value()));
    }
  } finally {
    /** TODO: Close the Consumer */
    consumer.close();
  }
}
 
Example 14
Source File: CheckFlowLineHandler.java    From DBus with Apache License 2.0
@Override
public void check(BufferedWriter bw) throws Exception {

    KafkaConsumer<String, byte[]> consumerSecond = null;
    KafkaConsumer<String, byte[]> consumerThird = null;
    KafkaConsumer<String, byte[]> consumerFourth = null;

    try {
        bw.newLine();
        bw.write("check flow line start: ");
        bw.newLine();
        bw.write("============================================");
        bw.newLine();

        List<Object> listSecond = initConsumer("testdb", "second");
        consumerSecond = (KafkaConsumer<String, byte[]>) listSecond.get(0);
        long offsetSecond = (Long) listSecond.get(1);

        List<Object> listThird = initConsumer("testdb.testschema", "third");
        consumerThird = (KafkaConsumer<String, byte[]>) listThird.get(0);
        long offsetThird = (Long) listThird.get(1);

        List<Object> listFourth = initConsumer("testdb.testschema.result", "fourth");
        consumerFourth = (KafkaConsumer<String, byte[]>) listFourth.get(0);
        // long offsetFourth = (Long) listFourth.get(1);

        long time = System.currentTimeMillis();

        firstStep(bw, time);
        secondStep(bw, consumerSecond, offsetSecond);
        thirdStep(bw, consumerThird, offsetThird);
        fourthStep(bw, consumerFourth, time);
    } catch (Exception e) {
        throw e;
    } finally {
        if (consumerSecond != null) consumerSecond.close();
        if (consumerThird != null) consumerThird.close();
        if (consumerFourth != null) consumerFourth.close();
    }

}
 
Example 15
Source File: AvroKafkaConsumer.java    From blog with MIT License
public static void main(String[] args) {

    /** TODO: Parse the schema with Avro */
    Schema.Parser parser = new Schema.Parser();
    Schema schema = parser.parse(AvroKafkaProducer.USER_SCHEMA);

    /** TODO: Use Bijection to convert records to and from byte arrays */
    Injection<GenericRecord, byte[]> recordInjection = GenericAvroCodecs.toBinary(schema);

    /** TODO: Set up the Consumer properties */
    Properties properties = new Properties();
    /** TODO: Kafka broker addresses */
    properties.put(
        ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "node-160:9092,node-161:9092,node-162:9092");
    /** TODO: Key deserializer class */
    properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    /** TODO: Value deserializer class */
    properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
    /** TODO: Consumer group */
    properties.put(ConsumerConfig.GROUP_ID_CONFIG, "consumer_avro");

    /** TODO: Create the Consumer */
    KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(properties);

    /** TODO: Subscribe to the Topic */
    consumer.subscribe(Pattern.compile("^topic01*$"));
    try {
      while (true) {
        /** TODO: Set how long poll() waits for messages */
        ConsumerRecords<String, byte[]> records = consumer.poll(Duration.ofSeconds(1));
        records.forEach(
            record -> {
              /** TODO: Deserialize the byte array */
              GenericRecord genericRecord = recordInjection.invert(record.value()).get();
              System.out.printf(
                  "partition = %d, offset = %d, name = %s, age = %d %n",
                  record.partition(),
                  record.offset(),
                  genericRecord.get("name"),
                  genericRecord.get("age"));
            });
      }
    } finally {
      /** TODO: Close the Consumer */
      consumer.close();
    }
  }
 
Example 16
Source File: MetricsServer.java    From arcusplatform with Apache License 2.0
@Override
protected void start() throws Exception {
   log.info("Starting metrics processing server...");

   Properties props = kafkaOpsConfig.toNuConsumerProperties();
   Deserializer<JsonObject> delegate = JSON.createDeserializer(JsonObject.class);
   KafkaConsumer<String, JsonObject> consumer = new KafkaConsumer<>(props, new StringDeserializer(), new org.apache.kafka.common.serialization.Deserializer<JsonObject>() {
      @Override
      public void configure(Map<String, ?> configs, boolean isKey) {
         // no-op
      }

      @Override
      public JsonObject deserialize(String topic, byte[] data) {
         try {
            return delegate.deserialize(data);
         }
         catch(Exception e) {
            log.warn("could not deserialize: ", new String(data,StandardCharsets.UTF_8));
            return null;
         }
      }

      @Override
      public void close() {
         // no-op
      }
   });
   try {
      log.info("starting metrics consumer...");
      consumer.subscribe(ImmutableSet.of(kafkaOpsConfig.getTopicMetrics()));
      while(true) {
         ConsumerRecords<String, JsonObject> records = consumer.poll(kafkaOpsConfig.getPollingTimeoutMs());
         if(!records.isEmpty()) {
            consume(records);
         }
      }
   }
   catch (Exception ex) {
      log.warn("exiting abnormally: {}", ex.getMessage(), ex);
   }
   finally {
      consumer.commitSync();
      consumer.close();
   }
}
 
Example 17
Source File: PastReplicaStatsProcessor.java    From doctorkafka with Apache License 2.0
public void run() {
  KafkaConsumer<byte[], byte[]> kafkaConsumer = null;
  try {
    String brokers = KafkaUtils.getBrokers(zkUrl, securityProtocol);
    LOG.info("ZkUrl: {}, Brokers: {}", zkUrl, brokers);
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "doctorkafka_" + topicPartition);
    props.put(KafkaUtils.KEY_DESERIALIZER,
        "org.apache.kafka.common.serialization.ByteArrayDeserializer");
    props.put(KafkaUtils.VALUE_DESERIALIZER,
        "org.apache.kafka.common.serialization.ByteArrayDeserializer");
    props.put(KafkaUtils.MAX_POLL_RECORDS, 2000);
    props.put("max.partition.fetch.bytes", 1048576 * 4);

    kafkaConsumer = new KafkaConsumer<>(props);
    Set<TopicPartition> topicPartitions = new HashSet<>();
    topicPartitions.add(topicPartition);
    kafkaConsumer.assign(topicPartitions);
    kafkaConsumer.seek(topicPartition, startOffset);

    ConsumerRecords<byte[], byte[]> records = null;
    while (kafkaConsumer.position(topicPartition) < endOffset) {
      records = kafkaConsumer.poll(100);
      for (ConsumerRecord<byte[], byte[]> record : records) {
        BrokerStats brokerStats = OperatorUtil.deserializeBrokerStats(record);
        if (brokerStats == null || brokerStats.getName() == null) {
          OpenTsdbMetricConverter.incr(DoctorKafkaMetrics.MESSAGE_DESERIALIZE_ERROR, 1);
          continue;
        }
        replicaStatsManager.update(brokerStats);
      }
    }
  } catch (Exception e) {
    LOG.error("Exception in processing brokerstats", e);
  } finally {
    if (kafkaConsumer != null) {
      kafkaConsumer.close();
    }
  }
}
 
Example 18
Source File: DuplicatePublishingDetector.java    From light-eventuate-4j with Apache License 2.0
private Optional<BinlogFileOffset> fetchMaxOffsetFor(String destinationTopic) {
  String subscriberId = "duplicate-checker-" + destinationTopic + "-" + System.currentTimeMillis();
  Properties consumerProperties = ConsumerPropertiesFactory.makeConsumerProperties(config, subscriberId);
  KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProperties);

  List<PartitionInfo> partitions = EventuateKafkaConsumer.verifyTopicExistsBeforeSubscribing(consumer, destinationTopic);

  List<TopicPartition> topicPartitionList = partitions.stream().map(p -> new TopicPartition(destinationTopic, p.partition())).collect(toList());
  consumer.assign(topicPartitionList);
  consumer.poll(0);

  logger.info("Seeking to end");

  try {
    consumer.seekToEnd(topicPartitionList);
  } catch (IllegalStateException e) {
    logger.error("Error seeking " + destinationTopic, e);
    return Optional.empty();
  }
  List<PartitionOffset> positions = topicPartitionList.stream()
          .map(tp -> new PartitionOffset(tp.partition(), consumer.position(tp) - 1))
          .filter(po -> po.offset >= 0)
          .collect(toList());

  logger.info("Seeking to positions=" + positions);

  positions.forEach(po -> {
    consumer.seek(new TopicPartition(destinationTopic, po.partition), po.offset);
  });

  logger.info("Polling for records");

  List<ConsumerRecord<String, String>> records = new ArrayList<>();
  while (records.size()<positions.size()) {
    ConsumerRecords<String, String> consumerRecords = consumer.poll(1000);
    consumerRecords.forEach(records::add);
  }

  logger.info("Got records: {}", records.size());
  Optional<BinlogFileOffset> max = StreamSupport.stream(records.spliterator(), false).map(record -> {
    logger.info(String.format("got record: %s %s %s", record.partition(), record.offset(), record.value()));
    return JSonMapper.fromJson(record.value(), PublishedEvent.class).getBinlogFileOffset();
  }).filter(binlogFileOffset -> binlogFileOffset!=null).max((blfo1, blfo2) -> blfo1.isSameOrAfter(blfo2) ? 1 : -1);
  consumer.close();
  return max;
}
 
Example 19
Source File: KafkaService.java    From cerberus-source with GNU General Public License v3.0
@SuppressWarnings("unchecked")
@Override
public AnswerItem<Map<TopicPartition, Long>> seekEvent(String topic, String bootstrapServers,
        List<AppServiceHeader> serviceHeader) throws InterruptedException, ExecutionException {

    MessageEvent message = new MessageEvent(MessageEventEnum.ACTION_SUCCESS_CALLSERVICE_SEARCHKAFKA);
    AnswerItem<Map<TopicPartition, Long>> result = new AnswerItem<>();

    KafkaConsumer consumer = null;

    try {

        Properties props = new Properties();
        serviceHeader.add(factoryAppServiceHeader.create(null, "bootstrap.servers", bootstrapServers, "Y", 0, "", "", null, "", null));
        serviceHeader.add(factoryAppServiceHeader.create(null, "enable.auto.commit", "false", "Y", 0, "", "", null, "", null));
        serviceHeader.add(factoryAppServiceHeader.create(null, "max.poll.records", "10", "Y", 0, "", "", null, "", null));
        serviceHeader.add(factoryAppServiceHeader.create(null, "key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer", "Y", 0, "", "", null, "", null));
        serviceHeader.add(factoryAppServiceHeader.create(null, "value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer", "Y", 0, "", "", null, "", null));

        for (AppServiceHeader object : serviceHeader) {
            if (StringUtil.parseBoolean(object.getActive())) {
                props.put(object.getKey(), object.getValue());
            }
        }

        LOG.info("Open Consumer : " + getKafkaConsumerKey(topic, bootstrapServers));
        consumer = new KafkaConsumer<>(props);

        //Get a list of the topics' partitions
        List<PartitionInfo> partitionList = consumer.partitionsFor(topic);

        if (partitionList == null) {

            message = new MessageEvent(MessageEventEnum.ACTION_FAILED_CALLSERVICE_SEEKKAFKA);
            message.setDescription(message.getDescription().replace("%EX%", "Maybe Topic does not exist.").replace("%TOPIC%", topic).replace("%HOSTS%", bootstrapServers));

        } else {

            List<TopicPartition> topicPartitionList = partitionList.stream().map(info -> new TopicPartition(topic, info.partition())).collect(Collectors.toList());
            //Assign all the partitions to this consumer
            consumer.assign(topicPartitionList);
            consumer.seekToEnd(topicPartitionList); //default to latest offset for all partitions

            HashMap<TopicPartition, Long> valueResult = new HashMap<>();

            Map<TopicPartition, Long> partitionOffset = consumer.endOffsets(topicPartitionList);

            result.setItem(partitionOffset);

        }

    } catch (Exception ex) {
        message = new MessageEvent(MessageEventEnum.ACTION_FAILED_CALLSERVICE_SEEKKAFKA);
        message.setDescription(message.getDescription().replace("%EX%", ex.toString()).replace("%TOPIC%", topic).replace("%HOSTS%", bootstrapServers));
        LOG.debug(ex, ex);
    } finally {
        if (consumer != null) {
            consumer.close();
            LOG.info("Closed Consumer : " + getKafkaConsumerKey(topic, bootstrapServers));
        } else {
            LOG.info("Consumer not opened : " + getKafkaConsumerKey(topic, bootstrapServers));
        }
    }
    result.setResultMessage(message);
    return result;
}
 
Example 20
Source File: SingerHeartbeatTest.java    From singer with Apache License 2.0
void testHeartBeat() {
  int numReceivedHeartbeats = 0;

  SingerTestHelper.createSingerConfig(
      singerConfigDir,
      singerConfigConfDir,
      singerDataDir,
      "singer_test_event",
      100,
      "singer_test_event",
      "",
      heartbeatIntervalInMilliSeconds / 1000,
      heartbeatTopic);

  SingerTestHelper.createSingerConfigLConfFile(
      singerConfigConfDir,
      "singer.test2.properties",
      singerDataDir,
      "singer_test_event_2",
      100,
      "singer_test_event",
      "");

  Process
      singerProc =
      SingerTestHelper
          .startSingerProcess(singerBinaryDir, singerConfigDir, SingerHeartbeatTest.class);

  File outputDir = new File(singerDataDir);
  ThriftLoggerFactory.initialize(outputDir, 100);

  SingerTestHelper.createLogStream(singerDataDir, "singer_test_event", 100, 500);
  SingerTestHelper.createLogStream(singerDataDir, "singer_test_event_2", 100, 500);

  SingerOutputRetriever outputRetriever = new SingerOutputRetriever(singerProc.getErrorStream());
  Thread outThread = new Thread(outputRetriever);
  outThread.start();

  try {
    Thread.sleep(20 * 1000);

    Properties properties = SingerTestHelper.createKafkaConsumerConfig();
    KafkaConsumer<byte[], byte[]> kafkaConsumer = new KafkaConsumer<>(properties);
    kafkaConsumer.subscribe(Arrays.asList(heartbeatTopic));

    SingerStatus status = null;

    for (int i = 0; i < numHeartbeats; i++) {
      Thread.sleep(heartbeatIntervalInMilliSeconds);

      String hostName = SingerUtils.getHostname();
      System.out.println("Fetching heartbeat messages from " + hostName + " : ");

      ConsumerRecords<byte[], byte[]> records = kafkaConsumer.poll(Duration.ofMillis(500L));
      for (ConsumerRecord<byte[], byte[]> record : records) {
        String msg = new String(record.value());
        status = new Gson().fromJson(msg, SingerStatus.class);

        if (System.currentTimeMillis() - status.getTimestamp() > heartbeatIntervalInMilliSeconds
                || !status.hostName.equals(hostName)) {
          System.out.println(msg);
          status = new Gson().fromJson(msg, SingerStatus.class);
          kafkaConsumer.commitSync();
        }

        System.out.println(msg);
        kafkaConsumer.commitSync();
        numReceivedHeartbeats++;
        assert (msg.contains("data.test"));
        assert (msg.contains("singer.test2"));
      }
    }
    kafkaConsumer.close();
  } catch (Exception e) {
    e.printStackTrace();
    assert (false);
  } finally {
    singerProc.destroy();
  }

  assert (numReceivedHeartbeats == numHeartbeats);
}