Java Code Examples for org.apache.kafka.clients.consumer.KafkaConsumer#seekToBeginning()

The following examples show how to use org.apache.kafka.clients.consumer.KafkaConsumer#seekToBeginning(), drawn from a range of open-source projects; the source file, project, and license are noted above each example. seekToBeginning() rewinds the consumer's fetch position to the first available offset of the given partitions. The seek is lazy: it only takes effect on the next poll() or position() call.
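
Before the project examples, here is a minimal, self-contained sketch of the basic pattern: assign a partition explicitly, rewind it, and poll. The broker address, topic name, and group id are placeholders rather than values from any of the projects below.

import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class SeekToBeginningSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "replay-sketch");           // placeholder group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // assign() rather than subscribe(): assignment() is populated immediately,
            // so the seek below has partitions to act on
            List<TopicPartition> partitions =
                    Collections.singletonList(new TopicPartition("my-topic", 0)); // placeholder topic
            consumer.assign(partitions);

            // lazy: evaluated on the next poll() or position() call
            consumer.seekToBeginning(partitions);

            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(5));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset=%d value=%s%n", record.offset(), record.value());
            }
        }
    }
}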
Example 1
Source File: DebeziumMySQLDatashapeStrategy.java    From syndesis with Apache License 2.0
private static String pollDDLTableSchema(KafkaConsumer<String, String> consumer, String topicSchemaChange, String topicTableName) throws JsonProcessingException {
    String ddlTableExpected = null;
    // Seek the offset to the beginning in case any offset was committed
    // previously
    consumer.subscribe(Collections.singletonList(topicSchemaChange));
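    // NOTE: right after subscribe() no partitions are assigned yet, so assignment() is
    // empty and this seek may be a no-op until the first rebalance completes; a
    // ConsumerRebalanceListener would make the rewind deterministic.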
    consumer.seekToBeginning(consumer.assignment());
    // We assume we get the structure query in one poll
    final ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(15));
    for (final ConsumerRecord<String, String> record : records) {
        final String ddl = MAPPER.readTree(record.value()).get("ddl").asText();
        final String matchingDDL = String.format("CREATE TABLE `%s`", topicTableName);
        if (ddl.startsWith(matchingDDL)) {
            ddlTableExpected = ddl;
        }
    }
    return ddlTableExpected;
}
 
Example 2
Source File: KafkaUtils.java    From spliceengine with GNU Affero General Public License v3.0
public static long messageCount(String bootstrapServers, String topicName, int partition) {
    Properties props = new Properties();
    String consumerId = UUID.randomUUID().toString();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "spark-consumer-group-"+consumerId);
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "spark-consumer-"+consumerId);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ExternalizableDeserializer.class.getName());

    KafkaConsumer<Integer, Externalizable> consumer = new KafkaConsumer<Integer, Externalizable>(props);

    TopicPartition topicPartition = new TopicPartition(topicName, partition);
    List<TopicPartition> partitionList = Arrays.asList(topicPartition);
    consumer.assign(partitionList);
    consumer.seekToEnd(partitionList);
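    // position() evaluates the lazy seekToEnd() and returns the log end offset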
    long nextOffset = consumer.position(topicPartition);

    consumer.seekToBeginning(partitionList);
    long firstOffset = consumer.position(topicPartition);

    consumer.close();

    return nextOffset - firstOffset;
}
 
Example 3
Source File: KafkaSource.java    From kylin-on-parquet-v2 with Apache License 2.0
@Override
public String getMessageTemplate(StreamingSourceConfig streamingSourceConfig) {
    String template = null;
    KafkaConsumer<byte[], byte[]> consumer = null;
    try {
        String topicName = getTopicName(streamingSourceConfig.getProperties());
        Map<String, Object> config = getKafkaConf(streamingSourceConfig.getProperties());
        consumer = new KafkaConsumer<>(config);
        Set<TopicPartition> partitions = Sets.newHashSet(FluentIterable.from(consumer.partitionsFor(topicName))
                .transform(new Function<PartitionInfo, TopicPartition>() {
                    @Override
                    public TopicPartition apply(PartitionInfo input) {
                        return new TopicPartition(input.topic(), input.partition());
                    }
                }));
        consumer.assign(partitions);
        consumer.seekToBeginning(partitions);
        ConsumerRecords<byte[], byte[]> records = consumer.poll(500);
        if (records == null) {
            return null;
        }
        Iterator<ConsumerRecord<byte[], byte[]>> iterator = records.iterator();
        if (iterator == null || !iterator.hasNext()) {
            return null;
        }
        ConsumerRecord<byte[], byte[]> record = iterator.next();
        template = new String(record.value(), "UTF8");
    } catch (Exception e) {
        logger.error("error when fetch one record from kafka, stream:" + streamingSourceConfig.getName(), e);
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
    return template;
}
 
Example 4
Source File: KafkaClient.java    From kylin-on-parquet-v2 with Apache License 2.0
public static long getEarliestOffset(KafkaConsumer consumer, String topic, int partitionId) {
    TopicPartition topicPartition = new TopicPartition(topic, partitionId);
    consumer.assign(Arrays.asList(topicPartition));
    consumer.seekToBeginning(Arrays.asList(topicPartition));

    // position() evaluates the lazy seek and returns the earliest available offset
    return consumer.position(topicPartition);
}
 
Example 5
Source File: SinkerKafkaSource.java    From DBus with Apache License 2.0
public SinkerKafkaSource() throws IOException, PropertyException {
    Properties config = ConfUtils.getProps(CONFIG_PROPERTIES);
    topic = config.getProperty(Constants.SINKER_HEARTBEAT_TOPIC);
    if (topic == null) {
        throw new PropertyException("[sinker] config property must not be empty! " + Constants.SINKER_HEARTBEAT_TOPIC);
    }

    topicPartition = new TopicPartition(topic, 0);

    Properties statProps = ConfUtils.getProps(CONSUMER_PROPERTIES);
    statProps.setProperty("enable.auto.commit", "true");
    statProps.setProperty("client.id", "heartbeat_consumer_sinker_client");
    List<TopicPartition> topics = Arrays.asList(topicPartition);
    //security
    if (KafkaUtil.checkSecurity()) {
        statProps.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
    }

    LOG.info("[sinker] SinkerKafkaSource message: set max.poll.records=1000");
    statProps.setProperty("max.poll.records", "1000");

    consumer = new KafkaConsumer(statProps);
    consumer.assign(topics);

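    // with no explicit seek, position() reflects the group's committed offset
    // (or the auto.offset.reset policy if none exists)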
    long beforeOffset = consumer.position(topicPartition);
    String offset = config.getProperty("sinker.kafka.offset");
    if (StringUtils.isBlank(offset) || offset.equalsIgnoreCase("none")) {
        // do nothing
    } else if (offset.equalsIgnoreCase("begin")) {
        consumer.seekToBeginning(Lists.newArrayList(topicPartition));
    } else if (offset.equalsIgnoreCase("end")) {
        consumer.seekToEnd(Lists.newArrayList(topicPartition));
    } else {
        long nOffset = Long.parseLong(offset);
        consumer.seek(topicPartition, nOffset);
    }
    long afterOffset = consumer.position(topicPartition);
    LOG.info("[sinker] SinkerKafkaSource init OK. beforeOffset={}, afterOffset={}", beforeOffset, afterOffset);
}
 
Example 6
Source File: KafkaAvroSerDesWithKafkaServerTest.java    From registry with Apache License 2.0
private ConsumerRecords<String, Object> consumeMessage(String topicName, String bootstrapServers, String consumerGroup) {
    Map<String, Object> props = new HashMap<>();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    props.putAll(SCHEMA_REGISTRY_TEST_SERVER_CLIENT_WRAPPER.exportClientConf(true));
    props.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroup);
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
    props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
    props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class.getName());

    KafkaConsumer<String, Object> consumer = new KafkaConsumer<>(props);

    List<PartitionInfo> partitionInfos = consumer.partitionsFor(topicName);
    Collection<TopicPartition> partitions = new ArrayList<>();
    for (PartitionInfo partitionInfo : partitionInfos) {
        partitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
    }
    LOG.info("partitions [{}]", partitions);
    LOG.info("subscribed topis: [{}] ", consumer.listTopics());

    consumer.assign(partitions);
    consumer.seekToBeginning(partitions);

    ConsumerRecords<String, Object> consumerRecords = null;
    int ct = 0;
    while (ct++ < 100 && (consumerRecords == null || consumerRecords.isEmpty())) {
        LOG.info("Polling for consuming messages");
        consumerRecords = consumer.poll(Duration.ofMillis(500));
    }
    consumer.commitSync();
    consumer.close();

    return consumerRecords;
}
 
Example 7
Source File: KafkaEventSource.java    From mewbase with MIT License
@Override
public CompletableFuture<Subscription> subscribeAll(String channelName, EventHandler eventHandler) {
    TopicPartition partition0 = new TopicPartition(channelName, partitionZeroOnly);
    KafkaConsumer<String, byte[]> kafkaConsumer = createAndAssignConsumer(partition0);
    kafkaConsumer.seekToBeginning(Arrays.asList(partition0));
    return CompletableFuture.completedFuture(createAndRegisterSubscription(kafkaConsumer,eventHandler));
}
 
Example 8
Source File: KafkaTestUtils.java    From brooklin with BSD 2-Clause "Simplified" License
/**
 * Consume messages from a given partition of a Kafka topic, using given ReaderCallback
 */
public static void readTopic(String topic, Integer partition, String brokerList, ReaderCallback callback)
    throws Exception {
  Validate.notNull(topic);
  Validate.notNull(partition);
  Validate.notNull(brokerList);
  Validate.notNull(callback);

  KafkaConsumer<byte[], byte[]> consumer = createConsumer(brokerList);
  if (partition >= 0) {
    List<TopicPartition> topicPartitions = Collections.singletonList(new TopicPartition(topic, partition));
    consumer.assign(topicPartitions);
    consumer.seekToBeginning(topicPartitions);
  } else {
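    // with a negative partition, fall back to subscribe(); the starting position
    // then depends on committed group offsets / auto.offset.reset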
    consumer.subscribe(Collections.singletonList(topic));
  }

  boolean keepGoing = true;
  long now = System.currentTimeMillis();
  do {
    ConsumerRecords<byte[], byte[]> records = consumer.poll(1000);
    for (ConsumerRecord<byte[], byte[]> record : records.records(topic)) {
      if (!callback.onMessage(record.key(), record.value())) {
        keepGoing = false;
        break;
      }
    }

    // Guard against buggy test which can hang forever
    if (System.currentTimeMillis() - now >= DEFAULT_TIMEOUT_MS) {
      throw new TimeoutException("Timed out before reading all messages");
    }
  } while (keepGoing);
}
 
Example 9
Source File: KafkaSource.java    From kylin with Apache License 2.0
@Override
public String getMessageTemplate(StreamingSourceConfig streamingSourceConfig) {
    String template = null;
    KafkaConsumer<byte[], byte[]> consumer = null;
    try {
        String topicName = getTopicName(streamingSourceConfig.getProperties());
        Map<String, Object> config = getKafkaConf(streamingSourceConfig.getProperties());
        consumer = new KafkaConsumer<>(config);
        Set<TopicPartition> partitions = Sets.newHashSet(FluentIterable.from(consumer.partitionsFor(topicName))
                .transform(new Function<PartitionInfo, TopicPartition>() {
                    @Override
                    public TopicPartition apply(PartitionInfo input) {
                        return new TopicPartition(input.topic(), input.partition());
                    }
                }));
        consumer.assign(partitions);
        consumer.seekToBeginning(partitions);
        ConsumerRecords<byte[], byte[]> records = consumer.poll(500);
        if (records == null) {
            return null;
        }
        Iterator<ConsumerRecord<byte[], byte[]>> iterator = records.iterator();
        if (iterator == null || !iterator.hasNext()) {
            return null;
        }
        ConsumerRecord<byte[], byte[]> record = iterator.next();
        template = new String(record.value(), "UTF8");
    } catch (Exception e) {
        logger.error("error when fetch one record from kafka, stream:" + streamingSourceConfig.getName(), e);
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
    return template;
}
 
Example 10
Source File: KafkaClient.java    From kylin with Apache License 2.0
public static long getEarliestOffset(KafkaConsumer consumer, String topic, int partitionId) {
    TopicPartition topicPartition = new TopicPartition(topic, partitionId);
    consumer.assign(Arrays.asList(topicPartition));
    consumer.seekToBeginning(Arrays.asList(topicPartition));

    return consumer.position(topicPartition);
}
 
Example 11
Source File: LogbackIntegrationIT.java    From logback-kafka-appender with Apache License 2.0
@Test
public void testLogging() {

    for (int i = 0; i<1000; ++i) {
        logger.info("message"+(i));
    }

    final KafkaConsumer<byte[], byte[]> client = kafka.createClient();
    client.assign(Collections.singletonList(new TopicPartition("logs", 0)));
    client.seekToBeginning(Collections.singletonList(new TopicPartition("logs", 0)));


    int no = 0;

    ConsumerRecords<byte[],byte[]> poll = client.poll(1000);
    while(!poll.isEmpty()) {
        for (ConsumerRecord<byte[], byte[]> consumerRecord : poll) {
            final String messageFromKafka = new String(consumerRecord.value(), UTF8);
            assertThat(messageFromKafka, Matchers.equalTo("message"+no));
            ++no;
        }
        poll = client.poll(1000);
    }

    assertEquals(1000, no);

}
 
Example 12
Source File: KafkaConsumerCallBridge09.java    From flink with Apache License 2.0
public void seekPartitionToBeginning(KafkaConsumer<?, ?> consumer, TopicPartition partition) {
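	// Kafka 0.9.x exposed seekToBeginning(TopicPartition...) as a varargs method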
	consumer.seekToBeginning(partition);
}
 
Example 13
Source File: KafkaConsumerCallBridge010.java    From flink with Apache License 2.0
@Override
public void seekPartitionToBeginning(KafkaConsumer<?, ?> consumer, TopicPartition partition) {
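	// from Kafka 0.10.0 onward, seekToBeginning() takes a Collection<TopicPartition> (KIP-45)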
	consumer.seekToBeginning(Collections.singletonList(partition));
}
 
Example 14
Source File: KafkaDestinationTest.java    From SpinalTap with Apache License 2.0
@SuppressWarnings("unchecked")
@Test
public void KafkaDestination() throws Exception {
  createKafkaTopic(TOPIC);
  KafkaProducerConfiguration configs = new KafkaProducerConfiguration(this.bootstrapServers());
  KafkaDestination kafkaDestination = new KafkaDestination(null, configs, x -> x, metrics, 0L);
  List<Mutation> messages = new ArrayList<>();
  messages.add(createMutation(MutationType.INSERT));
  messages.add(createMutation(MutationType.UPDATE));
  messages.add(createMutation(MutationType.DELETE));
  kafkaDestination.publish(messages);

  Properties props = new Properties();
  props.setProperty("bootstrap.servers", this.bootstrapServers());
  props.setProperty(
      "key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
  props.setProperty(
      "value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
  KafkaConsumer<byte[], byte[]> kafkaConsumer = new KafkaConsumer<>(props);
  kafkaConsumer.assign(Collections.singletonList(new TopicPartition(TOPIC, 0)));
  kafkaConsumer.seekToBeginning(Collections.singletonList(new TopicPartition(TOPIC, 0)));
  List<ConsumerRecords<byte[], byte[]>> records = new ArrayList<>();
  ConsumerRecords<byte[], byte[]> record;
  long startMs = current();
  while (current() - startMs <= 10000L) {
    record = kafkaConsumer.poll(1000L);
    records.add(record);
    if (records.size() == 3) break;
  }
  Assert.assertEquals(records.size(), 3);

  for (ConsumerRecords<byte[], byte[]> consumerRecords : records) {
    for (ConsumerRecord<byte[], byte[]> consumerRecord : consumerRecords) {
      com.airbnb.jitney.event.spinaltap.v1.Mutation mutation =
          getMutation(consumerRecord.value());
      switch (mutation.getType()) {
        case INSERT:
          Assert.assertEquals(mutation, createMutation(MutationType.INSERT));
          break;
        case UPDATE:
          Assert.assertEquals(mutation, createMutation(MutationType.UPDATE));
          break;
        case DELETE:
          Assert.assertEquals(mutation, createMutation(MutationType.DELETE));
          break;
      }
    }
  }
  kafkaDestination.close();
  kafkaConsumer.close();
}
 
Example 15
Source File: ProjectTableService.java    From DBus with Apache License 2.0
public List<Map<String, String>> getTopicOffsets(String topic) {
    KafkaConsumer<String, String> consumer = null;
    try {
        Properties consumerProps = zkService.getProperties(KeeperConstants.KEEPER_CONSUMER_CONF);
        consumerProps.setProperty("client.id", "");
        consumerProps.setProperty("group.id", "topic.offsets.reader.temp");
        Properties globalConf = zkService.getProperties(KeeperConstants.GLOBAL_CONF);
        consumerProps.setProperty(GLOBAL_CONF_KEY_BOOTSTRAP_SERVERS, globalConf.getProperty(GLOBAL_CONF_KEY_BOOTSTRAP_SERVERS));
        if (StringUtils.equals(SecurityConfProvider.getSecurityConf(zkService), Constants.SECURITY_CONFIG_TRUE_VALUE)) {
            consumerProps.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
        }
        List<Map<String, String>> topicMsg = new ArrayList<>();
        // create a new consumer
        consumer = new KafkaConsumer<String, String>(consumerProps);
        /*// subscribe to the topic (all of its partitions must end up assigned, otherwise
        // "You can only check the position for partitions assigned to this consumer." is thrown)
        consumer.subscribe(Arrays.asList(topic));*/
        // get the topic's partition list
        List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);

        // gather details for each partition
        for (PartitionInfo partitionInfo : partitionInfos) {
            int partition = partitionInfo.partition();
            TopicPartition topicPartition = new TopicPartition(topic, partition);
            consumer.assign(Arrays.asList(topicPartition));

            consumer.seekToEnd(consumer.assignment());
            // the next fetch position
            long nextFetchOffset = consumer.position(topicPartition);

            consumer.seekToBeginning(consumer.assignment());
            long headOffset = consumer.position(topicPartition);

            Map<String, String> partitionMsg = new HashedMap();
            partitionMsg.put("topic", topic);
            partitionMsg.put("partition", String.valueOf(partition));
            partitionMsg.put("latestOffset", String.valueOf(nextFetchOffset));
            partitionMsg.put("headOffset", String.valueOf(headOffset));
            topicMsg.add(partitionMsg);

        }

        return topicMsg;
    } catch (Exception e) {
        logger.error("[table topic offset] Error encountered while getting topic messages. topic:{}", topic);
        return null;
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
}
 
Example 16
Source File: KafkaConsumerCallBridge010.java    From Flink-CEPplus with Apache License 2.0
@Override
public void seekPartitionToBeginning(KafkaConsumer<?, ?> consumer, TopicPartition partition) {
	consumer.seekToBeginning(Collections.singletonList(partition));
}
 
Example 17
Source File: KafkaAppenderIT.java    From logback-kafka-appender with Apache License 2.0
@Test
public void testLogging() {

    final int messageCount = 2048;
    final int messageSize = 1024;

    final Logger logger = loggerContext.getLogger("ROOT");

    unit.start();

    assertTrue("appender is started", unit.isStarted());

    final BitSet messages = new BitSet(messageCount);

    for (int i = 0; i < messageCount; ++i) {
        final String prefix = Integer.toString(i)+ ";";
        final StringBuilder sb = new StringBuilder();
        sb.append(prefix);
        byte[] b = new byte[messageSize-prefix.length()];
        ThreadLocalRandom.current().nextBytes(b);
        for(byte bb : b) {
            sb.append((char) (bb & 0x7F));
        }

        final LoggingEvent loggingEvent = new LoggingEvent("a.b.c.d", logger, Level.INFO, sb.toString(), null, new Object[0]);
        unit.append(loggingEvent);
        messages.set(i);
    }

    unit.stop();
    assertFalse("appender is stopped", unit.isStarted());

    final KafkaConsumer<byte[], byte[]> javaConsumerConnector = kafka.createClient();
    javaConsumerConnector.assign(Collections.singletonList(new TopicPartition("logs", 0)));
    javaConsumerConnector.seekToBeginning(Collections.singletonList(new TopicPartition("logs", 0)));
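    // position() evaluates the pending seek; on a freshly created topic the beginning is offset 0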
    final long position = javaConsumerConnector.position(new TopicPartition("logs", 0));
    assertEquals(0, position);

    ConsumerRecords<byte[], byte[]> poll = javaConsumerConnector.poll(10000);
    int readMessages = 0;
    while (!poll.isEmpty()) {
        for (ConsumerRecord<byte[], byte[]> aPoll : poll) {
            byte[] msg = aPoll.value();
            byte[] msgPrefix = new byte[32];
            System.arraycopy(msg, 0, msgPrefix, 0, 32);
            final String messageFromKafka = new String(msgPrefix, UTF8);
            int delimiter = messageFromKafka.indexOf(';');
            final int msgNo = Integer.parseInt(messageFromKafka.substring(0, delimiter));
            messages.set(msgNo, false);
            readMessages++;
        }
        poll = javaConsumerConnector.poll(1000);
    }

    assertEquals(messageCount, readMessages);
    assertThat(fallbackLoggingEvents, empty());
    assertEquals("all messages should have been read", BitSet.valueOf(new byte[0]), messages);

}
 
Example 18
Source File: KafkaConsumerCallBridge09.java    From Flink-CEPplus with Apache License 2.0
public void seekPartitionToBeginning(KafkaConsumer<?, ?> consumer, TopicPartition partition) {
	consumer.seekToBeginning(partition);
}