Java Code Examples for org.apache.kafka.clients.consumer.KafkaConsumer#assign()

The following examples show how to use org.apache.kafka.clients.consumer.KafkaConsumer#assign(). Each example is taken from an open-source project; the source file and project are noted above each snippet.
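Before the project-specific examples, here is a minimal, self-contained sketch of the usual assign() pattern: discover partitions with partitionsFor(), assign them manually, seek, then poll. The broker address, topic name, and group id below are placeholders for illustration, not values from any of the projects that follow.

import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class AssignExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "assign-example-group");    // placeholder group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // assign() takes explicit partitions and bypasses consumer-group rebalancing,
            // unlike subscribe(), which lets the group coordinator assign partitions.
            List<TopicPartition> partitions = new ArrayList<>();
            for (PartitionInfo info : consumer.partitionsFor("example-topic")) { // placeholder topic
                partitions.add(new TopicPartition(info.topic(), info.partition()));
            }
            consumer.assign(partitions);
            consumer.seekToBeginning(partitions); // start reading from the earliest available offset

            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            records.forEach(r -> System.out.printf("partition=%d offset=%d value=%s%n",
                    r.partition(), r.offset(), r.value()));
        }
    }
}

As the examples below show, assigned consumers typically pair assign() with seekToBeginning(), seekToEnd(), seek(), or position() to control exactly where reading starts.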
Example 1
Source File: FlowLineCheckService.java    From DBus with Apache License 2.0
private List<Object> initConsumer(String topic, String step) throws Exception {
    Properties props = obtainKafkaConsumerProps();
    props.put("group.id", "auto-check-table-consumer-groupid-ss-" + step);
    props.put("client.id", "auto-check-table-consumer-clientid-ss-" + step);
    props.put("enable.auto.commit", false);
    KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(props);
    List<TopicPartition> assignTopics = new ArrayList<>();
    assignTopics(consumer.partitionsFor(topic), assignTopics);
    consumer.assign(assignTopics);
    consumer.seekToEnd(assignTopics);
    long position = consumer.position(assignTopics.get(0));

    logger.info("topic: {}, end position: {}", topic, position);

    List<Object> list = new ArrayList<>();
    list.add(consumer);
    list.add(position);
    return list;
}
 
Example 2
Source File: KafkaTestUtils.java    From brooklin with BSD 2-Clause "Simplified" License
/**
 * Consume messages from a given partition of a Kafka topic, using given ReaderCallback
 */
public static void readTopic(String topic, Integer partition, String brokerList, ReaderCallback callback)
    throws Exception {
  Validate.notNull(topic);
  Validate.notNull(partition);
  Validate.notNull(brokerList);
  Validate.notNull(callback);

  KafkaConsumer<byte[], byte[]> consumer = createConsumer(brokerList);
  if (partition >= 0) {
    List<TopicPartition> topicPartitions = Collections.singletonList(new TopicPartition(topic, partition));
    consumer.assign(topicPartitions);
    consumer.seekToBeginning(topicPartitions);
  } else {
    consumer.subscribe(Collections.singletonList(topic));
  }

  boolean keepGoing = true;
  long now = System.currentTimeMillis();
  do {
    ConsumerRecords<byte[], byte[]> records = consumer.poll(1000);
    for (ConsumerRecord<byte[], byte[]> record : records.records(topic)) {
      if (!callback.onMessage(record.key(), record.value())) {
        keepGoing = false;
        break;
      }
    }

    // Guard against buggy test which can hang forever
    if (System.currentTimeMillis() - now >= DEFAULT_TIMEOUT_MS) {
      throw new TimeoutException("Timed out before reading all messages");
    }
  } while (keepGoing);
}
 
Example 3
Source File: KafkaOffsetGetter.java    From Kafka-Insight with Apache License 2.0
/**
 * When an object implementing interface <code>Runnable</code> is used
 * to create a thread, starting the thread causes the object's
 * <code>run</code> method to be called in that separately executing
 * thread.
 * <p>
 * The general contract of the method <code>run</code> is that it may
 * take any action whatsoever.
 *
 * @see Thread#run()
 */
@Override
public void run() {
    String group = "kafka-insight-logOffsetListener";
    int sleepTime = 60000;
    KafkaConsumer<Array<Byte>, Array<Byte>> kafkaConsumer = null;

    while (true) {

        try {
            if (null == kafkaConsumer) {
                kafkaConsumer = KafkaUtils.createNewKafkaConsumer(brokersInfo, group);
            }

            Map<String, List<PartitionInfo>> topicPartitionsMap = kafkaConsumer.listTopics();
            for (List<PartitionInfo> partitionInfoList : topicPartitionsMap.values()) {
                for (PartitionInfo partitionInfo : partitionInfoList) {
                    TopicPartition topicPartition = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
                    Collection<TopicPartition> topicPartitions = Arrays.asList(topicPartition);
                    kafkaConsumer.assign(topicPartitions);
                    kafkaConsumer.seekToEnd(topicPartitions);
                    Long logEndOffset = kafkaConsumer.position(topicPartition);
                    logEndOffsetMap.put(topicPartition, logEndOffset);
                }
            }

            Thread.sleep(sleepTime);

        } catch (Exception e) {
            e.printStackTrace();
            if (null != kafkaConsumer) {
                kafkaConsumer.close();
                kafkaConsumer = null;
            }
        }
    }

}
 
Example 4
Source File: KafkaServiceImpl.java    From kafka-eagle with Apache License 2.0
/**
 * Get the topic's producer log size (sum of partition end offsets).
 */
public long getKafkaProducerLogSize(String clusterAlias, String topic, Set<Integer> partitionids) {
	long producerLogSize = 0L;
	Properties props = new Properties();
	props.put(ConsumerConfig.GROUP_ID_CONFIG, Kafka.KAFKA_EAGLE_SYSTEM_GROUP);
	props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, getKafkaBrokerServer(clusterAlias));
	props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getCanonicalName());
	props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getCanonicalName());
	if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.sasl.enable")) {
		sasl(props, clusterAlias);
	}
	if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.ssl.enable")) {
		ssl(props, clusterAlias);
	}
	KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
	Set<TopicPartition> tps = new HashSet<>();
	for (int partitionid : partitionids) {
		TopicPartition tp = new TopicPartition(topic, partitionid);
		tps.add(tp);
	}
	consumer.assign(tps);
	java.util.Map<TopicPartition, Long> endLogSize = consumer.endOffsets(tps);
	try {
		for (Entry<TopicPartition, Long> entry : endLogSize.entrySet()) {
			producerLogSize += entry.getValue();
		}
	} catch (Exception e) {
		LOG.error("Get producer topic logsize has error, msg is " + e.getMessage());
		e.printStackTrace();
	} finally {
		if (consumer != null) {
			consumer.close();
		}
	}
	return producerLogSize;
}
 
Example 5
Source File: ExactlyOnceStaticConsumer.java    From javabase with Apache License 2.0
/**
 * Manually listens to a specific topic partition. If you are looking for an example of how to dynamically listen
 * to partitions and manually control offsets, see ManualOffsetConsumerWithRebalanceExample.java
 */
private static TopicPartition registerConsumerToSpecificPartition(KafkaConsumer<String, String> consumer, String topic, int partition) {

    TopicPartition topicPartition = new TopicPartition(topic, partition);
    List<TopicPartition> partitions = Arrays.asList(topicPartition);
    consumer.assign(partitions);
    return topicPartition;

}
 
Example 6
Source File: KafkaServiceImpl.java    From kafka-eagle with Apache License 2.0
/**
 * Get a Kafka 0.10.x topic partition's history log size (end offset).
 */
public long getKafkaLogSize(String clusterAlias, String topic, int partitionid) {
	long histyLogSize = 0L;
	Properties props = new Properties();
	props.put(ConsumerConfig.GROUP_ID_CONFIG, Kafka.KAFKA_EAGLE_SYSTEM_GROUP);
	props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, getKafkaBrokerServer(clusterAlias));
	props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getCanonicalName());
	props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getCanonicalName());
	if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.sasl.enable")) {
		sasl(props, clusterAlias);
	}
	if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.ssl.enable")) {
		ssl(props, clusterAlias);
	}
	KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
	TopicPartition tp = new TopicPartition(topic, partitionid);
	consumer.assign(Collections.singleton(tp));
	java.util.Map<TopicPartition, Long> logsize = consumer.endOffsets(Collections.singleton(tp));
	try {
		histyLogSize = logsize.get(tp).longValue();
	} catch (Exception e) {
		LOG.error("Get history topic logsize has error, msg is " + e.getMessage());
		e.printStackTrace();
	} finally {
		if (consumer != null) {
			consumer.close();
		}
	}
	return histyLogSize;
}
 
Example 7
Source File: KafkaAvroSerDesWithKafkaServerTest.java    From registry with Apache License 2.0
private ConsumerRecords<String, Object> consumeMessage(String topicName, String bootstrapServers, String consumerGroup) {
    Map<String, Object> props = new HashMap<>();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    props.putAll(SCHEMA_REGISTRY_TEST_SERVER_CLIENT_WRAPPER.exportClientConf(true));
    props.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroup);
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
    props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
    props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class.getName());

    KafkaConsumer<String, Object> consumer = new KafkaConsumer<>(props);

    List<PartitionInfo> partitionInfos = consumer.partitionsFor(topicName);
    Collection<TopicPartition> partitions = new ArrayList<>();
    for (PartitionInfo partitionInfo : partitionInfos) {
        partitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
    }
    LOG.info("partitions [{}]", partitions);
    LOG.info("subscribed topis: [{}] ", consumer.listTopics());

    consumer.assign(partitions);
    consumer.seekToBeginning(partitions);

    ConsumerRecords<String, Object> consumerRecords = null;
    int ct = 0;
    while (ct++ < 100 && (consumerRecords == null || consumerRecords.isEmpty())) {
        LOG.info("Polling for consuming messages");
        consumerRecords = consumer.poll(Duration.ofMillis(500));
    }
    consumer.commitSync();
    consumer.close();

    return consumerRecords;
}
 
Example 8
Source File: ITKafkaStreamsTracing.java    From brave with Apache License 2.0
Consumer<String, String> createTracingConsumer(String... topics) {
  if (topics.length == 0) {
    topics = new String[] {testName.getMethodName()};
  }
  KafkaConsumer<String, String> consumer = kafka.helper().createStringConsumer();
  List<TopicPartition> assignments = new ArrayList<>();
  for (String topic : topics) {
    assignments.add(new TopicPartition(topic, 0));
  }
  consumer.assign(assignments);
  return KafkaTracing.create(tracing).consumer(consumer);
}
 
Example 9
Source File: SinkerKafkaSource.java    From DBus with Apache License 2.0
public SinkerKafkaSource() throws IOException, PropertyException {
    Properties config = ConfUtils.getProps(CONFIG_PROPERTIES);
    topic = config.getProperty(Constants.SINKER_HEARTBEAT_TOPIC);
    if (topic == null) {
        throw new PropertyException("[sinker] The configuration property must not be empty! " + Constants.SINKER_HEARTBEAT_TOPIC);
    }

    topicPartition = new TopicPartition(topic, 0);

    Properties statProps = ConfUtils.getProps(CONSUMER_PROPERTIES);
    statProps.setProperty("enable.auto.commit", "true");
    statProps.setProperty("client.id", "heartbeat_consumer_sinker_client");
    List<TopicPartition> topics = Arrays.asList(topicPartition);
    //security
    if (KafkaUtil.checkSecurity()) {
        statProps.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
    }

    LOG.info("[sinker] SinkerKafkaSource message: set max.poll.records=1000");
    statProps.setProperty("max.poll.records", "1000");

    consumer = new KafkaConsumer(statProps);
    consumer.assign(topics);

    long beforeOffset = consumer.position(topicPartition);
    String offset = config.getProperty("sinker.kafka.offset");
    if (StringUtils.isBlank(offset) || offset.equalsIgnoreCase("none")) {
        // do nothing
    } else if (offset.equalsIgnoreCase("begin")) {
        consumer.seekToBeginning(Lists.newArrayList(topicPartition));
    } else if (offset.equalsIgnoreCase("end")) {
        consumer.seekToEnd(Lists.newArrayList(topicPartition));
    } else {
        long nOffset = Long.parseLong(offset);
        consumer.seek(topicPartition, nOffset);
    }
    long afterOffset = consumer.position(topicPartition);
    LOG.info("[sinker] SinkerKafkaSource init OK. beforeOffset={}, afterOffset={}", beforeOffset, afterOffset);
}
 
Example 10
Source File: KafkaConsumerCommand.java    From jeesuite-libs with Apache License 2.0
protected long getLogSize(KafkaConsumer<String, Serializable> kafkaConsumer,String topic, int partition) {
	TopicPartition topicPartition = new TopicPartition(topic, partition);
	List<TopicPartition> asList = Arrays.asList(topicPartition);
	kafkaConsumer.assign(asList);
	kafkaConsumer.seekToEnd(asList);
	long logEndOffset = kafkaConsumer.position(topicPartition);
	return logEndOffset;
}
 
Example 11
Source File: KafkaEventSource.java    From mewbase with MIT License
private KafkaConsumer<String, byte[]> createAndAssignConsumer(TopicPartition partition) {
    kafkaConsumerProps.put("group.id", UUID.randomUUID().toString());
    KafkaConsumer<String, byte[]> kafkaConsumer =
                    new KafkaConsumer<String, byte[]>(kafkaConsumerProps,
                            new org.apache.kafka.common.serialization.StringDeserializer(),
                            new org.apache.kafka.common.serialization.ByteArrayDeserializer());
    kafkaConsumer.assign(Arrays.asList(partition));
    return kafkaConsumer;
}
 
Example 12
Source File: KafkaBinaryLog.java    From modernmt with Apache License 2.0
/**
 * This method makes this KafkaBinaryLog connect to the Kafka server specified in its initial BinaryLogConfig.
 * <p>
 * A kafka consumer and/or a kafka producer will be launched depending on the passed "enable" params.
 * If both the "enable" params are false, this method will do nothing.
 * <p>
 * The connection process will use the passed timeout and timeunit values.
 * <p>
 * If the connection succeeds, this method returns a map that associates to the (short) ID of each kafka topic
 * the corresponding (long) last written position.
 *
 * @param timeout        timeout to use while connecting to the Kafka server
 * @param unit           time unit for the timeout
 * @param enableConsumer boolean value that specifies whether the kafka consumer should be started or not
 * @param enableProducer boolean value that specifies whether the kafka producer should be started or not
 * @return a map containing, for each Kafka topic, its latest written positions if a consumer was to be started; null if only the producer was to be started.
 * @throws HostUnreachableException if could not connect to the Kafka server
 */
@Override
public Map<Short, Long> connect(long timeout, TimeUnit unit, boolean enableConsumer, boolean enableProducer) throws HostUnreachableException {

    // Create Kafka producer
    if (enableProducer) {
        Properties producerProperties = loadProperties("kafka-producer.properties", hosts, port);
        this.producer = new KafkaProducer<>(producerProperties);    //write in the given partitions
    }

    // Create Kafka consumer and connect to the Kafka remote server to get the latest positions for each channel
    if (enableConsumer) {
        // load consumer properties and build kafka consumer for reading messages from the server from the given partitions
        Properties consumerProperties = loadProperties("kafka-consumer.properties", hosts, port);
        consumerProperties.put("group.id", uuid);
        KafkaConsumer<Integer, KafkaPacket> consumer = new KafkaConsumer<>(consumerProperties);
        consumer.assign(partitions);

        //use a separate thread to connect to the Kafka server
        ConnectionThread connectThread = new ConnectionThread(consumer);
        connectThread.start();
        try {
            unit.timedJoin(connectThread, timeout);
        } catch (InterruptedException e) {
            // ignore it
        }

        if (connectThread.isAlive())    // if the thread is still alive, the connection to the Kafka server could not be established
            throw new HostUnreachableException(hosts, port);

        this.pollingThread.start(consumer);

        return connectThread.getLatestPositions();
    }
    return null;
}
 
Example 13
Source File: KafkaClient.java    From kylin-on-parquet-v2 with Apache License 2.0
public static long getEarliestOffset(KafkaConsumer consumer, String topic, int partitionId) {

    TopicPartition topicPartition = new TopicPartition(topic, partitionId);
    consumer.assign(Arrays.asList(topicPartition));
    consumer.seekToBeginning(Arrays.asList(topicPartition));

    return consumer.position(topicPartition);
}
 
Example 14
Source File: KafkaClient.java    From kylin with Apache License 2.0
public static long getLatestOffset(KafkaConsumer consumer, String topic, int partitionId) {

    TopicPartition topicPartition = new TopicPartition(topic, partitionId);
    consumer.assign(Arrays.asList(topicPartition));
    consumer.seekToEnd(Arrays.asList(topicPartition));

    return consumer.position(topicPartition);
}
 
Example 15
Source File: KafkaDestinationTest.java    From SpinalTap with Apache License 2.0
@SuppressWarnings("unchecked")
@Test
public void KafkaDestination() throws Exception {
  createKafkaTopic(TOPIC);
  KafkaProducerConfiguration configs = new KafkaProducerConfiguration(this.bootstrapServers());
  KafkaDestination kafkaDestination = new KafkaDestination(null, configs, x -> x, metrics, 0L);
  List<Mutation> messages = new ArrayList<>();
  messages.add(createMutation(MutationType.INSERT));
  messages.add(createMutation(MutationType.UPDATE));
  messages.add(createMutation(MutationType.DELETE));
  kafkaDestination.publish(messages);

  Properties props = new Properties();
  props.setProperty("bootstrap.servers", this.bootstrapServers());
  props.setProperty(
      "key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
  props.setProperty(
      "value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
  KafkaConsumer<byte[], byte[]> kafkaConsumer = new KafkaConsumer<>(props);
  kafkaConsumer.assign(Collections.singletonList(new TopicPartition(TOPIC, 0)));
  kafkaConsumer.seekToBeginning(new TopicPartition(TOPIC, 0));
  List<ConsumerRecords<byte[], byte[]>> records = new ArrayList<>();
  ConsumerRecords<byte[], byte[]> record;
  long startMs = current();
  while (current() - startMs <= 10000L) {
    record = kafkaConsumer.poll(1000L);
    records.add(record);
    if (records.size() == 3) break;
  }
  Assert.assertEquals(records.size(), 3);

  for (ConsumerRecords<byte[], byte[]> consumerRecords : records) {
    for (ConsumerRecord<byte[], byte[]> consumerRecord : consumerRecords) {
      com.airbnb.jitney.event.spinaltap.v1.Mutation mutation =
          getMutation(consumerRecord.value());
      switch (mutation.getType()) {
        case INSERT:
          Assert.assertEquals(mutation, createMutation(MutationType.INSERT));
          break;
        case UPDATE:
          Assert.assertEquals(mutation, createMutation(MutationType.UPDATE));
          break;
        case DELETE:
          Assert.assertEquals(mutation, createMutation(MutationType.DELETE));
          break;
      }
    }
  }
  kafkaDestination.close();
  kafkaConsumer.close();
}
 
Example 16
Source File: KafkaReadFunction.java    From spliceengine with GNU Affero General Public License v3.0
@Override
public Iterator<ExecRow> call(Integer partition) throws Exception {
    Properties props = new Properties();

    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);

    String consumer_id = "spark-consumer-dss-krf-"+UUID.randomUUID();
    props.put(ConsumerConfig.GROUP_ID_CONFIG, consumer_id);
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, consumer_id);

    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ExternalizableDeserializer.class.getName());
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    KafkaConsumer<Integer, Externalizable> consumer = new KafkaConsumer<Integer, Externalizable>(props);
    consumer.assign(Arrays.asList(new TopicPartition(topicName, partition)));

    return new Iterator<ExecRow>() {
        Iterator<ConsumerRecord<Integer, Externalizable>> it = null;

        @Override
        public boolean hasNext() {
            if (it == null) {
                ConsumerRecords<Integer, Externalizable> records = null;
                while (records == null || records.isEmpty()) {
                    records = consumer.poll( java.time.Duration.ofMillis(1000) );
                    if (TaskContext.get().isInterrupted()) {
                        consumer.close();
                        throw new TaskKilledException();
                    }
                }
                it = records.iterator();
            }
            if (it.hasNext()) {
                return true;
            }
            else {
                consumer.close();
                return false;
            }
        }

        @Override
        public ExecRow next() {
            return (ExecRow)it.next().value();
        }
    };
}
 
Example 17
Source File: LiKafkaConsumerIntegrationTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Test
public void testGiganticLargeMessages() throws Exception {
  MessageSplitter splitter = new MessageSplitterImpl(MAX_SEGMENT_SIZE,
      new DefaultSegmentSerializer(),
      new UUIDFactory.DefaultUUIDFactory<>());

  String topic = "testGiganticLargeMessages";
  createTopic(topic);
  TopicPartition tp = new TopicPartition(topic, 0);
  Collection<TopicPartition> tps = new ArrayList<>(Collections.singletonList(tp));

  //send 2 interleaved gigantic msgs

  Producer<byte[], byte[]> producer = createRawProducer();
  // M0, 20 segments
  UUID messageId0 = LiKafkaClientsUtils.randomUUID();
  String message0 = KafkaTestUtils.getRandomString(20 * MAX_SEGMENT_SIZE);
  List<ProducerRecord<byte[], byte[]>> m0Segs = splitter.split(topic, 0, messageId0, message0.getBytes());
  // M1, 30 segments
  UUID messageId1 = LiKafkaClientsUtils.randomUUID();
  String message1 = KafkaTestUtils.getRandomString(30 * MAX_SEGMENT_SIZE);
  List<ProducerRecord<byte[], byte[]>> m1Segs = splitter.split(topic, 0, messageId1, message1.getBytes());

  List<ProducerRecord<byte[], byte[]>> interleaved = interleave(m0Segs, m1Segs);
  for (ProducerRecord<byte[], byte[]> rec : interleaved) {
    producer.send(rec).get();
  }

  //create a consumer with not enough memory to assemble either

  Properties props = new Properties();
  String groupId = "testGiganticLargeMessages-" + UUID.randomUUID();
  props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId);
  // Make sure we start to consume from the beginning.
  props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
  // Only fetch one record at a time.
  props.setProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1");
  // No auto commit
  props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
  // Not enough memory to assemble anything
  props.setProperty(LiKafkaConsumerConfig.MESSAGE_ASSEMBLER_BUFFER_CAPACITY_CONFIG, "" + (MAX_SEGMENT_SIZE + 1));
  props.setProperty(LiKafkaConsumerConfig.EXCEPTION_ON_MESSAGE_DROPPED_CONFIG, "false");

  LiKafkaConsumer<String, String> tempConsumer = createConsumer(props);
  tempConsumer.assign(tps);

  //traverse entire partition

  int topicSize = interleaved.size();
  long timeout = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(120);
  int msgsDelivered = 0;
  while (true) {
    ConsumerRecords<String, String> records = tempConsumer.poll(1000);
    msgsDelivered += records.count();
    long position = tempConsumer.position(tp);
    if (position >= topicSize) {
      break;
    }
    if (System.currentTimeMillis() > timeout) {
      throw new IllegalStateException("unable to consume to  the end of the topic within timeout."
          + " position=" + position + ". end=" + topicSize);
    }
  }

  Assert.assertTrue(msgsDelivered == 0, "no msgs were expected to be delivered. instead got " + msgsDelivered);

  //make sure offsets committed reflect the msgs we've given up on

  tempConsumer.commitSync();
  OffsetAndMetadata committed = tempConsumer.committed(tp);
  Assert.assertEquals(committed.offset(), topicSize); //li consumer would claim to be at end

  Properties vanillaProps = getConsumerProperties(props);
  KafkaConsumer<String, String> vanillaConsumer = new KafkaConsumer<>(vanillaProps);
  vanillaConsumer.assign(tps);
  OffsetAndMetadata vanillaCommitted = vanillaConsumer.committed(tp);
  Assert.assertEquals(vanillaCommitted.offset(), topicSize - 1); //vanilla offset is one before (1 fragment in buffer)
}
 
Example 18
Source File: KafkaConsumerCallBridge09.java    From flink with Apache License 2.0
public void assignPartitions(KafkaConsumer<?, ?> consumer, List<TopicPartition> topicPartitions) throws Exception {
	consumer.assign(topicPartitions);
}
 
Example 19
Source File: KafkaConsumerCallBridge010.java    From Flink-CEPplus with Apache License 2.0
@Override
public void assignPartitions(KafkaConsumer<?, ?> consumer, List<TopicPartition> topicPartitions) throws Exception {
	consumer.assign(topicPartitions);
}
 
Example 20
Source File: AvroConsumerExample.java    From javabase with Apache License 2.0
private static void readMessages() throws InterruptedException {

    KafkaConsumer<String, byte[]> consumer = createConsumer();

    // Assign to a specific topic and partition; subscribe() could be used here instead to subscribe to the whole topic.
    consumer.assign(Arrays.asList(new TopicPartition("avro-topic", 0)));

    processRecords(consumer);
}