Java Code Examples for org.apache.kafka.common.PartitionInfo#partition()

The following examples show how to use org.apache.kafka.common.PartitionInfo#partition(). You can go to the original project or source file by following the attribution above each example.
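Before the per-project examples, here is a minimal, self-contained sketch of the pattern most of them share: fetch the PartitionInfo list for a topic from a consumer, then turn each partition() id into a TopicPartition. The class name, broker address, and topic name are placeholders, not taken from any project below.

import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

public class PartitionIdDemo {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
    props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
    props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
    try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
      // partitionsFor() returns one PartitionInfo per partition of the topic;
      // partition() is the zero-based id of that partition
      for (PartitionInfo info : consumer.partitionsFor("my-topic")) { // placeholder topic
        TopicPartition tp = new TopicPartition(info.topic(), info.partition());
        System.out.println(tp); // prints e.g. my-topic-0
      }
    }
  }
}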
Example 1
Source File: KafkaClusterManager.java    From doctorkafka with Apache License 2.0
public Map<Integer, List<TopicPartition>> getBrokerLeaderPartitions(
    Map<String, List<PartitionInfo>> topicPartitionInfoMap) {
  Map<Integer, List<TopicPartition>> result = new HashMap<>();

  for (String topic : topicPartitionInfoMap.keySet()) {
    List<PartitionInfo> partitionInfoList = topicPartitionInfoMap.get(topic);
    if (partitionInfoList == null) {
      LOG.error("Failed to get partition info for {}", topic);
      continue;
    }

    for (PartitionInfo info : partitionInfoList) {
      Node leaderNode = info.leader();
      if (leaderNode != null) {
        TopicPartition topicPartition = new TopicPartition(info.topic(), info.partition());
        result.computeIfAbsent(leaderNode.id(), id -> new ArrayList<>()).add(topicPartition);
      }
    }
  }
  return result;
}
 
Example 2
Source File: KafkaStream.java    From moa with GNU General Public License v3.0
/**
 * Gets a list of all topic partitions the consumer is consuming.
 */
protected List<TopicPartition> getPartitions() {
  // Create a buffer for the results
  List<TopicPartition> result = new LinkedList<>();

  // If the consumer isn't established, return the empty list
  if (m_Consumer == null)
    return result;

  // Iterate through the topics and partitions, adding them to the result
  for (Entry<String, List<PartitionInfo>> topicPartitions : m_Consumer.listTopics().entrySet()) {
    String topic = topicPartitions.getKey();
    for (PartitionInfo info : topicPartitions.getValue()) {
      int partition = info.partition();

      result.add(new TopicPartition(topic, partition));
    }
  }

  return result;
}
 
Example 3
Source File: TestSimpleConsumerManager.java    From tajo with Apache License 2.0
@Test
public void testFetchData() throws Exception {
  Set<String> receivedDataSet = new HashSet<>();
  for (PartitionInfo partitionInfo : SimpleConsumerManager.getPartitions(KAFKA_SERVER_URI, TOPIC_NAME)) {
    int partitionId = partitionInfo.partition();
    try (SimpleConsumerManager cm = new SimpleConsumerManager(KAFKA_SERVER_URI, TOPIC_NAME, partitionId)) {
      long startOffset = cm.getEarliestOffset();
      long lastOffset = cm.getLatestOffset();
      if (startOffset < lastOffset) {
        for (ConsumerRecord<byte[], byte[]> message : cm.poll(startOffset, Long.MAX_VALUE)) {
          receivedDataSet.add(new String(message.value(), "UTF-8"));
        }
      }
    }
  }

  KafkaTestUtil.equalTestData(receivedDataSet);
}
 
Example 4
Source File: BaseKafkaConsumer11.java    From datacollector with Apache License 2.0
private boolean firstConnection() throws StageException {
  try (Consumer<?, ?> kafkaAuxiliaryConsumer = new KafkaConsumer<>(auxiliaryKafkaConsumerProperties)) {
    List<PartitionInfo> partitionInfoList = kafkaAuxiliaryConsumer.partitionsFor(topic);
    for (PartitionInfo partitionInfo : partitionInfoList) {
      TopicPartition topicPartition = new TopicPartition(topic, partitionInfo.partition());
      try {
        OffsetAndMetadata offsetAndMetadata = kafkaAuxiliaryConsumer.committed(topicPartition);
        if (offsetAndMetadata != null) {
          // Offset already committed for this partition; no explicit close() is
          // needed, the try-with-resources block closes the consumer on return
          LOG.debug("Offset defined for partition {}", topicPartition.partition());
          return false;
        }
      } catch (Exception ex) {
        // Could not obtain committed offset for corresponding partition
        LOG.error(KafkaErrors.KAFKA_30.getMessage(), ex.toString(), ex);
        throw new StageException(KafkaErrors.KAFKA_30, ex.toString(), ex);
      }
    }
  }

  // There was no offset already defined for any partition so it is the first connection
  return true;
}
 
Example 5
Source File: KafkaMessageChannelBinder.java    From spring-cloud-stream-binder-kafka with Apache License 2.0
private TopicPartitionOffset[] getTopicPartitionOffsets(
		Collection<PartitionInfo> listenedPartitions,
		ExtendedConsumerProperties<KafkaConsumerProperties> extendedConsumerProperties,
		ConsumerFactory<?, ?> consumerFactory) {

	final TopicPartitionOffset[] topicPartitionOffsets =
			new TopicPartitionOffset[listenedPartitions.size()];
	int i = 0;
	SeekPosition seekPosition = null;
	Object resetTo = checkReset(extendedConsumerProperties.getExtension().isResetOffsets(),
			consumerFactory.getConfigurationProperties().get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG));
	if (resetTo != null) {
		seekPosition = "earliest".equals(resetTo) ? SeekPosition.BEGINNING : SeekPosition.END;
	}
	for (PartitionInfo partition : listenedPartitions) {

		topicPartitionOffsets[i++] = new TopicPartitionOffset(
				partition.topic(), partition.partition(), seekPosition);
	}
	return topicPartitionOffsets;
}
 
Example 6
Source File: TopicMessageCounter.java    From ja-micro with Apache License 2.0
/**
 * Gets the total message count for the topic.
 * <b>WARNING: Don't use with compacted topics</b>
 */
@SuppressWarnings("unchecked")
public long getCount(String kafkaBrokers, String topic) {
    KafkaConsumer consumer = buildConsumer(kafkaBrokers);
    try {
        Map<String, List<PartitionInfo>> topics = consumer.listTopics();
        List<PartitionInfo> partitionInfos = topics.get(topic);
        if (partitionInfos == null) {
            logger.warn("Partition information was not found for topic {}", topic);
            return 0;
        } else {
            Collection<TopicPartition> partitions = new ArrayList<>();
            for (PartitionInfo partitionInfo : partitionInfos) {
                TopicPartition partition = new TopicPartition(topic, partitionInfo.partition());
                partitions.add(partition);
            }
            Map<TopicPartition, Long> endingOffsets = consumer.endOffsets(partitions);
            Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(partitions);
            return diffOffsets(beginningOffsets, endingOffsets);
        }
    } finally {
        consumer.close();
    }
}
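The diffOffsets helper invoked above is not part of this excerpt. The following is a plausible reconstruction from the call site (per partition, ending offset minus beginning offset, summed), not ja-micro's actual implementation; it also shows why the compacted-topic warning matters, since compaction removes records between the two offsets and the difference then overstates the live message count.

// Hypothetical reconstruction of diffOffsets; the real ja-micro helper may differ.
private long diffOffsets(Map<TopicPartition, Long> beginningOffsets,
                         Map<TopicPartition, Long> endingOffsets) {
    long count = 0;
    for (Map.Entry<TopicPartition, Long> entry : endingOffsets.entrySet()) {
        Long begin = beginningOffsets.get(entry.getKey());
        if (begin != null) {
            // the offset span approximates the record count; on compacted topics
            // deleted records still widen the span, inflating the result
            count += entry.getValue() - begin;
        }
    }
    return count;
}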
 
Example 7
Source File: KafkaWritingTask.java    From singer with Apache License 2.0
public KafkaWritingTask(KafkaProducer<byte[], byte[]> producer,
                        List<ProducerRecord<byte[], byte[]>> msgs,
                        int writeTimeoutInSeconds, 
                        List<PartitionInfo> sortedPartitions) {
  this.producer = producer;
  this.messages = msgs;
  this.writeTimeoutInSeconds = writeTimeoutInSeconds;
  this.taskCreationTimeInMillis = System.currentTimeMillis();
  try {
    PartitionInfo firstPartition = sortedPartitions.get(msgs.get(0).partition());
    leaderNode = firstPartition.leader().host();
    partition = firstPartition.partition();
  } catch (Exception e) {
    LOG.error("Error getting leader node from partition metadata", e);
    OpenTsdbMetricConverter.incr(SingerMetrics.LEADER_INFO_EXCEPTION, 1, "host=" + KafkaWriter.HOSTNAME);
    leaderNode = "n/a";
  }
}
 
Example 8
Source File: KafkaOffsetGetter.java    From Kafka-Insight with Apache License 2.0
/**
 * When an object implementing interface <code>Runnable</code> is used
 * to create a thread, starting the thread causes the object's
 * <code>run</code> method to be called in that separately executing
 * thread.
 * <p>
 * The general contract of the method <code>run</code> is that it may
 * take any action whatsoever.
 *
 * @see Thread#run()
 */
@Override
public void run() {
    String group = "kafka-insight-logOffsetListener";
    int sleepTime = 60000;
    KafkaConsumer<byte[], byte[]> kafkaConsumer = null;

    while (true) {

        try {
            if (null == kafkaConsumer) {
                kafkaConsumer = KafkaUtils.createNewKafkaConsumer(brokersInfo, group);
            }

            Map<String, List<PartitionInfo>> topicPartitionsMap = kafkaConsumer.listTopics();
            for (List<PartitionInfo> partitionInfoList : topicPartitionsMap.values()) {
                for (PartitionInfo partitionInfo : partitionInfoList) {
                    TopicPartition topicPartition = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
                    Collection<TopicPartition> topicPartitions = Arrays.asList(topicPartition);
                    kafkaConsumer.assign(topicPartitions);
                    kafkaConsumer.seekToEnd(topicPartitions);
                    Long logEndOffset = kafkaConsumer.position(topicPartition);
                    logEndOffsetMap.put(topicPartition, logEndOffset);
                }
            }

            Thread.sleep(sleepTime);

        } catch (Exception e) {
            e.printStackTrace();
            if (null != kafkaConsumer) {
                kafkaConsumer.close();
                kafkaConsumer = null;
            }
        }
    }

}
 
Example 9
Source File: KafkaSource.java    From siddhi-io-kafka with Apache License 2.0
private void checkPartitionsAvailableForTheTopicsInCluster() {
    //checking whether the defined partitions are available in the defined topic
    Properties configProperties = createProducerConfig(bootstrapServers, optionalConfigs, isBinaryMessage);
    org.apache.kafka.clients.producer.Producer<?, ?> producer = new KafkaProducer<>(configProperties);
    boolean partitionsAvailable = true;
    StringBuilder invalidPartitions = new StringBuilder("");
    for (String topic : topics) {
        List<PartitionInfo> partitionInfos = producer.partitionsFor(topic);
        if (null != partitions && !(partitions.length == 1 && partitions[0].equals("0"))) {
            for (String partition : partitions) {
                boolean partitionAvailable = false;
                for (PartitionInfo partitionInfo : partitionInfos) {
                    if (Integer.parseInt(partition) == partitionInfo.partition()) {
                        partitionAvailable = true;
                    }
                }
                if (!partitionAvailable) {
                    partitionsAvailable = false;
                    if ("".equals(invalidPartitions.toString())) {
                        invalidPartitions.append(partition);
                    } else {
                        invalidPartitions.append(',').append(partition);
                    }
                    LOG.error("Partition number, " + partition
                            + " in 'partition.id' is not available in topic partitions");
                }
            }
            if (!partitionsAvailable) {
                throw new SiddhiAppRuntimeException(
                        "Partition number(s) " + invalidPartitions + " aren't available for "
                                + "the topic: " + topic);
            }
        }
    }
}
 
Example 10
Source File: NewApiTopicConsumer.java    From azeroth with Apache License 2.0
/**
 * Reset offsets to the last processed positions
 */
private void resetCorrectOffsets() {
    consumer.pause(consumer.assignment());
    Map<String, List<PartitionInfo>> topicInfos = consumer.listTopics();
    Set<String> topics = topicInfos.keySet();

    List<String> expectTopics = new ArrayList<>(topicHandlers.keySet());

    List<PartitionInfo> partitions = null;
    for (String topic : topics) {
        if (!expectTopics.contains(topic))
            continue;

        partitions = topicInfos.get(topic);
        for (PartitionInfo partition : partitions) {
            try {
                // the expected offset, i.e. the latest offset already processed
                long expectOffsets = consumerContext.getLatestProcessedOffsets(topic,
                    partition.partition());
                TopicPartition topicPartition = new TopicPartition(topic,
                    partition.partition());
                OffsetAndMetadata metadata = consumer.committed(topicPartition);
                // committed() returns null when no offset has been committed yet
                if (expectOffsets >= 0 && metadata != null) {
                    if (expectOffsets < metadata.offset()) {
                        consumer.seek(topicPartition, expectOffsets);
                        logger.info("seek Topic[{}] partition[{}] from {} to {}", topic,
                            partition.partition(), metadata.offset(), expectOffsets);
                    }
                }
            } catch (Exception e) {
                logger.warn("try seek topic[" + topic + "] partition[" + partition.partition()
                            + "] offsets error", e);
            }
        }
    }
    consumer.resume(consumer.assignment());
}
 
Example 11
Source File: KafkaMessageChannelBinder.java    From spring-cloud-stream-binder-kafka with Apache License 2.0
public Collection<PartitionInfo> processTopic(final String group,
		final ExtendedConsumerProperties<KafkaConsumerProperties> extendedConsumerProperties,
		final ConsumerFactory<?, ?> consumerFactory, int partitionCount,
		boolean usingPatterns, boolean groupManagement, String topic) {
	Collection<PartitionInfo> listenedPartitions;
	Collection<PartitionInfo> allPartitions = usingPatterns ? Collections.emptyList()
			: getPartitionInfo(topic, extendedConsumerProperties, consumerFactory,
					partitionCount);

	if (groupManagement || extendedConsumerProperties.getInstanceCount() == 1) {
		listenedPartitions = allPartitions;
	}
	else {
		listenedPartitions = new ArrayList<>();
		for (PartitionInfo partition : allPartitions) {
			// divide partitions across modules
			if ((partition.partition() % extendedConsumerProperties
					.getInstanceCount()) == extendedConsumerProperties
							.getInstanceIndex()) {
				listenedPartitions.add(partition);
			}
		}
	}
	this.topicsInUse.put(topic,
			new TopicInformation(group, listenedPartitions, usingPatterns));
	return listenedPartitions;
}
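To make the modulo split concrete: with instanceCount = 2, instance 0 listens to the even-numbered partitions and instance 1 to the odd-numbered ones. The standalone sketch below mirrors the partition() % instanceCount == instanceIndex selection above; the method name and signature are illustrative, not part of the binder.

// Illustrative only: same selection rule as the loop in processTopic above.
static List<Integer> partitionsForInstance(int partitionCount, int instanceCount, int instanceIndex) {
	List<Integer> mine = new ArrayList<>();
	for (int p = 0; p < partitionCount; p++) {
		if (p % instanceCount == instanceIndex) {
			mine.add(p);
		}
	}
	return mine;
}
// partitionsForInstance(8, 2, 0) -> [0, 2, 4, 6]
// partitionsForInstance(8, 2, 1) -> [1, 3, 5, 7]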
 
Example 12
Source File: NewApiTopicConsumer.java    From jeesuite-libs with Apache License 2.0
/**
 * Reset offsets to the last processed positions
 */
private void resetCorrectOffsets(ConsumerWorker worker) {	
	
	KafkaConsumer<String, Serializable> consumer = worker.consumer;
	Map<String, List<PartitionInfo>> topicInfos = consumer.listTopics();
	Set<String> topics = topicInfos.keySet();
	
	List<String> expectTopics = new ArrayList<>(topicHandlers.keySet());
	
	List<PartitionInfo> partitions = null;
	
	consumer.poll(200);
	
	for (String topic : topics) {
		if (!expectTopics.contains(topic)) continue;
		
		partitions = topicInfos.get(topic);
		for (PartitionInfo partition : partitions) {
			try {
				// the expected offset, i.e. the latest offset already processed
				long expectOffsets = consumerContext.getLatestProcessedOffsets(topic, partition.partition());
				TopicPartition topicPartition = new TopicPartition(partition.topic(), partition.partition());
				OffsetAndMetadata metadata = consumer.committed(topicPartition);
				
				Set<TopicPartition> assignment = consumer.assignment();
				// committed() returns null when no offset has been committed yet
				if (assignment.contains(topicPartition) && metadata != null) {
					if (expectOffsets > 0 && expectOffsets < metadata.offset()) {
						consumer.seek(topicPartition, expectOffsets);
						//consumer.seekToBeginning(assignment);
						logger.info(">>>>>>> seek Topic[{}] partition[{}] from {} to {}", topic, partition.partition(), metadata.offset(), expectOffsets);
					}
				}
			} catch (Exception e) {
				logger.warn("try seek topic[" + topic + "] partition[" + partition.partition() + "] offsets error", e);
			}
		}
	}
	consumer.resume(consumer.assignment());
}
 
Example 13
Source File: Kafka0_10ConsumerLoader.java    From datacollector with Apache License 2.0
private boolean firstConnection(String topic, KafkaConsumer kafkaAuxiliaryConsumer) throws StageException {
  LOG.debug("Checking first connection for Topic {}", topic);
  if (topic != null && !topic.isEmpty()) {
    List<PartitionInfo> partitionInfoList = kafkaAuxiliaryConsumer.partitionsFor(topic);
    for (PartitionInfo partitionInfo : partitionInfoList) {
      if (partitionInfo != null) {
        TopicPartition topicPartition = new TopicPartition(topic, partitionInfo.partition());
        try {
          OffsetAndMetadata offsetAndMetadata = kafkaAuxiliaryConsumer.committed(topicPartition);
          if (offsetAndMetadata != null) {
            // Already defined offset for that partition
            LOG.debug("Offset defined for Topic {} , partition {}", topic, topicPartition.partition());
            kafkaAuxiliaryConsumer.close();
            return false;
          }
        } catch (Exception ex) {
          // Could not obtain committed offset for corresponding partition
          LOG.error(KafkaErrors.KAFKA_30.getMessage(), ex.toString(), ex);
          throw new StageException(KafkaErrors.KAFKA_30, ex.toString(), ex);
        }

      }
    }
  }

  // There was no offset already defined for any partition so it is the first connection
  return true;
}
 
Example 14
Source File: KafkaConsumerEvent.java    From DBus with Apache License 2.0
public KafkaConsumerEvent(String topic) {
    super(0L);
    this.topic = topic;
    Properties props = HeartBeatConfigContainer.getInstance().getKafkaConsumerConfig();
    Properties producerProps = HeartBeatConfigContainer.getInstance().getKafkaProducerConfig();
    try {
        if (KafkaUtil.checkSecurity()) {
            props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
            producerProps.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
        }

        dataConsumer = new KafkaConsumer<>(props);
        assignTopics = new ArrayList<>();
        for (PartitionInfo pif : dataConsumer.partitionsFor(this.topic)) {
            TopicPartition tp = new TopicPartition(pif.topic(), pif.partition());
            assignTopics.add(tp);
        }

        dataConsumer.assign(assignTopics);
        KafkaConsumerContainer.getInstances().putConsumer(this.topic, dataConsumer);
        statProducer = new KafkaProducer<>(producerProps);
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
    startTime = System.currentTimeMillis();
}
 
Example 15
Source File: PartitionState.java    From cruise-control with BSD 2-Clause "Simplified" License
public PartitionState(PartitionInfo partitionInfo) {
  _topic = partitionInfo.topic();
  _partition = partitionInfo.partition();
  _leader = partitionInfo.leader() == null ? -1 : partitionInfo.leader().id();
  _replicas = Arrays.stream(partitionInfo.replicas()).map(Node::id).collect(Collectors.toList());
  _inSyncReplicas = Arrays.stream(partitionInfo.inSyncReplicas()).map(Node::id).collect(Collectors.toList());
  _outOfSyncReplicas = new HashSet<>(_replicas);
  _outOfSyncReplicas.removeAll(_inSyncReplicas);
  _offlineReplicas = Arrays.stream(partitionInfo.offlineReplicas()).map(Node::id).collect(Collectors.toSet());
}
 
Example 16
Source File: KafkaSplitManager.java    From presto with Apache License 2.0
private static TopicPartition toTopicPartition(PartitionInfo partitionInfo)
{
    return new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
}
 
Example 17
Source File: KafkaClusterManager.java    From doctorkafka with Apache License 2.0
/**
 * Call the kafka api to get the list of under-replicated partitions.
 * When a topic partition loses all of its replicas, it will not have a leader broker.
 * We need to handle this special case in detecting under replicated topic partitions.
 */
public static List<PartitionInfo> getUnderReplicatedPartitions(
    String zkUrl, SecurityProtocol securityProtocol, Map<String, String> consumerConfigs,
    List<String> topics,
    scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> partitionAssignments,
    Map<String, Integer> replicationFactors,
    Map<String, Integer> partitionCounts) {
  List<PartitionInfo> underReplicated = new ArrayList<>();
  KafkaConsumer kafkaConsumer = KafkaUtils.getKafkaConsumer(zkUrl, securityProtocol, consumerConfigs);
  for (String topic : topics) {
    List<PartitionInfo> partitionInfoList = kafkaConsumer.partitionsFor(topic);
    if (partitionInfoList == null) {
      LOG.error("Failed to get partition info for {}", topic);
      continue;
    }
    int numPartitions = partitionCounts.get(topic);

    // when a partition loses all replicas and does not have a live leader,
    // kafkaconsumer.partitionsFor(...) will not return info for that partition.
    // the noLeaderFlag array is used to detect partitions that have no leaders
    boolean[] noLeaderFlags = new boolean[numPartitions];
    Arrays.fill(noLeaderFlags, true);
    for (PartitionInfo info : partitionInfoList) {
      if (info.inSyncReplicas().length < info.replicas().length &&
          replicationFactors.get(info.topic()) > info.inSyncReplicas().length) {
        underReplicated.add(info);
      }
      noLeaderFlags[info.partition()] = false;
    }

    // deal with the partitions that do not have leaders
    for (int partitionId = 0; partitionId < numPartitions; partitionId++) {
      if (noLeaderFlags[partitionId]) {
        Seq<Object> seq = partitionAssignments.get(topic).get().get(partitionId).get();
        Node[] nodes = JavaConverters.seqAsJavaList(seq).stream()
            .map(val -> new Node((Integer) val, "", -1)).toArray(Node[]::new);
        PartitionInfo partitionInfo =
            new PartitionInfo(topic, partitionId, null, nodes, new Node[0]);
        underReplicated.add(partitionInfo);
      }
    }
  }
  return underReplicated;
}
 
Example 18
Source File: OutOfSyncReplica.java    From doctorkafka with Apache License 2.0
public OutOfSyncReplica(PartitionInfo partitionInfo) {
  this.topicPartition = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
  this.inSyncBrokers = getInSyncReplicas(partitionInfo);
  this.outOfSyncBrokers = getOutOfSyncReplicas(partitionInfo);
  this.leader = partitionInfo.leader();
}
 
Example 19
Source File: ProjectTableService.java    From DBus with Apache License 2.0
public List<Map<String, String>> getTopicOffsets(String topic) {
    KafkaConsumer<String, String> consumer = null;
    try {
        Properties consumerProps = zkService.getProperties(KeeperConstants.KEEPER_CONSUMER_CONF);
        consumerProps.setProperty("client.id", "");
        consumerProps.setProperty("group.id", "topic.offsets.reader.temp");
        Properties globalConf = zkService.getProperties(KeeperConstants.GLOBAL_CONF);
        consumerProps.setProperty(GLOBAL_CONF_KEY_BOOTSTRAP_SERVERS, globalConf.getProperty(GLOBAL_CONF_KEY_BOOTSTRAP_SERVERS));
        if (StringUtils.equals(SecurityConfProvider.getSecurityConf(zkService), Constants.SECURITY_CONFIG_TRUE_VALUE)) {
            consumerProps.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
        }
        List<Map<String, String>> topicMsg = new ArrayList<>();
        // create a new consumer
        consumer = new KafkaConsumer<String, String>(consumerProps);
        /*// subscribe to the topic (must subscribe to all partitions, otherwise
        // "You can only check the position for partitions assigned to this consumer." is thrown)
        consumer.subscribe(Arrays.asList(topic));*/
        // fetch the partition list of the topic
        List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);

        // collect offset information for each partition
        for (PartitionInfo partitionInfo : partitionInfos) {
            int partition = partitionInfo.partition();
            TopicPartition topicPartition = new TopicPartition(topic, partition);
            consumer.assign(Arrays.asList(topicPartition));

            consumer.seekToEnd(consumer.assignment());
            // the next fetch position
            long nextFetchOffset = consumer.position(topicPartition);

            consumer.seekToBeginning(consumer.assignment());
            long headOffset = consumer.position(topicPartition);

            Map<String, String> partitionMsg = new HashMap<>();
            partitionMsg.put("topic", topic);
            partitionMsg.put("partition", String.valueOf(partition));
            partitionMsg.put("latestOffset", String.valueOf(nextFetchOffset));
            partitionMsg.put("headOffset", String.valueOf(headOffset));
            topicMsg.add(partitionMsg);

        }

        return topicMsg;
    } catch (Exception e) {
        logger.error("[table topic offset] Error encountered while getting topic messages. topic:{}", topic);
        return null;
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
}