kafka.common.ErrorMapping Java Examples

The following examples show how to use kafka.common.ErrorMapping. They are extracted from open-source projects; the source file, originating project, and license are noted above each example.
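Most of the examples below follow the same pattern: a legacy Scala-client response (fetch, offset, or metadata) exposes a short error code, which the caller compares against the ErrorMapping constants and, when unrecoverable, turns back into an exception via ErrorMapping.exceptionFor or ErrorMapping.maybeThrowException. The minimal sketch below illustrates that pattern, assuming a Kafka 0.8/0.9-era client on the classpath; the checkErrorCode helper and its recovery choices are illustrative only and are not taken from any of the projects listed here.

import kafka.common.ErrorMapping;

public class ErrorMappingUsageSketch {

    /**
     * Hypothetical helper: interpret a short error code returned by a legacy
     * response, e.g. FetchResponse.errorCode(topic, partition).
     */
    static void checkErrorCode(short errorCode) {
        if (errorCode == ErrorMapping.NoError()) {
            return; // success, nothing to handle
        }
        if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
            // callers typically reset to the earliest or latest offset here
            return;
        }
        // any other code: rethrow it as the exception it maps to
        ErrorMapping.maybeThrowException(errorCode);
    }
}
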
Example #1
Source File: PulsarKafkaSimpleConsumer.java    From pulsar with Apache License 2.0
/**
 * <pre>
 * Overridden method: OffsetCommitResponse commitOffsets(OffsetCommitRequest request)
 * 
 * Note:
 * PulsarOffsetCommitResponse is created because OffsetCommitRequest doesn't provide getters
 * 
 * </pre>
 */
public OffsetCommitResponse commitOffsets(PulsarOffsetCommitRequest request) {

    PulsarOffsetCommitResponse response = new PulsarOffsetCommitResponse(null);
    for (Entry<String, MessageId> topicOffset : request.getTopicOffsetMap().entrySet()) {
        final String topic = topicOffset.getKey();
        final String groupId = request.getGroupId();
        try {
            Consumer<byte[]> consumer = getConsumer(topic, groupId);
            consumer.acknowledgeCumulative(topicOffset.getValue());
        } catch (Exception e) {
            log.warn("Failed to ack message for topic {}-{}", topic, topicOffset.getValue(), e);
            response.hasError = true;
            TopicAndPartition topicPartition = new TopicAndPartition(topic, 0);
            response.errors.computeIfAbsent(topicPartition, tp -> ErrorMapping.UnknownCode());
        }
    }

    return response;
}
 
Example #2
Source File: KafkaSimpleConsumer.java    From julongchain with Apache License 2.0
/**
 * Fetch the current offset for this consumer from the store that holds consumer offset positions
 *
 * @param consumer    the consumer
 * @param groupId     Group Id
 * @param clientName  client name
 * @param topic       topic name
 * @param partitionID partition id
 * @return the committed offset, or 0 if it could not be fetched
 */
public long getOffsetOfTopicAndPartition(SimpleConsumer consumer, String groupId, String clientName, String topic, int partitionID) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionID);
    List<TopicAndPartition> requestInfo = new ArrayList<TopicAndPartition>();
    requestInfo.add(topicAndPartition);
    OffsetFetchRequest request = new OffsetFetchRequest(groupId, requestInfo, 0, clientName);
    OffsetFetchResponse response = consumer.fetchOffsets(request);

    // Read the fetched offsets from the response
    Map<TopicAndPartition, OffsetMetadataAndError> returnOffsetMetadata = response.offsets();
    // Process the response
    if (returnOffsetMetadata != null && !returnOffsetMetadata.isEmpty()) {
        // Look up the offset metadata for the current partition
        OffsetMetadataAndError offset = returnOffsetMetadata.get(topicAndPartition);
        if (offset.error().equals(ErrorMapping.NoError())) {
            // No error: return the committed offset
            return offset.offset();
        } else {
            // When the consumer connects for the first time (no data for this topic in ZooKeeper yet),
            // an UnknownTopicOrPartitionCode error is produced
            System.out.println("Error fetching offset data for the topic and partition. Reason: " + offset.error());
        }
    }

    // On any error, fall back to offset 0
    return 0;
}
 
Example #3
Source File: KafkaRecordReader.java    From kangaroo with Apache License 2.0
/**
 * THIS METHOD HAS SIDE EFFECTS - it will update {@code currentMessageItr} (if necessary) and then return true iff
 * the iterator still has elements to be read. If you call {@link scala.collection.Iterator#next()} when this method
 * returns false, you risk a {@link NullPointerException} OR a no-more-elements exception.
 * 
 * @return true if you can call {@link scala.collection.Iterator#next()} on {@code currentMessageItr}.
 */
@VisibleForTesting
boolean continueItr() {
    final long remaining = end - currentOffset;
    if (!canCallNext() && remaining > 0) {
        final int theFetchSize = (fetchSize > remaining) ? (int) remaining : fetchSize;
        LOG.debug(String.format("%s fetching %d bytes starting at offset %d", split.toString(), theFetchSize,
                currentOffset));
        final FetchRequest request = new FetchRequest(split.getPartition().getTopic(), split.getPartition()
                .getPartId(), currentOffset, theFetchSize);
        final ByteBufferMessageSet msg = consumer.fetch(request);
        final int errorCode = msg.getErrorCode();
        if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
            return false;
        }
        if (errorCode != ErrorMapping.NoError()) {
            ErrorMapping.maybeThrowException(errorCode);
        } // --> else we try to grab the next iterator
        currentMessageItr = msg.iterator();
        currentOffset += msg.validBytes();
    }
    return canCallNext();
}
 
Example #4
Source File: CuratorKafkaMonitor.java    From Kafdrop with Apache License 2.0
private Map<String, TopicVO> getTopicMetadata(BlockingChannel channel, String... topics)
{
   final TopicMetadataRequest request =
      new TopicMetadataRequest((short) 0, 0, clientId(), Arrays.asList(topics));

   LOG.debug("Sending topic metadata request: {}", request);

   channel.send(request);
   final kafka.api.TopicMetadataResponse underlyingResponse =
      kafka.api.TopicMetadataResponse.readFrom(channel.receive().buffer());

   LOG.debug("Received topic metadata response: {}", underlyingResponse);

   TopicMetadataResponse response = new TopicMetadataResponse(underlyingResponse);
   return response.topicsMetadata().stream()
      .filter(tmd -> tmd.errorCode() == ErrorMapping.NoError())
      .map(this::processTopicMetadata)
      .collect(Collectors.toMap(TopicVO::getName, t -> t));
}
 
Example #5
Source File: SimpleKafkaConsumer.java    From twill with Apache License 2.0
/**
 * Retrieves the last offset before the given timestamp for a given topic partition.
 *
 * @return The last offset before the given timestamp or {@code 0} if failed to do so.
 */
private long getLastOffset(TopicPartition topicPart, long timestamp) {
  BrokerInfo brokerInfo = brokerService.getLeader(topicPart.getTopic(), topicPart.getPartition());
  SimpleConsumer consumer = brokerInfo == null ? null : consumers.getUnchecked(brokerInfo);

  // If no broker, treat it as failure attempt.
  if (consumer == null) {
    LOG.warn("Failed to talk to any broker. Default offset to 0 for {}", topicPart);
    return 0L;
  }

  // Fire offset request
  OffsetRequest request = new OffsetRequest(ImmutableMap.of(
    new TopicAndPartition(topicPart.getTopic(), topicPart.getPartition()),
    new PartitionOffsetRequestInfo(timestamp, 1)
  ), kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());

  OffsetResponse response = consumer.getOffsetsBefore(request);

  // Retrieve offsets from response
  long[] offsets = response.hasError() ? null : response.offsets(topicPart.getTopic(), topicPart.getPartition());
  if (offsets == null || offsets.length <= 0) {
    short errorCode = response.errorCode(topicPart.getTopic(), topicPart.getPartition());

    // If the topic partition doesn't exist, use offset 0 without logging an error.
    if (errorCode != ErrorMapping.UnknownTopicOrPartitionCode()) {
      consumers.refresh(brokerInfo);
      LOG.warn("Failed to fetch offset for {} with timestamp {}. Error: {}. Default offset to 0.",
               topicPart, timestamp, errorCode);
    }
    return 0L;
  }

  LOG.debug("Offset {} fetched for {} with timestamp {}.", offsets[0], topicPart, timestamp);
  return offsets[0];
}
 
Example #6
Source File: KafkaValidationUtil08.java    From datacollector with Apache License 2.0
@Override
public int getPartitionCount(
    String metadataBrokerList,
    String topic,
    Map<String, Object> kafkaClientConfigs,
    int messageSendMaxRetries,
    long retryBackoffMs
) throws StageException {
  List<HostAndPort> kafkaBrokers = getKafkaBrokers(metadataBrokerList);
  TopicMetadata topicMetadata;
  try {
    topicMetadata = KafkaValidationUtil08.getTopicMetadata(
        kafkaBrokers,
        topic,
        messageSendMaxRetries,
        retryBackoffMs
    );
    if (topicMetadata == null) {
      // Could not get topic metadata from any of the supplied brokers
      throw new StageException(KafkaErrors.KAFKA_03, topic, metadataBrokerList);
    }
    if (topicMetadata.errorCode() == ErrorMapping.UnknownTopicOrPartitionCode()) {
      // Topic does not exist
      throw new StageException(KafkaErrors.KAFKA_04, topic);
    }
    if (topicMetadata.errorCode() != 0) {
      // Topic metadata returned error code other than ErrorMapping.UnknownTopicOrPartitionCode()
      throw new StageException(KafkaErrors.KAFKA_03, topic, metadataBrokerList);
    }
  } catch (IOException e) {
    LOG.error(KafkaErrors.KAFKA_11.getMessage(), topic, kafkaBrokers, e.toString(), e);
    throw new StageException(KafkaErrors.KAFKA_11, topic, kafkaBrokers, e.toString());
  }
  return topicMetadata.partitionsMetadata().size();
}
 
Example #7
Source File: KafkaSimpleConsumer.java    From Pistachio with Apache License 2.0
public long getLastOffset() throws InterruptedException {
    OffsetResponse response = null;
    Broker previousLeader = leaderBroker;
    while (true) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionId);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
        kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
                requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientId);

        ensureConsumer(previousLeader);
        try {
            response = consumer.getOffsetsBefore(request);
        } catch (Exception e) {
            // e could be an instance of ClosedByInterruptException as SimpleConsumer.fetch uses nio
            if (Thread.interrupted()) {
                logger.info("catch exception of {} with interrupted in getLastOffset for {} - {}",
                        e.getClass().getName(), topic, partitionId);

                throw new InterruptedException();
            }
            logger.warn("caughte exception in getLastOffset {} - {}", topic, partitionId, e);
            response = null;
        }
        if (response == null || response.hasError()) {
            short errorCode = response != null ? response.errorCode(topic, partitionId) : ErrorMapping.UnknownCode();

            logger.warn("Error fetching data Offset for {} - {}, the Broker. Reason: {}",
                    topic, partitionId, errorCode);

            stopConsumer();
            previousLeader = leaderBroker;
            leaderBroker = null;
            continue;
        }
        break;
    }
    long[] offsets = response.offsets(topic, partitionId);
    return offsets[offsets.length - 1];
}
 
Example #8
Source File: KafkaMessageReceiverImpl.java    From message-queue-client-framework with Apache License 2.0
/**
 * Check the leader.
 *
 * @param a_topic       topic name
 * @param a_partition   partition number
 * @param a_beginOffset begin offset
 * @return boolean
 */
private boolean checkLeader(String a_topic, int a_partition,
                            long a_beginOffset) {

    if (checkConsumer(a_topic, a_partition)) {

        FetchRequest req = new FetchRequestBuilder()
                .clientId(pool.getClientId())
                .addFetch(a_topic, a_partition, a_beginOffset,
                        KafkaConstants.FETCH_SIZE).build();
        fetchResponse = consumer.get().fetch(req);
        String leadHost = metadata.leader().host();

        if (fetchResponse.hasError()) {

            // Something went wrong!
            short code = fetchResponse.errorCode(a_topic, a_partition);
            logger.error("Error fetching data from the Broker:" + leadHost
                    + " Reason: " + code);

            if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                // We asked for an invalid offset. For simple case ask for
                // the last element to reset
                a_beginOffset = getLatestOffset(a_topic, a_partition);
            }
            consumer.get().close();
            consumer.set(null);

            try {
                metadata = findNewLeader(leadHost, a_topic, a_partition);
            } catch (MQException e) {
                logger.error("Find new leader failed.", e);
            }
            return false;
        }

        return true;
    }
    return false;
}
 
Example #9
Source File: CuratorKafkaMonitor.java    From Kafdrop with Apache License 2.0
private Integer offsetManagerBroker(BlockingChannel channel, String groupId)
{
   final ConsumerMetadataRequest request =
      new ConsumerMetadataRequest(groupId, (short) 0, 0, clientId());

   LOG.debug("Sending consumer metadata request: {}", request);

   channel.send(request);
   ConsumerMetadataResponse response =
      ConsumerMetadataResponse.readFrom(channel.receive().buffer());

   LOG.debug("Received consumer metadata response: {}", response);

   return (response.errorCode() == ErrorMapping.NoError()) ? response.coordinator().id() : null;
}
 
Example #10
Source File: CuratorKafkaMonitor.java    From Kafdrop with Apache License 2.0
/**
 * Returns the map of partitionId to consumer offset for the given group and
 * topic. Uses the given blocking channel to execute the offset fetch request.
 *
 * @param channel          The channel to send requests on
 * @param groupId          Consumer group to use
 * @param topic            Topic to query
 * @param zookeeperOffsets If true, use a version of the API that retrieves
 *                         offsets from Zookeeper. Otherwise use a version
 *                         that pulls the offsets from Kafka itself.
 * @return Map where the key is partitionId and the value is the consumer
 * offset for that partition.
 */
private Map<Integer, Long> getConsumerOffsets(BlockingChannel channel,
                                              String groupId,
                                              TopicVO topic,
                                              boolean zookeeperOffsets)
{

   final OffsetFetchRequest request = new OffsetFetchRequest(
      groupId,
      topic.getPartitions().stream()
         .map(p -> new TopicAndPartition(topic.getName(), p.getId()))
         .collect(Collectors.toList()),
      (short) (zookeeperOffsets ? 0 : kafka.api.OffsetFetchRequest.CurrentVersion()), // version 0 = zookeeper offsets, 1+ = kafka offsets
      0, clientId());

   LOG.debug("Sending consumer offset request: {}", request);

   channel.send(request.underlying());

   final kafka.api.OffsetFetchResponse underlyingResponse =
      kafka.api.OffsetFetchResponse.readFrom(channel.receive().buffer());

   LOG.debug("Received consumer offset response: {}", underlyingResponse);

   OffsetFetchResponse response = new OffsetFetchResponse(underlyingResponse);

   return response.offsets().entrySet().stream()
      .filter(entry -> entry.getValue().error() == ErrorMapping.NoError())
      .collect(Collectors.toMap(entry -> entry.getKey().partition(), entry -> entry.getValue().offset()));
}
 
Example #11
Source File: KafkaRecordReaderTest.java    From kangaroo with Apache License 2.0
@Test(expected = Exception.class)
public void testContinueItrException() throws Exception {
    doReturn(mockConsumer).when(reader).getConsumer(split, conf);
    reader.initialize(split, context);
    when(mockConsumer.fetch(any(FetchRequest.class))).thenReturn(mockMessage);
    when(mockMessage.getErrorCode()).thenReturn(ErrorMapping.InvalidFetchSizeCode());
    reader.continueItr();
    fail();
}
 
Example #12
Source File: KafkaRecordReaderTest.java    From kangaroo with Apache License 2.0
@Test
public void testContinueItrOffsetOutOfRange() throws Exception {
    doReturn(mockConsumer).when(reader).getConsumer(split, conf);
    reader.initialize(split, context);
    when(mockConsumer.fetch(any(FetchRequest.class))).thenReturn(mockMessage);
    when(mockMessage.getErrorCode()).thenReturn(ErrorMapping.OffsetOutOfRangeCode());
    assertFalse("Should be done with split!", reader.continueItr());
}
 
Example #13
Source File: KafkaLeaderReader.java    From arcusplatform with Apache License 2.0
private int dispatch(FetchResponse response) {
   int numDispatched = 0;
   for(TopicAndPartition tap: new ArrayList<>(offsets.keySet())) {
      short errorCode = response.errorCode(tap.topic(), tap.partition());
      if(errorCode != 0) {
         logger.warn("Error reading from topic: [{}] partition: [{}]", tap.topic(), tap.partition(), ErrorMapping.exceptionFor(errorCode));
         continue;
      }

      ByteBufferMessageSet message = response.messageSet(tap.topic(), tap.partition());
      for(MessageAndOffset mao: message) {
         Long offset = offsets.get(tap);
         if(offset != null && offset > mao.offset()) {
            // skip older offsets
            continue;
         }
         KafkaConsumer handler = handlers.computeIfAbsent(tap, handlerFactory);
         if(handler == null) {
            logger.debug("No handler for topic: [{}] partition: [{}], this partition won't be processed", tap.topic(), tap.partition());
            offsets.remove(tap);
            handlers.remove(tap);
            break;
         }
         if(handler.apply(tap, mao.message())) {
            numDispatched++;
            offsets.put(tap, mao.nextOffset());
         }
         else {
            logger.debug("Done processing topic: [{}] partition: [{}]", tap.topic(), tap.partition());
            offsets.remove(tap);
            handlers.remove(tap);
            break;
         }
      }
   }

   return numDispatched;
}
 
Example #14
Source File: KafkaRecordReaderTest.java    From kangaroo with Apache License 2.0
@Test
public void testContinueItr() throws Exception {
    doReturn(mockConsumer).when(reader).getConsumer(split, conf);
    // unfortunately, FetchRequest does not implement equals, so we have to do any(), and validate with answer
    when(mockConsumer.fetch(any(FetchRequest.class))).thenAnswer(new Answer<ByteBufferMessageSet>() {
        @Override
        public ByteBufferMessageSet answer(final InvocationOnMock invocation) throws Throwable {
            final FetchRequest request = (FetchRequest) invocation.getArguments()[0];
            assertEquals("topic", request.topic());
            assertEquals(0, request.partition());
            assertEquals(0, request.offset());
            assertEquals(100, request.maxSize());
            return mockMessage;
        }
    });
    when(mockMessage.getErrorCode()).thenReturn(ErrorMapping.NoError());
    when(mockMessage.iterator()).thenReturn(mockIterator);
    when(mockMessage.validBytes()).thenReturn(100L);
    when(mockIterator.hasNext()).thenReturn(true);
    reader.initialize(split, context);

    assertTrue("Should be able to continue iterator!", reader.continueItr());
    assertEquals(mockIterator, reader.getCurrentMessageItr());
    assertEquals(100, reader.getCurrentOffset());

    when(mockIterator.hasNext()).thenReturn(false);
    assertFalse("Should be done with split!", reader.continueItr());
    // call it again just for giggles
    assertFalse("Should be done with split!", reader.continueItr());
}
 
Example #15
Source File: KafkaRecordReaderTest.java    From kangaroo with Apache License 2.0
@Test
@SuppressWarnings("unchecked")
public void testContinueItrMultipleIterations() throws Exception {
    // init split
    doReturn(mockConsumer).when(reader).getConsumer(split, conf);
    split.setEndOffset(4097);
    reader.initialize(split, context);

    // first iteration
    final Iterator<MessageAndOffset> mockIterator1 = mock(Iterator.class);
    when(mockConsumer.fetch(any(FetchRequest.class))).thenReturn(mockMessage);
    when(mockMessage.getErrorCode()).thenReturn(ErrorMapping.NoError());
    when(mockMessage.iterator()).thenReturn(mockIterator1);
    when(mockMessage.validBytes()).thenReturn(2048L);
    when(mockIterator1.hasNext()).thenReturn(true);

    assertTrue("Should be able to continue iterator!", reader.continueItr());

    // reset iterator for second iteration
    when(mockIterator1.hasNext()).thenReturn(false);
    final Iterator<MessageAndOffset> mockIterator2 = mock(Iterator.class);
    when(mockMessage.iterator()).thenReturn(mockIterator2);
    when(mockIterator2.hasNext()).thenReturn(true);

    assertTrue("Should be able to continue iterator!", reader.continueItr());

    // reset iterator for third iteration
    when(mockIterator2.hasNext()).thenReturn(false);
    final Iterator<MessageAndOffset> mockIterator3 = mock(Iterator.class);
    when(mockMessage.iterator()).thenReturn(mockIterator3);
    when(mockIterator3.hasNext()).thenReturn(true);
    when(mockMessage.validBytes()).thenReturn(1L);

    assertTrue("Should be able to continue iterator!", reader.continueItr());

    // out of bytes to read
    when(mockIterator3.hasNext()).thenReturn(false);
    assertFalse("Should be done with split!", reader.continueItr());
}
 
Example #16
Source File: KafkaSimpleConsumer.java    From Pistachio with Apache License 2.0
public Iterable<BytesMessageWithOffset> fetch(long offset, int timeoutMs) throws InterruptedException {
    List<BytesMessageWithOffset> newOffsetMsg = new ArrayList<BytesMessageWithOffset>();
    FetchResponse response = null;
    Broker previousLeader = leaderBroker;
    while (true) {
        ensureConsumer(previousLeader);

        if (offset == Long.MAX_VALUE) {
            offset = getOffset(false);
            logger.info("offset max long, fetch from latest in kafka {}", offset);
        }

        FetchRequest request = new FetchRequestBuilder()
                .clientId(clientId)
                .addFetch(topic, partitionId, offset, 100000000)
                .maxWait(timeoutMs)
                .minBytes(1)
                .build();

        //logger.debug("fetch offset {}", offset);

        try {
            response = consumer.fetch(request);
        } catch (Exception e) {
            // e could be an instance of ClosedByInterruptException as SimpleConsumer.fetch uses nio
            if (Thread.interrupted()) {
                logger.info("catch exception of {} with interrupted in fetch for {} - {} with offset {}",
                        e.getClass().getName(), topic, partitionId, offset);

                throw new InterruptedException();
            }
            logger.warn("caughte exception in fetch {} - {}", topic, partitionId, e);
            response = null;
        }

        if (response == null || response.hasError()) {
            short errorCode = response != null ? response.errorCode(topic, partitionId) : ErrorMapping.UnknownCode();
            logger.warn("fetch {} - {} with offset {} encounters error: {}", topic, partitionId, offset, errorCode);

            boolean needNewLeader = false;
            if (errorCode == ErrorMapping.RequestTimedOutCode()) {
                //TODO: leave it here
            } else if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
                //TODO: fetch the earliest offset or latest offset ?
                // seems no obvious correct way to handle it
                long earliestOffset = getOffset(true);
                logger.debug("get earilset offset {} for {} - {}", earliestOffset, topic, partitionId);
                if (earliestOffset < 0) {
                    needNewLeader = true;
                } else {
                    newOffsetMsg.add(new BytesMessageWithOffset(null, earliestOffset));
                    offset = earliestOffset;
                    continue;
                }
            } else {
                needNewLeader = true;
            }

            if (needNewLeader) {
                stopConsumer();
                previousLeader = leaderBroker;
                leaderBroker = null;
                continue;
            }
        } else {
            break;
        }
    }

    return response != null ? filterAndDecode(response.messageSet(topic, partitionId), offset) :
        (newOffsetMsg.size() > 0 ? newOffsetMsg : EMPTY_MSGS);
}
 
Example #17
Source File: KafkaConsumer.java    From jstorm with Apache License 2.0
public ByteBufferMessageSet fetchMessages(int partition, long offset) throws IOException {

        String topic = config.topic;
        FetchRequest req = new FetchRequestBuilder().clientId(config.clientId).addFetch(topic, partition, offset, config.fetchMaxBytes)
                .maxWait(config.fetchWaitMaxMs).build();
        FetchResponse fetchResponse = null;
        SimpleConsumer simpleConsumer = null;
        try {
            simpleConsumer = findLeaderConsumer(partition);
            if (simpleConsumer == null) {
                // LOG.error(message);
                return null;
            }
            fetchResponse = simpleConsumer.fetch(req);
        } catch (Exception e) {
            if (e instanceof ConnectException || e instanceof SocketTimeoutException || e instanceof IOException
                    || e instanceof UnresolvedAddressException) {
                LOG.warn("Network error when fetching messages:", e);
                if (simpleConsumer != null) {
                    String host = simpleConsumer.host();
                    int port = simpleConsumer.port();
                    simpleConsumer = null;
                    throw new KafkaException("Network error when fetching messages: " + host + ":" + port + " , " + e.getMessage(), e);
                }

            } else {
                throw new RuntimeException(e);
            }
        }
        if (fetchResponse.hasError()) {
            short code = fetchResponse.errorCode(topic, partition);
            if (code == ErrorMapping.OffsetOutOfRangeCode() && config.resetOffsetIfOutOfRange) {
                long startOffset = getOffset(topic, partition, config.startOffsetTime);
                offset = startOffset;
            }
            if (leaderBroker != null) {
                LOG.error("fetch data from kafka topic[" + config.topic + "] host[" + leaderBroker.host() + ":" + leaderBroker.port() + "] partition["
                    + partition + "] error:" + code);
            }
            return null;
        } else {
            ByteBufferMessageSet msgs = fetchResponse.messageSet(topic, partition);
            return msgs;
        }
    }
 
Example #18
Source File: Kafka08PartitionDiscoverer.java    From Flink-CEPplus with Apache License 2.0
@Override
protected List<String> getAllTopics() {
	List<String> topics = new LinkedList<>();

	retryLoop: for (int retry = 0; retry < numRetries; retry++) {
		brokersLoop: for (int arrIdx = 0; arrIdx < seedBrokerAddresses.length; arrIdx++) {
			LOG.info("Trying to get topic metadata from broker {} in try {}/{}", seedBrokerAddresses[currentContactSeedBrokerIndex], retry, numRetries);

			try {
				// clear in case we have an incomplete list from previous tries
				topics.clear();

				for (TopicMetadata item : consumer.send(new TopicMetadataRequest(Collections.<String>emptyList())).topicsMetadata()) {
					if (item.errorCode() != ErrorMapping.NoError()) {
						// warn and try more brokers
						LOG.warn("Error while getting metadata from broker {} to find partitions for {}. Error: {}.",
							seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(), ErrorMapping.exceptionFor(item.errorCode()).getMessage());

						useNextAddressAsNewContactSeedBroker();
						continue brokersLoop;
					}

					topics.add(item.topic());
				}
				break retryLoop; // leave the loop through the brokers
			}
			catch (Exception e) {
				//validates seed brokers in case of a ClosedChannelException
				validateSeedBrokers(seedBrokerAddresses, e);
				LOG.warn("Error communicating with broker {} to find partitions for {}. {} Message: {}",
					seedBrokerAddresses[currentContactSeedBrokerIndex], topics, e.getClass().getName(), e.getMessage());
				LOG.debug("Detailed trace", e);

				// we sleep a bit. Retrying immediately doesn't make sense in cases where Kafka is reorganizing the leader metadata
				try {
					Thread.sleep(500);
				} catch (InterruptedException e1) {
					// sleep shorter.
				}

				useNextAddressAsNewContactSeedBroker();
			}
		} // brokers loop
	} // retries loop

	return topics;
}
 
Example #19
Source File: KafkaLowLevelConsumer09.java    From datacollector with Apache License 2.0
@Override
public List<MessageAndOffset> read(long offset) throws StageException {

  FetchRequest req = buildFetchRequest(offset);
  FetchResponse fetchResponse;
  try {
    fetchResponse = consumer.fetch(req);
  } catch (Exception e) {
    if(e instanceof SocketTimeoutException) {
      //If the value of consumer.timeout.ms is set to a positive integer, a timeout exception is thrown to the
      //consumer if no message is available for consumption after the specified timeout value.
      //If this happens exit gracefully
      LOG.warn(KafkaErrors.KAFKA_28.getMessage());
      return Collections.emptyList();
    } else {
      throw new StageException(KafkaErrors.KAFKA_29, e.toString(), e);
    }
  }

  if(fetchResponse.hasError()) {
    short code = fetchResponse.errorCode(topic, partition);
    if(code == ErrorMapping.OffsetOutOfRangeCode()) {
      //invalid offset
      offset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(), clientName);
    } else {
      //try re-initializing connection with kafka
      consumer.close();
      consumer = null;
      leader = findNewLeader(leader, topic, partition);
    }

    //re-fetch
    req = buildFetchRequest(offset);
    fetchResponse = consumer.fetch(req);

    if(fetchResponse.hasError()) {
      //could not fetch the second time, give kafka some time
      LOG.error(KafkaErrors.KAFKA_26.getMessage(), topic, partition, offset);
    }
  }

  List<MessageAndOffset> partitionToPayloadMapArrayList = new ArrayList<>();
  for (kafka.message.MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
    long currentOffset = messageAndOffset.offset();
    if (currentOffset < offset) {
      LOG.warn(KafkaErrors.KAFKA_27.getMessage(), currentOffset, offset);
      continue;
    }
    ByteBuffer payload = messageAndOffset.message().payload();
    final Object key = messageAndOffset.message().key();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    MessageAndOffset partitionToPayloadMap = new MessageAndOffset(
        key,
        bytes,
        messageAndOffset.nextOffset(),
        partition
    );
    partitionToPayloadMapArrayList.add(partitionToPayloadMap);
  }
  return partitionToPayloadMapArrayList;
}
 
Example #20
Source File: KafkaValidationUtil08.java    From datacollector with Apache License 2.0
@Override
public boolean validateTopicExistence(
  Stage.Context context,
  String groupName,
  String configName,
  List<HostAndPort> kafkaBrokers,
  String metadataBrokerList,
  String topic,
  Map<String, Object> kafkaClientConfigs,
  List<Stage.ConfigIssue> issues,
  boolean producer
) {
  boolean valid = true;
  if(topic == null || topic.isEmpty()) {
    issues.add(context.createConfigIssue(groupName, configName, KafkaErrors.KAFKA_05));
    valid = false;
  } else {
    TopicMetadata topicMetadata;
    try {
      topicMetadata = KafkaValidationUtil08.getTopicMetadata(kafkaBrokers, topic, 1, 0);
      if(topicMetadata == null) {
        //Could not get topic metadata from any of the supplied brokers
        issues.add(
            context.createConfigIssue(
                groupName,
                KAFKA_CONFIG_BEAN_PREFIX + "topic",
                KafkaErrors.KAFKA_03,
                topic,
                metadataBrokerList
            )
        );
        valid = false;
      } else if (topicMetadata.errorCode() == ErrorMapping.UnknownTopicOrPartitionCode()) {
        //Topic does not exist
        issues.add(
            context.createConfigIssue(
                groupName,
                KAFKA_CONFIG_BEAN_PREFIX + "topic",
                KafkaErrors.KAFKA_04,
                topic
            )
        );
        valid = false;
      } else if (topicMetadata.errorCode() != 0) {
        // Topic metadata returned error code other than ErrorMapping.UnknownTopicOrPartitionCode()
        issues.add(
            context.createConfigIssue(
                groupName,
                KAFKA_CONFIG_BEAN_PREFIX + "topic",
                KafkaErrors.KAFKA_03,
                topic,
                metadataBrokerList
            )
        );
        valid = false;
      }
    } catch (IOException e) {
      //Could not connect to kafka with the given metadata broker list
      issues.add(
          context.createConfigIssue(
              groupName,
              KAFKA_CONFIG_BEAN_PREFIX + "metadataBrokerList",
              KafkaErrors.KAFKA_67,
              metadataBrokerList
          )
      );
      valid = false;
    }
  }
  return valid;
}
 
Example #21
Source File: KafkaLowLevelConsumer08.java    From datacollector with Apache License 2.0
@Override
public List<MessageAndOffset> read(long offset) throws StageException {

  FetchRequest req = buildFetchRequest(offset);
  FetchResponse fetchResponse;
  try {
    fetchResponse = consumer.fetch(req);
  } catch (Exception e) {
    if(e instanceof SocketTimeoutException) {
      //If the value of consumer.timeout.ms is set to a positive integer, a timeout exception is thrown to the
      //consumer if no message is available for consumption after the specified timeout value.
      //If this happens exit gracefully
      LOG.warn(KafkaErrors.KAFKA_28.getMessage());
      return Collections.emptyList();
    } else {
      throw new StageException(KafkaErrors.KAFKA_29, e.toString(), e);
    }
  }

  if(fetchResponse.hasError()) {
    short code = fetchResponse.errorCode(topic, partition);
    if(code == ErrorMapping.OffsetOutOfRangeCode()) {
      //invalid offset
      offset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(), clientName);
    } else {
      //try re-initializing connection with kafka
      consumer.close();
      consumer = null;
      leader = findNewLeader(leader, topic, partition);
    }

    //re-fetch
    req = buildFetchRequest(offset);
    fetchResponse = consumer.fetch(req);

    if(fetchResponse.hasError()) {
      //could not fetch the second time, give kafka some time
      LOG.error(KafkaErrors.KAFKA_26.getMessage(), topic, partition, offset);
    }
  }

  List<MessageAndOffset> partitionToPayloadMapArrayList = new ArrayList<>();
  for (kafka.message.MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
    long currentOffset = messageAndOffset.offset();
    if (currentOffset < offset) {
      LOG.warn(KafkaErrors.KAFKA_27.getMessage(), currentOffset, offset);
      continue;
    }
    ByteBuffer payload = messageAndOffset.message().payload();
    final Object key = messageAndOffset.message().key();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    MessageAndOffset partitionToPayloadMap = new MessageAndOffset(
        key,
        bytes,
        messageAndOffset.nextOffset(),
        partition
    );
    partitionToPayloadMapArrayList.add(partitionToPayloadMap);
  }
  return partitionToPayloadMapArrayList;
}
 
Example #22
Source File: PulsarOffsetResponse.java    From pulsar with Apache License 2.0
@Override
public short errorCode(String topic, int partition) {
    return ErrorMapping.NoError();
}
 
Example #23
Source File: KafkaBaseInfoService.java    From kafka-monitor with Apache License 2.0
/**
 * Fetch topic metadata
 *
 * @param topics the topic names to query
 * @return a map of topic name to Topic metadata
 */
public Map<String, Topic> getTopicMetadata(String... topics) {

    // Request topic metadata
    kafka.api.TopicMetadataResponse response = ClientUtils.fetchTopicMetadata(JavaConversions.asScalaIterable(Arrays.asList(topics)).toSet(), JavaConversions.asScalaBuffer(getBrokerEndPoints()), "test", 2000, 1);

    // Extract topic information from the metadata
    Map<String, Topic> topicMap = WrapAsJava$.MODULE$.seqAsJavaList(response.topicsMetadata())
            .stream().filter(error -> error.errorCode() == ErrorMapping.NoError())
            .map((temp) -> {
                Topic topic = new Topic(temp.topic());
                topic.setConfig(JSONObject.parseObject(topicConfigCache.getCurrentData(ZkUtils.EntityConfigPath() + "/topics/" + temp.topic()).getData(), Map.class));
                List<PartitionMetadata> pMetadata = WrapAsJava$.MODULE$.seqAsJavaList(temp.partitionsMetadata());
                topic.setPartitionMap(
                        pMetadata.stream()
                                .map((pMta) -> {
                                    // Add partition replica information
                                    Partition partition = new Partition(pMta.partitionId());
                                    BrokerEndPoint leader;
                                    int leaderId = -1;
                                    if (pMta.leader().nonEmpty()) {
                                        leader = pMta.leader().get();
                                        leaderId = leader.id();
                                    }

                                    partition.setIsr(WrapAsJava$.MODULE$.seqAsJavaList(pMta.isr()).stream().mapToInt(i -> i.id()).toArray());


                                    for (BrokerEndPoint replica :
                                            WrapAsJava$.MODULE$.seqAsJavaList(pMta.replicas())) {
                                        boolean isLeader = false;
                                        if (replica.id() == leaderId) {
                                            isLeader = true;
                                        }
                                        partition.addReplica(new PartitionReplica(replica.id(), true, isLeader));
                                    }

                                    partition.setReplicasArray(WrapAsJava$.MODULE$.seqAsJavaList(pMta.replicas()).stream().mapToInt(m -> m.id()).toArray());

                                    if (pMta.replicas().size() > 0) {
                                        // Preferred replica
                                        BrokerEndPoint preferedReplica = WrapAsJava$.MODULE$.seqAsJavaList(pMta.replicas()).get(0);
                                        // The preferred replica is the leader
                                        if (leaderId == preferedReplica.id()) {
                                            partition.setPreferredLeaderId(leaderId);
                                        }
                                    }
                                    return partition;
                                }).collect(Collectors.toMap(Partition::getId, p -> p))
                );
                return topic;
            }).collect(Collectors.toMap(Topic::getName, t -> t));

    return topicMap;
}
 
Example #24
Source File: KafkaSimpleConsumer.java    From julongchain with Apache License 2.0 4 votes vote down vote up
/**
     * Update the offset; when the SimpleConsumer changes, construct and return a new SimpleConsumer
     *
     * @param consumer
     * @param topic
     * @param partitionID
     * @param readOffSet
     * @param groupId
     * @param clientName
     * @param times
     * @return
     * @throws RuntimeException if the update fails
     */
    private SimpleConsumer updateOffset(SimpleConsumer consumer, String topic, int partitionID, long readOffSet, String groupId, String clientName, int times) {
        // Build the request object
        Map<TopicAndPartition, OffsetAndMetadata> requestInfoMap = new HashMap<TopicAndPartition, OffsetAndMetadata>();
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionID);
//        requestInfoMap.put(topicAndPartition, new OffsetAndMetadata(readOffSet, OffsetAndMetadata.NoMetadata(), -1));
        kafka.javaapi.OffsetCommitRequest ocRequest = new OffsetCommitRequest(groupId, requestInfoMap, 0, clientName);
        // Submit the offset commit request and read the response
        kafka.javaapi.OffsetCommitResponse response = consumer.commitOffsets(ocRequest);

        // Act according to the response
        if (response.hasError()) {
            short code = response.errorCode(topicAndPartition);
            if (times > this.maxRetryTimes) {
                throw new RuntimeException("Update the Offset occur exception," +
                        " the current response code is:" + code);
            }

            if (code == ErrorMapping.LeaderNotAvailableCode()) {
                // When the error code indicates a leader change, rebuild the consumer object
                // Steps: sleep for a while, reconstruct the consumer, then retry
                try {
                    Thread.sleep(this.retryIntervalMillis);
                } catch (InterruptedException e) {
                    // ignore
                }
                PartitionMetadata metadata = this.findNewLeaderMetadata(consumer.host(),
                        topic, partitionID);
                this.validatePartitionMetadata(metadata);
                consumer = this.createSimpleConsumer(metadata.leader().host(),
                        metadata.leader().port(), clientName);
                // Retry
                consumer = updateOffset(consumer, topic, partitionID, readOffSet, groupId, clientName, times + 1);
            }

            if (code == ErrorMapping.RequestTimedOutCode()) {
                // If the request timed out, retry the request
                consumer = updateOffset(consumer, topic, partitionID, readOffSet, groupId, clientName, times + 1);
            }

            // Any other code: throw an exception
            throw new RuntimeException("Update the Offset occur exception," +
                    " the current response code is:" + code);
        }

        // Return the (possibly rebuilt) consumer object
        return consumer;
    }
 
Example #25
Source File: Kafka08PartitionDiscoverer.java    From flink with Apache License 2.0
/**
 * Send request to Kafka to get partitions for topics.
 *
 * @param topics The name of the topics.
 */
public List<KafkaTopicPartitionLeader> getPartitionLeadersForTopics(List<String> topics) {
	List<KafkaTopicPartitionLeader> partitions = new LinkedList<>();

	retryLoop: for (int retry = 0; retry < numRetries; retry++) {
		brokersLoop: for (int arrIdx = 0; arrIdx < seedBrokerAddresses.length; arrIdx++) {
			LOG.info("Trying to get topic metadata from broker {} in try {}/{}", seedBrokerAddresses[currentContactSeedBrokerIndex], retry, numRetries);

			try {
				// clear in case we have an incomplete list from previous tries
				partitions.clear();

				for (TopicMetadata item : consumer.send(new TopicMetadataRequest(topics)).topicsMetadata()) {
					if (item.errorCode() != ErrorMapping.NoError()) {
						// warn and try more brokers
						LOG.warn("Error while getting metadata from broker {} to find partitions for {}. Error: {}.",
							seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(), ErrorMapping.exceptionFor(item.errorCode()).getMessage());

						useNextAddressAsNewContactSeedBroker();
						continue brokersLoop;
					}

					if (!topics.contains(item.topic())) {
						LOG.warn("Received metadata from topic " + item.topic() + " even though it was not requested. Skipping ...");

						useNextAddressAsNewContactSeedBroker();
						continue brokersLoop;
					}

					for (PartitionMetadata part : item.partitionsMetadata()) {
						Node leader = brokerToNode(part.leader());
						KafkaTopicPartition ktp = new KafkaTopicPartition(item.topic(), part.partitionId());
						KafkaTopicPartitionLeader pInfo = new KafkaTopicPartitionLeader(ktp, leader);
						partitions.add(pInfo);
					}
				}
				break retryLoop; // leave the loop through the brokers
			}
			catch (Exception e) {
				//validates seed brokers in case of a ClosedChannelException
				validateSeedBrokers(seedBrokerAddresses, e);
				LOG.warn("Error communicating with broker {} to find partitions for {}. {} Message: {}",
					seedBrokerAddresses[currentContactSeedBrokerIndex], topics, e.getClass().getName(), e.getMessage());
				LOG.debug("Detailed trace", e);

				// we sleep a bit. Retrying immediately doesn't make sense in cases where Kafka is reorganizing the leader metadata
				try {
					Thread.sleep(500);
				} catch (InterruptedException e1) {
					// sleep shorter.
				}

				useNextAddressAsNewContactSeedBroker();
			}
		} // brokers loop
	} // retries loop

	return partitions;
}
 
Example #26
Source File: Kafka08PartitionDiscoverer.java    From flink with Apache License 2.0
@Override
protected List<String> getAllTopics() {
	List<String> topics = new LinkedList<>();

	retryLoop: for (int retry = 0; retry < numRetries; retry++) {
		brokersLoop: for (int arrIdx = 0; arrIdx < seedBrokerAddresses.length; arrIdx++) {
			LOG.info("Trying to get topic metadata from broker {} in try {}/{}", seedBrokerAddresses[currentContactSeedBrokerIndex], retry, numRetries);

			try {
				// clear in case we have an incomplete list from previous tries
				topics.clear();

				for (TopicMetadata item : consumer.send(new TopicMetadataRequest(Collections.<String>emptyList())).topicsMetadata()) {
					if (item.errorCode() != ErrorMapping.NoError()) {
						// warn and try more brokers
						LOG.warn("Error while getting metadata from broker {} to find partitions for {}. Error: {}.",
							seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(), ErrorMapping.exceptionFor(item.errorCode()).getMessage());

						useNextAddressAsNewContactSeedBroker();
						continue brokersLoop;
					}

					topics.add(item.topic());
				}
				break retryLoop; // leave the loop through the brokers
			}
			catch (Exception e) {
				//validates seed brokers in case of a ClosedChannelException
				validateSeedBrokers(seedBrokerAddresses, e);
				LOG.warn("Error communicating with broker {} to find partitions for {}. {} Message: {}",
					seedBrokerAddresses[currentContactSeedBrokerIndex], topics, e.getClass().getName(), e.getMessage());
				LOG.debug("Detailed trace", e);

				// we sleep a bit. Retrying immediately doesn't make sense in cases where Kafka is reorganizing the leader metadata
				try {
					Thread.sleep(500);
				} catch (InterruptedException e1) {
					// sleep shorter.
				}

				useNextAddressAsNewContactSeedBroker();
			}
		} // brokers loop
	} // retries loop

	return topics;
}
 
Example #27
Source File: Kafka08PartitionDiscoverer.java    From Flink-CEPplus with Apache License 2.0
/**
 * Send request to Kafka to get partitions for topics.
 *
 * @param topics The name of the topics.
 */
public List<KafkaTopicPartitionLeader> getPartitionLeadersForTopics(List<String> topics) {
	List<KafkaTopicPartitionLeader> partitions = new LinkedList<>();

	retryLoop: for (int retry = 0; retry < numRetries; retry++) {
		brokersLoop: for (int arrIdx = 0; arrIdx < seedBrokerAddresses.length; arrIdx++) {
			LOG.info("Trying to get topic metadata from broker {} in try {}/{}", seedBrokerAddresses[currentContactSeedBrokerIndex], retry, numRetries);

			try {
				// clear in case we have an incomplete list from previous tries
				partitions.clear();

				for (TopicMetadata item : consumer.send(new TopicMetadataRequest(topics)).topicsMetadata()) {
					if (item.errorCode() != ErrorMapping.NoError()) {
						// warn and try more brokers
						LOG.warn("Error while getting metadata from broker {} to find partitions for {}. Error: {}.",
							seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(), ErrorMapping.exceptionFor(item.errorCode()).getMessage());

						useNextAddressAsNewContactSeedBroker();
						continue brokersLoop;
					}

					if (!topics.contains(item.topic())) {
						LOG.warn("Received metadata from topic " + item.topic() + " even though it was not requested. Skipping ...");

						useNextAddressAsNewContactSeedBroker();
						continue brokersLoop;
					}

					for (PartitionMetadata part : item.partitionsMetadata()) {
						Node leader = brokerToNode(part.leader());
						KafkaTopicPartition ktp = new KafkaTopicPartition(item.topic(), part.partitionId());
						KafkaTopicPartitionLeader pInfo = new KafkaTopicPartitionLeader(ktp, leader);
						partitions.add(pInfo);
					}
				}
				break retryLoop; // leave the loop through the brokers
			}
			catch (Exception e) {
				//validates seed brokers in case of a ClosedChannelException
				validateSeedBrokers(seedBrokerAddresses, e);
				LOG.warn("Error communicating with broker {} to find partitions for {}. {} Message: {}",
					seedBrokerAddresses[currentContactSeedBrokerIndex], topics, e.getClass().getName(), e.getMessage());
				LOG.debug("Detailed trace", e);

				// we sleep a bit. Retrying immediately doesn't make sense in cases where Kafka is reorganizing the leader metadata
				try {
					Thread.sleep(500);
				} catch (InterruptedException e1) {
					// sleep shorter.
				}

				useNextAddressAsNewContactSeedBroker();
			}
		} // brokers loop
	} // retries loop

	return partitions;
}