Java Code Examples for kafka.common.ErrorMapping#OffsetOutOfRangeCode

The following examples show how to use kafka.common.ErrorMapping#OffsetOutOfRangeCode. Each example is taken from an open-source project; the project and source file are noted above the code.
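ErrorMapping.OffsetOutOfRangeCode() is the short error code a broker returns when a fetch request names an offset outside the partition's current range. All of the examples below follow the same basic pattern, sketched here (a minimal sketch, assuming fetchResponse, topic, partition and offset already exist; resetOffset is a hypothetical helper):

short code = fetchResponse.errorCode(topic, partition);
if (code == ErrorMapping.OffsetOutOfRangeCode()) {
    // the requested offset is below the log start or beyond the log end,
    // so reset to a valid position instead of failing hard
    offset = resetOffset(topic, partition); // hypothetical reset strategy
} else if (code != ErrorMapping.NoError()) {
    ErrorMapping.maybeThrowException(code); // rethrow any other broker-side error
}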
Example 1
Source File: KafkaRecordReader.java    From kangaroo with Apache License 2.0
/**
 * THIS METHOD HAS SIDE EFFECTS - it will update {@code currentMessageItr} (if necessary) and then return true iff
 * the iterator still has elements to be read. If you call {@link scala.collection.Iterator#next()} when this method
 * returns false, you risk a {@link NullPointerException} or a {@link java.util.NoSuchElementException}.
 * 
 * @return true if you can call {@link scala.collection.Iterator#next()} on {@code currentMessageItr}.
 */
@VisibleForTesting
boolean continueItr() {
    final long remaining = end - currentOffset;
    if (!canCallNext() && remaining > 0) {
        final int theFetchSize = (fetchSize > remaining) ? (int) remaining : fetchSize;
        LOG.debug(String.format("%s fetching %d bytes starting at offset %d", split.toString(), theFetchSize,
                currentOffset));
        final FetchRequest request = new FetchRequest(split.getPartition().getTopic(), split.getPartition()
                .getPartId(), currentOffset, theFetchSize);
        final ByteBufferMessageSet msg = consumer.fetch(request);
        final int errorCode = msg.getErrorCode();
        if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
            return false;
        }
        if (errorCode != ErrorMapping.NoError()) {
            ErrorMapping.maybeThrowException(errorCode);
        } // --> else we try to grab the next iterator
        currentMessageItr = msg.iterator();
        currentOffset += msg.validBytes();
    }
    return canCallNext();
}
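A caller is expected to gate every next() call on continueItr(); a minimal sketch of the intended read loop, using the field and method names from the example above:

while (continueItr()) {
    // safe: continueItr() just returned true, so next() will not throw
    final MessageAndOffset messageAndOffset = currentMessageItr.next();
    // ... turn messageAndOffset into the record reader's current key/value ...
}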
 
Example 2
Source File: KafkaMessageReceiverImpl.java    From message-queue-client-framework with Apache License 2.0
/**
 * Check the leader.
 *
 * @param a_topic       topic name
 * @param a_partition   partition number
 * @param a_beginOffset begin offset
 * @return boolean
 */
private boolean checkLeader(String a_topic, int a_partition,
                            long a_beginOffset) {

    if (checkConsumer(a_topic, a_partition)) {

        FetchRequest req = new FetchRequestBuilder()
                .clientId(pool.getClientId())
                .addFetch(a_topic, a_partition, a_beginOffset,
                        KafkaConstants.FETCH_SIZE).build();
        fetchResponse = consumer.get().fetch(req);
        String leadHost = metadata.leader().host();

        if (fetchResponse.hasError()) {

            // Something went wrong!
            short code = fetchResponse.errorCode(a_topic, a_partition);
            logger.error("Error fetching data from the Broker:" + leadHost
                    + " Reason: " + code);

            if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                // We asked for an invalid offset. For the simple case,
                // reset to the latest offset
                a_beginOffset = getLatestOffset(a_topic, a_partition);
            }
            consumer.get().close();
            consumer.set(null);

            try {
                metadata = findNewLeader(leadHost, a_topic, a_partition);
            } catch (MQException e) {
                logger.error("Find new leader failed.", e);
            }
            return false;
        }

        return true;
    }
    return false;
}
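The getLatestOffset helper used above is not shown here; with the 0.8-era SimpleConsumer it is commonly implemented with an OffsetRequest, roughly as follows (a sketch under that assumption; the project's real helper may differ):

private long getLatestOffset(String topic, int partition) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
            new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    // LatestTime() asks the broker for the next offset that will be written
    requestInfo.put(topicAndPartition,
            new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
            requestInfo, kafka.api.OffsetRequest.CurrentVersion(), pool.getClientId());
    kafka.javaapi.OffsetResponse response = consumer.get().getOffsetsBefore(request);
    if (response.hasError()) {
        logger.error("Error fetching offset data. Reason: " + response.errorCode(topic, partition));
        return -1;
    }
    return response.offsets(topic, partition)[0];
}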
 
Example 3
Source File: KafkaLowLevelConsumer08.java    From datacollector with Apache License 2.0
@Override
public List<MessageAndOffset> read(long offset) throws StageException {

  FetchRequest req = buildFetchRequest(offset);
  FetchResponse fetchResponse;
  try {
    fetchResponse = consumer.fetch(req);
  } catch (Exception e) {
    if(e instanceof SocketTimeoutException) {
      //If the value of consumer.timeout.ms is set to a positive integer, a timeout exception is thrown to the
      //consumer if no message is available for consumption after the specified timeout value.
      //If this happens exit gracefully
      LOG.warn(KafkaErrors.KAFKA_28.getMessage());
      return Collections.emptyList();
    } else {
      throw new StageException(KafkaErrors.KAFKA_29, e.toString(), e);
    }
  }

  if(fetchResponse.hasError()) {
    short code = fetchResponse.errorCode(topic, partition);
    if(code == ErrorMapping.OffsetOutOfRangeCode()) {
      //invalid offset
      offset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(), clientName);
    } else {
      //try re-initializing the connection with kafka; findNewLeader(..) is expected
      //to re-create 'consumer', otherwise the re-fetch below would throw a NullPointerException
      consumer.close();
      consumer = null;
      leader = findNewLeader(leader, topic, partition);
    }

    //re-fetch
    req = buildFetchRequest(offset);
    fetchResponse = consumer.fetch(req);

    if(fetchResponse.hasError()) {
      //could not fetch the second time, give kafka some time
      LOG.error(KafkaErrors.KAFKA_26.getMessage(), topic, partition, offset);
    }
  }

  List<MessageAndOffset> partitionToPayloadMapArrayList = new ArrayList<>();
  for (kafka.message.MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
    long currentOffset = messageAndOffset.offset();
    if (currentOffset < offset) {
      LOG.warn(KafkaErrors.KAFKA_27.getMessage(), currentOffset, offset);
      continue;
    }
    ByteBuffer payload = messageAndOffset.message().payload();
    final Object key = messageAndOffset.message().key();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    MessageAndOffset partitionToPayloadMap = new MessageAndOffset(
        key,
        bytes,
        messageAndOffset.nextOffset(),
        partition
    );
    partitionToPayloadMapArrayList.add(partitionToPayloadMap);
  }
  return partitionToPayloadMapArrayList;
}
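findNewLeader is also outside this snippet; 0.8-style low-level consumers usually re-query topic metadata from the known brokers until a leader shows up, along these lines (a sketch only; replicaBrokers and port are assumed fields, and the real method likely adds retries and backoff):

private kafka.cluster.Broker lookupLeader(String topic, int partition) {
    for (String seedBroker : replicaBrokers) { // assumed list of known broker hosts
        SimpleConsumer metadataConsumer = null;
        try {
            metadataConsumer = new SimpleConsumer(seedBroker, port, 100000, 64 * 1024, "leaderLookup");
            TopicMetadataRequest metadataRequest =
                    new TopicMetadataRequest(Collections.singletonList(topic));
            kafka.javaapi.TopicMetadataResponse metadataResponse = metadataConsumer.send(metadataRequest);
            for (TopicMetadata topicMetadata : metadataResponse.topicsMetadata()) {
                for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                    if (partitionMetadata.partitionId() == partition) {
                        return partitionMetadata.leader(); // may be null if no leader is elected yet
                    }
                }
            }
        } finally {
            if (metadataConsumer != null) {
                metadataConsumer.close();
            }
        }
    }
    return null; // callers typically sleep briefly and retry
}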
 
Example 4
Source File: KafkaLowLevelConsumer09.java    From datacollector with Apache License 2.0
@Override
public List<MessageAndOffset> read(long offset) throws StageException {

  FetchRequest req = buildFetchRequest(offset);
  FetchResponse fetchResponse;
  try {
    fetchResponse = consumer.fetch(req);
  } catch (Exception e) {
    if(e instanceof SocketTimeoutException) {
      //If the value of consumer.timeout.ms is set to a positive integer, a timeout exception is thrown to the
      //consumer if no message is available for consumption after the specified timeout value.
      //If this happens exit gracefully
      LOG.warn(KafkaErrors.KAFKA_28.getMessage());
      return Collections.emptyList();
    } else {
      throw new StageException(KafkaErrors.KAFKA_29, e.toString(), e);
    }
  }

  if(fetchResponse.hasError()) {
    short code = fetchResponse.errorCode(topic, partition);
    if(code == ErrorMapping.OffsetOutOfRangeCode()) {
      //invalid offset
      offset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(), clientName);
    } else {
      //try re-initializing the connection with kafka; findNewLeader(..) is expected
      //to re-create 'consumer', otherwise the re-fetch below would throw a NullPointerException
      consumer.close();
      consumer = null;
      leader = findNewLeader(leader, topic, partition);
    }

    //re-fetch
    req = buildFetchRequest(offset);
    fetchResponse = consumer.fetch(req);

    if(fetchResponse.hasError()) {
      //could not fetch the second time, give kafka some time
      LOG.error(KafkaErrors.KAFKA_26.getMessage(), topic, partition, offset);
    }
  }

  List<MessageAndOffset> partitionToPayloadMapArrayList = new ArrayList<>();
  for (kafka.message.MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
    long currentOffset = messageAndOffset.offset();
    if (currentOffset < offset) {
      LOG.warn(KafkaErrors.KAFKA_27.getMessage(), currentOffset, offset);
      continue;
    }
    ByteBuffer payload = messageAndOffset.message().payload();
    final Object key = messageAndOffset.message().key();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    MessageAndOffset partitionToPayloadMap = new MessageAndOffset(
        key,
        bytes,
        messageAndOffset.nextOffset(),
        partition
    );
    partitionToPayloadMapArrayList.add(partitionToPayloadMap);
  }
  return partitionToPayloadMapArrayList;
}
 
Example 5
Source File: KafkaSimpleConsumer.java    From Pistachio with Apache License 2.0
public Iterable<BytesMessageWithOffset> fetch(long offset, int timeoutMs) throws InterruptedException {
    List<BytesMessageWithOffset> newOffsetMsg = new ArrayList<BytesMessageWithOffset>();
    FetchResponse response = null;
    Broker previousLeader = leaderBroker;
    while (true) {
        ensureConsumer(previousLeader);

        if (offset == Long.MAX_VALUE) {
            offset = getOffset(false);
            logger.info("offset max long, fetch from latest in kafka {}", offset);
        }

        FetchRequest request = new FetchRequestBuilder()
                .clientId(clientId)
                .addFetch(topic, partitionId, offset, 100000000)
                .maxWait(timeoutMs)
                .minBytes(1)
                .build();

        //logger.debug("fetch offset {}", offset);

        try {
            response = consumer.fetch(request);
        } catch (Exception e) {
            // e could be an instance of ClosedByInterruptException as SimpleConsumer.fetch uses nio
            if (Thread.interrupted()) {
                logger.info("catch exception of {} with interrupted in fetch for {} - {} with offset {}",
                        e.getClass().getName(), topic, partitionId, offset);

                throw new InterruptedException();
            }
            logger.warn("caughte exception in fetch {} - {}", topic, partitionId, e);
            response = null;
        }

        if (response == null || response.hasError()) {
            short errorCode = response != null ? response.errorCode(topic, partitionId) : ErrorMapping.UnknownCode();
            logger.warn("fetch {} - {} with offset {} encounters error: {}", topic, partitionId, offset, errorCode);

            boolean needNewLeader = false;
            if (errorCode == ErrorMapping.RequestTimedOutCode()) {
                //TODO: leave it here
            } else if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
                //TODO: fetch the earliest offset or the latest offset?
                // there seems to be no obviously correct way to handle it
                long earliestOffset = getOffset(true);
                logger.debug("got earliest offset {} for {} - {}", earliestOffset, topic, partitionId);
                if (earliestOffset < 0) {
                    needNewLeader = true;
                } else {
                    newOffsetMsg.add(new BytesMessageWithOffset(null, earliestOffset));
                    offset = earliestOffset;
                    continue;
                }
            } else {
                needNewLeader = true;
            }

            if (needNewLeader) {
                stopConsumer();
                previousLeader = leaderBroker;
                leaderBroker = null;
                continue;
            }
        } else {
            break;
        }
    }

    return response != null ? filterAndDecode(response.messageSet(topic, partitionId), offset) :
        (newOffsetMsg.size() > 0 ? newOffsetMsg : EMPTY_MSGS);
}
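getOffset(boolean) follows the same OffsetRequest pattern as the getLatestOffset sketch under Example 2, parameterized on whether to reset to the earliest or the latest position (a sketch; topic, partitionId, clientId and consumer mirror the fields used above):

private long getOffset(boolean earliest) {
    long whichTime = earliest ? kafka.api.OffsetRequest.EarliestTime()
            : kafka.api.OffsetRequest.LatestTime();
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionId);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = Collections.singletonMap(
            topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
            requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientId);
    kafka.javaapi.OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
        return -1; // the caller above treats a negative offset as "find a new leader"
    }
    return response.offsets(topic, partitionId)[0];
}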
 
Example 6
Source File: KafkaConsumer.java    From jstorm with Apache License 2.0
public ByteBufferMessageSet fetchMessages(int partition, long offset) throws IOException {

    String topic = config.topic;
    FetchRequest req = new FetchRequestBuilder().clientId(config.clientId)
            .addFetch(topic, partition, offset, config.fetchMaxBytes)
            .maxWait(config.fetchWaitMaxMs).build();
    FetchResponse fetchResponse = null;
    SimpleConsumer simpleConsumer = null;
    try {
        simpleConsumer = findLeaderConsumer(partition);
        if (simpleConsumer == null) {
            // no leader consumer is currently available for this partition
            return null;
        }
        fetchResponse = simpleConsumer.fetch(req);
    } catch (Exception e) {
        if (e instanceof ConnectException || e instanceof SocketTimeoutException || e instanceof IOException
                || e instanceof UnresolvedAddressException) {
            LOG.warn("Network error when fetching messages:", e);
            if (simpleConsumer != null) {
                String host = simpleConsumer.host();
                int port = simpleConsumer.port();
                simpleConsumer = null;
                throw new KafkaException("Network error when fetching messages: " + host + ":" + port + " , " + e.getMessage(), e);
            }
        } else {
            throw new RuntimeException(e);
        }
    }
    if (fetchResponse.hasError()) {
        short code = fetchResponse.errorCode(topic, partition);
        if (code == ErrorMapping.OffsetOutOfRangeCode() && config.resetOffsetIfOutOfRange) {
            offset = getOffset(topic, partition, config.startOffsetTime);
        }
        if (leaderBroker != null) {
            LOG.error("fetch data from kafka topic[" + config.topic + "] host[" + leaderBroker.host() + ":"
                    + leaderBroker.port() + "] partition[" + partition + "] error:" + code);
        }
        return null;
    } else {
        return fetchResponse.messageSet(topic, partition);
    }
}
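Note that fetchMessages returns null both when no leader consumer is available and after a logged fetch error, so callers must null-check before iterating; a minimal usage sketch (the consumer variable and offset bookkeeping are assumed):

ByteBufferMessageSet msgs = consumer.fetchMessages(partition, offset);
if (msgs != null) {
    for (kafka.message.MessageAndOffset messageAndOffset : msgs) {
        offset = messageAndOffset.nextOffset(); // advance past the consumed message
        java.nio.ByteBuffer payload = messageAndOffset.message().payload();
        // decode the payload ...
    }
}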