org.apache.kafka.common.errors.TimeoutException Java Examples

The following examples show how to use org.apache.kafka.common.errors.TimeoutException. Each example is taken from an open source project; the source file, originating project, and license are noted above it.
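To set the stage before the project samples, here is a minimal, self-contained sketch of the most common way this exception surfaces: a producer that cannot fetch topic metadata from the cluster in time. The broker address, topic name, and timeout values below are illustrative placeholders, not taken from any of the projects that follow.

import java.time.Duration;
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.serialization.StringSerializer;

public class TimeoutExceptionSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "5000"); // fail fast if metadata cannot be fetched

        KafkaProducer<String, String> producer = new KafkaProducer<>(props);
        try {
            // send() blocks for at most max.block.ms while fetching metadata;
            // if the cluster is unreachable it throws TimeoutException.
            producer.send(new ProducerRecord<>("example-topic", "key", "value"));
        } catch (TimeoutException e) {
            System.err.println("Send timed out: " + e.getMessage());
        } finally {
            producer.close(Duration.ofSeconds(5));
        }
    }
}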
Example #1
Source File: ProducerRunnable.java    From event-streams-samples with Apache License 2.0
public ProducerRunnable(Map<String, Object> producerConfigs, String topic) {
    this.topic = topic;

    // Create a Kafka producer with the provided client configuration
    kafkaProducer = new KafkaProducer<String, String>(producerConfigs);

    try {
        // Check whether the topic exists.
        // If it does not, the producer will retry for about 60 seconds
        // before throwing a TimeoutException;
        // see the configuration parameter 'metadata.fetch.timeout.ms'.
        List<PartitionInfo> partitions = kafkaProducer.partitionsFor(topic);
        logger.info(partitions.toString());
    } catch (TimeoutException kte) {
        logger.error("Topic '{}' may not exist - application will terminate", topic);
        kafkaProducer.close(Duration.ofSeconds(5L));
        throw new IllegalStateException("Topic '" + topic + "' may not exist - application will terminate", kte);
    }
}
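A caveat on the comment above: 'metadata.fetch.timeout.ms' belongs to older producer versions; since Kafka 0.9 the time that send() and partitionsFor() block waiting for metadata is bounded by 'max.block.ms' instead. A one-line sketch of the modern equivalent, applied to the producerConfigs map before it is passed to this constructor:

    // Assumes a Kafka 0.9+ client, where 'max.block.ms' supersedes
    // 'metadata.fetch.timeout.ms'; 60000 ms matches the ~60 s noted above.
    producerConfigs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 60000L);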
 
Example #2
Source File: AsynchronousDeliveryStrategy.java    From logback-kafka-appender with Apache License 2.0
@Override
public <K, V, E> boolean send(Producer<K, V> producer, ProducerRecord<K, V> record, final E event,
                              final FailedDeliveryCallback<E> failedDeliveryCallback) {
    try {
        producer.send(record, new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception != null) {
                    failedDeliveryCallback.onFailedDelivery(event, exception);
                }
            }
        });
        return true;
    } catch (BufferExhaustedException | TimeoutException e) {
        failedDeliveryCallback.onFailedDelivery(event, e);
        return false;
    }
}
 
Example #3
Source File: ProcessingPartition.java    From common-kafka with Apache License 2.0
/**
 * Returns the earliest offset for the partition
 *
 * @return the earliest offset for the partition
 *
 * @throws IllegalStateException if the earliest offset could not be looked up
 */
protected long getEarliestOffset() {
    Map<TopicPartition, Long> offsets;

    try {
        offsets = consumer.beginningOffsets(Collections.singleton(topicPartition));
    } catch (TimeoutException | InterruptException e) {
        throw new IllegalStateException("Unable to look up earliest offset for topic partition [" + topicPartition + "]", e);
    }

    if (!offsets.containsKey(topicPartition))
        throw new IllegalStateException("Unable to look up earliest offset for topic partition [" + topicPartition + "]");

    Long offset = offsets.get(topicPartition);

    if (offset == null)
        throw new IllegalStateException("Unable to look up earliest offset for topic partition [" + topicPartition + "]");

    return offset;
}
 
Example #4
Source File: ProcessingPartition.java    From common-kafka with Apache License 2.0
/**
 * Returns the latest offset for the partition
 *
 * @return the latest offset for the partition
 *
 * @throws IllegalStateException if the latest offset could not be looked up
 */
protected long getLatestOffset() {
    Map<TopicPartition, Long> offsets;

    try {
        offsets = consumer.endOffsets(Collections.singleton(topicPartition));
    } catch (TimeoutException | InterruptException e) {
        throw new IllegalStateException("Unable to look up latest offset for topic partition [" + topicPartition + "]", e);
    }

    if (!offsets.containsKey(topicPartition))
        throw new IllegalStateException("Unable to look up latest offset for topic partition [" + topicPartition + "]");

    Long offset = offsets.get(topicPartition);

    if (offset == null)
        throw new IllegalStateException("Unable to look up latest offset for topic partition [" + topicPartition + "]");

    return offset;
}
 
Example #5
Source File: KafkaAdminClient.java    From df_data_service with Apache License 2.0
/**
 * Describes one or more topics on the Kafka broker.
 *
 * @param topicsName A comma-separated list of topic names.
 */
public static void describeTopics(String bootstrapServersHostPort, String topicsName) {
    AdminClient adminClient = createAdminClient(bootstrapServersHostPort);
    // describe the given topics; a missing topic surfaces as UnknownTopicOrPartitionException
    DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Arrays.asList(topicsName.split(",")));
    try {
        describeTopicsResult.all().get().forEach((key, value) -> {
            System.out.println("Key : " + key + " Value : " + value);
        });
        // real failure cause is wrapped inside the raised ExecutionException
    } catch (ExecutionException | InterruptedException e) {
        if (e.getCause() instanceof UnknownTopicOrPartitionException) {
            System.err.println("Topic does not exist!");
        } else if (e.getCause() instanceof TimeoutException) {
            System.err.println("Timed out describing topics!");
        }
        e.printStackTrace();
    } finally {
        adminClient.close();
    }
}
 
Example #6
Source File: SourceRecordDequeImpl.java    From connect-utils with Apache License 2.0
private void waitForCapacity(int size) {
  if (null != writeRateLimit) {
    this.writeRateLimit.acquire(size);
  }
  if (size() >= this.maximumCapacity) {
    final long start = this.time.milliseconds();
    long elapsed = 0; // time waited so far in ms, not a timestamp
    while (size() >= this.maximumCapacity) {
      if (elapsed > this.maximumCapacityTimeoutMs) {
        throw new TimeoutException(
            String.format(
                "Timeout of %s ms exceeded while waiting for Deque to be drained below %s",
                this.maximumCapacityTimeoutMs,
                this.maximumCapacity
            )
        );
      }
      this.time.sleep(this.maximumCapacityWaitMs);
      elapsed = (this.time.milliseconds() - start);
    }
  }
}
 
Example #7
Source File: KafkaAdminClient.java    From df_data_service with Apache License 2.0
/**
 * Given their names, deletes topics on the Kafka broker.
 *
 * @param topicsName A comma-separated list of topic names.
 */
public static void deleteTopics(String bootstrapServersHostPort, String topicsName) {
    AdminClient adminClient = createAdminClient(bootstrapServersHostPort);
    // delete the given topics; a missing topic surfaces as UnknownTopicOrPartitionException
    DeleteTopicsResult deleteTopicsResult = adminClient.deleteTopics(Arrays.asList(topicsName.split(",")));
    try {
        deleteTopicsResult.all().get();
        // real failure cause is wrapped inside the raised ExecutionException
    } catch (ExecutionException | InterruptedException e) {
        if (e.getCause() instanceof UnknownTopicOrPartitionException) {
            System.err.println("Topic does not exist!");
        } else if (e.getCause() instanceof TimeoutException) {
            System.err.println("Timed out deleting topics!");
        }
        e.printStackTrace();
    } finally {
        adminClient.close();
    }
}
 
Example #8
Source File: HttpSourceBridgeEndpoint.java    From strimzi-kafka-bridge with Apache License 2.0
private int handleError(Throwable ex) {
    if (ex instanceof TimeoutException && ex.getMessage() != null &&
        ex.getMessage().contains("not present in metadata")) {
        this.closing = true;
        return HttpResponseStatus.NOT_FOUND.code();
    } else {
        return HttpResponseStatus.INTERNAL_SERVER_ERROR.code();
    }
}
 
Example #9
Source File: AsynchronousDeliveryStrategyTest.java    From logback-kafka-appender with Apache License 2.0
@Test
public void testCallbackWillTriggerOnFailedDeliveryOnProducerSendTimeout() {
    final TimeoutException exception = new TimeoutException("miau");
    final ProducerRecord<String,String> record = new ProducerRecord<String,String>("topic", 0, null, "msg");

    when(producer.send(same(record), any(Callback.class))).thenThrow(exception);

    unit.send(producer, record, "msg", failedDeliveryCallback);

    verify(failedDeliveryCallback).onFailedDelivery(eq("msg"), same(exception));
}
 
Example #10
Source File: KafkaTopicRepositoryTest.java    From nakadi with MIT License
@Test
public void whenPublishShortCircuitingIsRecorded() {
    when(nakadiSettings.getKafkaSendTimeoutMs()).thenReturn(1000L);
    when(kafkaProducer.partitionsFor(EXPECTED_PRODUCER_RECORD.topic())).thenReturn(ImmutableList.of(
            new PartitionInfo(EXPECTED_PRODUCER_RECORD.topic(), 1, new Node(1, "10.10.0.1", 9091), null, null)));

    final MetricRegistry metricRegistry = new MetricRegistry();
    setResponseForSendingBatches(new TimeoutException(), metricRegistry);
    final String meterName = metricRegistry.getMeters().firstKey();
    final Meter meter = metricRegistry.getMeters().get(meterName);
    Assert.assertEquals(meterName, "hystrix.short.circuit.1_10.10.0.1");
    Assert.assertTrue(meter.getCount() >= 1);
}
 
Example #11
Source File: KafkaTopicRepositoryTest.java    From nakadi with MIT License
@Test
public void checkCircuitBreakerStateBasedOnKafkaResponse() {
    when(nakadiSettings.getKafkaSendTimeoutMs()).thenReturn(1000L);
    when(kafkaProducer.partitionsFor(EXPECTED_PRODUCER_RECORD.topic())).thenReturn(ImmutableList.of(
            new PartitionInfo(EXPECTED_PRODUCER_RECORD.topic(), 1, NODE, null, null)));

    //Timeout Exception should cause circuit breaker to open
    List<BatchItem> batches = setResponseForSendingBatches(new TimeoutException(), new MetricRegistry());
    Assert.assertTrue(batches.stream()
            .filter(item -> item.getResponse().getPublishingStatus() == EventPublishingStatus.FAILED &&
                    item.getResponse().getDetail().equals("short circuited"))
            .count() >= 1);

    //No exception should close the circuit
    batches = setResponseForSendingBatches(null, new MetricRegistry());
    Assert.assertTrue(batches.stream()
            .filter(item -> item.getResponse().getPublishingStatus() == EventPublishingStatus.SUBMITTED &&
                    item.getResponse().getDetail().equals(""))
            .count() >= 1);

    //Timeout Exception should cause circuit breaker to open again
    batches = setResponseForSendingBatches(new TimeoutException(), new MetricRegistry());
    Assert.assertTrue(batches.stream()
            .filter(item -> item.getResponse().getPublishingStatus() == EventPublishingStatus.FAILED &&
                    item.getResponse().getDetail().equals("short circuited"))
            .count() >= 1);

}
 
Example #12
Source File: KafkaConsumerTestBase.java    From flink with Apache License 2.0
/**
 * Test that ensures the KafkaConsumer fails properly if the topic doesn't exist
 * and a wrong broker was specified.
 *
 * @throws Exception
 */
public void runFailOnNoBrokerTest() throws Exception {
	try {
		Properties properties = new Properties();

		StreamExecutionEnvironment see = StreamExecutionEnvironment.getExecutionEnvironment();
		see.getConfig().disableSysoutLogging();
		see.setRestartStrategy(RestartStrategies.noRestart());
		see.setParallelism(1);

		// use wrong ports for the consumers
		properties.setProperty("bootstrap.servers", "localhost:80");
		properties.setProperty("group.id", "test");
		properties.setProperty("request.timeout.ms", "3000"); // let the test fail fast
		properties.setProperty("socket.timeout.ms", "3000");
		properties.setProperty("session.timeout.ms", "2000");
		properties.setProperty("fetch.max.wait.ms", "2000");
		properties.setProperty("heartbeat.interval.ms", "1000");
		properties.putAll(secureProps);
		FlinkKafkaConsumerBase<String> source = kafkaServer.getConsumer("doesntexist", new SimpleStringSchema(), properties);
		DataStream<String> stream = see.addSource(source);
		stream.print();
		see.execute("No broker test");
	} catch (JobExecutionException jee) {
		if (kafkaServer.getVersion().equals("0.9") ||
			kafkaServer.getVersion().equals("0.10") ||
			kafkaServer.getVersion().equals("0.11") ||
			kafkaServer.getVersion().equals("2.0")) {
			final Optional<TimeoutException> optionalTimeoutException = ExceptionUtils.findThrowable(jee, TimeoutException.class);
			assertTrue(optionalTimeoutException.isPresent());

			final TimeoutException timeoutException = optionalTimeoutException.get();
			assertEquals("Timeout expired while fetching topic metadata", timeoutException.getMessage());
		} else {
			final Optional<Throwable> optionalThrowable = ExceptionUtils.findThrowableWithMessage(jee, "Unable to retrieve any partitions");
			assertTrue(optionalThrowable.isPresent());
			assertTrue(optionalThrowable.get() instanceof RuntimeException);
		}
	}
}
 
Example #13
Source File: KafkaImplTest.java    From strimzi-kafka-operator with Apache License 2.0
@Test
public void testDeleteDescribeTimeout(VertxTestContext testContext) {
    Admin admin = mock(Admin.class);
    mockDeleteTopics(admin, singletonMap("test", Either.ofLeft(null)));
    mockDescribeTopics(admin, singletonMap("test", Either.ofRight(new TimeoutException())));

    KafkaImpl impl = new KafkaImpl(admin, vertx);
    impl.deleteTopic(new TopicName("test")).onComplete(testContext.failing(error -> testContext.verify(() -> {
        assertTrue(error instanceof TimeoutException);
        testContext.completeNow();
    })));
}
 
Example #14
Source File: KafkaImplTest.java    From strimzi-kafka-operator with Apache License 2.0
@Test
public void testDeleteDeleteTimeout(VertxTestContext testContext) {
    Admin admin = mock(Admin.class);
    mockDescribeTopics(admin, singletonMap("test", Either.ofRight(new UnknownTopicOrPartitionException())));
    mockDeleteTopics(admin, singletonMap("test", Either.ofRight(new TimeoutException())));

    KafkaImpl impl = new KafkaImpl(admin, vertx);
    impl.deleteTopic(new TopicName("test")).onComplete(testContext.failing(error -> testContext.verify(() -> {
        assertTrue(error instanceof TimeoutException);
        testContext.completeNow();
    })));
}
 
Example #15
Source File: KafkaImplTest.java    From strimzi-kafka-operator with Apache License 2.0
@Test
public void testTopicMetadataDescribeTimeout(VertxTestContext testContext) {
    Admin admin = mock(Admin.class);
    mockDescribeTopics(admin, singletonMap("test", Either.ofLeft(mock(TopicDescription.class))));
    mockDescribeConfigs(admin, singletonMap(new ConfigResource(ConfigResource.Type.TOPIC, "test"),
                Either.ofRight(new TimeoutException())));

    KafkaImpl impl = new KafkaImpl(admin, vertx);
    impl.topicMetadata(new TopicName("test")).onComplete(testContext.failing(error -> testContext.verify(() -> {
        assertTrue(error instanceof TimeoutException);
        testContext.completeNow();
    })));
}
 
Example #16
Source File: KafkaAvailabilityTest.java    From strimzi-kafka-operator with Apache License 2.0
@Test
public void testCanRollThrowsTimeoutExceptionWhenTopicsListThrowsException(VertxTestContext context) {
    KSB ksb = new KSB()
            .addNewTopic("A", false)
                .addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "1")
                .addNewPartition(0)
                    .replicaOn(0, 1, 2)
                    .leader(0)
                    .isr(0, 1, 2)
                .endPartition()
            .endTopic()
            .addNewTopic("B", false)
                .addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "1")
                .addNewPartition(0)
                    .replicaOn(0, 1, 2)
                    .leader(1)
                    .isr(0, 1, 2)
                .endPartition()
            .endTopic()

            .addBroker(3)
            .listTopicsResult(new TimeoutException());

    KafkaAvailability kafkaAvailability = new KafkaAvailability(ksb.ac());

    Checkpoint a = context.checkpoint(ksb.brokers.size());
    for (Integer brokerId : ksb.brokers.keySet()) {
        kafkaAvailability.canRoll(brokerId).onComplete(context.failing(e -> context.verify(() -> {
            assertThat(e, instanceOf(TimeoutException.class));
            a.flag();
        })));
    }
}
 
Example #17
Source File: LiKafkaConsumerIntegrationTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Test(expectedExceptions = TimeoutException.class)
public void testCommitWithTimeout() throws Exception {
  String topic = "testCommitWithTimeout";
  createTopic(topic);
  produceSyntheticMessages(topic);
  Properties props = new Properties();
  // All the consumers should have the same group id.
  props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "testCommitWithTimeout");
  // Make sure we start to consume from the beginning.
  props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
  // Only fetch one record at a time.
  props.setProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1");
  // No auto commit
  props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
  LiKafkaConsumer<String, String> consumer = createConsumer(props);
  try {
    TopicPartition tp = new TopicPartition(topic, SYNTHETIC_PARTITION_0);
    consumer.assign(Arrays.asList(tp));

    while (consumer.poll(10).isEmpty()) {
    }
    // Shutdown the broker so that offset commit would hang and eventually time out.
    tearDown();
    consumer.commitSync(Duration.ofSeconds(3));
  } finally {
    consumer.close();
  }
}
 
Example #18
Source File: SourceRecordDequeTest.java    From connect-utils with Apache License 2.0
@Test
public void timeout() {
  when(this.time.milliseconds()).thenReturn(1000L, 3000L, 5000L, 100000L);
  SourceRecordDeque deque = this.builder
      .maximumCapacity(5)
      .build();
  final int count = 10;
  assertNotNull(deque);
  assertThrows(TimeoutException.class, () -> {
    for (int i = 0; i < count; i++) {
      deque.add(newRecord());
    }
  });
}
 
Example #19
Source File: KafkaPublisherTest.java    From ja-micro with Apache License 2.0
@Test
public void sendFailsReturnsFalse() {
    KafkaProducer producer = mock(KafkaProducer.class);
    publisher.realProducer = producer;
    RecordMetadata metadata = new RecordMetadata(null, 0, 0,
            0, Long.valueOf(0), 0, 0);
    ArgumentCaptor<Callback> captor = ArgumentCaptor.forClass(Callback.class);
    when(producer.send(any(), captor.capture())).then(
        invocation -> {
            captor.getValue().onCompletion(metadata, new TimeoutException("error"));
            return new CompletableFuture();
        });
    String[] events = { "test" };
    assertThat(publisher.publishEvents(false, null, events)).isFalse();
}
 
Example #20
Source File: KafkaChannel.java    From syncer with BSD 3-Clause "New" or "Revised" License
@Override
public ErrorLevel level(Throwable e, SyncWrapper wrapper, int maxTry) {
  if (e instanceof KafkaProducerException) {
    e = e.getCause();
  }
  if (e instanceof TimeoutException || e instanceof NotLeaderForPartitionException) {
    return ErrorLevel.RETRIABLE_ERROR;
  }
  return ErrorLevel.MAX_TRY_EXCEED;
}
 
Example #21
Source File: KafkaProducerWrapperTest.java    From common-kafka with Apache License 2.0
@Test(expected=IOException.class)
public void test_flushRetriable() throws IOException {
    doThrow(new TimeoutException("boom")).when(mockedProducer).flush();

    KafkaProducerWrapper<String, String> producer = new KafkaProducerWrapper<>(mockedProducer);


    producer.send(new ProducerRecord<>(topic, "key"+testName.getMethodName(),
            "value"+UUID.randomUUID()));
    producer.flush();
}
 
Example #22
Source File: KafkaMonitorTest.java    From mirus with BSD 3-Clause "New" or "Revised" License
@Test
public void shouldContinueRunningWhenExceptionEncountered() throws InterruptedException {
  Map<String, String> properties = getBaseProperties();
  SourceConfig config = new SourceConfig(properties);
  TaskConfigBuilder taskConfigBuilder =
      new TaskConfigBuilder(new RoundRobinTaskAssignor(), config);

  // Require two thrown exceptions to ensure that the KafkaMonitor run loop executes more than
  // once
  CountDownLatch exceptionThrownLatch = new CountDownLatch(2);
  MockConsumer<byte[], byte[]> consumer =
      new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public Map<String, List<PartitionInfo>> listTopics() {
          exceptionThrownLatch.countDown();
          throw new TimeoutException("KABOOM!");
        }
      };

  kafkaMonitor =
      new KafkaMonitor(
          mock(ConnectorContext.class),
          config,
          consumer,
          mockDestinationConsumer,
          taskConfigBuilder);
  Thread monitorThread = new Thread(kafkaMonitor);
  monitorThread.start();
  exceptionThrownLatch.await(2, TimeUnit.SECONDS);
  monitorThread.join(1);

  assertThat(monitorThread.getState(), not(State.TERMINATED));
  kafkaMonitor.stop();
  monitorThread.interrupt();
  monitorThread.join(5000);
}
 
Example #23
Source File: KafkaStreamMetadataProvider.java    From incubator-pinot with Apache License 2.0
/**
 * Fetches the numeric Kafka offset for this partition for a symbolic name ("largest" or "smallest").
 *
 * @param offsetCriteria The symbolic offset to resolve ("largest" or "smallest")
 * @param timeoutMillis Timeout in milliseconds
 * @throws java.util.concurrent.TimeoutException If the operation could not be completed within {@code timeoutMillis}
 * milliseconds
 * @return An offset
 */
@Override
public synchronized StreamPartitionMsgOffset fetchStreamPartitionOffset(@Nonnull OffsetCriteria offsetCriteria, long timeoutMillis)
    throws java.util.concurrent.TimeoutException {
  Preconditions.checkState(isPartitionProvided,
      "Cannot fetch partition offset. StreamMetadataProvider created without partition information");
  Preconditions.checkNotNull(offsetCriteria);

  final long offsetRequestTime;
  if (offsetCriteria.isLargest()) {
    offsetRequestTime = kafka.api.OffsetRequest.LatestTime();
  } else if (offsetCriteria.isSmallest()) {
    offsetRequestTime = kafka.api.OffsetRequest.EarliestTime();
  } else {
    throw new IllegalArgumentException("Unknown initial offset value " + offsetCriteria.toString());
  }

  int kafkaErrorCount = 0;
  final int MAX_KAFKA_ERROR_COUNT = 10;

  final long endTime = System.currentTimeMillis() + timeoutMillis;

  while (System.currentTimeMillis() < endTime) {
    // Try to get into a state where we're connected to Kafka
    while (_currentState.getStateValue() != KafkaConnectionHandler.ConsumerState.CONNECTED_TO_PARTITION_LEADER
        && System.currentTimeMillis() < endTime) {
      _currentState.process();
    }

    if (_currentState.getStateValue() != KafkaConnectionHandler.ConsumerState.CONNECTED_TO_PARTITION_LEADER
        && endTime <= System.currentTimeMillis()) {
      throw new TimeoutException();
    }

    // Send the offset request to Kafka
    OffsetRequest request = new OffsetRequest(Collections.singletonMap(new TopicAndPartition(_topic, _partition),
        new PartitionOffsetRequestInfo(offsetRequestTime, 1)), kafka.api.OffsetRequest.CurrentVersion(), _clientId);
    OffsetResponse offsetResponse;
    try {
      offsetResponse = _simpleConsumer.getOffsetsBefore(request);
    } catch (Exception e) {
      _currentState.handleConsumerException(e);
      continue;
    }

    final short errorCode = offsetResponse.errorCode(_topic, _partition);

    if (errorCode == Errors.NONE.code()) {
      long offset = offsetResponse.offsets(_topic, _partition)[0];
      if (offset == 0L) {
        LOGGER.warn("Fetched offset of 0 for topic {} and partition {}, is this a newly created topic?", _topic,
            _partition);
      }
      return new LongMsgOffset(offset);
    } else if (errorCode == Errors.LEADER_NOT_AVAILABLE.code()) {
      // If there is no leader, it'll take some time for a new leader to be elected, wait 100 ms before retrying
      Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    } else {
      // Retry after a short delay
      kafkaErrorCount++;

      if (MAX_KAFKA_ERROR_COUNT < kafkaErrorCount) {
        throw exceptionForKafkaErrorCode(errorCode);
      }

      Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    }
  }

  throw new TimeoutException();
}
 
Example #24
Source File: KafkaStreamMetadataProvider.java    From incubator-pinot with Apache License 2.0
/**
 * Fetches the number of partitions for this Kafka stream.
 *
 * @param timeoutMillis Timeout in milliseconds
 * @return The number of partitions for the topic
 */
@Override
public synchronized int fetchPartitionCount(long timeoutMillis) {
  int unknownTopicReplyCount = 0;
  final int MAX_UNKNOWN_TOPIC_REPLY_COUNT = 10;
  int kafkaErrorCount = 0;
  final int MAX_KAFKA_ERROR_COUNT = 10;

  final long endTime = System.currentTimeMillis() + timeoutMillis;

  while (System.currentTimeMillis() < endTime) {
    // Try to get into a state where we're connected to Kafka
    while (!_currentState.isConnectedToKafkaBroker() && System.currentTimeMillis() < endTime) {
      _currentState.process();
    }

    if (endTime <= System.currentTimeMillis() && !_currentState.isConnectedToKafkaBroker()) {
      throw new TimeoutException(
          "Failed to get the partition count for topic " + _topic + " within " + timeoutMillis + " ms");
    }

    // Send the metadata request to Kafka
    TopicMetadataResponse topicMetadataResponse = null;
    try {
      topicMetadataResponse = _simpleConsumer.send(new TopicMetadataRequest(Collections.singletonList(_topic)));
    } catch (Exception e) {
      _currentState.handleConsumerException(e);
      continue;
    }

    final TopicMetadata topicMetadata = topicMetadataResponse.topicsMetadata().get(0);
    final short errorCode = topicMetadata.errorCode();

    if (errorCode == Errors.NONE.code()) {
      return topicMetadata.partitionsMetadata().size();
    } else if (errorCode == Errors.LEADER_NOT_AVAILABLE.code()) {
      // If there is no leader, it'll take some time for a new leader to be elected, wait 100 ms before retrying
      Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    } else if (errorCode == Errors.INVALID_TOPIC_EXCEPTION.code()) {
      throw new RuntimeException("Invalid topic name " + _topic);
    } else if (errorCode == Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) {
      if (MAX_UNKNOWN_TOPIC_REPLY_COUNT < unknownTopicReplyCount) {
        throw new RuntimeException("Topic " + _topic + " does not exist");
      } else {
        // Kafka topic creation can sometimes take some time, so we'll retry after a little bit
        unknownTopicReplyCount++;
        Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
      }
    } else {
      // Retry after a short delay
      kafkaErrorCount++;

      if (MAX_KAFKA_ERROR_COUNT < kafkaErrorCount) {
        throw exceptionForKafkaErrorCode(errorCode);
      }

      Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    }
  }

  throw new TimeoutException();
}
 
Example #25
Source File: KafkaStreamMetadataProvider.java    From incubator-pinot with Apache License 2.0
public synchronized long fetchPartitionOffset(@Nonnull OffsetCriteria offsetCriteria, long timeoutMillis)
    throws java.util.concurrent.TimeoutException {
  throw new UnsupportedOperationException("The use of this method is not supported");
}
 
Example #26
Source File: KafkaConsumerTestBase.java    From Flink-CEPplus with Apache License 2.0
/**
 * Test that ensures the KafkaConsumer fails properly if the topic doesn't exist
 * and a wrong broker was specified.
 *
 * @throws Exception
 */
public void runFailOnNoBrokerTest() throws Exception {
	try {
		Properties properties = new Properties();

		StreamExecutionEnvironment see = StreamExecutionEnvironment.getExecutionEnvironment();
		see.getConfig().disableSysoutLogging();
		see.setRestartStrategy(RestartStrategies.noRestart());
		see.setParallelism(1);

		// use wrong ports for the consumers
		properties.setProperty("bootstrap.servers", "localhost:80");
		properties.setProperty("zookeeper.connect", "localhost:80");
		properties.setProperty("group.id", "test");
		properties.setProperty("request.timeout.ms", "3000"); // let the test fail fast
		properties.setProperty("socket.timeout.ms", "3000");
		properties.setProperty("session.timeout.ms", "2000");
		properties.setProperty("fetch.max.wait.ms", "2000");
		properties.setProperty("heartbeat.interval.ms", "1000");
		properties.putAll(secureProps);
		FlinkKafkaConsumerBase<String> source = kafkaServer.getConsumer("doesntexist", new SimpleStringSchema(), properties);
		DataStream<String> stream = see.addSource(source);
		stream.print();
		see.execute("No broker test");
	} catch (JobExecutionException jee) {
		if (kafkaServer.getVersion().equals("0.9") ||
			kafkaServer.getVersion().equals("0.10") ||
			kafkaServer.getVersion().equals("0.11") ||
			kafkaServer.getVersion().equals("2.0")) {
			assertTrue(jee.getCause() instanceof TimeoutException);

			TimeoutException te = (TimeoutException) jee.getCause();

			assertEquals("Timeout expired while fetching topic metadata", te.getMessage());
		} else {
			assertTrue(jee.getCause() instanceof RuntimeException);

			RuntimeException re = (RuntimeException) jee.getCause();

			assertTrue(re.getMessage().contains("Unable to retrieve any partitions"));
		}
	}
}
 
Example #27
Source File: TestKafkaProducerWrapper.java    From brooklin with BSD 2-Clause "Simplified" License
@Test
public void testFlushTimeout() throws Exception {
  DynamicMetricsManager.createInstance(new MetricRegistry(), getClass().getSimpleName());
  Properties transportProviderProperties = new Properties();
  transportProviderProperties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:1234");
  transportProviderProperties.put(ProducerConfig.CLIENT_ID_CONFIG, "testClient");
  transportProviderProperties.put(KafkaTransportProviderAdmin.ZK_CONNECT_STRING_CONFIG, "zk-connect-string");
  transportProviderProperties.put(KafkaProducerWrapper.CFG_PRODUCER_FLUSH_TIMEOUT_MS, "1");

  String topicName = "topic-42";

  MockKafkaProducerWrapper<byte[], byte[]> producerWrapper =
      new MockKafkaProducerWrapper<>("log-suffix", transportProviderProperties, "metrics",
          TimeoutException.class);

  String destinationUri = "localhost:1234/" + topicName;
  Datastream ds = DatastreamTestUtils.createDatastream("test", "ds1", "source", destinationUri, 1);

  DatastreamTask task = new DatastreamTaskImpl(Collections.singletonList(ds));
  ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(topicName, null, null);
  producerWrapper.assignTask(task);

  // Sending first event, send should pass, none of the other methods on the producer should have been called
  producerWrapper.send(task, producerRecord, null);
  producerWrapper.verifySend(1);
  producerWrapper.verifyFlush(0);
  producerWrapper.verifyClose(0);
  Assert.assertEquals(producerWrapper.getNumCreateKafkaProducerCalls(), 1);

  // Producer was mocked to throw a TimeoutException
  Assert.assertThrows(TimeoutException.class, producerWrapper::flush);

  producerWrapper.verifySend(1);
  producerWrapper.verifyFlush(1);
  producerWrapper.verifyClose(0);

  // Second send should reuse the same producer since the producer is not closed on TimeoutException
  producerWrapper.send(task, producerRecord, null);
  producerWrapper.verifySend(2);
  producerWrapper.verifyFlush(1);
  producerWrapper.verifyClose(0);
  Assert.assertEquals(producerWrapper.getNumCreateKafkaProducerCalls(), 1);

  // Closing the producer's task. Since this is the only task, the producer should be closed
  producerWrapper.close(task);
  producerWrapper.verifySend(2);
  producerWrapper.verifyFlush(1);
  producerWrapper.verifyClose(1);
  Assert.assertEquals(producerWrapper.getNumCreateKafkaProducerCalls(), 1);
}
 
Example #28
Source File: ClusterConfigController.java    From kafka-webview with MIT License
/**
 * GET for testing if a cluster is configured correctly.
 */
@RequestMapping(path = "/test/{id}", method = RequestMethod.GET)
public String testCluster(@PathVariable final Long id, final RedirectAttributes redirectAttributes) {
    // Retrieve it
    final Optional<Cluster> clusterOptional = clusterRepository.findById(id);
    if (!clusterOptional.isPresent()) {
        // Set flash message & redirect
        redirectAttributes.addFlashAttribute("FlashMessage", FlashMessage.newWarning("Unable to find cluster!"));

        // redirect to cluster index
        return "redirect:/configuration/cluster";
    }
    final Cluster cluster = clusterOptional.get();

    // Create new Operational Client
    try {
        try (final KafkaOperations kafkaOperations = kafkaOperationsFactory.create(cluster, getLoggedInUserId())) {
            logger.info("Cluster Nodes: {}", kafkaOperations.getClusterNodes());

            // If we made it this far, we should be AOK
            cluster.setValid(true);
            clusterRepository.save(cluster);

            // Set success msg
            redirectAttributes.addFlashAttribute("FlashMessage", FlashMessage.newSuccess("Cluster configuration is valid!"));
        }
    } catch (final Exception e) {
        // Collect all reasons.
        String reason = e.getMessage();
        if (e instanceof TimeoutException) {
            reason = reason + " (This may indicate an authentication or connection problem)";
        }

        // Set error msg
        redirectAttributes.addFlashAttribute(
            "FlashMessage",
            FlashMessage.newDanger("Error connecting to cluster: " + reason, e)
        );

        // Mark as invalid
        cluster.setValid(false);
        clusterRepository.save(cluster);
    }


    // redirect to cluster index
    return "redirect:/configuration/cluster";
}
 
Example #29
Source File: ClusterStatus.java    From common-docker with Apache License 2.0
/**
 * Checks whether /brokers/ids is present. This signifies that at least one Kafka broker has
 * registered in ZK.
 *
 * @param timeoutMs timeout in ms.
 * @param zookeeper Zookeeper client.
 * @return True if /brokers/ids is present.
 */
private static boolean isKafkaRegisteredInZookeeper(ZooKeeper zookeeper, int timeoutMs)
    throws InterruptedException {
  // Make sure /brokers/ids exists. Countdown when one of the following happen:
  // 1. node created event is triggered (this happens when /brokers/ids is created after the
  // call is made).
  // 2. StatCallback gets a non-null callback (this happens when /brokers/ids exists when the
  // call is made) .
  final CountDownLatch kafkaRegistrationSignal = new CountDownLatch(1);
  zookeeper.exists(
      BROKERS_IDS_PATH,
      new Watcher() {
        @Override
        public void process(WatchedEvent event) {
          log.debug(
              "Got event when checking for existence of /brokers/ids. type={} path={}",
              event.getType(),
              event.getPath()
          );
          if (event.getType() == Watcher.Event.EventType.NodeCreated) {
            kafkaRegistrationSignal.countDown();
          }
        }
      },
      new StatCallback() {
        @Override
        public void processResult(int rc, String path, Object ctx, Stat stat) {
          log.debug(
              "StatsCallback got data for path={}, stat={}",
              path,
              stat
          );
          if (stat != null) {
            kafkaRegistrationSignal.countDown();
          }
        }
      },
      null
  );

  boolean kafkaRegistrationTimedOut = !kafkaRegistrationSignal.await(
      timeoutMs,
      TimeUnit.MILLISECONDS
  );
  if (kafkaRegistrationTimedOut) {
    String message = String.format(
        "Timed out waiting for Kafka to create /brokers/ids in Zookeeper. timeout (ms) = %s",
        timeoutMs
    );
    throw new TimeoutException(message);
  }

  return true;
}
 