org.apache.kafka.common.KafkaFuture Java Examples

The following examples show how to use org.apache.kafka.common.KafkaFuture. Each example is taken from an open-source project; its source file, project, and license are noted in the header above the code.
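
Before the project examples, here is a minimal, self-contained sketch of the two KafkaFuture operations that recur throughout this page: building a pre-completed future (handy for stubbing AdminClient results in tests) and registering a completion callback. The class name and printed values are illustrative.

import org.apache.kafka.common.KafkaFuture;

public class KafkaFutureBasics {
  public static void main(String[] args) throws Exception {
    // A future that is already completed; useful for stubbing AdminClient results in tests.
    KafkaFuture<String> done = KafkaFuture.completedFuture("cluster-id");

    // Register a callback that runs when the future completes (here, immediately).
    done.whenComplete((value, error) -> {
      if (error != null) {
        System.err.println("failed: " + error);
      } else {
        System.out.println("completed with: " + value);
      }
    });

    // Blocking retrieval, as used by most of the examples below.
    System.out.println(done.get());
  }
}
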
Example #1
Source File: RetentionByTopicFunctionTest.java    From data-highway with Apache License 2.0
@Test
public void typical() throws Exception {
  String topic = "topic";
  Collection<String> topics = singleton(topic);
  ConfigResource configResource = new ConfigResource(TOPIC, topic);
  Config config = new Config(singleton(new ConfigEntry("retention.ms", "1")));
  KafkaFuture<Map<ConfigResource, Config>> kafkaFuture = completedFuture(singletonMap(configResource, config));

  doReturn(describeConfigsResult).when(adminClient).describeConfigs(any());
  doReturn(kafkaFuture).when(describeConfigsResult).all();

  Map<String, Duration> result = underTest.apply(topics);

  assertThat(result.size(), is(1));
  Duration retention = result.get(topic);
  assertThat(retention, is(Duration.ofMillis(1)));
}
 
Example #2
Source File: ReplicaByPartitionFunctionTest.java    From data-highway with Apache License 2.0
@Test
public void typical() throws Exception {
  ReplicaInfo ri = new ReplicaInfo(42L, 0L, false);
  TopicPartition topicPartition = new TopicPartition("topic", 0);
  Map<TopicPartition, ReplicaInfo> replicaInfos = Collections.singletonMap(topicPartition, ri);
  LogDirInfo ldi = new LogDirInfo(null, replicaInfos);
  Map<String, LogDirInfo> ldis = Collections.singletonMap("logDir", ldi);
  KafkaFuture<Map<String, LogDirInfo>> kafkaFuture = KafkaFuture.completedFuture(ldis);
  Map<Integer, KafkaFuture<Map<String, LogDirInfo>>> values = Collections.singletonMap(brokerId, kafkaFuture);

  doReturn(describeLogDirsResult).when(adminClient).describeLogDirs(brokerIds);
  doReturn(values).when(describeLogDirsResult).values();

  Map<TopicPartition, Replica> result = underTest.apply(brokerId);

  assertThat(result.size(), is(1));
  Replica replica = new Replica("logDir", 42L);
  assertThat(result.get(topicPartition), is(replica));
}
 
Example #3
Source File: ExecutorAdminUtils.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Execute intra-broker replica movement tasks by sending an alterReplicaLogDirs request.
 *
 * @param tasksToExecute The tasks to execute.
 * @param adminClient The adminClient used to send the alterReplicaLogDirs request.
 * @param executionTaskManager The task manager that does bookkeeping for task execution state.
 * @param config The config object that holds all the Cruise Control related configs
 */
static void executeIntraBrokerReplicaMovements(List<ExecutionTask> tasksToExecute,
                                               AdminClient adminClient,
                                               ExecutionTaskManager executionTaskManager,
                                               KafkaCruiseControlConfig config) {
  Map<TopicPartitionReplica, String> replicaAssignment = new HashMap<>(tasksToExecute.size());
  Map<TopicPartitionReplica, ExecutionTask> replicaToTask = new HashMap<>(tasksToExecute.size());
  tasksToExecute.forEach(t -> {
    TopicPartitionReplica tpr = new TopicPartitionReplica(t.proposal().topic(), t.proposal().partitionId(), t.brokerId());
    replicaAssignment.put(tpr, t.proposal().replicasToMoveBetweenDisksByBroker().get(t.brokerId()).logdir());
    replicaToTask.put(tpr, t);
  });
  for (Map.Entry<TopicPartitionReplica, KafkaFuture<Void>> entry: adminClient.alterReplicaLogDirs(replicaAssignment).values().entrySet()) {
    try {
      entry.getValue().get(config.getLong(LOGDIR_RESPONSE_TIMEOUT_MS_CONFIG), TimeUnit.MILLISECONDS);
    } catch (InterruptedException | ExecutionException | TimeoutException |
             LogDirNotFoundException | KafkaStorageException | ReplicaNotAvailableException e) {
      LOG.warn("Encounter exception {} when trying to execute task {}, mark task dead.", e.getMessage(), replicaToTask.get(entry.getKey()));
      executionTaskManager.markTaskAborting(replicaToTask.get(entry.getKey()));
      executionTaskManager.markTaskDead(replicaToTask.get(entry.getKey()));
    }
  }
}
 
Example #4
Source File: KafkaImpl.java    From strimzi-kafka-operator with Apache License 2.0
private <T> Future<T> mapFuture(KafkaFuture<T> future) {
    Promise<T> handler = Promise.promise();
    try {
        future.whenComplete((result, error) -> {
            vertx.runOnContext(ignored -> {
                if (error != null) {
                    handler.fail(error);
                } else {
                    handler.complete(result);
                }
            });
        });
    } catch (Exception e) {
        handler.fail(e);
    }
    return handler.future();
}
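
The same adapter shape works outside Vert.x. As a sketch (not part of the Strimzi source), here is the equivalent bridge to java.util.concurrent.CompletableFuture; note that newer Kafka clients (3.0+) also provide KafkaFuture#toCompletionStage(), which makes a hand-rolled bridge unnecessary there.

// Illustrative helper: adapt a KafkaFuture<T> to a CompletableFuture<T>.
private static <T> CompletableFuture<T> toCompletableFuture(KafkaFuture<T> future) {
    CompletableFuture<T> result = new CompletableFuture<>();
    future.whenComplete((value, error) -> {
        if (error != null) {
            result.completeExceptionally(error);
        } else {
            result.complete(value);
        }
    });
    return result;
}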
 
Example #5
Source File: TopicServiceImpl.java    From kafka-helmsman with MIT License
/**
 * Transform a TopicDescription instance into a ConfiguredTopic instance.
 *
 * @param td  an instance of TopicDescription
 * @param ktc a topic config future
 * @return an instance of ConfiguredTopic
 */
static ConfiguredTopic configuredTopic(TopicDescription td, KafkaFuture<Config> ktc) {
  int partitions = td.partitions().size();
  short replication = (short) td.partitions().iterator().next().replicas().size();
  try {
    Config tc = ktc.get();
    Map<String, String> configMap = tc
        .entries()
        .stream()
        .filter(TopicServiceImpl::isNonDefault)
        .collect(toMap(ConfigEntry::name, ConfigEntry::value));
    return new ConfiguredTopic(td.name(), partitions, replication, configMap);
  } catch (InterruptedException | ExecutionException e) {
    // TODO: FA-10109: Improve exception handling
    throw new RuntimeException(e);
  }
}
 
Example #6
Source File: TopicServiceImplTest.java    From kafka-helmsman with MIT License
@Test
public void testCreate() {
  TopicService service = new TopicServiceImpl(adminClient, true);
  CreateTopicsResult createTopicsResult = mock(CreateTopicsResult.class);
  when(createTopicsResult.all()).thenReturn(KafkaFuture.completedFuture(null));
  when(adminClient.createTopics(any(Collection.class),
      any(CreateTopicsOptions.class))).thenReturn(createTopicsResult);

  service.create(Collections.singletonList(
      new ConfiguredTopic("test", 1, (short) 2, Collections.emptyMap())));

  ArgumentCaptor<List> newTopics = ArgumentCaptor.forClass(List.class);
  ArgumentCaptor<CreateTopicsOptions> options = ArgumentCaptor.forClass(CreateTopicsOptions.class);
  verify(adminClient).createTopics((Collection<NewTopic>) newTopics.capture(), options.capture());
  Assert.assertEquals(1, newTopics.getValue().size());
  Assert.assertEquals("test", ((NewTopic) newTopics.getValue().get(0)).name());
  Assert.assertEquals(2, ((NewTopic) newTopics.getValue().get(0)).replicationFactor());
  Assert.assertTrue(options.getValue().shouldValidateOnly());
}
 
Example #7
Source File: TopicServiceImpl.java    From kafka-helmsman with MIT License
@Override
public Map<String, ConfiguredTopic> listExisting(boolean excludeInternal) {
  try {
    Set<String> topics = adminClient
        .listTopics(excludeInternal ? EXCLUDE_INTERNAL : INCLUDE_INTERNAL)
        .names().get();
    Collection<TopicDescription> topicDescriptions = adminClient.describeTopics(topics).all().get().values();

    List<ConfigResource> resources = topics
        .stream()
        .map(t -> new ConfigResource(Type.TOPIC, t))
        .collect(toList());

    Map<ConfigResource, KafkaFuture<Config>> topicConfigs = adminClient.describeConfigs(resources).values();

    return topicDescriptions
        .stream()
        .map(td -> configuredTopic(td, topicConfigs.get(new ConfigResource(Type.TOPIC, td.name()))))
        .filter(t -> !INTERNAL_TOPIC.test(t))
        .collect(toMap(ConfiguredTopic::getName, td -> td));

  } catch (InterruptedException | ExecutionException e) {
    // TODO: FA-10109: Improve exception handling
    throw new RuntimeException(e);
  }
}
 
Example #8
Source File: KafkaRangerTopicCreationTest.java    From ranger with Apache License 2.0
@Test
public void testCreateTopic() throws Exception {
    final String topic = "test";
    Properties properties = new Properties();
    properties.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:" + port);
    properties.put("client.id", "test-consumer-id");
    properties.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
    AdminClient client = KafkaAdminClient.create(properties);
    CreateTopicsResult result = client.createTopics(Arrays.asList(new NewTopic(topic, 1, (short) 1)));
    result.values().get(topic).get();
    for (Map.Entry<String, KafkaFuture<Void>> entry : result.values().entrySet()) {
        System.out.println("Create Topic : " + entry.getKey() + " " +
                "isCancelled : " + entry.getValue().isCancelled() + " " +
                "isCompletedExceptionally : " + entry.getValue().isCompletedExceptionally() + " " +
                "isDone : " + entry.getValue().isDone());
    }
}
 
Example #9
Source File: TopicServiceImplTest.java    From kafka-helmsman with MIT License
@Test
public void testIncreasePartitions() {
  TopicService service = new TopicServiceImpl(adminClient, true);
  CreatePartitionsResult result = mock(CreatePartitionsResult.class);
  when(result.all()).thenReturn(KafkaFuture.completedFuture(null));
  when(adminClient.createPartitions(any(Map.class), any(CreatePartitionsOptions.class))).thenReturn(result);

  service.increasePartitions(Collections.singletonList(
      new ConfiguredTopic("test", 3, (short) 2, Collections.emptyMap())));

  ArgumentCaptor<Map> increase = ArgumentCaptor.forClass(Map.class);
  ArgumentCaptor<CreatePartitionsOptions> options = ArgumentCaptor.forClass(CreatePartitionsOptions.class);
  verify(adminClient).createPartitions((Map<String, NewPartitions>) increase.capture(), options.capture());
  Assert.assertEquals(1, increase.getValue().size());
  Assert.assertTrue(increase.getValue().containsKey("test"));
  Assert.assertEquals(3, ((NewPartitions) increase.getValue().get("test")).totalCount());
  Assert.assertTrue(options.getValue().validateOnly());
}
 
Example #10
Source File: TopicServiceImplTest.java    From kafka-helmsman with MIT License
@Test
public void testAlterConfiguration() {
  TopicService service = new TopicServiceImpl(adminClient, true);
  AlterConfigsResult result = mock(AlterConfigsResult.class);
  when(result.all()).thenReturn(KafkaFuture.completedFuture(null));
  when(adminClient.alterConfigs(any(Map.class), any(AlterConfigsOptions.class))).thenReturn(result);

  service.alterConfiguration(Collections.singletonList(
      new ConfiguredTopic("test", 3, (short) 2, Collections.singletonMap("k", "v"))));

  ArgumentCaptor<Map> alter = ArgumentCaptor.forClass(Map.class);
  ArgumentCaptor<AlterConfigsOptions> options = ArgumentCaptor.forClass(AlterConfigsOptions.class);
  verify(adminClient).alterConfigs((Map<ConfigResource, Config>) alter.capture(), options.capture());
  Assert.assertEquals(1, alter.getValue().size());
  ConfigResource expectedKey = new ConfigResource(TOPIC, "test");
  Assert.assertTrue(alter.getValue().containsKey(expectedKey));
  Assert.assertEquals("v", ((Config) alter.getValue().get(expectedKey)).get("k").value());
  Assert.assertTrue(options.getValue().shouldValidateOnly());
}
 
Example #11
Source File: KafkaStorage.java    From zipkin-storage-kafka with Apache License 2.0
@Override public CheckResult check() {
  try {
    KafkaFuture<String> maybeClusterId = getAdminClient().describeCluster().clusterId();
    maybeClusterId.get(1, TimeUnit.SECONDS);
    KafkaStreams.State state = getAggregationStream().state();
    if (!state.isRunning()) {
      return CheckResult.failed(
          new IllegalStateException("Aggregation stream not running. " + state));
    }
    KafkaStreams.State traceStateStore = getTraceStorageStream().state();
    if (!traceStateStore.isRunning()) {
      return CheckResult.failed(
          new IllegalStateException("Store stream not running. " + traceStateStore));
    }
    KafkaStreams.State dependencyStateStore = getDependencyStorageStream().state();
    if (!dependencyStateStore.isRunning()) {
      return CheckResult.failed(
          new IllegalStateException("Store stream not running. " + dependencyStateStore));
    }
    return CheckResult.OK;
  } catch (Exception e) {
    return CheckResult.failed(e);
  }
}
 
Example #12
Source File: KafkaTopicClientImpl.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
private static <T> T executeWithRetries(final Supplier<KafkaFuture<T>> supplier)
    throws Exception {

  int retries = 0;
  Exception lastException = null;
  while (retries < NUM_RETRIES) {
    try {
      if (retries != 0) {
        Thread.sleep(RETRY_BACKOFF_MS);
      }
      return supplier.get().get();
    } catch (ExecutionException e) {
      if (e.getCause() instanceof RetriableException) {
        retries++;
        log.info("Retrying admin request due to retriable exception. Retry no: " + retries, e);
        lastException = e;
      } else if (e.getCause() instanceof Exception) {
        throw (Exception) e.getCause();
      } else {
        throw e;
      }
    }
  }
  throw lastException;
}
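
A call site for this retry helper might look like the following sketch (hypothetical usage: adminClient and the topic name are assumed to be in scope, and the checked Exception still has to be handled by the caller).

// Hypothetical call site: describe a topic, retrying on RetriableException.
TopicDescription description = executeWithRetries(
    () -> adminClient.describeTopics(Collections.singleton("my-topic"))
        .values()
        .get("my-topic"));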
 
Example #13
Source File: ConsumeService.java    From kafka-monitor with Apache License 2.0
@Override
public synchronized void start() {
  if (_running.compareAndSet(false, true)) {
    _consumeThread.start();
    LOG.info("{}/ConsumeService started.", _name);

    Sensor topicPartitionCount = metrics.sensor("topic-partitions");
    DescribeTopicsResult describeTopicsResult = _adminClient.describeTopics(Collections.singleton(_topic));
    Map<String, KafkaFuture<TopicDescription>> topicResultValues = describeTopicsResult.values();
    KafkaFuture<TopicDescription> topicDescriptionKafkaFuture = topicResultValues.get(_topic);
    TopicDescription topicDescription = null;
    try {
      topicDescription = topicDescriptionKafkaFuture.get();
    } catch (InterruptedException | ExecutionException e) {
      LOG.error("Exception occurred while getting the topicDescriptionKafkaFuture for topic: {}", _topic, e);
    }
    @SuppressWarnings("ConstantConditions")
    double partitionCount = topicDescription.partitions().size();
    topicPartitionCount.add(
        new MetricName("topic-partitions-count", METRIC_GROUP_NAME, "The total number of partitions for the topic.", tags), new Total(partitionCount));
  }
}
 
Example #14
Source File: KafkaTopicClientImpl.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
@Override
public void deleteTopics(final List<String> topicsToDelete) {
  if (!isDeleteTopicEnabled) {
    log.info("Cannot delete topics since 'delete.topic.enable' is false. ");
    return;
  }
  final DeleteTopicsResult deleteTopicsResult = adminClient.deleteTopics(topicsToDelete);
  final Map<String, KafkaFuture<Void>> results = deleteTopicsResult.values();
  List<String> failList = Lists.newArrayList();

  for (final Map.Entry<String, KafkaFuture<Void>> entry : results.entrySet()) {
    try {
      entry.getValue().get(30, TimeUnit.SECONDS);
    } catch (Exception e) {
      failList.add(entry.getKey());
    }
  }
  if (!failList.isEmpty()) {
    throw new KsqlException("Failed to clean up topics: " + failList.stream()
        .collect(Collectors.joining(",")));
  }
}
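
A bounded get(timeout, unit) like the one above pairs naturally with cancellation, since KafkaFuture implements java.util.concurrent.Future. The sketch below is illustrative and not part of the ksql source; the topic name is made up.

// Sketch: bound a single deletion future and cancel it on timeout.
KafkaFuture<Void> pending = adminClient.deleteTopics(Collections.singletonList("old-topic"))
    .values()
    .get("old-topic");
try {
  pending.get(30, TimeUnit.SECONDS);
} catch (TimeoutException e) {
  pending.cancel(true); // best effort; the broker may still complete the deletion
} catch (InterruptedException | ExecutionException e) {
  throw new RuntimeException("Failed to delete topic", e);
}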
 
Example #15
Source File: ExecutorAdminUtils.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Fetch the logdir information for the subject replicas in intra-broker replica movement tasks.
 *
 * @param tasks The tasks to check.
 * @param adminClient The adminClient used to send the describeReplicaLogDirs request.
 * @param config The config object that holds all the Cruise Control related configs
 * @return Replica logdir information by task.
 */
static Map<ExecutionTask, ReplicaLogDirInfo> getLogdirInfoForExecutionTask(Collection<ExecutionTask> tasks,
                                                                           AdminClient adminClient,
                                                                           KafkaCruiseControlConfig config) {
  Set<TopicPartitionReplica> replicasToCheck = new HashSet<>(tasks.size());
  Map<ExecutionTask, ReplicaLogDirInfo> logdirInfoByTask = new HashMap<>(tasks.size());
  Map<TopicPartitionReplica, ExecutionTask> taskByReplica = new HashMap<>(tasks.size());
  tasks.forEach(t -> {
    TopicPartitionReplica tpr = new TopicPartitionReplica(t.proposal().topic(), t.proposal().partitionId(), t.brokerId());
    replicasToCheck.add(tpr);
    taskByReplica.put(tpr, t);
  });
  Map<TopicPartitionReplica, KafkaFuture<ReplicaLogDirInfo>> logDirsByReplicas = adminClient.describeReplicaLogDirs(replicasToCheck).values();
  for (Map.Entry<TopicPartitionReplica, KafkaFuture<ReplicaLogDirInfo>> entry : logDirsByReplicas.entrySet()) {
    try {
      ReplicaLogDirInfo info = entry.getValue().get(config.getLong(LOGDIR_RESPONSE_TIMEOUT_MS_CONFIG), TimeUnit.MILLISECONDS);
      logdirInfoByTask.put(taskByReplica.get(entry.getKey()), info);
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
      LOG.warn("Encounter exception {} when fetching logdir information for replica {}", e.getMessage(), entry.getKey());
    }
  }
  return logdirInfoByTask;
}
 
Example #16
Source File: ExecutorAdminUtils.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Check whether there is ongoing intra-broker replica movement.
 * @param brokersToCheck List of brokers to check.
 * @param adminClient The adminClient used to send the describeLogDirs request.
 * @param config The config object that holds all the Cruise Control related configs
 * @return True if there is ongoing intra-broker replica movement.
 */
static boolean hasOngoingIntraBrokerReplicaMovement(Collection<Integer> brokersToCheck, AdminClient adminClient,
                                                    KafkaCruiseControlConfig config)
    throws InterruptedException, ExecutionException, TimeoutException {
  Map<Integer, KafkaFuture<Map<String, LogDirInfo>>> logDirsByBrokerId = adminClient.describeLogDirs(brokersToCheck).values();
  for (Map.Entry<Integer, KafkaFuture<Map<String, LogDirInfo>>> entry : logDirsByBrokerId.entrySet()) {
    Map<String, LogDirInfo> logInfos = entry.getValue().get(config.getLong(LOGDIR_RESPONSE_TIMEOUT_MS_CONFIG), TimeUnit.MILLISECONDS);
    for (LogDirInfo info : logInfos.values()) {
      if (info.error == Errors.NONE) {
        if (info.replicaInfos.values().stream().anyMatch(i -> i.isFuture)) {
          return true;
        }
      }
    }
  }
  return false;
}
 
Example #17
Source File: TopicReplicationFactorAnomalyFinder.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Retrieve topic minISR config information if it is not cached locally.
 * @param topicsToCheck Set of topics to check.
 */
private void maybeRetrieveAndCacheTopicMinISR(Set<String> topicsToCheck) {
  Set<ConfigResource> topicResourcesToCheck = new HashSet<>(topicsToCheck.size());
  topicsToCheck.stream().filter(t -> !_cachedTopicMinISR.containsKey(t))
                        .forEach(t -> topicResourcesToCheck.add(new ConfigResource(ConfigResource.Type.TOPIC, t)));
  if (topicResourcesToCheck.isEmpty()) {
    return;
  }
  for (Map.Entry<ConfigResource, KafkaFuture<Config>> entry : _adminClient.describeConfigs(topicResourcesToCheck).values().entrySet()) {
    try {
      short topicMinISR = Short.parseShort(entry.getValue().get(DESCRIBE_TOPIC_CONFIG_TIMEOUT_MS, TimeUnit.MILLISECONDS)
                                                .get(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG).value());
      _cachedTopicMinISR.put(entry.getKey().name(), new TopicMinISREntry(topicMinISR, System.currentTimeMillis()));
    } catch (TimeoutException | InterruptedException | ExecutionException e) {
      LOG.warn("Skip attempt to fix replication factor of topic {} due to unable to retrieve its minISR config.",
               entry.getKey().name());
    }
  }
}
 
Example #18
Source File: SamplingUtilsTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testMaybeIncreasePartitionCount() throws InterruptedException, ExecutionException, TimeoutException {
  AdminClient adminClient = EasyMock.createMock(AdminClient.class);
  NewTopic topicToAddPartitions = SamplingUtils.wrapTopic(MOCK_TOPIC, MOCK_DESIRED_PARTITION_COUNT,
                                                          MOCK_REPLICATION_FACTOR, MOCK_DESIRED_RETENTION_MS);
  DescribeTopicsResult describeTopicsResult = EasyMock.createMock(DescribeTopicsResult.class);
  KafkaFuture<TopicDescription> topicDescriptionFuture = EasyMock.createMock(KafkaFuture.class);
  TopicDescription topicDescription = EasyMock.createMock(TopicDescription.class);
  Map<String, KafkaFuture<TopicDescription>> describeTopicsValues = Collections.singletonMap(MOCK_TOPIC, topicDescriptionFuture);
  Map<String, KafkaFuture<Void>> createPartitionsValues = Collections.singletonMap(MOCK_TOPIC, EasyMock.createMock(KafkaFuture.class));
  CreatePartitionsResult createPartitionsResult = EasyMock.createMock(CreatePartitionsResult.class);

  EasyMock.expect(adminClient.describeTopics(Collections.singletonList(MOCK_TOPIC))).andReturn(describeTopicsResult);
  EasyMock.expect(describeTopicsResult.values()).andReturn(describeTopicsValues);
  EasyMock.expect(topicDescriptionFuture.get(CLIENT_REQUEST_TIMEOUT_MS, TimeUnit.MILLISECONDS)).andReturn(topicDescription);
  EasyMock.expect(topicDescription.partitions()).andReturn(MOCK_PARTITIONS);
  EasyMock.expect(adminClient.createPartitions(Collections.singletonMap(MOCK_TOPIC, EasyMock.anyObject())))
          .andReturn(createPartitionsResult);
  EasyMock.expect(createPartitionsResult.values()).andReturn(createPartitionsValues);

  EasyMock.replay(adminClient, describeTopicsResult, topicDescriptionFuture, topicDescription, createPartitionsResult);
  boolean increasePartitionCount = SamplingUtils.maybeIncreasePartitionCount(adminClient, topicToAddPartitions);

  EasyMock.verify(adminClient, describeTopicsResult, topicDescriptionFuture, topicDescription, createPartitionsResult);
  assertTrue(increasePartitionCount);
}
 
Example #19
Source File: CreateTopics.java    From phoebus with Eclipse Public License 1.0
/**
 * <p> Discover the currently existing Kafka topics, then determine which of the requested topics still need to be created.
 * @param client the {@link AdminClient} used to list the existing topics
 * @param topics_to_discover topic names to check for
 * @return topics_to_create <code>List</code> of <code>String</code>s with all the topic names that need to be created.
 *                           Returns an empty <code>List</code> if none need to be created.
 */
private static List<String> discoverTopics(final AdminClient client, final List<String> topics_to_discover)
{
    final List<String> topics_to_create = new ArrayList<>();

    // Discover what topics currently exist.
    try
    {
        final ListTopicsResult res = client.listTopics();
        final KafkaFuture<Set<String>> topics = res.names();
        final Set<String> topic_names = topics.get();

        for (String topic : topics_to_discover)
        {
            if ( ! topic_names.contains(topic))
                topics_to_create.add(topic);
        }
    }
    catch (Exception ex)
    {
        logger.log(Level.WARNING, "Unable to list topics. Automatic topic detection failed.", ex);
    }

    return topics_to_create;
}
 
Example #20
Source File: CreateTopics.java    From phoebus with Eclipse Public License 1.0
/** Create a topic for each of the topics in the passed list.
 *  @param client {@link AdminClient}
 *  @param compact If the topics should be compacted.
 *  @param topics_to_create {@link List} of {@link String}s filled with the names of topics to create.
 */
private static void createTopics(final AdminClient client, final boolean compact, final List<String> topics_to_create)
{
    // Create the new topics locally.
    final List<NewTopic> new_topics = new ArrayList<>();
    for (String topic : topics_to_create)
    {
            logger.info("Creating topic '" + topic + "'");
            new_topics.add(createTopic(client, compact, topic));
    }
    // Create the new topics in the Kafka server.
    try
    {
        final CreateTopicsResult res = client.createTopics(new_topics);
        final KafkaFuture<Void> future = res.all();
        future.get();
    }
    catch (Exception ex)
    {
        logger.log(Level.WARNING, "Attempt to create topics failed", ex);
    }
}
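
Taken together, discoverTopics and createTopics support a discover-then-create flow. A sketch of a driver follows (hypothetical wiring; the topic names are illustrative and client is an AdminClient assumed to be in scope):

// Hypothetical driver: create only the topics that do not exist yet.
final List<String> wanted = Arrays.asList("Accelerator", "AcceleratorTalk");
final List<String> missing = discoverTopics(client, wanted);
if ( ! missing.isEmpty())
    createTopics(client, true, missing);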
 
Example #21
Source File: MultiClusterTopicManagementServiceTest.java    From kafka-monitor with Apache License 2.0
@BeforeMethod
private void startTest() {
  _createTopicsResult = Mockito.mock(CreateTopicsResult.class);
  _kafkaFutureMap = Mockito.mock(Map.class);
  _kafkaFuture = Mockito.mock(KafkaFuture.class);

  nodeSet = new LinkedHashSet<>();
  nodeSet.add(new Node(1, "host-1", 2132));
  nodeSet.add(new Node(2, "host-2", 2133));
  nodeSet.add(new Node(3, "host-3", 2134));
  nodeSet.add(new Node(4, "host-4", 2135));
  nodeSet.add(new Node(5, "host-5", 2136));
  nodeSet.add(new Node(6, "host-5", 2137));
  nodeSet.add(new Node(7, "host-5", 2138));
  nodeSet.add(new Node(8, "host-5", 2139));
  nodeSet.add(new Node(9, "host-5", 2140));
  nodeSet.add(new Node(10, "host-5", 2141));

  _topicManagementHelper = Mockito.mock(MultiClusterTopicManagementService.TopicManagementHelper.class);
  _topicManagementHelper._topic = SERVICE_TEST_TOPIC;
  _topicManagementHelper._adminClient = Mockito.mock(AdminClient.class);
  _topicManagementHelper._topicFactory = Mockito.mock(TopicFactory.class);
  _topicManagementHelper._topicCreationEnabled = true;
}
 
Example #22
Source File: KafkaBinderTests.java    From spring-cloud-stream-binder-kafka with Apache License 2.0
private int invokePartitionSize(String topic) throws Throwable {
	DescribeTopicsResult describeTopicsResult = adminClient
			.describeTopics(Collections.singletonList(topic));
	KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult.all();
	Map<String, TopicDescription> stringTopicDescriptionMap = all
			.get(DEFAULT_OPERATION_TIMEOUT, TimeUnit.SECONDS);
	TopicDescription topicDescription = stringTopicDescriptionMap.get(topic);
	return topicDescription.partitions().size();
}
 
Example #23
Source File: SimpleAclOperatorTest.java    From strimzi-kafka-operator with Apache License 2.0
private void mockDeleteAcls(Admin mockAdminClient, Collection<AclBinding> aclBindings, ArgumentCaptor<Collection<AclBindingFilter>> aclBindingFiltersCaptor)
        throws InterruptedException, ExecutionException {
    DeleteAclsResult result = mock(DeleteAclsResult.class);
    KafkaFuture<Collection<AclBinding>> future = mock(KafkaFuture.class);
    when(future.get()).thenReturn(aclBindings);
    when(result.all()).thenReturn(future);
    when(mockAdminClient.deleteAcls(aclBindingFiltersCaptor.capture())).thenReturn(result);
}
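
Mocking KafkaFuture itself works, but for a success path a real completed future is simpler and avoids stubbing get(). A one-line alternative (sketch):

// Alternative happy-path stub: no KafkaFuture mock required.
when(result.all()).thenReturn(KafkaFuture.completedFuture(aclBindings));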
 
Example #24
Source File: KafkaImpl.java    From strimzi-kafka-operator with Apache License 2.0
@SuppressWarnings("deprecation")
@Override
public Future<Void> updateTopicConfig(Topic topic) {
    Map<ConfigResource, Config> configs = TopicSerialization.toTopicConfig(topic);
    KafkaFuture<Void> future = adminClient.alterConfigs(configs).values().get(configs.keySet().iterator().next());
    return mapFuture(future);
}
 
Example #25
Source File: DefaultCollector.java    From paraflow with Apache License 2.0
@Override
public void createTopic(String topicName, int partitionsNum, short replicationFactor)
{
    NewTopic newTopic = new NewTopic(topicName, partitionsNum, replicationFactor);
    CreateTopicsResult result = kafkaAdminClient.createTopics(Collections.singletonList(newTopic));
    KafkaFuture future = result.values().get(topicName);
    try {
        future.get();
    }
    catch (InterruptedException | ExecutionException e) {
        e.printStackTrace();
    }
}
 
Example #26
Source File: LoadMonitorTest.java    From cruise-control with BSD 2-Clause "Simplified" License
private DescribeLogDirsResult getDescribeLogDirsResult() {
  try {
    // Reflectively set DescribeLogDirsResult's constructor from package private to public.
    Constructor<DescribeLogDirsResult> constructor = DescribeLogDirsResult.class.getDeclaredConstructor(Map.class);
    constructor.setAccessible(true);

    Map<Integer, KafkaFuture<Map<String, DescribeLogDirsResponse.LogDirInfo>>> futureByBroker = new HashMap<>();
    Map<String, DescribeLogDirsResponse.LogDirInfo> logdirInfoBylogdir =  new HashMap<>();
    Map<TopicPartition, DescribeLogDirsResponse.ReplicaInfo> replicaInfoByPartition = new HashMap<>();
    replicaInfoByPartition.put(T0P0, new DescribeLogDirsResponse.ReplicaInfo(0, 0, false));
    replicaInfoByPartition.put(T0P1, new DescribeLogDirsResponse.ReplicaInfo(0, 0, false));
    replicaInfoByPartition.put(T1P0, new DescribeLogDirsResponse.ReplicaInfo(0, 0, false));
    replicaInfoByPartition.put(T1P1, new DescribeLogDirsResponse.ReplicaInfo(0, 0, false));
    logdirInfoBylogdir.put("/tmp/kafka-logs", new DescribeLogDirsResponse.LogDirInfo(Errors.NONE, replicaInfoByPartition));
    futureByBroker.put(0, completedFuture(logdirInfoBylogdir));

    logdirInfoBylogdir =  new HashMap<>();
    replicaInfoByPartition = new HashMap<>();
    replicaInfoByPartition.put(T0P0, new DescribeLogDirsResponse.ReplicaInfo(0, 0, false));
    replicaInfoByPartition.put(T0P1, new DescribeLogDirsResponse.ReplicaInfo(0, 0, false));
    replicaInfoByPartition.put(T1P0, new DescribeLogDirsResponse.ReplicaInfo(0, 0, false));
    logdirInfoBylogdir.put("/tmp/kafka-logs-1", new DescribeLogDirsResponse.LogDirInfo(Errors.NONE, replicaInfoByPartition));
    logdirInfoBylogdir.put("/tmp/kafka-logs-2",
                           new DescribeLogDirsResponse.LogDirInfo(Errors.NONE,
                                                                  Collections.singletonMap(T1P1,
                                                                                           new DescribeLogDirsResponse.ReplicaInfo(0, 0, false))));
    futureByBroker.put(1, completedFuture(logdirInfoBylogdir));
    return constructor.newInstance(futureByBroker);
  } catch (InstantiationException | IllegalAccessException | InvocationTargetException | NoSuchMethodException e) {
    // Let it go.
  }
  return null;
}
 
Example #27
Source File: KafkaSender.java    From zipkin-reporter-java with Apache License 2.0
/** Ensures there are no problems reading metadata about the topic. */
@Override public CheckResult check() {
  try {
    KafkaFuture<String> maybeClusterId = getAdminClient().describeCluster().clusterId();
    maybeClusterId.get(1, TimeUnit.SECONDS);
    return CheckResult.OK;
  } catch (Exception e) {
    return CheckResult.failed(e);
  }
}
 
Example #28
Source File: KafkaImplTest.java    From strimzi-kafka-operator with Apache License 2.0
private void mockDescribeTopics(Admin admin, Map<String, Either<TopicDescription, Exception>> result) {
    DescribeTopicsResult describeTopicsResult = mock(DescribeTopicsResult.class);
    when(describeTopicsResult.values()).thenReturn(result.entrySet().stream().collect(toMap(
        entry1 -> entry1.getKey(),
        entry1 -> {
            KafkaFutureImpl<TopicDescription> kafkaFuture1 = new KafkaFutureImpl<>();
            if (entry1.getValue().isLeft()) {
                kafkaFuture1.complete(entry1.getValue().left());
            } else {
                kafkaFuture1.completeExceptionally(entry1.getValue().right());
            }
            return kafkaFuture1;
        })));
    Optional<Either<TopicDescription, Exception>> first = result.values().stream().filter(either -> !either.isLeft()).findFirst();
    if (first.isPresent()) {
        KafkaFutureImpl<Map<String, TopicDescription>> kafkaFuture = new KafkaFutureImpl<>();
        kafkaFuture.completeExceptionally(first.get().right());
        when(describeTopicsResult.all()).thenReturn(kafkaFuture);
    } else {
        when(describeTopicsResult.all()).thenReturn(KafkaFuture.completedFuture(
            result.entrySet().stream().collect(toMap(
                entry -> entry.getKey(),
                entry -> entry.getValue().left()))
        ));
    }
    when(admin.describeTopics(result.keySet())).thenReturn(describeTopicsResult);
}
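
One caveat worth noting: KafkaFutureImpl lives in org.apache.kafka.common.internals, so a test like this depends on internal, non-public API. It is used here because KafkaFuture exposes no public way to complete a future exceptionally; for success-only paths, KafkaFuture.completedFuture (as in the else branch) keeps the test on public API.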
 
Example #29
Source File: KafkaImpl.java    From strimzi-kafka-operator with Apache License 2.0
/**
 * Create a new topic via the Kafka AdminClient API, completing the returned
 * {@code Future} (on the Vert.x context) with the result.
 */
@Override
public Future<Void> createTopic(Topic topic) {
    try {
        NewTopic newTopic = TopicSerialization.toNewTopic(topic, null);
        LOGGER.debug("Creating topic {}", newTopic);
        KafkaFuture<Void> future = adminClient.createTopics(
                singleton(newTopic)).values().get(newTopic.name());
        return mapFuture(future);
    } catch (Exception e) {
        return Future.failedFuture(e);
    }
}
 
Example #30
Source File: SamplingUtilsTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testMaybeUpdateTopicConfig() throws InterruptedException, ExecutionException, TimeoutException {
  AdminClient adminClient = EasyMock.createMock(AdminClient.class);
  DescribeConfigsResult describeConfigsResult = EasyMock.createMock(DescribeConfigsResult.class);
  KafkaFuture<Config> describedConfigsFuture = EasyMock.createMock(KafkaFuture.class);
  Config topicConfig = EasyMock.createMock(Config.class);
  AlterConfigsResult alterConfigsResult = EasyMock.createMock(AlterConfigsResult.class);
  Set<AlterConfigOp> alterConfigOps = Collections.singleton(new AlterConfigOp(
      new ConfigEntry(RetentionMsProp(), Long.toString(MOCK_DESIRED_RETENTION_MS)), AlterConfigOp.OpType.SET));
  Map<ConfigResource, KafkaFuture<Config>> describeConfigsValues = Collections.singletonMap(MOCK_TOPIC_RESOURCE,
                                                                                            describedConfigsFuture);
  Map<ConfigResource, KafkaFuture<Void>> alterConfigsValues = Collections.singletonMap(MOCK_TOPIC_RESOURCE,
                                                                                       EasyMock.createMock(KafkaFuture.class));

  NewTopic topicToUpdateConfigs = SamplingUtils.wrapTopic(MOCK_TOPIC, MOCK_PARTITION_COUNT, MOCK_REPLICATION_FACTOR, MOCK_DESIRED_RETENTION_MS);
  EasyMock.expect(adminClient.describeConfigs(EasyMock.eq(Collections.singleton(MOCK_TOPIC_RESOURCE)))).andReturn(describeConfigsResult);
  EasyMock.expect(describeConfigsResult.values()).andReturn(describeConfigsValues);
  EasyMock.expect(describedConfigsFuture.get(CLIENT_REQUEST_TIMEOUT_MS, TimeUnit.MILLISECONDS)).andReturn(topicConfig);
  EasyMock.expect(topicConfig.get(EasyMock.eq(CleanupPolicyProp()))).andReturn(new ConfigEntry(CleanupPolicyProp(),
                                                                                               DEFAULT_CLEANUP_POLICY));
  EasyMock.expect(topicConfig.get(EasyMock.eq(RetentionMsProp()))).andReturn(new ConfigEntry(RetentionMsProp(),
                                                                                             MOCK_CURRENT_RETENTION_MS));
  EasyMock.expect(adminClient.incrementalAlterConfigs(EasyMock.eq(Collections.singletonMap(MOCK_TOPIC_RESOURCE,
                                                                                           alterConfigOps))))
          .andReturn(alterConfigsResult);
  EasyMock.expect(alterConfigsResult.values()).andReturn(alterConfigsValues);
  EasyMock.replay(adminClient, describeConfigsResult, describedConfigsFuture, topicConfig, alterConfigsResult);

  boolean updateTopicConfig = SamplingUtils.maybeUpdateTopicConfig(adminClient, topicToUpdateConfigs);
  EasyMock.verify(adminClient, describeConfigsResult, describedConfigsFuture, topicConfig, alterConfigsResult);
  assertTrue(updateTopicConfig);
}