Java Code Examples for org.apache.kafka.common.security.auth.SecurityProtocol

The following examples show how to use org.apache.kafka.common.security.auth.SecurityProtocol. They are extracted from open source projects; the source project, file, and license are noted above each example.
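
Before the project-specific examples, here is a minimal, self-contained sketch (not drawn from any of the projects below; the demo class name is invented for illustration) of the enum's basic operations: listing the defined protocols, selecting one as a client's security.protocol setting, and parsing one back from its string form.

import java.util.Properties;

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.common.security.auth.SecurityProtocol;

public class SecurityProtocolDemo {
    public static void main(String[] args) {
        // The enum defines four protocols: PLAINTEXT, SSL, SASL_PLAINTEXT and SASL_SSL;
        // each exposes a public name and a numeric id.
        for (SecurityProtocol protocol : SecurityProtocol.values()) {
            System.out.println(protocol.name + " (id=" + protocol.id + ")");
        }

        // The most common use: setting a Kafka client's security.protocol property.
        Properties props = new Properties();
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_SSL.name);

        // forName(...) parses a protocol name case-insensitively.
        SecurityProtocol parsed = SecurityProtocol.forName("sasl_ssl");
        System.out.println(parsed == SecurityProtocol.SASL_SSL); // prints true
    }
}
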
Example 1
Source Project: kcache    Source File: SASLClusterTestHarness.java    License: Apache License 2.0
@Override
protected KafkaConfig getKafkaConfig(int brokerId) {
    final Option<File> trustStoreFileOption = Option.apply(null);
    final Option<SecurityProtocol> saslInterBrokerSecurityProtocol =
        Option.apply(SecurityProtocol.SASL_PLAINTEXT);
    Properties props = TestUtils.createBrokerConfig(
        brokerId, zkConnect, false, false, TestUtils.RandomPort(), saslInterBrokerSecurityProtocol,
        trustStoreFileOption, EMPTY_SASL_PROPERTIES, false, true, TestUtils.RandomPort(),
        false, TestUtils.RandomPort(),
        false, TestUtils.RandomPort(), Option.<String>empty(), 1, false, 1, (short) 1);

    injectProperties(props);
    props.setProperty("zookeeper.connection.timeout.ms", "30000");
    props.setProperty("sasl.mechanism.inter.broker.protocol", "GSSAPI");
    props.setProperty(SaslConfigs.SASL_ENABLED_MECHANISMS, "GSSAPI");

    return KafkaConfig.fromProps(props);
}
 
Example 2
Source Project: kcache    Source File: CacheUtils.java    License: Apache License 2.0
/**
 * Get a new instance of an SSL KafkaCache and initialize it.
 */
public static Cache<String, String> createAndInitSSLKafkaCacheInstance(
    String bootstrapServers, Map<String, Object> sslConfigs, boolean requireSSLClientAuth)
    throws CacheInitializationException {
    Properties props = new Properties();

    props.put(KafkaCacheConfig.KAFKACACHE_SECURITY_PROTOCOL_CONFIG,
        SecurityProtocol.SSL.toString());
    props.put(KafkaCacheConfig.KAFKACACHE_SSL_TRUSTSTORE_LOCATION_CONFIG,
        sslConfigs.get(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG));
    props.put(KafkaCacheConfig.KAFKACACHE_SSL_TRUSTSTORE_PASSWORD_CONFIG,
        ((Password) sslConfigs.get(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG)).value());
    if (requireSSLClientAuth) {
        props.put(KafkaCacheConfig.KAFKACACHE_SSL_KEYSTORE_LOCATION_CONFIG,
            sslConfigs.get(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG));
        props.put(KafkaCacheConfig.KAFKACACHE_SSL_KEYSTORE_PASSWORD_CONFIG,
            ((Password) sslConfigs.get(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG)).value());
        props.put(KafkaCacheConfig.KAFKACACHE_SSL_KEY_PASSWORD_CONFIG,
            ((Password) sslConfigs.get(SslConfigs.SSL_KEY_PASSWORD_CONFIG)).value());
    }

    Cache<String, String> inMemoryCache = new InMemoryCache<>();
    return createAndInitKafkaCacheInstance(bootstrapServers, inMemoryCache, props);
}
 
Example 3
Source Project: samza    Source File: TestStreamProcessor.java    License: Apache License 2.0
private void initProducer(String bootstrapServer) {
  producer = TestUtils.createProducer(
      bootstrapServer,
      1,
      60 * 1000L,
      1024L * 1024L,
      0,
      0L,
      5 * 1000L,
      SecurityProtocol.PLAINTEXT,
      null,
      Option$.MODULE$.<Properties>apply(new Properties()),
      new StringSerializer(),
      new ByteArraySerializer(),
      Option$.MODULE$.<Properties>apply(new Properties()));
}
 
Example 4
Source Project: strimzi-kafka-operator    Source File: HttpBridgeTlsST.java    License: Apache License 2.0
@Test
void testSendSimpleMessageTls() throws Exception {
    String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
    // Create topic
    KafkaTopicResource.topic(CLUSTER_NAME, topicName).done();

    JsonObject records = BridgeUtils.generateHttpMessages(MESSAGE_COUNT);
    JsonObject response = BridgeUtils.sendMessagesHttpRequest(records, bridgeHost, bridgePort, topicName, client);
    KafkaBridgeUtils.checkSendResponse(response, MESSAGE_COUNT);

    BasicExternalKafkaClient basicExternalKafkaClient = new BasicExternalKafkaClient.Builder()
        .withTopicName(topicName)
        .withNamespaceName(NAMESPACE)
        .withClusterName(CLUSTER_NAME)
        .withMessageCount(MESSAGE_COUNT)
        .withSecurityProtocol(SecurityProtocol.SSL)
        .withKafkaUsername(USER_NAME)
        .withConsumerGroupName(CONSUMER_GROUP_NAME + "-" + rng.nextInt(Integer.MAX_VALUE))
        .build();

    assertThat(basicExternalKafkaClient.receiveMessagesTls(), is(MESSAGE_COUNT));
}
 
Example 5
protected void setSecurityConfigs(Properties clientProps, String certAlias) {
  SecurityProtocol protocol = securityProtocol();
  if (protocol == SecurityProtocol.SSL) {
    File trustStoreFile = trustStoreFile();
    if (trustStoreFile == null) {
      throw new AssertionError("ssl set but no trust store provided");
    }
    clientProps.setProperty(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, protocol.name);
    clientProps.setProperty(KafkaConfig.SslEndpointIdentificationAlgorithmProp(), "");
    try {
      clientProps.putAll(TestSslUtils.createSslConfig(true, true, Mode.CLIENT, trustStoreFile, certAlias));
    } catch (Exception e) {
      throw new IllegalStateException(e);
    }
  }
}
 
Example 6
Source Project: strimzi-kafka-operator    Source File: ConnectST.java    License: Apache License 2.0
void testConnectAuthorizationWithWeirdUserName(String userName, SecurityProtocol securityProtocol) {
    String topicName = TOPIC_NAME + rng.nextInt(Integer.MAX_VALUE);
    KafkaTopicResource.topic(CLUSTER_NAME, topicName).done();
    String connectorPodName = kubeClient().listPodsByPrefixInName(CLUSTER_NAME + "-connect").get(0).getMetadata().getName();

    KafkaConnectorResource.kafkaConnector(CLUSTER_NAME)
            .editSpec()
                .withClassName("org.apache.kafka.connect.file.FileStreamSinkConnector")
                .addToConfig("topics", topicName)
                .addToConfig("file", Constants.DEFAULT_SINK_FILE_PATH)
            .endSpec().done();

    BasicExternalKafkaClient basicExternalKafkaClient = new BasicExternalKafkaClient.Builder()
        .withNamespaceName(NAMESPACE)
        .withClusterName(CLUSTER_NAME)
        .withKafkaUsername(userName)
        .withMessageCount(MESSAGE_COUNT)
        .withSecurityProtocol(securityProtocol)
        .withTopicName(topicName)
        .build();

    assertThat(basicExternalKafkaClient.sendMessagesTls(), is(MESSAGE_COUNT));

    KafkaConnectUtils.waitForMessagesInKafkaConnectFileSink(connectorPodName, Constants.DEFAULT_SINK_FILE_PATH);
}
 
Example 7
@Test
void testSendSimpleMessageTlsScramSha() throws Exception {
    int messageCount = 50;
    String topicName = "topic-simple-send-" + new Random().nextInt(Integer.MAX_VALUE);
    // Create topic
    KafkaTopicResource.topic(CLUSTER_NAME, topicName).done();

    JsonObject records = BridgeUtils.generateHttpMessages(messageCount);
    JsonObject response = BridgeUtils.sendMessagesHttpRequest(records, bridgeHost, bridgePort, topicName, client);
    KafkaBridgeUtils.checkSendResponse(response, messageCount);

    BasicExternalKafkaClient kafkaClient = new BasicExternalKafkaClient.Builder()
        .withTopicName(topicName)
        .withNamespaceName(NAMESPACE)
        .withClusterName(CLUSTER_NAME)
        .withKafkaUsername(USER_NAME)
        .withMessageCount(messageCount)
        .withSecurityProtocol(SecurityProtocol.SASL_SSL)
        .build();

    assertThat(kafkaClient.receiveMessagesTls(), is(messageCount));
}
 
Example 8
Source Project: common-docker    Source File: ClusterStatusSASLTest.java    License: Apache License 2.0
@Test(timeout = 120000)
public void isKafkaReadyWithSASLAndSSL() throws Exception {
  Properties clientSecurityProps = kafka.getClientSecurityConfig();

  Map<String, String> config = Utils.propsToStringMap(clientSecurityProps);
  config.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG,
      kafka.getBootstrapBroker(SecurityProtocol.SASL_SSL));

  // Set the password and enabled protocols explicitly: Utils.propsToStringMap just calls
  // toString(), which does not produce valid values for these properties.
  Password trustStorePassword = (Password) clientSecurityProps.get("ssl.truststore.password");
  config.put("ssl.truststore.password", trustStorePassword.value());
  config.put("ssl.enabled.protocols", "TLSv1.2");

  assertThat(ClusterStatus.isKafkaReady(config, 3, 10000)).isTrue();
}
 
Example 9
Source Project: kafka-webview    Source File: KafkaClientConfigUtil.java    License: MIT License
/**
 * If SASL is configured for this cluster, apply the settings.
 * @param clusterConfig Cluster configuration definition to source values from.
 * @param config Config map to apply settings to.
 */
private void applySaslSettings(final ClusterConfig clusterConfig, final Map<String, Object> config) {
    // Nothing to do if SASL is not enabled for this cluster.
    if (!clusterConfig.isUseSasl()) {
        return;
    }

    if (clusterConfig.isUseSsl()) {
        // SASL over SSL
        config.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_SSL.name);

        // A keystore and keystore password are not required when using SASL+SSL.
        config.remove(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG);
        config.remove(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG);
    } else {
        // SASL over plaintext
        config.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
    }
    config.put(SaslConfigs.SASL_MECHANISM, clusterConfig.getSaslMechanism());
    config.put(SaslConfigs.SASL_JAAS_CONFIG, clusterConfig.getSaslJaas());
}
 
Example 10
Source Project: doctorkafka    Source File: KafkaAvroPublisher.java    License: Apache License 2.0
public KafkaAvroPublisher(String zkUrl, String topic, String statsProducerPropertiesFile) {
  this.destTopic = topic;
  Properties statsProducerProperties = new Properties();
  Map<String, Object> keyValueMap = new HashMap<>();
  try {
    if (statsProducerPropertiesFile != null) {
      statsProducerProperties.load(new FileInputStream(statsProducerPropertiesFile));
      for (String propertyName : statsProducerProperties.stringPropertyNames()) {
        keyValueMap.put(propertyName, statsProducerProperties.get(propertyName));
      }
    }
  } catch (IOException e) {
    LOG.error("Failed to load configuration file {}", statsProducerPropertiesFile, e);
  }
  // Set the security protocol based on the loaded properties, defaulting to PLAINTEXT.
  SecurityProtocol securityProtocol = SecurityProtocol.PLAINTEXT;
  if (keyValueMap.containsKey(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)) {
    String secStr = keyValueMap.get(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG).toString();
    securityProtocol = Enum.valueOf(SecurityProtocol.class, secStr);
  }
  Properties producerProperties = OperatorUtil.createKafkaProducerProperties(zkUrl, securityProtocol);
  for (Map.Entry<String, Object> entry: keyValueMap.entrySet()) {
    producerProperties.put(entry.getKey(), entry.getValue());
  }
  this.kafkaProducer = new KafkaProducer<>(producerProperties);
}
 
Example 11
Source Project: doctorkafka    Source File: OperatorUtil.java    License: Apache License 2.0
public static Properties createKafkaConsumerProperties(String zkUrl, String consumerGroupName,
    SecurityProtocol securityProtocol, Map<String, String> consumerConfigs) {
  String bootstrapBrokers = OperatorUtil.getBrokers(zkUrl, securityProtocol);
  Properties props = new Properties();
  props.put(KafkaUtils.BOOTSTRAP_SERVERS, bootstrapBrokers);
  props.put("group.id", consumerGroupName);
  props.put("enable.auto.commit", "true");
  props.put("auto.commit.interval.ms", "1000");
  props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
  props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");

  if (consumerConfigs != null) {
    for (Map.Entry<String, String> entry : consumerConfigs.entrySet()) {
      props.put(entry.getKey(), entry.getValue());
    }
  }
  return props;
}
 
Example 12
Source Project: doctorkafka    Source File: DoctorKafkaActionReporter.java    License: Apache License 2.0
public DoctorKafkaActionReporter(String zkUrl, SecurityProtocol securityProtocol,
    String topic, Map<String, String> producerConfigs) {
  this.topic = topic;
  String bootstrapBrokers = OperatorUtil.getBrokers(zkUrl, securityProtocol);
  Properties props = new Properties();
  props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapBrokers);
  props.put(ProducerConfig.ACKS_CONFIG, "1");
  props.put(ProducerConfig.RETRIES_CONFIG, 3);
  props.put(ProducerConfig.BATCH_SIZE_CONFIG, 1638400);
  props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
  props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "gzip");
  props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
  props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");

  for (Map.Entry<String, String> entry : producerConfigs.entrySet()) {
    props.put(entry.getKey(), entry.getValue());
  }
  this.kafkaProducer = new KafkaProducer<>(props);
}
 
Example 13
Source Project: doctorkafka    Source File: KafkaWriter.java    License: Apache License 2.0
public static void main(String[] args) throws Exception {
  CommandLine commandLine = parseCommandLine(args);
  String zkUrl = commandLine.getOptionValue(ZOOKEEPER);
  String topic = commandLine.getOptionValue(TOPIC);
  int numMessages = Integer.parseInt(commandLine.getOptionValue(NUM_MESSAGES));

  Random random = new Random();
  Properties props = OperatorUtil.createKafkaProducerProperties(zkUrl, SecurityProtocol.PLAINTEXT);
  KafkaProducer<byte[], byte[]> kafkaProducer = new KafkaProducer<>(props);

  byte[] key = new byte[16];
  byte[] data = new byte[1024];
  for (int i = 0; i < numMessages; i++) {
    for (int j = 0; j < data.length; j++) {
      data[j] = (byte)random.nextInt();
    }
    ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(
        topic, 0, System.currentTimeMillis(), key, data);
    Future<RecordMetadata> future = kafkaProducer.send(producerRecord);
    future.get();
    if (i % 100 == 0) {
      System.out.println("Have wrote " + i + " messages to kafka");
    }
  }
  kafkaProducer.close();
}
 
Example 14
/**
 * Returns the list of broker configs for all brokers created by this test
 * (as determined by clusterSize()).
 * @return list of broker configs, one config map per broker to be created
 */
protected List<Map<Object, Object>> buildBrokerConfigs() {
  List<Map<Object, Object>> configs = new ArrayList<>();
  for (int i = 0; i < clusterSize(); i++) {
    EmbeddedBrokerBuilder builder = new EmbeddedBrokerBuilder();
    builder.zkConnect(zookeeper());
    builder.nodeId(i);
    builder.enable(securityProtocol());
    if (securityProtocol() == SecurityProtocol.SSL) {
      if (trustStoreFile() != null) {
        builder.trustStore(trustStoreFile());
      }
    } else {
      if (trustStoreFile() != null) {
        throw new AssertionError("security protocol not set yet trust store file provided");
      }
    }
    Map<Object, Object> config = builder.buildConfig();
    config.putAll(overridingProps());
    configs.add(config);
  }
  return configs;
}
 
Example 15
Source Project: kareldb    Source File: ClusterTestHarness.java    License: Apache License 2.0
protected KafkaConfig getKafkaConfig(int brokerId) {
    final Option<java.io.File> noFile = scala.Option.apply(null);
    final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
    Properties props = TestUtils.createBrokerConfig(
        brokerId,
        zkConnect,
        false,
        false,
        TestUtils.RandomPort(),
        noInterBrokerSecurityProtocol,
        noFile,
        EMPTY_SASL_PROPERTIES,
        true,
        false,
        TestUtils.RandomPort(),
        false,
        TestUtils.RandomPort(),
        false,
        TestUtils.RandomPort(),
        Option.<String>empty(),
        1,
        false,
        1,
        (short) 1
    );
    injectProperties(props);
    return KafkaConfig.fromProps(props);
}
 
Example 16
@Test
void testBasicClientInParallel() {

    String clientId = "producer-plain-" + new Random().nextInt(Integer.MAX_VALUE);

    KafkaClientProperties properties = new KafkaClientProperties.KafkaClientPropertiesBuilder()
        .withKeySerializerConfig(StringSerializer.class)
        .withValueSerializerConfig(StringSerializer.class)
        .withClientIdConfig(clientId)
        .withSecurityProtocol(SecurityProtocol.PLAINTEXT)
        .withBootstrapServerConfig(STRIMZI_CONTAINER.getBootstrapServers())
        .withSharedProperties()
        .build();

    CompletableFuture<Integer> resultPromise = new CompletableFuture<>();
    IntPredicate msgCntPredicate = x -> x == MESSAGE_COUNT * 20;

    try (Producer plainProducer = new Producer(properties, resultPromise, msgCntPredicate, TOPIC_NAME, clientId)) {

        for (int i = 0; i < 10; i++) {
            plainProducer.setClientName("producer-plain-" + new Random().nextInt(Integer.MAX_VALUE));
            plainProducer.getVertx().deployVerticle(plainProducer);
        }

        plainProducer.getResultPromise().get(Duration.ofSeconds(30).toMillis(), TimeUnit.MILLISECONDS);
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        e.printStackTrace();
        throw new WaitException(e);
    }
}
 
Example 17
Source Project: kcache    Source File: ClusterTestHarness.java    License: Apache License 2.0
protected KafkaConfig getKafkaConfig(int brokerId) {
    final Option<java.io.File> noFile = scala.Option.apply(null);
    final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
    Properties props = TestUtils.createBrokerConfig(
        brokerId,
        zkConnect,
        false,
        false,
        TestUtils.RandomPort(),
        noInterBrokerSecurityProtocol,
        noFile,
        EMPTY_SASL_PROPERTIES,
        true,
        false,
        TestUtils.RandomPort(),
        false,
        TestUtils.RandomPort(),
        false,
        TestUtils.RandomPort(),
        Option.<String>empty(),
        1,
        false,
        1,
        (short) 1
    );
    injectProperties(props);
    return KafkaConfig.fromProps(props);
}
 
Example 18
/**
 * Parse AdminClient configs based on the given {@link CruiseControlMetricsReporterConfig configs}.
 *
 * @param adminClientConfigs Configs that will be returned with SSL configs added.
 * @param configs Configs to be used for parsing AdminClient SSL configs.
 * @return AdminClient configs.
 */
public static Properties addSslConfigs(Properties adminClientConfigs, CruiseControlMetricsReporterConfig configs) {
  // Add security protocol (if specified).
  try {
    String securityProtocol = configs.getString(AdminClientConfig.SECURITY_PROTOCOL_CONFIG);
    adminClientConfigs.put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, securityProtocol);
    setStringConfigIfExists(configs, adminClientConfigs, SaslConfigs.SASL_MECHANISM);
    setPasswordConfigIfExists(configs, adminClientConfigs, SaslConfigs.SASL_JAAS_CONFIG);

    // Configure SSL configs (if security protocol is SSL or SASL_SSL)
    if (securityProtocol.equals(SecurityProtocol.SSL.name) || securityProtocol.equals(SecurityProtocol.SASL_SSL.name)) {
      setStringConfigIfExists(configs, adminClientConfigs, SslConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG);
      setStringConfigIfExists(configs, adminClientConfigs, SslConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG);
      setStringConfigIfExists(configs, adminClientConfigs, SslConfigs.SSL_KEYSTORE_TYPE_CONFIG);
      setStringConfigIfExists(configs, adminClientConfigs, SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG);
      setStringConfigIfExists(configs, adminClientConfigs, SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG);
      setStringConfigIfExists(configs, adminClientConfigs, SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG);
      setStringConfigIfExists(configs, adminClientConfigs, SslConfigs.SSL_SECURE_RANDOM_IMPLEMENTATION_CONFIG);
      setPasswordConfigIfExists(configs, adminClientConfigs, SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG);
      setPasswordConfigIfExists(configs, adminClientConfigs, SslConfigs.SSL_KEY_PASSWORD_CONFIG);
      setPasswordConfigIfExists(configs, adminClientConfigs, SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG);
    }
  } catch (ConfigException ce) {
    // Security protocol not specified; return the configs unchanged.
  }

  return adminClientConfigs;
}
 
Example 19
@Test
void testSimplePlain() {

    String clientId = "producer-plain-" + new Random().nextInt(Integer.MAX_VALUE);

    KafkaClientProperties properties = new KafkaClientProperties.KafkaClientPropertiesBuilder()
        .withKeySerializerConfig(StringSerializer.class)
        .withValueSerializerConfig(StringSerializer.class)
        .withClientIdConfig(clientId)
        .withSecurityProtocol(SecurityProtocol.PLAINTEXT)
        .withBootstrapServerConfig(STRIMZI_CONTAINER.getBootstrapServers())
        .withSharedProperties()
        .build();

    CompletableFuture<Integer> resultPromise = new CompletableFuture<>();
    IntPredicate msgCntPredicate = x -> x == MESSAGE_COUNT;

    try (Producer plainProducer = new Producer(properties, resultPromise, msgCntPredicate, TOPIC_NAME, clientId)) {

        plainProducer.getVertx().deployVerticle(plainProducer);

        plainProducer.getResultPromise().get(Duration.ofSeconds(30).toMillis(), TimeUnit.MILLISECONDS);
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        e.printStackTrace();
        throw new WaitException(e);
    }
}
 
Example 20
/**
 * @param protocol Security protocol.
 * @return Address in host:port form, or null if no host is registered for the protocol.
 */
public String addr(SecurityProtocol protocol) {
  if (!_hosts.containsKey(protocol)) {
    return null;
  }
  return _hosts.get(protocol) + ":" + _ports.get(protocol);
}
 
Example 21
Source Project: common-docker    Source File: TopicEnsureTest.java    License: Apache License 2.0
@Before
public void setUp() throws IOException {
  kafka = new EmbeddedKafkaCluster(NUM_BROKERS, NUM_ZK);
  kafka.start();

  Properties adminClientProps = new Properties();
  adminClientProps.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG,
                       kafka.getBootstrapBroker(SecurityProtocol.PLAINTEXT));
  topicEnsure = new TopicEnsure(adminClientProps);
}
 
Example 22
Source Project: common-docker    Source File: EmbeddedKafkaCluster.java    License: Apache License 2.0
public Properties getClientSecurityConfig() {
  if (enableSASLSSL) {
    Properties clientSecurityProps = TestUtils.producerSecurityConfigs(
        SecurityProtocol.SASL_SSL,
        Option.apply(trustStoreFile),
        Option.apply(saslProperties)
    );

    return clientSecurityProps;
  } else {
    return new Properties();
  }
}
 
Example 23
Source Project: common-docker    Source File: ClusterStatusTest.java    License: Apache License 2.0
@Test(timeout = 120000)
public void isKafkaReady() throws Exception {

  Map<String, String> config = new HashMap<>();
  config.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG,
      kafka.getBootstrapBroker(SecurityProtocol.PLAINTEXT));
  assertThat(ClusterStatus.isKafkaReady(config, 3, 10000))
      .isTrue();
}
 
Example 24
Source Project: common-docker    Source File: ClusterStatusTest.java    License: Apache License 2.0
@Test(timeout = 120000)
public void isKafkaReadyFailWithLessBrokers() throws Exception {
  try {
    Map<String, String> config = new HashMap<>();
    config.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG,
        kafka.getBootstrapBroker(SecurityProtocol.PLAINTEXT));
    assertThat(ClusterStatus.isKafkaReady(config, 5, 10000))
        .isFalse();
  } catch (Exception e) {
    fail("Unexpected error. " + e.getMessage());
  }

}
 
Example 25
Source Project: doctorkafka    Source File: BrokerStatsReader.java    License: Apache License 2.0
public static void main(String[] args) throws Exception {
  CommandLine commandLine = parseCommandLine(args);
  String zkUrl = commandLine.getOptionValue(ZOOKEEPER);
  String statsTopic = commandLine.getOptionValue(STATS_TOPIC);

  String bootstrapBrokers = OperatorUtil.getBrokers(zkUrl, SecurityProtocol.PLAINTEXT);
  Properties props = new Properties();
  props.put(KafkaUtils.BOOTSTRAP_SERVERS, bootstrapBrokers);
  props.put("group.id", "broker_statsreader_group");
  props.put("enable.auto.commit", "false");
  props.put("auto.commit.interval.ms", "1000");
  props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
  props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");

  Schema schema = BrokerStats.getClassSchema();
  KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props);
  consumer.subscribe(Arrays.asList(statsTopic));
  while (true) {
    ConsumerRecords<byte[], byte[]> records = consumer.poll(100);
    for (ConsumerRecord<byte[], byte[]> record : records) {
      System.out.printf("offset = %d, key.size = %d, value.size = %d%n",
          record.offset(), record.key().length, record.value().length);
      try {
        BinaryDecoder binaryDecoder = avroDecoderFactory.binaryDecoder(record.value(), null);
        SpecificDatumReader<BrokerStats> reader = new SpecificDatumReader<>(schema);
        BrokerStats result = new BrokerStats();
        reader.read(result, binaryDecoder);
        System.out.println(result);
      } catch (Exception e) {
        LOG.error("Fail to decode an message", e);
      }
    }
  }
}
 
Example 26
Source Project: doctorkafka    Source File: OperatorUtil.java    License: Apache License 2.0
public static Properties createKafkaProducerProperties(String zkUrl, SecurityProtocol securityProtocol) {
  String bootstrapBrokers = OperatorUtil.getBrokers(zkUrl, securityProtocol);
  Properties props = new Properties();
  props.put(KafkaUtils.BOOTSTRAP_SERVERS, bootstrapBrokers);
  props.put(ProducerConfig.ACKS_CONFIG, "1");
  props.put(ProducerConfig.RETRIES_CONFIG, 0);
  props.put(ProducerConfig.BATCH_SIZE_CONFIG, 1638400);
  props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 3554432);
  props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "gzip");
  props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
  props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
  return props;
}
 
Example 27
Source Project: doctorkafka    Source File: KafkaUtils.java    License: Apache License 2.0
public static KafkaConsumer<byte[], byte[]> getKafkaConsumer(String zkUrl,
    SecurityProtocol securityProtocol,
    Map<String, String> consumerConfigs) {
  return getKafkaConsumer(zkUrl,
      "org.apache.kafka.common.serialization.ByteArrayDeserializer",
      "org.apache.kafka.common.serialization.ByteArrayDeserializer",
      DEFAULT_MAX_POOL_RECORDS, securityProtocol, consumerConfigs);
}
 
Example 28
Source Project: doctorkafka    Source File: ClusterLoadBalancer.java    License: Apache License 2.0
public static void main(String[] args) throws Exception {
  CommandLine commandLine = parseCommandLine(args);
  String configFilePath = commandLine.getOptionValue(CONFIG);
  String brokerStatsZk = commandLine.getOptionValue(BROKERSTATS_ZOOKEEPER);
  String brokerStatsTopic = commandLine.getOptionValue(BROKERSTATS_TOPIC);
  String clusterZk = commandLine.getOptionValue(CLUSTER_ZOOKEEPER);
  long seconds = Long.parseLong(commandLine.getOptionValue(SECONDS));
  boolean onlyOne = commandLine.hasOption(ONLY_ONE);

  ReplicaStatsManager replicaStatsManager = new ReplicaStatsManager(new DoctorKafkaConfig(configFilePath));
  replicaStatsManager.readPastReplicaStats(brokerStatsZk, SecurityProtocol.PLAINTEXT,
      brokerStatsTopic, seconds);
  Set<String> zkUrls = replicaStatsManager.getClusterZkUrls();
  if (!zkUrls.contains(clusterZk)) {
    LOG.error("Failed to find zkurl {} in configuration", clusterZk);
    return;
  }

  DoctorKafkaClusterConfig clusterConf =
      replicaStatsManager.getConfig().getClusterConfigByZkUrl(clusterZk);
  KafkaCluster kafkaCluster = replicaStatsManager.getClusters().get(clusterZk);
  KafkaClusterManager clusterManager = new KafkaClusterManager(
      clusterZk, kafkaCluster, clusterConf, replicaStatsManager.getConfig(), null, null, replicaStatsManager);

  List<KafkaBroker> highTrafficBrokers = clusterManager.getHighTrafficBroker();
  if (onlyOne && highTrafficBrokers.size() > 0) {
    KafkaBroker broker = highTrafficBrokers.get(0);
    highTrafficBrokers.clear();
    highTrafficBrokers.add(broker);
  }

  String assignPlan = clusterManager.getWorkloadBalancingPlanInJson(highTrafficBrokers);
  LOG.info("Reassignment Plan : {}", assignPlan);
}
 
Example 29
Source Project: doctorkafka    Source File: BrokerStatsFilter.java    License: Apache License 2.0
public static void main(String[] args) throws Exception {
  CommandLine commandLine = parseCommandLine(args);
  String brokerStatsZk = commandLine.getOptionValue(BROKERSTATS_ZOOKEEPER);
  String brokerStatsTopic = commandLine.getOptionValue(BROKERSTATS_TOPIC);
  String brokerName = commandLine.getOptionValue(BROKERNAME);
  Set<String> brokerNames = new HashSet<>();
  brokerNames.add(brokerName);

  KafkaConsumer<byte[], byte[]> kafkaConsumer = KafkaUtils.getKafkaConsumer(brokerStatsZk,
      "org.apache.kafka.common.serialization.ByteArrayDeserializer",
      "org.apache.kafka.common.serialization.ByteArrayDeserializer", 1,
      SecurityProtocol.PLAINTEXT,
      null);

  long startTimestampInMillis = System.currentTimeMillis() - 86400 * 1000L;
  Map<TopicPartition, Long> offsets = ReplicaStatsUtil.getProcessingStartOffsets(
      kafkaConsumer, brokerStatsTopic, startTimestampInMillis);
  kafkaConsumer.unsubscribe();
  kafkaConsumer.assign(offsets.keySet());
  Map<TopicPartition, Long> latestOffsets = kafkaConsumer.endOffsets(offsets.keySet());
  KafkaUtils.closeConsumer(brokerStatsZk);

  Map<Long, BrokerStats> brokerStatsMap = new TreeMap<>();
  for (TopicPartition topicPartition : offsets.keySet()) {
    LOG.info("Start processing {}", topicPartition);
    long startOffset = offsets.get(topicPartition);
    long endOffset = latestOffsets.get(topicPartition);

    List<BrokerStats> statsList = processOnePartition(brokerStatsZk, topicPartition,
        startOffset, endOffset, brokerNames);
    for (BrokerStats brokerStats : statsList) {
      brokerStatsMap.put(brokerStats.getTimestamp(), brokerStats);
    }
    LOG.info("Finished processing {}, retrieved {} records", topicPartition, statsList.size());
  }

  for (Map.Entry<Long, BrokerStats> entry: brokerStatsMap.entrySet()) {
    System.out.println(entry.getKey() + " : " + entry.getValue());
  }
}
 
Example 30
Source Project: strimzi-kafka-operator    Source File: KafkaST.java    License: Apache License 2.0
@Test
@Tag(ACCEPTANCE)
@Tag(LOADBALANCER_SUPPORTED)
@Tag(EXTERNAL_CLIENTS_USED)
void testLoadBalancerTls() {
    KafkaResource.kafkaEphemeral(CLUSTER_NAME, 3)
        .editSpec()
            .editKafka()
                .editListeners()
                    .withNewKafkaListenerExternalLoadBalancer()
                    .endKafkaListenerExternalLoadBalancer()
                .endListeners()
                .withConfig(singletonMap("default.replication.factor", 3))
            .endKafka()
        .endSpec()
        .done();

    KafkaUserResource.tlsUser(CLUSTER_NAME, USER_NAME).done();

    ServiceUtils.waitUntilAddressIsReachable(kubeClient().getService(KafkaResources.externalBootstrapServiceName(CLUSTER_NAME)).getStatus().getLoadBalancer().getIngress().get(0).getHostname());

    BasicExternalKafkaClient basicExternalKafkaClient = new BasicExternalKafkaClient.Builder()
            .withTopicName(TOPIC_NAME)
            .withNamespaceName(NAMESPACE)
            .withClusterName(CLUSTER_NAME)
            .withMessageCount(MESSAGE_COUNT)
            .withKafkaUsername(USER_NAME)
            .withSecurityProtocol(SecurityProtocol.SSL)
            .build();

    basicExternalKafkaClient.verifyProducedAndConsumedMessages(
        basicExternalKafkaClient.sendMessagesTls(),
        basicExternalKafkaClient.receiveMessagesTls()
    );
}