org.apache.kafka.common.security.auth.SecurityProtocol Java Examples
The following examples show how to use org.apache.kafka.common.security.auth.SecurityProtocol.
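As a primer before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the enum itself. It assumes the standard fields and helpers of this class: each constant exposes a public name string and numeric id, forName resolves a configuration string back to a constant, and the name is what clients pass as the security.protocol setting.

import java.util.Properties;

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.common.security.auth.SecurityProtocol;

public class SecurityProtocolPrimer {
    public static void main(String[] args) {
        // The enum defines four protocols: PLAINTEXT, SSL, SASL_PLAINTEXT and SASL_SSL.
        // Each constant exposes a public 'name' string and a numeric 'id'.
        for (SecurityProtocol protocol : SecurityProtocol.values()) {
            System.out.println(protocol.name + " (id=" + protocol.id + ")");
        }

        // forName(...) resolves a configuration string back to the enum constant.
        SecurityProtocol protocol = SecurityProtocol.forName("SASL_SSL");

        // The most common use: supplying the value of the 'security.protocol' client setting.
        Properties props = new Properties();
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, protocol.name);
    }
}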
Example #1
Source File: SASLClusterTestHarness.java From kcache with Apache License 2.0
@Override
protected KafkaConfig getKafkaConfig(int brokerId) {
    final Option<File> trustStoreFileOption = Option.apply(null);
    final Option<SecurityProtocol> saslInterBrokerSecurityProtocol =
        Option.apply(SecurityProtocol.SASL_PLAINTEXT);
    Properties props = TestUtils.createBrokerConfig(
        brokerId, zkConnect, false, false, TestUtils.RandomPort(),
        saslInterBrokerSecurityProtocol, trustStoreFileOption, EMPTY_SASL_PROPERTIES,
        false, true, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
        false, TestUtils.RandomPort(), Option.<String>empty(), 1, false, 1, (short) 1);
    injectProperties(props);
    props.setProperty("zookeeper.connection.timeout.ms", "30000");
    props.setProperty("sasl.mechanism.inter.broker.protocol", "GSSAPI");
    props.setProperty(SaslConfigs.SASL_ENABLED_MECHANISMS, "GSSAPI");
    return KafkaConfig.fromProps(props);
}
Example #2
Source File: AbstractKafkaIntegrationTestHarness.java From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
/**
 * Returns the list of broker configs for all brokers created by this test
 * (as determined by clusterSize()).
 * @return list of broker configs, one config map per broker to be created
 */
protected List<Map<Object, Object>> buildBrokerConfigs() {
    List<Map<Object, Object>> configs = new ArrayList<>();
    for (int i = 0; i < clusterSize(); i++) {
        EmbeddedBrokerBuilder builder = new EmbeddedBrokerBuilder();
        builder.zkConnect(zookeeper());
        builder.nodeId(i);
        builder.enable(securityProtocol());
        if (securityProtocol() == SecurityProtocol.SSL) {
            if (trustStoreFile() != null) {
                builder.trustStore(trustStoreFile());
            }
        } else {
            if (trustStoreFile() != null) {
                throw new AssertionError("security protocol not set yet trust store file provided");
            }
        }
        Map<Object, Object> config = builder.buildConfig();
        config.putAll(overridingProps());
        configs.add(config);
    }
    return configs;
}
Example #3
Source File: HttpBridgeTlsST.java From strimzi-kafka-operator with Apache License 2.0
@Test
void testSendSimpleMessageTls() throws Exception {
    String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
    // Create topic
    KafkaTopicResource.topic(CLUSTER_NAME, topicName).done();

    JsonObject records = BridgeUtils.generateHttpMessages(MESSAGE_COUNT);
    JsonObject response = BridgeUtils.sendMessagesHttpRequest(records, bridgeHost, bridgePort, topicName, client);
    KafkaBridgeUtils.checkSendResponse(response, MESSAGE_COUNT);

    BasicExternalKafkaClient basicExternalKafkaClient = new BasicExternalKafkaClient.Builder()
        .withTopicName(topicName)
        .withNamespaceName(NAMESPACE)
        .withClusterName(CLUSTER_NAME)
        .withMessageCount(MESSAGE_COUNT)
        .withSecurityProtocol(SecurityProtocol.SSL)
        .withKafkaUsername(USER_NAME)
        .withConsumerGroupName(CONSUMER_GROUP_NAME + "-" + rng.nextInt(Integer.MAX_VALUE))
        .build();

    assertThat(basicExternalKafkaClient.receiveMessagesTls(), is(MESSAGE_COUNT));
}
Example #4
Source File: CacheUtils.java From kcache with Apache License 2.0
/**
 * Get a new instance of an SSL KafkaCache and initialize it.
 */
public static Cache<String, String> createAndInitSSLKafkaCacheInstance(
    String bootstrapServers, Map<String, Object> sslConfigs, boolean requireSSLClientAuth)
    throws CacheInitializationException {
    Properties props = new Properties();
    props.put(KafkaCacheConfig.KAFKACACHE_SECURITY_PROTOCOL_CONFIG,
        SecurityProtocol.SSL.toString());
    props.put(KafkaCacheConfig.KAFKACACHE_SSL_TRUSTSTORE_LOCATION_CONFIG,
        sslConfigs.get(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG));
    props.put(KafkaCacheConfig.KAFKACACHE_SSL_TRUSTSTORE_PASSWORD_CONFIG,
        ((Password) sslConfigs.get(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG)).value());
    if (requireSSLClientAuth) {
        props.put(KafkaCacheConfig.KAFKACACHE_SSL_KEYSTORE_LOCATION_CONFIG,
            sslConfigs.get(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG));
        props.put(KafkaCacheConfig.KAFKACACHE_SSL_KEYSTORE_PASSWORD_CONFIG,
            ((Password) sslConfigs.get(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG)).value());
        props.put(KafkaCacheConfig.KAFKACACHE_SSL_KEY_PASSWORD_CONFIG,
            ((Password) sslConfigs.get(SslConfigs.SSL_KEY_PASSWORD_CONFIG)).value());
    }

    Cache<String, String> inMemoryCache = new InMemoryCache<>();
    return createAndInitKafkaCacheInstance(bootstrapServers, inMemoryCache, props);
}
Example #5
Source File: TestStreamProcessor.java From samza with Apache License 2.0
private void initProducer(String bootstrapServer) {
    producer = TestUtils.createProducer(
        bootstrapServer,
        1,
        60 * 1000L,
        1024L * 1024L,
        0,
        0L,
        5 * 1000L,
        SecurityProtocol.PLAINTEXT,
        null,
        Option$.MODULE$.<Properties>apply(new Properties()),
        new StringSerializer(),
        new ByteArraySerializer(),
        Option$.MODULE$.<Properties>apply(new Properties()));
}
Example #6
Source File: KafkaWriter.java From doctorkafka with Apache License 2.0
public static void main(String[] args) throws Exception {
    CommandLine commandLine = parseCommandLine(args);
    String zkUrl = commandLine.getOptionValue(ZOOKEEPER);
    String topic = commandLine.getOptionValue(TOPIC);
    int numMessages = Integer.parseInt(commandLine.getOptionValue(NUM_MESSAGES));
    Random random = new Random();

    Properties props = OperatorUtil.createKafkaProducerProperties(zkUrl, SecurityProtocol.PLAINTEXT);
    KafkaProducer<byte[], byte[]> kafkaProducer = new KafkaProducer<>(props);

    byte[] key = new byte[16];
    byte[] data = new byte[1024];
    for (int i = 0; i < numMessages; i++) {
        for (int j = 0; j < data.length; j++) {
            data[j] = (byte) random.nextInt();
        }
        ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(
            topic, 0, System.currentTimeMillis(), key, data);
        Future<RecordMetadata> future = kafkaProducer.send(producerRecord);
        future.get();
        if (i % 100 == 0) {
            System.out.println("Have written " + i + " messages to kafka");
        }
    }
    kafkaProducer.close();
}
Example #7
Source File: OperatorUtil.java From doctorkafka with Apache License 2.0
public static Properties createKafkaConsumerProperties(String zkUrl, String consumerGroupName,
    SecurityProtocol securityProtocol, Map<String, String> consumerConfigs) {
    String bootstrapBrokers = OperatorUtil.getBrokers(zkUrl, securityProtocol);
    Properties props = new Properties();
    props.put(KafkaUtils.BOOTSTRAP_SERVERS, bootstrapBrokers);
    props.put("group.id", consumerGroupName);
    props.put("enable.auto.commit", "true");
    props.put("auto.commit.interval.ms", "1000");
    props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
    props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
    if (consumerConfigs != null) {
        for (Map.Entry<String, String> entry : consumerConfigs.entrySet()) {
            props.put(entry.getKey(), entry.getValue());
        }
    }
    return props;
}
Example #8
Source File: KafkaAvroPublisher.java From doctorkafka with Apache License 2.0
public KafkaAvroPublisher(String zkUrl, String topic, String statsProducerPropertiesFile) {
    this.destTopic = topic;
    Properties statsProducerProperties = new Properties();
    Map<String, Object> keyValueMap = new HashMap<>();
    try {
        if (statsProducerPropertiesFile != null) {
            statsProducerProperties.load(new FileInputStream(statsProducerPropertiesFile));
            for (String propertyName : statsProducerProperties.stringPropertyNames()) {
                keyValueMap.put(propertyName, statsProducerProperties.get(propertyName));
            }
        }
    } catch (IOException e) {
        LOG.error("Failed to load configuration file {}", statsProducerPropertiesFile, e);
    }

    // set the security protocol based on the loaded producer properties
    SecurityProtocol securityProtocol = SecurityProtocol.PLAINTEXT;
    if (keyValueMap.containsKey(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)) {
        String secStr = keyValueMap.get(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG).toString();
        securityProtocol = Enum.valueOf(SecurityProtocol.class, secStr);
    }
    Properties producerProperties = OperatorUtil.createKafkaProducerProperties(zkUrl, securityProtocol);
    for (Map.Entry<String, Object> entry : keyValueMap.entrySet()) {
        producerProperties.put(entry.getKey(), entry.getValue());
    }
    this.kafkaProducer = new KafkaProducer<>(producerProperties);
}
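One caveat worth flagging in the example above: Enum.valueOf(SecurityProtocol.class, secStr) throws IllegalArgumentException unless secStr matches a constant name exactly. A hedged alternative sketch, assuming the SecurityProtocol.forName helper is available in the Kafka version on the classpath (it normalizes case before resolving):

// Hypothetical drop-in for the Enum.valueOf call above; forName upper-cases
// its argument, so "sasl_ssl" and "SASL_SSL" would both resolve.
SecurityProtocol securityProtocol = SecurityProtocol.forName(secStr);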
Example #9
Source File: CCKafkaClientsIntegrationTestHarness.java From cruise-control with BSD 2-Clause "Simplified" License
protected void setSecurityConfigs(Properties clientProps, String certAlias) {
    SecurityProtocol protocol = securityProtocol();
    if (protocol == SecurityProtocol.SSL) {
        File trustStoreFile = trustStoreFile();
        if (trustStoreFile == null) {
            throw new AssertionError("ssl set but no trust store provided");
        }
        clientProps.setProperty(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, protocol.name);
        clientProps.setProperty(KafkaConfig.SslEndpointIdentificationAlgorithmProp(), "");
        try {
            clientProps.putAll(TestSslUtils.createSslConfig(true, true, Mode.CLIENT, trustStoreFile, certAlias));
        } catch (Exception e) {
            throw new IllegalStateException(e);
        }
    }
}
Example #10
Source File: ConnectST.java From strimzi-kafka-operator with Apache License 2.0
void testConnectAuthorizationWithWeirdUserName(String userName, SecurityProtocol securityProtocol) {
    String topicName = TOPIC_NAME + rng.nextInt(Integer.MAX_VALUE);
    KafkaTopicResource.topic(CLUSTER_NAME, topicName).done();

    String connectorPodName = kubeClient().listPodsByPrefixInName(CLUSTER_NAME + "-connect")
        .get(0).getMetadata().getName();

    KafkaConnectorResource.kafkaConnector(CLUSTER_NAME)
        .editSpec()
            .withClassName("org.apache.kafka.connect.file.FileStreamSinkConnector")
            .addToConfig("topics", topicName)
            .addToConfig("file", Constants.DEFAULT_SINK_FILE_PATH)
        .endSpec().done();

    BasicExternalKafkaClient basicExternalKafkaClient = new BasicExternalKafkaClient.Builder()
        .withNamespaceName(NAMESPACE)
        .withClusterName(CLUSTER_NAME)
        .withKafkaUsername(userName)
        .withMessageCount(MESSAGE_COUNT)
        .withSecurityProtocol(securityProtocol)
        .withTopicName(topicName)
        .build();

    assertThat(basicExternalKafkaClient.sendMessagesTls(), is(MESSAGE_COUNT));

    KafkaConnectUtils.waitForMessagesInKafkaConnectFileSink(connectorPodName, Constants.DEFAULT_SINK_FILE_PATH);
}
Example #11
Source File: HttpBridgeScramShaST.java From strimzi-kafka-operator with Apache License 2.0
@Test
void testSendSimpleMessageTlsScramSha() throws Exception {
    int messageCount = 50;
    String topicName = "topic-simple-send-" + new Random().nextInt(Integer.MAX_VALUE);
    // Create topic
    KafkaTopicResource.topic(CLUSTER_NAME, topicName).done();

    JsonObject records = BridgeUtils.generateHttpMessages(messageCount);
    JsonObject response = BridgeUtils.sendMessagesHttpRequest(records, bridgeHost, bridgePort, topicName, client);
    KafkaBridgeUtils.checkSendResponse(response, messageCount);

    BasicExternalKafkaClient kafkaClient = new BasicExternalKafkaClient.Builder()
        .withTopicName(topicName)
        .withNamespaceName(NAMESPACE)
        .withClusterName(CLUSTER_NAME)
        .withKafkaUsername(USER_NAME)
        .withMessageCount(messageCount)
        .withSecurityProtocol(SecurityProtocol.SASL_SSL)
        .build();

    assertThat(kafkaClient.receiveMessagesTls(), is(messageCount));
}
Example #12
Source File: KafkaClientConfigUtil.java From kafka-webview with MIT License
/**
 * If SASL is configured for this cluster, apply the settings.
 * @param clusterConfig Cluster configuration definition to source values from.
 * @param config Config map to apply settings to.
 */
private void applySaslSettings(final ClusterConfig clusterConfig, final Map<String, Object> config) {
    // Nothing to do if the cluster does not use SASL.
    if (!clusterConfig.isUseSasl()) {
        return;
    }

    if (clusterConfig.isUseSsl()) {
        // SASL + SSL
        config.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_SSL.name);

        // Keystore and keystore password are not required when using SASL+SSL
        config.remove(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG);
        config.remove(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG);
    } else {
        // Just SASL PLAINTEXT
        config.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
    }

    config.put(SaslConfigs.SASL_MECHANISM, clusterConfig.getSaslMechanism());
    config.put(SaslConfigs.SASL_JAAS_CONFIG, clusterConfig.getSaslJaas());
}
Example #13
Source File: ClusterStatusSASLTest.java From common-docker with Apache License 2.0
@Test(timeout = 120000)
public void isKafkaReadyWithSASLAndSSL() throws Exception {
    Properties clientSecurityProps = kafka.getClientSecurityConfig();

    Map<String, String> config = Utils.propsToStringMap(clientSecurityProps);
    config.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG,
        kafka.getBootstrapBroker(SecurityProtocol.SASL_SSL));

    // Set the password and enabled protocol explicitly, as Utils.propsToStringMap just returns
    // toString() representations and these properties don't have a valid string representation.
    Password trustStorePassword = (Password) clientSecurityProps.get("ssl.truststore.password");
    config.put("ssl.truststore.password", trustStorePassword.value());
    config.put("ssl.enabled.protocols", "TLSv1.2");

    assertThat(ClusterStatus.isKafkaReady(config, 3, 10000)).isTrue();
}
Example #14
Source File: TestStreamProcessor.java From samza with Apache License 2.0
private void initConsumer(String bootstrapServer) {
    consumer = TestUtils.createConsumer(
        bootstrapServer,
        "group",
        "earliest",
        4096L,
        "org.apache.kafka.clients.consumer.RangeAssignor",
        30000,
        SecurityProtocol.PLAINTEXT,
        Option$.MODULE$.<File>empty(),
        Option$.MODULE$.<Properties>empty(),
        new StringDeserializer(),
        new ByteArrayDeserializer(),
        Option$.MODULE$.<Properties>empty());
}
Example #15
Source File: OauthExternalKafkaClient.java From strimzi-kafka-operator with Apache License 2.0
@Override
public int sendMessagesTls(long timeoutMs) {
    String clientName = "sender-ssl" + clusterName;
    CompletableFuture<Integer> resultPromise = new CompletableFuture<>();
    IntPredicate msgCntPredicate = x -> x == messageCount;

    String caCertName = this.caCertName == null ?
        KafkaResource.getKafkaExternalListenerCaCertName(namespaceName, clusterName) : this.caCertName;
    LOGGER.info("Going to use the following CA certificate: {}", caCertName);

    KafkaClientProperties properties = this.clientProperties;

    if (properties == null || properties.getProperties().isEmpty()) {
        properties = new KafkaClientProperties.KafkaClientPropertiesBuilder()
            .withNamespaceName(namespaceName)
            .withClusterName(clusterName)
            .withBootstrapServerConfig(getExternalBootstrapConnect(namespaceName, clusterName))
            .withKeySerializerConfig(StringSerializer.class)
            .withValueSerializerConfig(StringSerializer.class)
            .withCaSecretName(caCertName)
            .withKafkaUsername(kafkaUsername)
            .withSecurityProtocol(SecurityProtocol.SASL_SSL)
            .withClientIdConfig(kafkaUsername + "-producer")
            .withSaslMechanism(OAuthBearerLoginModule.OAUTHBEARER_MECHANISM)
            .withSaslLoginCallbackHandlerClass()
            .withSharedProperties()
            .withSaslJassConfigAndTls(clientId, clientSecretName, oauthTokenEndpointUri)
            .build();
    }

    try (Producer tlsProducer = new Producer(properties, resultPromise, msgCntPredicate, topicName, clientName, partition)) {
        tlsProducer.getVertx().deployVerticle(tlsProducer);
        return tlsProducer.getResultPromise().get(timeoutMs, TimeUnit.MILLISECONDS);
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        e.printStackTrace();
        throw new WaitException(e);
    }
}
Example #16
Source File: OauthExternalKafkaClient.java From strimzi-kafka-operator with Apache License 2.0
@Override
public int sendMessagesPlain(long timeoutMs) {
    String clientName = "sender-plain-" + clusterName;
    CompletableFuture<Integer> resultPromise = new CompletableFuture<>();
    IntPredicate msgCntPredicate = x -> x == messageCount;

    KafkaClientProperties properties = this.clientProperties;

    if (properties == null || properties.getProperties().isEmpty()) {
        properties = new KafkaClientProperties.KafkaClientPropertiesBuilder()
            .withNamespaceName(namespaceName)
            .withClusterName(clusterName)
            .withSecurityProtocol(SecurityProtocol.SASL_PLAINTEXT)
            .withBootstrapServerConfig(getExternalBootstrapConnect(namespaceName, clusterName))
            .withKeySerializerConfig(StringSerializer.class)
            .withValueSerializerConfig(StringSerializer.class)
            .withClientIdConfig(kafkaUsername + "-producer")
            .withSaslMechanism(OAuthBearerLoginModule.OAUTHBEARER_MECHANISM)
            .withSaslLoginCallbackHandlerClass()
            .withSharedProperties()
            .withSaslJassConfig(this.clientId, this.clientSecretName, this.oauthTokenEndpointUri)
            .build();
    }

    try (Producer plainProducer = new Producer(properties, resultPromise, msgCntPredicate, topicName, clientName, partition)) {
        plainProducer.getVertx().deployVerticle(plainProducer);
        return plainProducer.getResultPromise().get(timeoutMs, TimeUnit.MILLISECONDS);
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        e.printStackTrace();
        throw new WaitException(e);
    }
}
Example #17
Source File: ClusterLoadBalancer.java From doctorkafka with Apache License 2.0
public static void main(String[] args) throws Exception {
    CommandLine commandLine = parseCommandLine(args);
    String configFilePath = commandLine.getOptionValue(CONFIG);
    String brokerStatsZk = commandLine.getOptionValue(BROKERSTATS_ZOOKEEPER);
    String brokerStatsTopic = commandLine.getOptionValue(BROKERSTATS_TOPIC);
    String clusterZk = commandLine.getOptionValue(CLUSTER_ZOOKEEPER);
    long seconds = Long.parseLong(commandLine.getOptionValue(SECONDS));
    boolean onlyOne = commandLine.hasOption(ONLY_ONE);

    ReplicaStatsManager replicaStatsManager = new ReplicaStatsManager(new DoctorKafkaConfig(configFilePath));
    replicaStatsManager.readPastReplicaStats(brokerStatsZk, SecurityProtocol.PLAINTEXT,
        brokerStatsTopic, seconds);

    Set<String> zkUrls = replicaStatsManager.getClusterZkUrls();
    if (!zkUrls.contains(clusterZk)) {
        LOG.error("Failed to find zkurl {} in configuration", clusterZk);
        return;
    }

    DoctorKafkaClusterConfig clusterConf =
        replicaStatsManager.getConfig().getClusterConfigByZkUrl(clusterZk);
    KafkaCluster kafkaCluster = replicaStatsManager.getClusters().get(clusterZk);

    KafkaClusterManager clusterManager = new KafkaClusterManager(
        clusterZk, kafkaCluster, clusterConf, replicaStatsManager.getConfig(), null, null,
        replicaStatsManager);

    List<KafkaBroker> highTrafficBrokers = clusterManager.getHighTrafficBroker();
    if (onlyOne && highTrafficBrokers.size() > 0) {
        KafkaBroker broker = highTrafficBrokers.get(0);
        highTrafficBrokers.clear();
        highTrafficBrokers.add(broker);
    }

    String assignPlan = clusterManager.getWorkloadBalancingPlanInJson(highTrafficBrokers);
    LOG.info("Reassignment Plan : {}", assignPlan);
}
Example #18
Source File: RangerKafkaAuthorizer.java From ranger with Apache License 2.0
@Override
public void configure(Map<String, ?> configs) {
    RangerBasePlugin me = rangerPlugin;
    if (me == null) {
        synchronized (RangerKafkaAuthorizer.class) {
            me = rangerPlugin;
            if (me == null) {
                try {
                    // It is possible to override the JAAS configuration used by Ranger; otherwise
                    // SASL_PLAINTEXT is used, which forces Kafka to use the 'sasl_plaintext.KafkaServer'
                    // configuration. If that is not defined, it reverts to 'KafkaServer'.
                    final Object jaasContext = configs.get("ranger.jaas.context");
                    final String listenerName = (jaasContext instanceof String
                        && StringUtils.isNotEmpty((String) jaasContext))
                        ? (String) jaasContext : SecurityProtocol.SASL_PLAINTEXT.name();
                    final String saslMechanism = SaslConfigs.GSSAPI_MECHANISM;
                    JaasContext context = JaasContext.loadServerContext(
                        new ListenerName(listenerName), saslMechanism, configs);
                    LoginManager loginManager = LoginManager.acquireLoginManager(
                        context, saslMechanism, KerberosLogin.class, configs);
                    Subject subject = loginManager.subject();
                    UserGroupInformation ugi = MiscUtil.createUGIFromSubject(subject);
                    if (ugi != null) {
                        MiscUtil.setUGILoginUser(ugi, subject);
                    }
                    logger.info("LoginUser=" + MiscUtil.getUGILoginUser());
                } catch (Throwable t) {
                    logger.error("Error getting principal.", t);
                }
                me = rangerPlugin = new RangerBasePlugin("kafka", "kafka");
            }
        }
    }
    logger.info("Calling plugin.init()");
    rangerPlugin.init();
    auditHandler = new RangerKafkaAuditHandler();
    rangerPlugin.setResultProcessor(auditHandler);
}
Example #19
Source File: URPChecker.java From doctorkafka with Apache License 2.0
public static void main(String[] args) throws Exception {
    CommandLine commandLine = parseCommandLine(args);
    String zookeeper = commandLine.getOptionValue(ZOOKEEPER);

    ZkUtils zkUtils = KafkaUtils.getZkUtils(zookeeper);
    Seq<String> topicsSeq = zkUtils.getAllTopics();
    List<String> topics = scala.collection.JavaConverters.seqAsJavaList(topicsSeq);

    scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> partitionAssignments =
        zkUtils.getPartitionAssignmentForTopics(topicsSeq);

    Map<String, Integer> replicationFactors = new HashMap<>();
    Map<String, Integer> partitionCounts = new HashMap<>();

    topics.stream().forEach(topic -> {
        int partitionCount = partitionAssignments.get(topic).get().size();
        int factor = partitionAssignments.get(topic).get().head()._2().size();
        partitionCounts.put(topic, partitionCount);
        replicationFactors.put(topic, factor);
    });

    List<PartitionInfo> urps = KafkaClusterManager.getUnderReplicatedPartitions(
        zookeeper, SecurityProtocol.PLAINTEXT, null, topics,
        partitionAssignments, replicationFactors, partitionCounts);

    for (PartitionInfo partitionInfo : urps) {
        LOG.info("under-replicated : {}", partitionInfo);
    }
}
Example #20
Source File: ClusterTestHarness.java From kareldb with Apache License 2.0
protected KafkaConfig getKafkaConfig(int brokerId) {
    final Option<java.io.File> noFile = scala.Option.apply(null);
    final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
    Properties props = TestUtils.createBrokerConfig(
        brokerId, zkConnect, false, false, TestUtils.RandomPort(),
        noInterBrokerSecurityProtocol, noFile, EMPTY_SASL_PROPERTIES,
        true, false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
        false, TestUtils.RandomPort(), Option.<String>empty(), 1, false, 1, (short) 1);
    injectProperties(props);
    return KafkaConfig.fromProps(props);
}
Example #21
Source File: KafkaST.java From strimzi-kafka-operator with Apache License 2.0
@Test
@Tag(ACCEPTANCE)
@Tag(LOADBALANCER_SUPPORTED)
@Tag(EXTERNAL_CLIENTS_USED)
void testLoadBalancerTls() {
    KafkaResource.kafkaEphemeral(CLUSTER_NAME, 3)
        .editSpec()
            .editKafka()
                .editListeners()
                    .withNewKafkaListenerExternalLoadBalancer()
                    .endKafkaListenerExternalLoadBalancer()
                .endListeners()
                .withConfig(singletonMap("default.replication.factor", 3))
            .endKafka()
        .endSpec()
        .done();

    KafkaUserResource.tlsUser(CLUSTER_NAME, USER_NAME).done();

    ServiceUtils.waitUntilAddressIsReachable(kubeClient()
        .getService(KafkaResources.externalBootstrapServiceName(CLUSTER_NAME))
        .getStatus().getLoadBalancer().getIngress().get(0).getHostname());

    BasicExternalKafkaClient basicExternalKafkaClient = new BasicExternalKafkaClient.Builder()
        .withTopicName(TOPIC_NAME)
        .withNamespaceName(NAMESPACE)
        .withClusterName(CLUSTER_NAME)
        .withMessageCount(MESSAGE_COUNT)
        .withKafkaUsername(USER_NAME)
        .withSecurityProtocol(SecurityProtocol.SSL)
        .build();

    basicExternalKafkaClient.verifyProducedAndConsumedMessages(
        basicExternalKafkaClient.sendMessagesTls(),
        basicExternalKafkaClient.receiveMessagesTls()
    );
}
Example #22
Source File: BrokerStatsFilter.java From doctorkafka with Apache License 2.0
public static void main(String[] args) throws Exception {
    CommandLine commandLine = parseCommandLine(args);
    String brokerStatsZk = commandLine.getOptionValue(BROKERSTATS_ZOOKEEPER);
    String brokerStatsTopic = commandLine.getOptionValue(BROKERSTATS_TOPIC);
    String brokerName = commandLine.getOptionValue(BROKERNAME);
    Set<String> brokerNames = new HashSet<>();
    brokerNames.add(brokerName);

    KafkaConsumer<byte[], byte[]> kafkaConsumer = KafkaUtils.getKafkaConsumer(brokerStatsZk,
        "org.apache.kafka.common.serialization.ByteArrayDeserializer",
        "org.apache.kafka.common.serialization.ByteArrayDeserializer",
        1, SecurityProtocol.PLAINTEXT, null);

    long startTimestampInMillis = System.currentTimeMillis() - 86400 * 1000L;
    Map<TopicPartition, Long> offsets = ReplicaStatsUtil.getProcessingStartOffsets(
        kafkaConsumer, brokerStatsTopic, startTimestampInMillis);

    kafkaConsumer.unsubscribe();
    kafkaConsumer.assign(offsets.keySet());
    Map<TopicPartition, Long> latestOffsets = kafkaConsumer.endOffsets(offsets.keySet());
    KafkaUtils.closeConsumer(brokerStatsZk);

    Map<Long, BrokerStats> brokerStatsMap = new TreeMap<>();
    for (TopicPartition topicPartition : offsets.keySet()) {
        LOG.info("Start processing {}", topicPartition);
        long startOffset = offsets.get(topicPartition);
        long endOffset = latestOffsets.get(topicPartition);

        List<BrokerStats> statsList = processOnePartition(brokerStatsZk, topicPartition,
            startOffset, endOffset, brokerNames);
        for (BrokerStats brokerStats : statsList) {
            brokerStatsMap.put(brokerStats.getTimestamp(), brokerStats);
        }
        LOG.info("Finished processing {}, retrieved {} records", topicPartition, statsList.size());
    }

    for (Map.Entry<Long, BrokerStats> entry : brokerStatsMap.entrySet()) {
        System.out.println(entry.getKey() + " : " + entry.getValue());
    }
}
Example #23
Source File: BasicExternalKafkaClient.java From strimzi-kafka-operator with Apache License 2.0
/**
 * Send messages to the external entrypoint of the cluster with the PLAINTEXT security protocol setting.
 * @return sent message count
 */
public int sendMessagesPlain(long timeoutMs) {
    String clientName = "sender-plain-" + this.clusterName;
    CompletableFuture<Integer> resultPromise = new CompletableFuture<>();
    IntPredicate msgCntPredicate = x -> x == messageCount;

    KafkaClientProperties properties = this.clientProperties;

    if (properties == null || properties.getProperties().isEmpty()) {
        properties = new KafkaClientProperties.KafkaClientPropertiesBuilder()
            .withNamespaceName(namespaceName)
            .withClusterName(clusterName)
            .withBootstrapServerConfig(getExternalBootstrapConnect(namespaceName, clusterName))
            .withKeySerializerConfig(StringSerializer.class)
            .withValueSerializerConfig(StringSerializer.class)
            .withClientIdConfig("producer-plain-" + new Random().nextInt(Integer.MAX_VALUE))
            .withSecurityProtocol(SecurityProtocol.PLAINTEXT)
            .withSharedProperties()
            .build();
    }

    try (Producer plainProducer = new Producer(properties, resultPromise, msgCntPredicate, topicName, clientName, partition)) {
        plainProducer.getVertx().deployVerticle(plainProducer);
        return plainProducer.getResultPromise().get(timeoutMs, TimeUnit.MILLISECONDS);
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        e.printStackTrace();
        throw new WaitException(e);
    }
}
Example #24
Source File: BasicExternalKafkaClient.java From strimzi-kafka-operator with Apache License 2.0
/**
 * Receive messages from the external entrypoint of the cluster with the PLAINTEXT security protocol setting.
 * @return received message count
 */
public int receiveMessagesPlain(long timeoutMs) {
    String clientName = "receiver-plain-" + clusterName;
    CompletableFuture<Integer> resultPromise = new CompletableFuture<>();
    IntPredicate msgCntPredicate = x -> x == messageCount;

    KafkaClientProperties properties = this.clientProperties;

    if (properties == null || properties.getProperties().isEmpty()) {
        properties = new KafkaClientProperties.KafkaClientPropertiesBuilder()
            .withNamespaceName(namespaceName)
            .withClusterName(clusterName)
            .withBootstrapServerConfig(getExternalBootstrapConnect(namespaceName, clusterName))
            .withKeyDeserializerConfig(StringDeserializer.class)
            .withValueDeserializerConfig(StringDeserializer.class)
            .withClientIdConfig("consumer-plain-" + new Random().nextInt(Integer.MAX_VALUE))
            .withAutoOffsetResetConfig(OffsetResetStrategy.EARLIEST)
            .withGroupIdConfig(consumerGroup)
            .withSecurityProtocol(SecurityProtocol.PLAINTEXT)
            .withSharedProperties()
            .build();
    }

    try (Consumer plainConsumer = new Consumer(properties, resultPromise, msgCntPredicate, this.topicName, clientName)) {
        plainConsumer.getVertx().deployVerticle(plainConsumer);
        return plainConsumer.getResultPromise().get(timeoutMs, TimeUnit.MILLISECONDS);
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        e.printStackTrace();
        throw new WaitException(e);
    }
}
Example #25
Source File: EmbeddedBrokerBuilder.java From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
public EmbeddedBrokerBuilder enable(SecurityProtocol protocol) {
    switch (protocol) {
        case PLAINTEXT:
            enablePlaintext();
            break;
        case SSL:
            enableSsl();
            break;
        default:
            throw new IllegalStateException("unhandled: " + protocol);
    }
    return this;
}
Example #26
Source File: DoctorKafkaActionReporter.java From doctorkafka with Apache License 2.0
public DoctorKafkaActionReporter(String zkUrl, SecurityProtocol securityProtocol,
    String topic, Map<String, String> producerConfigs) {
    this.topic = topic;
    String bootstrapBrokers = OperatorUtil.getBrokers(zkUrl, securityProtocol);
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapBrokers);
    props.put(ProducerConfig.ACKS_CONFIG, "1");
    props.put(ProducerConfig.RETRIES_CONFIG, 3);
    props.put(ProducerConfig.BATCH_SIZE_CONFIG, 1638400);
    props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
    props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "gzip");
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
        "org.apache.kafka.common.serialization.ByteArraySerializer");
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
        "org.apache.kafka.common.serialization.ByteArraySerializer");
    for (Map.Entry<String, String> entry : producerConfigs.entrySet()) {
        props.put(entry.getKey(), entry.getValue());
    }
    this.kafkaProducer = new KafkaProducer<>(props);
}
Example #27
Source File: KafkaUtils.java From doctorkafka with Apache License 2.0
public static KafkaConsumer<byte[], byte[]> getKafkaConsumer(String zkUrl,
    SecurityProtocol securityProtocol, Map<String, String> consumerConfigs) {
    return getKafkaConsumer(zkUrl,
        "org.apache.kafka.common.serialization.ByteArrayDeserializer",
        "org.apache.kafka.common.serialization.ByteArrayDeserializer",
        DEFAULT_MAX_POOL_RECORDS, securityProtocol, consumerConfigs);
}
Example #28
Source File: OperatorUtil.java From doctorkafka with Apache License 2.0
public static Properties createKafkaProducerProperties(String zkUrl, SecurityProtocol securityProtocol) {
    String bootstrapBrokers = OperatorUtil.getBrokers(zkUrl, securityProtocol);
    Properties props = new Properties();
    props.put(KafkaUtils.BOOTSTRAP_SERVERS, bootstrapBrokers);
    props.put(ProducerConfig.ACKS_CONFIG, "1");
    props.put(ProducerConfig.RETRIES_CONFIG, 0);
    props.put(ProducerConfig.BATCH_SIZE_CONFIG, 1638400);
    props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 3554432);
    props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "gzip");
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
        "org.apache.kafka.common.serialization.ByteArraySerializer");
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
        "org.apache.kafka.common.serialization.ByteArraySerializer");
    return props;
}
Example #29
Source File: BrokerStatsReader.java From doctorkafka with Apache License 2.0
public static void main(String[] args) throws Exception {
    CommandLine commandLine = parseCommandLine(args);
    String zkUrl = commandLine.getOptionValue(ZOOKEEPER);
    String statsTopic = commandLine.getOptionValue(STATS_TOPIC);

    String bootstrapBrokers = OperatorUtil.getBrokers(zkUrl, SecurityProtocol.PLAINTEXT);
    Properties props = new Properties();
    props.put(KafkaUtils.BOOTSTRAP_SERVERS, bootstrapBrokers);
    props.put("group.id", "broker_statsreader_group");
    props.put("enable.auto.commit", "false");
    props.put("auto.commit.interval.ms", "1000");
    props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
    props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");

    Schema schema = BrokerStats.getClassSchema();
    KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(statsTopic));
    while (true) {
        ConsumerRecords<byte[], byte[]> records = consumer.poll(100);
        for (ConsumerRecord<byte[], byte[]> record : records) {
            System.out.printf("offset = %d, key.size = %d, value.size = %d%n",
                record.offset(), record.key().length, record.value().length);
            try {
                BinaryDecoder binaryDecoder = avroDecoderFactory.binaryDecoder(record.value(), null);
                SpecificDatumReader<BrokerStats> reader = new SpecificDatumReader<>(schema);
                BrokerStats result = new BrokerStats();
                reader.read(result, binaryDecoder);
                System.out.println(result);
            } catch (Exception e) {
                LOG.error("Failed to decode a message", e);
            }
        }
    }
}
Example #30
Source File: ClusterStatusTest.java From common-docker with Apache License 2.0
@Test(timeout = 120000)
public void isKafkaReadyFailWithLessBrokers() throws Exception {
    try {
        Map<String, String> config = new HashMap<>();
        config.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG,
            kafka.getBootstrapBroker(SecurityProtocol.PLAINTEXT));
        assertThat(ClusterStatus.isKafkaReady(config, 5, 10000)).isFalse();
    } catch (Exception e) {
        fail("Unexpected error. " + e.getMessage());
    }
}