Java Code Examples for org.apache.kafka.clients.CommonClientConfigs

The following examples show how to use org.apache.kafka.clients.CommonClientConfigs. The examples are extracted from open source projects; each one lists its source project, source file, and license where available.
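For orientation before the project examples, here is a minimal sketch of what the class is for: CommonClientConfigs collects the configuration keys shared by producers, consumers, and admin clients, so the same constants can be reused across client types instead of hard-coding property strings. The broker address and client id below are placeholders.

import java.util.Properties;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.AdminClient;

public class CommonClientConfigsSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Keys shared by all Kafka clients live in CommonClientConfigs.
        props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        props.put(CommonClientConfigs.CLIENT_ID_CONFIG, "common-configs-sketch");  // placeholder client id
        // The same keys would work unchanged for a KafkaProducer or KafkaConsumer.
        try (AdminClient client = AdminClient.create(props)) {
            // AdminClient is AutoCloseable; nothing else to do in this sketch.
        }
    }
}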
Example 1
Source Project: doctorkafka   Source File: KafkaAvroPublisher.java    License: Apache License 2.0
public KafkaAvroPublisher(String zkUrl, String topic, String statsProducerPropertiesFile) {
  this.destTopic = topic;
  Properties statsProducerProperties = new Properties();
  Map<String, Object> keyValueMap = new HashMap<>();
  try {
    if (statsProducerPropertiesFile != null) {
      statsProducerProperties.load(new FileInputStream(statsProducerPropertiesFile));
      for (String propertyName : statsProducerProperties.stringPropertyNames()) {
        keyValueMap.put(propertyName, statsProducerProperties.get(propertyName));
      }
    }
  } catch (IOException e) {
    LOG.error("Failed to load configuration file {}", statsProducerPropertiesFile, e);
  }
  // set the security protocol based on the loaded producer properties
  SecurityProtocol securityProtocol = SecurityProtocol.PLAINTEXT;
  if (keyValueMap.containsKey(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)) {
    String secStr = keyValueMap.get(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG).toString();
    securityProtocol = Enum.valueOf(SecurityProtocol.class, secStr);
  }
  Properties producerProperties = OperatorUtil.createKafkaProducerProperties(zkUrl, securityProtocol);
  for (Map.Entry<String, Object> entry: keyValueMap.entrySet()) {
    producerProperties.put(entry.getKey(), entry.getValue());
  }
  this.kafkaProducer = new KafkaProducer<>(producerProperties);
}
 
Example 2
Source Project: kbear   Source File: ProducerProxy.java    License: Apache License 2.0
public ProducerProxy(ConfigurationManager configurationManager, KafkaMetaManager metaManager,
        KafkaProducerConfig kafkaProducerConfig) {
    ObjectExtension.requireNonNull(configurationManager, "configurationManager");
    ObjectExtension.requireNonNull(metaManager, "metaManager");
    ObjectExtension.requireNonNull(kafkaProducerConfig, "kafkaProducerConfig");

    _clientId = kafkaProducerConfig.getProperties().getProperty(CommonClientConfigs.CLIENT_ID_CONFIG);
    if (StringExtension.isBlank(_clientId))
        _clientId = StringExtension.EMPTY;

    _metaManager = metaManager;
    _kafkaProducerConfig = kafkaProducerConfig;

    _addRemoveLock = new Object();
    _producerHolders = new ConcurrentHashMap<>();

    StringProperties stringProperties = new StringProperties(configurationManager);
    _destroyDelay = stringProperties.getIntProperty("kafka.producer-proxy.producer-destroy-delay", 60 * 1000,
            v -> v < 0 ? null : v);
    _executorService = Executors.newSingleThreadScheduledExecutor(r -> {
        Thread thread = new Thread(r, "kafka.producer-proxy.scheduled-executor");
        thread.setDaemon(true);
        return thread;
    });
}
 
Example 3
Source Project: kafka-webview   Source File: KafkaClientConfigUtil.java    License: MIT License
/**
 * If SSL is configured for this cluster, apply the settings.
 * @param clusterConfig Cluster configuration definition to source values from.
 * @param config Config map to apply settings to.
 */
private void applySslSettings(final ClusterConfig clusterConfig, final Map<String, Object> config) {
    // Optionally configure SSL
    if (!clusterConfig.isUseSsl()) {
        return;
    }
    if (clusterConfig.isUseSasl()) {
        config.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_SSL.name);
    } else {
        config.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SSL.name);

        // KeyStore and KeyStore password only needed if NOT using SASL
        config.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, keyStoreRootPath + "/" + clusterConfig.getKeyStoreFile());
        config.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, clusterConfig.getKeyStorePassword());
    }
    // Only put Trust properties if one is defined
    if (clusterConfig.getTrustStoreFile() != null) {
        config.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, keyStoreRootPath + "/" + clusterConfig.getTrustStoreFile());
        config.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, clusterConfig.getTrustStorePassword());
    }
}
 
Example 4
Source Project: micronaut-kafka   Source File: KafkaEmbedded.java    License: Apache License 2.0
private void createTopics(int targetPort, Integer numPartitions) throws InterruptedException, java.util.concurrent.ExecutionException {
    List<String> topics = embeddedConfiguration.getTopics();

    if (LOG.isDebugEnabled()) {
        LOG.debug("Creating Kafka Topics in Embedded Kafka: {}", topics);
    }
    if (!topics.isEmpty()) {
        Properties properties = new Properties();
        properties.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, ("127.0.0.1:" + targetPort));
        AdminClient adminClient = AdminClient.create(properties);
        final CreateTopicsResult result = adminClient.createTopics(topics.stream().map(s ->
                new NewTopic(s, numPartitions, (short) 1)).collect(Collectors.toList())
        );
        result.all().get();

        if (LOG.isInfoEnabled()) {
            LOG.info("Created Kafka Topics in Embedded Kafka: {}", topics);
        }
    }
}
 
Example 5
Source Project: DataLink   Source File: TestKafkaUtils.java    License: Apache License 2.0
private KafkaFactory.KafkaClientModel get(){
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "10.104.156.83:9092");
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
    props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
    props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
    props.put("acks", "all");
    props.put("retries", 0);
    props.put("batch.size", 16384);
    props.put("linger.ms", 1);
    props.put("buffer.memory", 33554432);
    props.put("sasl.jaas.config",
            "org.apache.kafka.common.security.plain.PlainLoginModule required username='kafka' password='kafka';");
    KafkaProducer<String, byte[]> producer = new KafkaProducer<>(props);
    AdminClient client = AdminClient.create(props);

    KafkaFactory.KafkaClientModel kafkaClientModel = new KafkaFactory.KafkaClientModel(producer, client);
    return kafkaClientModel;
}
 
Example 6
Source Project: DataLink   Source File: TaskExceptionProbeImpl.java    License: Apache License 2.0
@Override
public void record(TaskExceptionProbeIndex index) {
    String bootMode = WorkerConfig.current().getString(WorkerConfig.WORKER_BOOT_MODE_CONFIG);
    if (index.isSendDirectly() && bootMode.equals("distributed")) {
        sendDirectly(index);
    } else {
        cache.put(index.getTaskId(), index.getExceptionInfo());
    }
    try {
        TaskExceptionInfo taskExceptionInfo = new TaskExceptionInfo();
        String clientId = WorkerConfig.current().getString(CommonClientConfigs.CLIENT_ID_CONFIG);
        taskExceptionInfo.setWorkerId(Long.valueOf(clientId));
        taskExceptionInfo.setTaskId(index.getTaskId());
        taskExceptionInfo.setExceptionDetail(index.getExceptionInfo());
        taskExceptionService.insert(taskExceptionInfo);
    } catch (Exception e) {
        logger.info("Record exception detail failed.", e);
    }
}
 
Example 7
Source Project: kafka-eagle   Source File: TestKafkaServiceImpl.java    License: Apache License 2.0
public Map<TopicPartition, Long> getKafkaLogSize(String topic, Set<Integer> partitionids) {
	Properties props = new Properties();
	props.put(ConsumerConfig.GROUP_ID_CONFIG, Kafka.KAFKA_EAGLE_SYSTEM_GROUP);
	props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092");
	props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getCanonicalName());
	props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getCanonicalName());
	KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
	Set<TopicPartition> tps = new HashSet<>();
	for (int partitionid : partitionids) {
		tps.add(new TopicPartition(topic, partitionid));
	}
	// position() requires the partitions to be assigned first
	consumer.assign(tps);
	Map<TopicPartition, Long> partitionOffset = new HashMap<>();
	for (TopicPartition tp : tps) {
		partitionOffset.put(tp, consumer.position(tp));
	}

	System.out.println(partitionOffset.toString());

	consumer.close();
	return partitionOffset;
}
 
Example 8
Source Project: kafka-eagle   Source File: KafkaServiceImpl.java    License: Apache License 2.0
/**
 * Send a mock message to a kafka topic.
 */
public boolean mockMessage(String clusterAlias, String topic, String message) {
	Properties props = new Properties();
	props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, getKafkaBrokerServer(clusterAlias));
	props.put(Kafka.KEY_SERIALIZER, StringSerializer.class.getCanonicalName());
	props.put(Kafka.VALUE_SERIALIZER, StringSerializer.class.getCanonicalName());
	props.put(Kafka.PARTITION_CLASS, KafkaPartitioner.class.getName());

	if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.sasl.enable")) {
		sasl(props, clusterAlias);
	}
	if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.ssl.enable")) {
		ssl(props, clusterAlias);
	}
	Producer<String, String> producer = new KafkaProducer<>(props);
	producer.send(new ProducerRecord<String, String>(topic, new Date().getTime() + "", message));
	producer.close();

	return true;
}
 
Example 9
/**
 * Get a producer connected to the given broker.
 *
 * @param broker The broker to connect to.
 * @return A producer connected to the given broker.
 */
public static KafkaProducer<String, String> producerFor(CCEmbeddedBroker broker) {
  String bootstrap = broker.plaintextAddr();
  if (bootstrap == null) {
    bootstrap = broker.sslAddr();
  }

  Properties props = new Properties();
  props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, bootstrap);
  props.put(ProducerConfig.ACKS_CONFIG, "all");
  props.put(ProducerConfig.RETRIES_CONFIG, 0);
  props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
  props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
  props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 1024 * 1024);
  props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
  props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");

  return new KafkaProducer<>(props);
}
 
Example 10
/**
 * Get a consumer connected to the given broker.
 *
 * @param broker The broker to connect to.
 * @return A consumer connected to the given broker.
 */
public static KafkaConsumer<String, String> consumerFor(CCEmbeddedBroker broker) {
  String bootstrap = broker.plaintextAddr();
  if (bootstrap == null) {
    bootstrap = broker.sslAddr();
  }

  Properties props = new Properties();
  props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, bootstrap);
  props.put(ConsumerConfig.GROUP_ID_CONFIG, "test");
  props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
  props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
  props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
  props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");

  return new KafkaConsumer<>(props);
}
 
Example 11
protected void setSecurityConfigs(Properties clientProps, String certAlias) {
  SecurityProtocol protocol = securityProtocol();
  if (protocol == SecurityProtocol.SSL) {
    File trustStoreFile = trustStoreFile();
    if (trustStoreFile == null) {
      throw new AssertionError("ssl set but no trust store provided");
    }
    clientProps.setProperty(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, protocol.name);
    clientProps.setProperty(KafkaConfig.SslEndpointIdentificationAlgorithmProp(), "");
    try {
      clientProps.putAll(TestSslUtils.createSslConfig(true, true, Mode.CLIENT, trustStoreFile, certAlias));
    } catch (Exception e) {
      throw new IllegalStateException(e);
    }
  }
}
 
Example 12
@Override
public Properties overridingProps() {
  Properties props = new Properties();
  int port = CCKafkaTestUtils.findLocalPort();
  // We need to convert all the properties to the Cruise Control properties.
  setSecurityConfigs(props, "producer");
  for (String configName : ProducerConfig.configNames()) {
    Object value = props.get(configName);
    if (value != null) {
      props.remove(configName);
      props.put(appendPrefix(configName), value);
    }
  }
  props.setProperty(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG, CruiseControlMetricsReporter.class.getName());
  props.setProperty(KafkaConfig.ListenersProp(), "SSL://127.0.0.1:" + port);
  props.setProperty(CruiseControlMetricsReporterConfig.config(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG), "127.0.0.1:" + port);
  props.setProperty(CruiseControlMetricsReporterConfig.config(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG), SecurityProtocol.SSL.name);
  props.setProperty(CRUISE_CONTROL_METRICS_REPORTER_INTERVAL_MS_CONFIG, "100");
  props.setProperty(CRUISE_CONTROL_METRICS_TOPIC_CONFIG, TOPIC);
  props.setProperty(KafkaConfig.LogFlushIntervalMessagesProp(), "1");
  props.setProperty(KafkaConfig.OffsetsTopicReplicationFactorProp(), "1");
  props.setProperty(KafkaConfig.DefaultReplicationFactorProp(), "2");
  return props;
}
 
Example 13
@Override
public Properties overridingProps() {
    Properties props = new Properties();
    int port = CCKafkaTestUtils.findLocalPort();
    props.setProperty(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG, CruiseControlMetricsReporter.class.getName());
    props.setProperty(KafkaConfig.ListenersProp(), "PLAINTEXT://127.0.0.1:" + port);
    props.setProperty(CruiseControlMetricsReporterConfig.config(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG),
            "127.0.0.1:" + port);
    props.setProperty(CruiseControlMetricsReporterConfig.CRUISE_CONTROL_METRICS_REPORTER_INTERVAL_MS_CONFIG, "100");
    props.setProperty(CruiseControlMetricsReporterConfig.CRUISE_CONTROL_METRICS_TOPIC_CONFIG, TOPIC);
    // configure metrics topic auto-creation by the metrics reporter
    props.setProperty(CruiseControlMetricsReporterConfig.CRUISE_CONTROL_METRICS_TOPIC_AUTO_CREATE_CONFIG, "true");
    props.setProperty(CruiseControlMetricsReporterConfig.CRUISE_CONTROL_METRICS_TOPIC_AUTO_CREATE_TIMEOUT_MS_CONFIG, "5000");
    props.setProperty(CruiseControlMetricsReporterConfig.CRUISE_CONTROL_METRICS_TOPIC_AUTO_CREATE_RETRIES_CONFIG, "1");
    props.setProperty(CruiseControlMetricsReporterConfig.CRUISE_CONTROL_METRICS_TOPIC_NUM_PARTITIONS_CONFIG, "1");
    props.setProperty(CruiseControlMetricsReporterConfig.CRUISE_CONTROL_METRICS_TOPIC_REPLICATION_FACTOR_CONFIG, "1");
    // disable topic auto-creation to leave the metrics reporter to create the metrics topic
    props.setProperty(KafkaConfig.AutoCreateTopicsEnableProp(), "false");
    props.setProperty(KafkaConfig.LogFlushIntervalMessagesProp(), "1");
    props.setProperty(KafkaConfig.OffsetsTopicReplicationFactorProp(), "1");
    props.setProperty(KafkaConfig.DefaultReplicationFactorProp(), "2");
    props.setProperty(KafkaConfig.NumPartitionsProp(), "2");
    return props;
}
 
Example 14
Source Project: DBus   Source File: ZkPropertiesProvider.java    License: Apache License 2.0
private void addSecurityConf(Properties properties) throws Exception {
    if (isSecurity == null) {
        synchronized (this.getClass()) {
            if (isSecurity == null) {
                String path = Constants.COMMON_ROOT + "/" + Constants.GLOBAL_SECURITY_CONF;
                if (zookeeper.isExists(path)) {
                    Properties pro = zookeeper.getProperties(path);
                    if (StringUtils.equals(pro.getProperty("AuthenticationAndAuthorization"), Constants.SECURITY_CONFIG_TRUE_VALUE)) {
                        isSecurity = true;
                    } else {
                        isSecurity = false;
                    }
                } else {
                    isSecurity = false;
                }
            }
        }
    }
    if (isSecurity) {
        properties.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
    }
}
 
Example 15
Source Project: DBus   Source File: ZkHelper.java    License: Apache License 2.0
public Properties loadKafkaConsumerConf() {
    Properties consumerProps;
    String path = Constants.TOPOLOGY_ROOT + "/" + topologyId + "-" + LOG_PROCESSOR + "/consumer.properties";
    try {
        consumerProps = zkService.getProperties(path);
    } catch (Exception e) {
        logger.error("load consumer.properties error.", e);
        throw new RuntimeException("load consumer.properties error", e);
    }

    Properties props = loadSecurityConf();
    if (props != null) {
        if (StringUtils.equals(props.getProperty("AuthenticationAndAuthorization"), Constants.SECURITY_CONFIG_TRUE_VALUE)) {
            consumerProps.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
            logger.info("consumer security_protocol is enabled!  security_protocol_config is: SASL_PLAINTEXT");
        } else if (StringUtils.equals(props.getProperty("AuthenticationAndAuthorization"), "none")) {
            logger.info("consumer security_protocol is disabled!");
        }
    }

    return consumerProps;
}
 
Example 16
Source Project: common-docker   Source File: ClusterStatusSASLTest.java    License: Apache License 2.0
@Test(timeout = 120000)
public void isKafkaReadyWithSASLAndSSLUsingZK() throws Exception {
  Properties clientSecurityProps = kafka.getClientSecurityConfig();

  boolean zkReady = ClusterStatus.isZookeeperReady(this.kafka.getZookeeperConnectString(), 30000);
  if (!zkReady) {
    throw new RuntimeException(
        "Could not reach zookeeper " + this.kafka.getZookeeperConnectString());
  }
  Map<String, String> endpoints = ClusterStatus.getKafkaEndpointFromZookeeper(
      this.kafka.getZookeeperConnectString(),
      30000
  );

  String bootstrap_broker = endpoints.get("SASL_SSL");
  Map<String, String> config = Utils.propsToStringMap(clientSecurityProps);
  config.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, bootstrap_broker);

  // Set the password and enabled protocols explicitly, as Utils.propsToStringMap just returns
  // toString() representations and these properties don't have a valid string representation.
  Password trustStorePassword = (Password) clientSecurityProps.get("ssl.truststore.password");
  config.put("ssl.truststore.password", trustStorePassword.value());
  config.put("ssl.enabled.protocols", "TLSv1.2");

  assertThat(ClusterStatus.isKafkaReady(config, 3, 10000)).isTrue();
}
 
Example 17
Source Project: DBus   Source File: ProjectTopologyService.java    License: Apache License 2.0
public void rerunTopology(String topologyCode, String ctrlMsg) {
    KafkaProducer<String, byte[]> producer = null;
    try {
        String topic = StringUtils.joinWith("_", topologyCode, "ctrl");
        Properties props = zkService.getProperties(KeeperConstants.KEEPER_CTLMSG_PRODUCER_CONF);
        Properties globalConf = zkService.getProperties(KeeperConstants.GLOBAL_CONF);
        props.setProperty(GLOBAL_CONF_KEY_BOOTSTRAP_SERVERS, globalConf.getProperty(GLOBAL_CONF_KEY_BOOTSTRAP_SERVERS));
        if (StringUtils.equals(SecurityConfProvider.getSecurityConf(zkService), Constants.SECURITY_CONFIG_TRUE_VALUE)) {
            props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
        }
        producer = new KafkaProducer<>(props);
        producer.send(new ProducerRecord<String, byte[]>(topic, ctrlMsg.getBytes()), new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
            }
        });
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        if (producer != null) producer.close();
    }
}
 
Example 18
Source Project: DBus   Source File: ZKHepler.java    License: Apache License 2.0
public Properties loadSinkerConf(String confName) throws Exception {
    String path = sinkerRootPath + "/" + confName;
    try {
        Properties properties = zkService.getProperties(path);
        if (confName.equals(SinkerConstants.CONSUMER) || confName.equals(SinkerConstants.PRODUCER)) {
            Properties globalProp = zkService.getProperties(Constants.GLOBAL_PROPERTIES_ROOT);
            properties.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, globalProp.getProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG));
            if (isSecurityConf()) {
                properties.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
            }
        }
        logger.info("load zk properties -- {} : {}", path, properties);
        return properties;
    } catch (Exception e) {
        logger.error("load sinker zk node [{}] properties error.", confName, e);
        throw e;
    }
}
 
Example 19
Source Project: DBus   Source File: KafkaConsumerManager.java    License: Apache License 2.0
@Override
public KafkaConsumer<String, byte[]> obtainKafkaClient(String url) {
    KafkaConsumer<String, byte[]> consumer = null;
    try {
        Properties props = Optional.ofNullable(PropertiesUtils.copy(properties)).orElseGet(Properties::new);
        props.setProperty("bootstrap.servers", url);
        props.setProperty("group.id", StringUtils.joinWith("-", props.getProperty("group.id"), suffix));
        props.setProperty("client.id", StringUtils.joinWith("-", props.getProperty("client.id"), suffix));
        if (StringUtils.equals(securityConf, Constants.SECURITY_CONFIG_TRUE_VALUE))
            props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
        consumer = new KafkaConsumer<>(props);
        logger.info("consumer manager create kafka consumer. url:{}", url);
    } catch (Exception e) {
        logger.error("consumer manager create kafka consumer error. url:{}", url);
        throw new RuntimeException(e);
    }
    return consumer;
}
 
Example 20
Source Project: common-docker   Source File: ClusterStatusSASLTest.java    License: Apache License 2.0
@Test(timeout = 120000)
public void isKafkaReadyWithSASLAndSSL() throws Exception {
  Properties clientSecurityProps = kafka.getClientSecurityConfig();

  Map<String, String> config = Utils.propsToStringMap(clientSecurityProps);
  config.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapBroker(SecurityProtocol.SASL_SSL));

  // Set the password and enabled protocols explicitly, as Utils.propsToStringMap just returns
  // toString() representations and these properties don't have a valid string representation.
  Password trustStorePassword = (Password) clientSecurityProps.get("ssl.truststore.password");
  config.put("ssl.truststore.password", trustStorePassword.value());
  config.put("ssl.enabled.protocols", "TLSv1.2");

  assertThat(ClusterStatus.isKafkaReady(config, 3, 10000)).isTrue();
}
 
Example 21
Source Project: apicurio-registry   Source File: KafkaServiceInitializer.java    License: Apache License 2.0
@Override
public void beforeAll(@Observes @Initialized(ApplicationScoped.class) Object event) throws Exception {
    Properties properties = new Properties();
    properties.put(
            CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG,
            System.getProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
    );
    properties.put("connections.max.idle.ms", 10000);
    properties.put("request.timeout.ms", 5000);
    try (AdminClient client = AdminClient.create(properties)) {
        ListTopicsResult topics = client.listTopics();
        Set<String> names = topics.names().get();
        log.info("Kafka is running - {} ...", names);
    }
}
 
Example 22
Source Project: kbear   Source File: ConsumerProxy.java    License: Apache License 2.0
protected KafkaConsumerConfig constructConsumerConfig(KafkaConsumerConfig consumerConfig,
        ConsumerGroup consumerGroup, Topic topic, Cluster cluster) {
    KafkaConsumerConfig kafkaConsumerConfig = consumerConfig.clone();
    kafkaConsumerConfig.getProperties().setProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG,
            cluster.getMeta().get(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG));
    kafkaConsumerConfig.getProperties().setProperty(CommonClientConfigs.CLIENT_ID_CONFIG,
            constructClientId(consumerGroup.getId()));
    return kafkaConsumerConfig;
}
 
Example 23
Source Project: kbear   Source File: ProducerProxy.java    License: Apache License 2.0
protected KafkaProducerConfig constructProducerConfig(KafkaProducerConfig producerConfig, Topic topic,
        Cluster cluster) {
    KafkaProducerConfig kafkaProducerConfig = producerConfig.clone();
    kafkaProducerConfig.getProperties().setProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG,
            cluster.getMeta().get(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG));
    kafkaProducerConfig.getProperties().setProperty(CommonClientConfigs.CLIENT_ID_CONFIG,
            constructClientId(topic.getId()));
    return kafkaProducerConfig;
}
 
Example 24
Source Project: common-docker   Source File: ClusterStatusTest.java    License: Apache License 2.0
@Test(timeout = 120000)
public void isKafkaReadyFailWithLessBrokers() throws Exception {
  try {
    Map<String, String> config = new HashMap<>();
    config.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapBroker(SecurityProtocol.PLAINTEXT));
    assertThat(ClusterStatus.isKafkaReady(config, 5, 10000)).isFalse();
  } catch (Exception e) {
    fail("Unexpected error. " + e.getMessage());
  }
}
 
Example 25
@Test
public void shouldBuildKafkaSenderWithList() {
  // Given
  Map<String, Object> map = new HashMap<>();
  map.put(SENDER_TYPE_CONFIG, TracingBuilder.SenderBuilder.SenderType.KAFKA.name());
  map.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG,
    Arrays.asList("localhost:9092", "localhost:9094"));
  TracingConfiguration config = new TracingConfiguration(map);
  // When
  Sender sender = new TracingBuilder.SenderBuilder(config).build();
  // Then
  assertTrue(sender instanceof KafkaSender);
}
 
Example 26
Source Project: kafka-eagle   Source File: KafkaServiceImpl.java    License: Apache License 2.0
/**
 * Get the real log size of a kafka 0.10.x topic by partition id.
 */
public long getKafkaRealLogSize(String clusterAlias, String topic, int partitionid) {
	long realLogSize = 0L;
	Properties props = new Properties();
	props.put(ConsumerConfig.GROUP_ID_CONFIG, Kafka.KAFKA_EAGLE_SYSTEM_GROUP);
	props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, getKafkaBrokerServer(clusterAlias));
	props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getCanonicalName());
	props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getCanonicalName());
	if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.sasl.enable")) {
		sasl(props, clusterAlias);
	}
	if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.ssl.enable")) {
		ssl(props, clusterAlias);
	}
	KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
	TopicPartition tp = new TopicPartition(topic, partitionid);
	consumer.assign(Collections.singleton(tp));
	java.util.Map<TopicPartition, Long> endLogSize = consumer.endOffsets(Collections.singleton(tp));
	java.util.Map<TopicPartition, Long> startLogSize = consumer.beginningOffsets(Collections.singleton(tp));
	try {
		realLogSize = endLogSize.get(tp).longValue() - startLogSize.get(tp).longValue();
	} catch (Exception e) {
		LOG.error("Get real topic logsize by partition list has error, msg is " + e.getMessage());
	} finally {
		if (consumer != null) {
			consumer.close();
		}
	}
	return realLogSize;
}
 
Example 27
Source Project: ditto   Source File: ProducerSettingsFactory.java    License: Eclipse Public License 2.0
private ProducerSettings<String, String> addMetadata(final ProducerSettings<String, String> settings) {
    // identify the connected Kafka client by the connectionId followed by the instance index
    // (in order to be able to differentiate if a clientCount >1 was configured):
    final InstanceIdentifierSupplier instanceIdentifierSupplier = InstanceIdentifierSupplier.getInstance();

    return settings.withProperty(CommonClientConfigs.CLIENT_ID_CONFIG,
            connection.getId() + "-" + instanceIdentifierSupplier.get());
}
 
Example 28
Source Project: DataLink   Source File: WorkerSystemStateProbeImpl.java    License: Apache License 2.0
private void getWorkerSystemState() throws SigarException {
    SystemSnapshot systemSnapshot = SystemUtils.buildSystemSnapshot();
    WorkerSystemStateInfo systemStateInfo = new WorkerSystemStateInfo();
    systemStateInfo.setLoadAverage(systemSnapshot.getLoadAverage());
    systemStateInfo.setUserCPUUtilization(systemSnapshot.getUserCPUUtilization());
    systemStateInfo.setSysCPUUtilization(systemSnapshot.getSysCPUUtilization());
    Map<String, Long> incomingMap = systemSnapshot.getIncomingNetworkTrafficMap();
    Map<String, Long> outgoingMap = systemSnapshot.getOutgoingNetworkTrafficMap();
    Map<String, String> sysMap = sysPropertiesService.map();
    String netTrafficName = StringUtils.isNotBlank(sysMap.get("net_traffic_name")) ? sysMap.get("net_traffic_name") : "eth0";

    if (!incomingMap.containsKey(netTrafficName) && incomingMap.size() > 0) {
        netTrafficName = (String) incomingMap.keySet().toArray()[0];
    }

    if (incomingMap.size() > 0) {
        long incoming = incomingMap.get(netTrafficName);
        long incomingNetworkTraffic = incoming - lastIncomingNetworkTraffic;
        lastIncomingNetworkTraffic = incoming;
        long outgoing = outgoingMap.get(netTrafficName);
        long outgoingNetworkTraffic = outgoing - lastOutgoingNetworkTraffic;
        lastOutgoingNetworkTraffic = outgoing;
        systemStateInfo.setIncomingNetworkTraffic(incomingNetworkTraffic);
        systemStateInfo.setOutgoingNetworkTraffic(outgoingNetworkTraffic);
    }

    systemStateInfo.setTcpCurrentEstab(systemSnapshot.getTcpCurrentEstab());
    String clientId = WorkerConfig.current().getString(CommonClientConfigs.CLIENT_ID_CONFIG);
    systemStateInfo.setWorkerId(Long.valueOf(clientId));
    WorkerInfo workerInfo = workerService.getById(Long.valueOf(clientId));
    if (workerInfo != null) {
        systemStateInfo.setHost(workerInfo.getWorkerAddress());
    }
    workerSystemStateService.insert(systemStateInfo);
}
 
Example 29
Source Project: DataLink   Source File: WorkerJvmStateProbeImpl.java    License: Apache License 2.0
private void getWorkerJvmState() {
    WorkerJvmStateInfo jvmStateInfo = new WorkerJvmStateInfo();
    JvmSnapshot jvmSnapshot = JvmUtils.buildJvmSnapshot();
    long youngGCCount = jvmSnapshot.getYoungCollectionCount();
    long intervalYoungCollectionCount = youngGCCount - lastYoungCollectionCount;
    lastYoungCollectionCount = youngGCCount;
    long youngGCTime = jvmSnapshot.getYoungCollectionTime();
    long intervalYoungCollectionTime = youngGCTime - lastYoungCollectionTime;
    lastYoungCollectionTime = youngGCTime;
    long oldGCCount = jvmSnapshot.getOldCollectionCount();
    long intervalOldCollectionCount = oldGCCount - lastOldCollectionCount;
    lastOldCollectionCount = oldGCCount;
    long oldGCTime = jvmSnapshot.getOldCollectionTime();
    long intervalOldCollectionTime = oldGCTime - lastOldCollectionTime;
    lastOldCollectionTime = oldGCTime;
    jvmStateInfo.setOldMemUsed(jvmSnapshot.getOldUsed());
    jvmStateInfo.setOldMemMax(jvmSnapshot.getOldMax());
    jvmStateInfo.setYoungMemUsed(jvmSnapshot.getYoungUsed());
    jvmStateInfo.setYoungMemMax(jvmSnapshot.getYoungMax());
    jvmStateInfo.setIntervalYoungCollectionCount(intervalYoungCollectionCount);
    jvmStateInfo.setIntervalOldCollectionCount(intervalOldCollectionCount);
    jvmStateInfo.setIntervalYoungCollectionTime(intervalYoungCollectionTime);
    jvmStateInfo.setIntervalOldCollectionTime(intervalOldCollectionTime);
    jvmStateInfo.setCurrentThreadCount(jvmSnapshot.getCurrentThreadCount());
    String clientId = WorkerConfig.current().getString(CommonClientConfigs.CLIENT_ID_CONFIG);
    jvmStateInfo.setWorkerId(Long.valueOf(clientId));
    WorkerInfo workerInfo = workerService.getById(Long.valueOf(clientId));
    if (workerInfo != null) {
        jvmStateInfo.setHost(workerInfo.getWorkerAddress());
    }
    workerJvmStateService.insert(jvmStateInfo);
}