org.apache.kafka.clients.producer.ProducerConfig Java Examples

The following examples show how to use org.apache.kafka.clients.producer.ProducerConfig. Each example is drawn from an open-source project; the source file and license are noted above it.
Example #1
Source File: KafKaProducerAPITest.java    From javabase with Apache License 2.0
/**
     * get kafkaProducer
     *
     * Common producer-side configuration:
     * bootstrap.servers: Kafka cluster connection string; may list multiple host:port pairs
     * acks: broker acknowledgement mode, one of three values:
     *   0: no acknowledgement; the client does not wait for the broker to confirm after sending
     *   1: leader acknowledgement; the leader returns confirmation as soon as it receives the message
     *   all: full cluster acknowledgement; the leader waits until all in-sync follower replicas have confirmed receipt before responding
     * Choose the acknowledgement mode according to how important the messages are. The default is 1.
     * retries: number of producer-side retries when a send fails; default 0
     * batch.size: when many messages target the same partition, the producer packs them into batches before sending. A value of 0 sends every message individually. Default 16384 bytes.
     * linger.ms: milliseconds to wait before sending, used together with batch.size. Under light load, a non-zero linger.ms lets the producer accumulate more messages per batch, saving network resources. Default 0.
     * key.serializer/value.serializer: serializer classes for the message key/value, chosen to match the key and value types
     * buffer.memory: size of the message buffer pool. Unsent messages are held in producer memory; if messages are produced faster than they are sent, send requests block once the pool is full. Default 33554432 bytes (32 MB).
     *
     * @return the configured KafkaProducer
     */
    private static KafkaProducer<Integer, String> getProducer() {
        Properties properties = new Properties();
        // bootstrap.servers
//        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "123.56.118.135:9092,123.56.118.135:9093");
//        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "120.78.62.137:9093,120.78.62.137:9094");
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "120.78.62.137:9093,120.78.62.137:9094");
        // client.id
        properties.put(ProducerConfig.CLIENT_ID_CONFIG, PRODUCER_CLIENT_ID);
        // batch.size: messages for the same partition are packed into batches; 0 sends each message individually. Default 16384 bytes.
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        // linger.ms: milliseconds to wait before sending, used with batch.size to accumulate larger batches under light load. Default 0.
        properties.put(ProducerConfig.LINGER_MS_CONFIG, 5000);
        // retries: number of retries on send failure. Default 0.
        properties.put(ProducerConfig.RETRIES_CONFIG, 0);
        // buffer.memory: buffer pool for unsent messages; send requests block when the pool is full. Default 33554432 bytes.
        properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        // key and value serializer classes
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        return new KafkaProducer<>(properties);
    }
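
For context, a minimal usage sketch for a producer built by getProducer(); the topic name "test-topic" and the message loop are illustrative assumptions, not part of the original test.

    // Hypothetical usage of getProducer(); the topic name is an assumption.
    private static void sendDemoMessages() {
        KafkaProducer<Integer, String> producer = getProducer();
        try {
            for (int i = 0; i < 10; i++) {
                // send() is asynchronous; the callback fires once the broker responds
                producer.send(new ProducerRecord<>("test-topic", i, "message-" + i),
                        (metadata, exception) -> {
                            if (exception != null) {
                                exception.printStackTrace();
                            } else {
                                System.out.printf("sent to %s-%d@%d%n",
                                        metadata.topic(), metadata.partition(), metadata.offset());
                            }
                        });
            }
        } finally {
            producer.close(); // flushes any batched records before shutting down
        }
    }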
 
Example #2
Source File: ProducerInTransaction.java    From javatech with Creative Commons Attribution Share Alike 4.0 International
public static Producer<String, String> buildProducer() {
	// 1. Specify the producer configuration
	Properties properties = new Properties();
	properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, HOST);
	properties.put(ProducerConfig.ACKS_CONFIG, "all");
	properties.put(ProducerConfig.RETRIES_CONFIG, 1);
	properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
	properties.put(ProducerConfig.LINGER_MS_CONFIG, 1);
	properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
	properties.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "first-transactional");
	properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
		"org.apache.kafka.common.serialization.StringSerializer");
	properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
		"org.apache.kafka.common.serialization.StringSerializer");

	// 2. Initialize the Kafka producer with the configuration
	Producer<String, String> producer = new KafkaProducer<>(properties);
	return producer;
}
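
Because the configuration above sets transactional.id, a producer built this way must go through the transactional lifecycle. A minimal sketch, assuming a hypothetical "demo-topic" and illustrative records:

	// Illustrative transactional send; topic and payloads are assumptions.
	Producer<String, String> producer = buildProducer();
	producer.initTransactions();      // registers the transactional.id with the coordinator
	try {
		producer.beginTransaction();
		producer.send(new ProducerRecord<>("demo-topic", "k1", "v1"));
		producer.send(new ProducerRecord<>("demo-topic", "k2", "v2"));
		producer.commitTransaction(); // both records become visible atomically
	} catch (ProducerFencedException e) {
		// fatal: another producer claimed this transactional.id; only closing is safe
	} catch (KafkaException e) {
		producer.abortTransaction();  // recoverable: discards everything sent in this transaction
	} finally {
		producer.close();
	}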
 
Example #3
Source File: KafkaTransportProvider.java    From brooklin with BSD 2-Clause "Simplified" License
/**
 * Constructor for KafkaTransportProvider.
 * @param datastreamTask the {@link DatastreamTask} to which this transport provider is being assigned
 * @param producers Kafka producers to use for producing data to destination Kafka cluster
 * @param props Kafka producer configuration
 * @param metricsNamesPrefix the prefix to use when emitting metrics
 * @throws IllegalArgumentException if either datastreamTask or producers is null
 * @throws com.linkedin.datastream.common.DatastreamRuntimeException if "bootstrap.servers" is not specified in the
 * supplied config
 * @see ProducerConfig
 */
public KafkaTransportProvider(DatastreamTask datastreamTask, List<KafkaProducerWrapper<byte[], byte[]>> producers,
    Properties props, String metricsNamesPrefix) {
  org.apache.commons.lang.Validate.notNull(datastreamTask, "null tasks");
  org.apache.commons.lang.Validate.notNull(producers, "null producer wrappers");
  _producers = producers;
  _datastreamTask = datastreamTask;
  LOG.info("Creating kafka transport provider with properties: {}", props);
  if (!props.containsKey(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
    String errorMessage = "Bootstrap servers are not set";
    ErrorLogger.logAndThrowDatastreamRuntimeException(LOG, errorMessage, null);
  }

  // initialize metrics
  _dynamicMetricsManager = DynamicMetricsManager.getInstance();
  _metricsNamesPrefix = metricsNamesPrefix == null ? CLASS_NAME : metricsNamesPrefix + CLASS_NAME;
  _eventWriteRate = new Meter();
  _eventByteWriteRate = new Meter();
  _eventTransportErrorRate = new Meter();
}
 
Example #4
Source File: KafkaProducerExample.java    From client-examples with Apache License 2.0
public static void main(String[] args) throws InterruptedException {
    KafkaProducerConfig config = KafkaProducerConfig.fromEnv();
    Properties props = KafkaProducerConfig.createProperties(config);

    if (System.getenv("JAEGER_SERVICE_NAME") != null)   {
        Tracer tracer = Configuration.fromEnv().getTracer();
        GlobalTracer.registerIfAbsent(tracer);

        props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, TracingProducerInterceptor.class.getName());
    }

    KafkaProducer<String, String> producer = new KafkaProducer<>(props);
    log.info("Sending {} messages ...", config.getMessageCount());
    for (long i = 0; i < config.getMessageCount(); i++) {
        log.info("Sending message \"{} - {}\"", config.getMessage(), i);
        producer.send(new ProducerRecord<>(config.getTopic(), "\"" + config.getMessage() + " - " + i + "\""));
        Thread.sleep(config.getDelay());
    }
    log.info("{} messages sent ...", config.getMessageCount());
    producer.close();
}
 
Example #5
Source File: KafkaNotification.java    From incubator-atlas with Apache License 2.0
private void startKafka() throws IOException, URISyntaxException {
    String kafkaValue = properties.getProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG);
    LOG.debug("Starting kafka at {}", kafkaValue);
    URL kafkaAddress = getURL(kafkaValue);

    // Note: this aliases the shared properties object rather than copying it, so the broker
    // settings below are written into the same Properties instance.
    Properties brokerConfig = properties;
    brokerConfig.setProperty("broker.id", "1");
    brokerConfig.setProperty("host.name", kafkaAddress.getHost());
    brokerConfig.setProperty("port", String.valueOf(kafkaAddress.getPort()));
    brokerConfig.setProperty("log.dirs", constructDir("kafka").getAbsolutePath());
    brokerConfig.setProperty("log.flush.interval.messages", String.valueOf(1));

    kafkaServer = new KafkaServer(KafkaConfig.fromProps(brokerConfig), new SystemTime(),
            Option.apply(this.getClass().getName()));
    kafkaServer.startup();
    LOG.debug("Embedded kafka server started with broker config {}", brokerConfig);
}
 
Example #6
Source File: IntegrationTest.java    From aiven-kafka-connect-gcs with GNU Affero General Public License v3.0
@BeforeEach
void setUp() throws ExecutionException, InterruptedException {
    testBucketAccessor.clear(gcsPrefix);

    final Properties adminClientConfig = new Properties();
    adminClientConfig.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers());
    adminClient = AdminClient.create(adminClientConfig);

    final Map<String, Object> producerProps = new HashMap<>();
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers());
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
            "org.apache.kafka.common.serialization.ByteArraySerializer");
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
            "org.apache.kafka.common.serialization.ByteArraySerializer");
    producer = new KafkaProducer<>(producerProps);

    final NewTopic newTopic0 = new NewTopic(TEST_TOPIC_0, 4, (short) 1);
    final NewTopic newTopic1 = new NewTopic(TEST_TOPIC_1, 4, (short) 1);
    adminClient.createTopics(Arrays.asList(newTopic0, newTopic1)).all().get();

    connectRunner = new ConnectRunner(pluginDir, kafka.getBootstrapServers(), OFFSET_FLUSH_INTERVAL_MS);
    connectRunner.start();
}
 
Example #7
Source File: KafkaChannelAutoConfiguration.java    From servicecomb-pack with Apache License 2.0
@PostConstruct
public void init() {
  Map<String, Object> props = new HashMap<>();
  props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrap_servers);
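  // max.block.ms is a producer-side setting; AdminClient does not recognize it and is
  // expected to log it as an unused configuration.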
  props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 50000);
  try (final AdminClient adminClient = KafkaAdminClient.create(props)) {
    try {
      final NewTopic newTopic = new NewTopic(topic, numPartitions, replicationFactor);
      final CreateTopicsResult createTopicsResult = adminClient
          .createTopics(Collections.singleton(newTopic));
      createTopicsResult.values().get(topic).get();
    } catch (InterruptedException | ExecutionException e) {
      if (e.getCause() instanceof InterruptedException) {
        Thread.currentThread().interrupt();
      }
      if (!(e.getCause() instanceof TopicExistsException)) {
        throw new RuntimeException(e.getMessage(), e);
      }
    }
  }
  LOG.info("Kafka Channel Init");
}
 
Example #8
Source File: ConsumerInterceptorTTL.java    From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) {
    Properties properties = new Properties();
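    // Note: ProducerConfig.INTERCEPTOR_CLASSES_CONFIG and ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG
    // resolve to the same key, "interceptor.classes"; the ConsumerConfig constant would be the more
    // idiomatic choice for configuring this consumer.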
    properties.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, ConsumerInterceptorTTL.class.getName());

    KafkaConsumer consumer = new ConsumerFactory<String, String>().create(properties);
    try {
        // Start a producer and send some messages first; then run this method more than 10 seconds later
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(String.format("%s-%s-%s-%s",
                        record.topic(), record.partition(), record.offset(), record.value()));
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        consumer.close();
    }
}
 
Example #9
Source File: LiKafkaProducerFactory.java    From brooklin with BSD 2-Clause "Simplified" License
static Properties buildProducerProperties(Properties prop, String clientId, String brokers, String enableLargeMessage) {
  prop.put(ProducerConfig.CLIENT_ID_CONFIG, clientId);
  prop.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
  prop.put(LiKafkaProducerConfig.LARGE_MESSAGE_ENABLED_CONFIG, enableLargeMessage);
  prop.putIfAbsent(CFG_RETRY_BACKOFF_MS, DEFAULT_RETRY_BACKOFF_MS);
  prop.putIfAbsent(CFG_REQUEST_TIMEOUT_MS, DEFAULT_REQUEST_TIMEOUT_MS);
  prop.putIfAbsent(CFG_METADATA_EXPIRY_MS, DEFAULT_METADATA_EXPIRY_MS);
  prop.putIfAbsent(CFG_MAX_PARTITION_BYTES, DEFAULT_MAX_PARTITION_BYTES);
  prop.putIfAbsent(CFG_TOTAL_MEMORY_BYTES, DEFAULT_TOTAL_MEMORY_BYTES);
  prop.putIfAbsent(CFG_REQUEST_REQUIRED_ACKS, DEFAULT_REQUEST_REQUIRED_ACKS);
  prop.putIfAbsent(CFG_LINGER_MS, DEFAULT_LINGER_MS);
  prop.putIfAbsent(CFG_SEND_BUFFER_BYTES, DEFAULT_SEND_BUFFER_BYTES);
  prop.putIfAbsent(CFG_RECEIVE_BUFFER_BYTES, DEFAULT_RECEIVE_BUFFER_BYTES);
  prop.putIfAbsent(CFG_MAX_REQUEST_SIZE, DEFAULT_MAX_REQUEST_SIZE);
  prop.putIfAbsent(CFG_RECONNECT_BACKOFF_MS, DEFAULT_RECONNECT_BACKOFF_MS);
  prop.putIfAbsent(CFG_MAX_BLOCK_MS, DEFAULT_MAX_BLOCK_MS);
  prop.putIfAbsent(CFG_MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, DEFAULT_MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION);
  prop.putIfAbsent(CFG_REQUEST_RETRIES, DEFAULT_REQUEST_RETRIES);
  prop.putIfAbsent(CFG_COMPRESSION_TYPE, DEFAULT_COMPRESSION_TYPE);
  return prop;
}
 
Example #10
Source File: KafkaSource.java    From siddhi-io-kafka with Apache License 2.0
private static Properties createProducerConfig(String zkServerList, String optionalConfigs,
                                               boolean isBinaryMessage) {
    Properties configProperties = new Properties();
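    // Despite its name, zkServerList is passed as Kafka bootstrap.servers (a broker list),
    // not a ZooKeeper connection string.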
    configProperties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, zkServerList);
    configProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
            "org.apache.kafka.common.serialization.ByteArraySerializer");
    KafkaIOUtils.splitHeaderValues(optionalConfigs, configProperties);

    if (!isBinaryMessage) {
        configProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
    } else {
        configProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.ByteArraySerializer");
    }
    return configProperties;
}
 
Example #11
Source File: TestUtil.java    From feast with Apache License 2.0
/**
 * Publish test Feature Row messages to a running Kafka broker
 *
 * @param bootstrapServers e.g. localhost:9092
 * @param topic e.g. my_topic
 * @param messages e.g. list of Feature Row
 * @param valueSerializer in Feast this valueSerializer should be "ByteArraySerializer.class"
 * @param publishTimeoutSec duration to wait for publish operation (of each message) to succeed
 */
public static <T extends Message> void publishToKafka(
    String bootstrapServers,
    String topic,
    List<Pair<String, T>> messages,
    Class<?> valueSerializer,
    long publishTimeoutSec) {

  Properties prop = new Properties();
  prop.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
  prop.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
  prop.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializer);
  Producer<String, byte[]> producer = new KafkaProducer<>(prop);

  messages.forEach(
      featureRow -> {
        ProducerRecord<String, byte[]> record =
            new ProducerRecord<>(
                topic, featureRow.getLeft(), featureRow.getRight().toByteArray());
        try {
          producer.send(record).get(publishTimeoutSec, TimeUnit.SECONDS);
        } catch (InterruptedException | ExecutionException | TimeoutException e) {
          e.printStackTrace();
        }
      });
  producer.close(); // release the producer's resources once all sends have completed
}
 
Example #12
Source File: ProducerTest.java    From vertx-kafka-client with Apache License 2.0
@Test
public void testStreamProduce(TestContext ctx) throws Exception {
  String topicName = "testStreamProduce";
  Properties config = kafkaCluster.useTo().getProducerProperties("testStreamProduce_producer");
  config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
  config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
  producer = producer(Vertx.vertx(), config);
  producer.exceptionHandler(ctx::fail);
  int numMessages = 100000;
  for (int i = 0; i < numMessages; i++) {
    ProducerRecord<String, String> record = new ProducerRecord<>(topicName, 0, "key-" + i, "value-" + i);
    record.headers().add("header_key", ("header_value-" + i).getBytes());
    producer.write(record);
  }
  assertReceiveMessages(ctx, topicName, numMessages);
}
 
Example #13
Source File: MetricCollectorsTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void shouldAggregateStatsAcrossAllProducers() throws Exception {
  ProducerCollector collector1 = new ProducerCollector();
  collector1.configure(ImmutableMap.of(ProducerConfig.CLIENT_ID_CONFIG, "client1"));

  ProducerCollector collector2 = new ProducerCollector();
  collector2.configure(ImmutableMap.of(ProducerConfig.CLIENT_ID_CONFIG, "client2"));

  for (int i = 0; i < 500; i++) {
    collector1.onSend(new ProducerRecord<>(TEST_TOPIC, "key", Integer.toString(i)));
    collector2.onSend(new ProducerRecord<>(TEST_TOPIC + "_" + i, "key",
                                           Integer.toString(i * 100)));
  }

  // The Kafka metrics in MetricCollectors are configured so that sampled stats (like the Rate
  // measurable stat) have 100 samples, each with a duration of 1 second. In this test we
  // record 1000 events, but only in a single sample since they all belong to the same second.
  // So 99 samples are empty. Hence the rate is computed as a tenth of what it should be. This
  // won't be a problem for a longer-running program.
  assertEquals(10, Math.floor(MetricCollectors.currentProductionRate()), 0);
}
 
Example #14
Source File: KafkaProducerConfig.java    From SpringAll with MIT License
@Bean
public ProducerFactory<String, Message> producerFactory() {
    Map<String, Object> configProps = new HashMap<>();
    configProps.put(
            ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
            bootstrapServers);
    configProps.put(
            ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
            StringSerializer.class);
    configProps.put(
            ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
            JsonSerializer.class);
    return new DefaultKafkaProducerFactory<>(configProps);
}
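
A factory like this is typically paired with a KafkaTemplate bean so application code can send Message payloads; a minimal sketch under that assumption:

@Bean
public KafkaTemplate<String, Message> kafkaTemplate() {
    // the template creates producers through the factory defined above
    return new KafkaTemplate<>(producerFactory());
}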
 
Example #15
Source File: SampleProducer.java    From kafka-encryption with Apache License 2.0
@Override
public void run() {

    // tag::produce[]

    // The payload is encrypted using AES
    AesGcmNoPaddingCryptoAlgorithm cryptoAlgorithm = new AesGcmNoPaddingCryptoAlgorithm();
    Encryptor encryptor = new DefaultEncryptor(keyProvider, cryptoAlgorithm);

    // Wrap the base IntegerSerializer and StringSerializer with encrypting wrappers
    CryptoSerializerPairFactory cryptoSerializerPairFactory = new CryptoSerializerPairFactory(encryptor,
            keyReferenceExtractor);
    SerializerPair<Integer, String> serializerPair = cryptoSerializerPairFactory.build(new IntegerSerializer(), new StringSerializer());

    Properties producerProperties = new Properties();
    producerProperties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

    Random random = new Random();

    try (KafkaProducer<Integer, String> producer =
                 new KafkaProducer<>(producerProperties, serializerPair.getKeySerializer(), serializerPair.getValueSerializer())) {

        for (long i = 0L; i < Long.MAX_VALUE; i++) {
            long accountId = i % 10L;
            producer.send(new ProducerRecord<>("operations", (int) accountId, "" + (random.nextInt(1000) - 500)));

            if (i % 100 == 99) {
                try {
                    Thread.sleep(2000L);
                }
                catch (InterruptedException e) {
                    return;
                }
            }

        }
    }
    // end::produce[]

}
 
Example #16
Source File: TestKafkaProducerManager.java    From conductor with Apache License 2.0
@Test
public void testMaxBlockMsFromInput() {

	Configuration configuration = getConfiguration();
	KafkaProducerManager manager = new KafkaProducerManager(configuration);
	KafkaPublishTask.Input input = getInput();
	input.setMaxBlockMs(600);
	Properties props = manager.getProducerProperties(input);
	Assert.assertEquals(props.getProperty(ProducerConfig.MAX_BLOCK_MS_CONFIG), "600");

}
 
Example #17
Source File: PublishKafka_0_10.java    From nifi with Apache License 2.0
protected PublisherPool createPublisherPool(final ProcessContext context) {
    final int maxMessageSize = context.getProperty(MAX_REQUEST_SIZE).asDataSize(DataUnit.B).intValue();
    final long maxAckWaitMillis = context.getProperty(ACK_WAIT_TIME).asTimePeriod(TimeUnit.MILLISECONDS).longValue();

    final Map<String, Object> kafkaProperties = new HashMap<>();
    KafkaProcessorUtils.buildCommonKafkaProperties(context, ProducerConfig.class, kafkaProperties);
    kafkaProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    kafkaProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
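    // "max.request.size" is the same key as ProducerConfig.MAX_REQUEST_SIZE_CONFIG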
    kafkaProperties.put("max.request.size", String.valueOf(maxMessageSize));

    return new PublisherPool(kafkaProperties, getLogger(), maxMessageSize, maxAckWaitMillis);
}
 
Example #18
Source File: KafkaConfiguration.java    From eventeum with Apache License 2.0
@Bean
public ConsumerFactory<String, EventeumMessage> eventeumConsumerFactory() {
    Map<String, Object> props = new HashMap<>();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, settings.getBootstrapAddresses());
    props.put(ConsumerConfig.GROUP_ID_CONFIG, settings.getGroupId());
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
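    // ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG and RETRY_BACKOFF_MS_CONFIG share their key strings
    // ("request.timeout.ms", "retry.backoff.ms") with the matching ConsumerConfig constants, so they
    // take effect here even though producer-side constants are used in a consumer factory.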
    props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, settings.getRequestTimeoutMsConfig());
    props.put(ProducerConfig.RETRY_BACKOFF_MS_CONFIG, settings.getRetryBackoffMsConfig());
    if ("PLAINTEXT".equals(settings.getSecurityProtocol())) {
        configurePlaintextSecurityProtocol(props);
    }
    return new DefaultKafkaConsumerFactory<>(props, null, new JsonDeserializer<>(EventeumMessage.class));
}
 
Example #19
Source File: KafkaSender.java    From zipkin-reporter-java with Apache License 2.0
public static Builder newBuilder() {
  // Settings below correspond to "Producer Configs"
  // http://kafka.apache.org/0102/documentation.html#producerconfigs
  Properties properties = new Properties();
  properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
  properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
    ByteArraySerializer.class.getName());
  // disabling batching as duplicates effort covered by sender buffering.
  properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 0);
  properties.put(ProducerConfig.ACKS_CONFIG, "0");
  return new Builder(properties);
}
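
A possible way to use this builder; the method names follow zipkin-reporter's KafkaSender builder, and the bootstrap address and topic are illustrative, so treat the details as assumptions:

  // Hypothetical usage; bootstrap address and topic are illustrative.
  KafkaSender sender = KafkaSender.newBuilder()
      .bootstrapServers("localhost:9092")
      .topic("zipkin")
      .build();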
 
Example #20
Source File: SaslKafkaConsumerTest.java    From quarkus with Apache License 2.0
public static Producer<Integer, String> createProducer() {
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:19094");
    props.put(ProducerConfig.CLIENT_ID_CONFIG, "sasl-test-producer");
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    addJaas(props);

    return new KafkaProducer<>(props);
}
 
Example #21
Source File: EmbeddedCassandraConnectorTestBase.java    From debezium-incubator with Apache License 2.0
protected static Properties generateDefaultConfigMap() throws IOException {
    Properties props = new Properties();
    props.put(CassandraConnectorConfig.CONNECTOR_NAME.name(), TEST_CONNECTOR_NAME);
    props.put(CassandraConnectorConfig.CASSANDRA_CONFIG.name(), TEST_CASSANDRA_YAML_CONFIG);
    props.put(CassandraConnectorConfig.KAFKA_TOPIC_PREFIX.name(), TEST_KAFKA_TOPIC_PREFIX);
    props.put(CassandraConnectorConfig.CASSANDRA_HOSTS.name(), TEST_CASSANDRA_HOSTS);
    props.put(CassandraConnectorConfig.CASSANDRA_PORT.name(), String.valueOf(TEST_CASSANDRA_PORT));
    props.put(CassandraConnectorConfig.OFFSET_BACKING_STORE_DIR.name(), Files.createTempDirectory("offset").toString());
    props.put(CassandraConnectorConfig.KAFKA_PRODUCER_CONFIG_PREFIX + ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, TEST_KAFKA_SERVERS);
    props.put(CassandraConnectorConfig.COMMIT_LOG_RELOCATION_DIR.name(), Files.createTempDirectory("cdc_raw_relocation").toString());
    return props;
}
 
Example #22
Source File: KafkaBrokerTestHarness.java    From common-kafka with Apache License 2.0
/**
 * Returns properties for a Kafka producer.
 *
 * @return Producer properties.
 */
public Properties getProducerProps() {
    Properties props = new Properties();
    props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, getBootstrapServers());

    return props;
}
 
Example #23
Source File: KafkaBaseConnectorDescriptor.java    From alchemy with Apache License 2.0
@Override
public void validate() throws Exception {
    Assert.notNull(topic, "Kafka topic must not be null");
    Assert.notNull(properties, "Kafka properties must not be null");
    Assert.notNull(PropertiesUtil.fromYamlMap(this.properties).get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG),
        "Kafka " + ProducerConfig.BOOTSTRAP_SERVERS_CONFIG + " must not be null");

}
 
Example #24
Source File: KafkaProducerConfiguration.java    From ZTuoExchange_framework with MIT License
public Map<String, Object> producerConfigs() {
		Map<String, Object> props = new HashMap<>();
		props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
		props.put(ProducerConfig.RETRIES_CONFIG, retries);
		props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
		props.put(ProducerConfig.LINGER_MS_CONFIG, linger);
		props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
//		props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, "cn.ztuo.bitrade.kafka.kafkaPartitioner");
		props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
		props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
		return props;
	}
 
Example #25
Source File: AbstractKafkaMetrics.java    From micronaut-kafka with Apache License 2.0
/**
 * Method to add a default metric reporter if not otherwise defined.
 *
 * @param event                         The event for bean created of type AbstractKafkaConfiguration
 * @param kafkaMetricsReporterClassName The class name to use for kafka metrics registration
 * @return The bean
 */
T addKafkaMetrics(BeanCreatedEvent<T> event, String kafkaMetricsReporterClassName) {
    Properties props = event.getBean().getConfig();
    if (!props.containsKey(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG)) {
        props.put(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG, kafkaMetricsReporterClassName);
        if (LOG.isDebugEnabled()) {
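            // ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG is the same key ("metric.reporters")
            // as the CommonClientConfigs constant used in the put() above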
            LOG.debug(String.format("Adding kafka property:value of %s:%s", ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, kafkaMetricsReporterClassName));
        }
    }
    return event.getBean();
}
 
Example #26
Source File: FlinkKafkaProducer011.java    From flink with Apache License 2.0
private static Properties getPropertiesFromBrokerList(String brokerList) {
	String[] elements = brokerList.split(",");

	// validate the broker addresses
	for (String broker: elements) {
		NetUtils.getCorrectHostnamePort(broker);
	}

	Properties props = new Properties();
	props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
	return props;
}
 
Example #28
Source File: KafkaProducerWrapperTest.java    From common-kafka with Apache License 2.0
@Test
public void test_messageSentSynchronouslySuccessfully() throws IOException {
    long previousSendCount = KafkaProducerWrapper.SEND_TIMER.count();
    long previousSyncSendCount = KafkaProducerWrapper.SYNC_SEND_TIMER.count();
    long previousFlushCount = KafkaProducerWrapper.FLUSH_TIMER.count();
    long previousBatchSizeCount = KafkaProducerWrapper.BATCH_SIZE_HISTOGRAM.count();
    double previousBatchSizeSum = KafkaProducerWrapper.BATCH_SIZE_HISTOGRAM.sum();

    kafkaAdminClient.createTopic(topic, 4, 1, new Properties());

    Properties props = KafkaTests.getProps();
    props.setProperty(KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    props.setProperty(VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    props.setProperty(ProducerConfig.BATCH_SIZE_CONFIG, "10000");
    props.setProperty(ProducerConfig.LINGER_MS_CONFIG, "60000");

    KafkaProducerWrapper<String, String> producer = new KafkaProducerWrapper<>(new KafkaProducer<>(props));

    producer.sendSynchronously(
            new ProducerRecord<>(topic, "key" + testName.getMethodName(), "value" + UUID.randomUUID()));
    producer.close();

    assertThat(KafkaProducerWrapper.SEND_TIMER.count(), is(previousSendCount));
    assertThat(KafkaProducerWrapper.SYNC_SEND_TIMER.count(), is(previousSyncSendCount + 1));
    assertThat(KafkaProducerWrapper.FLUSH_TIMER.count(), is(previousFlushCount));
    assertThat(KafkaProducerWrapper.BATCH_SIZE_HISTOGRAM.count(), is(previousBatchSizeCount + 1));
    assertThat(KafkaProducerWrapper.BATCH_SIZE_HISTOGRAM.sum(), is(previousBatchSizeSum + 1));
}
 
Example #29
Source File: MapR61StreamsProducer11.java    From datacollector with Apache License 2.0
private void addUserConfiguredProperties(Map<String, Object> kafkaClientConfigs, Properties props) {
  // The following options, if specified, are ignored: "key.serializer" and "value.serializer"
  if (kafkaClientConfigs != null && !kafkaClientConfigs.isEmpty()) {
    kafkaClientConfigs.remove(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG);
    kafkaClientConfigs.remove(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG);

    for (Map.Entry<String, Object> producerConfig : kafkaClientConfigs.entrySet()) {
      props.put(producerConfig.getKey(), producerConfig.getValue());
    }
  }
}
 
Example #30
Source File: FlinkKafkaProducerMigrationTest.java    From flink with Apache License 2.0
@Override
protected Properties createProperties() {
	Properties properties = new Properties();
	properties.putAll(standardProps);
	properties.putAll(secureProps);
	properties.put(ProducerConfig.CLIENT_ID_CONFIG, "producer-client-id");
	properties.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "producer-transaction-id");
	properties.put(FlinkKafkaProducer.KEY_DISABLE_METRICS, "true");
	return properties;
}