Java Code Examples for org.apache.kafka.clients.producer.ProducerConfig

The following examples show how to use org.apache.kafka.clients.producer.ProducerConfig. These examples are extracted from open source projects; where available, the source project, file, and license are noted above each example.
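Before the project-specific examples, a minimal self-contained sketch of the basic pattern may help: ProducerConfig supplies String constants for every producer setting, so configuration keys never need to be hard-coded. The broker address localhost:9092 and the topic demo-topic below are placeholders, not taken from any project on this page.

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class MinimalProducerExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // try-with-resources closes the producer, flushing any buffered records first
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("demo-topic", "key", "value")); // placeholder topic
        }
    }
}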
Example 1
Source Project: doctorkafka   Source File: DoctorKafkaActionReporter.java    License: Apache License 2.0
public DoctorKafkaActionReporter(String zkUrl, SecurityProtocol securityProtocol,
    String topic,  Map<String, String> producerConfigs) {
  this.topic = topic;
  String bootstrapBrokers = OperatorUtil.getBrokers(zkUrl, securityProtocol);
  Properties props = new Properties();
  props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapBrokers);
  props.put(ProducerConfig.ACKS_CONFIG, "1");
  props.put(ProducerConfig.RETRIES_CONFIG, 3);
  props.put(ProducerConfig.BATCH_SIZE_CONFIG, 1638400);
  props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
  props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "gzip");
  props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
  props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");

  for (Map.Entry<String, String> entry : producerConfigs.entrySet()) {
    props.put(entry.getKey(), entry.getValue());
  }
  this.kafkaProducer = new KafkaProducer<>(props);
}
 
Example 2
public static void main(String[] args) {
    Properties properties = new Properties();
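    // Note: ProducerConfig.INTERCEPTOR_CLASSES_CONFIG and ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG
    // both resolve to the same key, "interceptor.classes", which is why a producer constant works here.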
    properties.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, ConsumerInterceptorTTL.class.getName());

    KafkaConsumer<String, String> consumer = new ConsumerFactory<String, String>().create(properties);
    try {
        // Start a producer and send some messages, then run this method again after more than 10 seconds
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(String.format("%s-%s-%s-%s",
                        record.topic(), record.partition(), record.offset(), record.value()));
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        consumer.close();
    }
}
 
Example 3
static Properties buildProducerProperties(Properties prop, String clientId, String brokers, String enableLargeMessage) {
  prop.put(ProducerConfig.CLIENT_ID_CONFIG, clientId);
  prop.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
  prop.put(LiKafkaProducerConfig.LARGE_MESSAGE_ENABLED_CONFIG, enableLargeMessage);
  prop.putIfAbsent(CFG_RETRY_BACKOFF_MS, DEFAULT_RETRY_BACKOFF_MS);
  prop.putIfAbsent(CFG_REQUEST_TIMEOUT_MS, DEFAULT_REQUEST_TIMEOUT_MS);
  prop.putIfAbsent(CFG_METADATA_EXPIRY_MS, DEFAULT_METADATA_EXPIRY_MS);
  prop.putIfAbsent(CFG_MAX_PARTITION_BYTES, DEFAULT_MAX_PARTITION_BYTES);
  prop.putIfAbsent(CFG_TOTAL_MEMORY_BYTES, DEFAULT_TOTAL_MEMORY_BYTES);
  prop.putIfAbsent(CFG_REQUEST_REQUIRED_ACKS, DEFAULT_REQUEST_REQUIRED_ACKS);
  prop.putIfAbsent(CFG_LINGER_MS, DEFAULT_LINGER_MS);
  prop.putIfAbsent(CFG_SEND_BUFFER_BYTES, DEFAULT_SEND_BUFFER_BYTES);
  prop.putIfAbsent(CFG_RECEIVE_BUFFER_BYTES, DEFAULT_RECEIVE_BUFFER_BYTES);
  prop.putIfAbsent(CFG_MAX_REQUEST_SIZE, DEFAULT_MAX_REQUEST_SIZE);
  prop.putIfAbsent(CFG_RECONNECT_BACKOFF_MS, DEFAULT_RECONNECT_BACKOFF_MS);
  prop.putIfAbsent(CFG_MAX_BLOCK_MS, DEFAULT_MAX_BLOCK_MS);
  prop.putIfAbsent(CFG_MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, DEFAULT_MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION);
  prop.putIfAbsent(CFG_REQUEST_RETRIES, DEFAULT_REQUEST_RETRIES);
  prop.putIfAbsent(CFG_COMPRESSION_TYPE, DEFAULT_COMPRESSION_TYPE);
  return prop;
}
 
Example 4
@Test
public void shouldAggregateStatsAcrossAllProducers() throws Exception {
  ProducerCollector collector1 = new ProducerCollector();
  collector1.configure(ImmutableMap.of(ProducerConfig.CLIENT_ID_CONFIG, "client1"));

  ProducerCollector collector2 = new ProducerCollector();
  collector2.configure(ImmutableMap.of(ProducerConfig.CLIENT_ID_CONFIG, "client2"));

  for (int i = 0; i < 500; i++) {
    collector1.onSend(new ProducerRecord<>(TEST_TOPIC, "key", Integer.toString(i)));
    collector2.onSend(new ProducerRecord<>(TEST_TOPIC + "_" + i, "key",
                                           Integer.toString(i * 100)));
  }

  // The Kafka metrics in MetricCollectors are configured so that sampled stats (like the Rate
  // measurable stat) have 100 samples, each with a duration of 1 second. In this test we
  // record 1000 events, but only in a single sample since they all belong to the same second.
  // So 99 samples are empty. Hence the rate is computed as a tenth of what it should be. This
  // won't be a problem for a longer running program.
  assertEquals(10, Math.floor(MetricCollectors.currentProductionRate()), 0);
}
 
Example 5
Source Project: javabase   Source File: KafKaProducerAPITest.java    License: Apache License 2.0
/**
     * Get a KafkaProducer.
     * Common producer-side configuration:
     * bootstrap.servers: the Kafka cluster connection string; may consist of multiple host:port pairs
     * acks: the broker's message-acknowledgement mode, one of three values:
     *   0: no acknowledgement; the client does not wait for the broker's confirmation after sending
     *   1: leader acknowledgement; the leader returns a confirmation as soon as it receives the message
     *   all: full cluster acknowledgement; the leader waits until all in-sync follower replicas have
     *        confirmed receipt before returning a confirmation
     * The mode can be chosen according to how important the messages are; the default is 1.
     * retries: the number of times the producer retries a failed send; the default is 0
     * batch.size: when many messages are bound for the same partition, the producer packs them into a
     *        batch and sends them together; 0 sends every message individually. The default is 16384 bytes.
     * linger.ms: milliseconds to wait before sending, used together with batch.size. Under light load,
     *        setting linger.ms lets the producer wait so that more messages can accumulate into one
     *        batch, saving network resources. The default is 0.
     * key.serializer/value.serializer: serializer classes for the message key/value, chosen to match
     *        the key and value types
     * buffer.memory: the size of the message buffer pool. Messages that have not yet been sent are held
     *        in the producer's memory; if messages are produced faster than they are sent, send requests
     *        block once the pool is full. The default is 33554432 bytes (32 MB).
     *
     * @return the configured KafkaProducer
     */
    private static KafkaProducer<Integer, String> getProducer() {
        Properties properties = new Properties();
        //bootstrap.servers
//        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "123.56.118.135:9092,123.56.118.135:9093");
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "120.78.62.137:9093,120.78.62.137:9094");
        //client.id
        properties.put(ProducerConfig.CLIENT_ID_CONFIG, PRODUCER_CLIENT_ID);
        //batch.size: messages bound for the same partition are packed into a batch before sending; 0 sends each message individually (default 16384 bytes)
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        //linger.ms: milliseconds to wait before sending, used with batch.size to accumulate larger batches under light load (default 0)
        properties.put(ProducerConfig.LINGER_MS_CONFIG, 5000);
        //retries: number of times the producer retries a failed send (default 0)
        properties.put(ProducerConfig.RETRIES_CONFIG, 0);
        //buffer.memory: size of the buffer pool for unsent messages; sends block once the pool fills (default 33554432 bytes)
        properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        //key and value serializer classes
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        return new KafkaProducer<>(properties);
    }
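The javadoc above walks through the acks trade-off, but getProducer() leaves acks unset, so the client default applies (acks=1 in the classic Java clients these examples appear to target; newer clients default to all once idempotence is enabled). A minimal sketch of setting the mode explicitly:

Properties props = new Properties();
// choose exactly one acknowledgement mode:
props.put(ProducerConfig.ACKS_CONFIG, "0");     // fire-and-forget: lowest latency, silent loss possible
// props.put(ProducerConfig.ACKS_CONFIG, "1");   // leader confirms on receipt
// props.put(ProducerConfig.ACKS_CONFIG, "all"); // all in-sync replicas confirm: strongest durability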
 
Example 6
Source Project: vertx-kafka-client   Source File: ProducerTest.java    License: Apache License 2.0
@Test
public void testStreamProduce(TestContext ctx) throws Exception {
  String topicName = "testStreamProduce";
  Properties config = kafkaCluster.useTo().getProducerProperties("testStreamProduce_producer");
  config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
  config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
  producer = producer(Vertx.vertx(), config);
  producer.exceptionHandler(ctx::fail);
  int numMessages = 100000;
  for (int i = 0; i < numMessages; i++) {
    ProducerRecord<String, String> record = new ProducerRecord<>(topicName, 0, "key-" + i, "value-" + i);
    record.headers().add("header_key", ("header_value-" + i).getBytes());
    producer.write(record);
  }
  assertReceiveMessages(ctx, topicName, numMessages);
}
 
Example 7
Source Project: feast   Source File: TestUtil.java    License: Apache License 2.0
/**
 * Publish test Feature Row messages to a running Kafka broker
 *
 * @param bootstrapServers e.g. localhost:9092
 * @param topic e.g. my_topic
 * @param messages e.g. list of Feature Row
 * @param valueSerializer the value serializer class; in Feast this should be "ByteArraySerializer.class"
 * @param publishTimeoutSec duration to wait for publish operation (of each message) to succeed
 */
public static <T extends Message> void publishToKafka(
    String bootstrapServers,
    String topic,
    List<Pair<String, T>> messages,
    Class<?> valueSerializer,
    long publishTimeoutSec) {

  Properties prop = new Properties();
  prop.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
  prop.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
  prop.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializer);
  Producer<String, byte[]> producer = new KafkaProducer<>(prop);

  messages.forEach(
      featureRow -> {
        ProducerRecord<String, byte[]> record =
            new ProducerRecord<>(
                topic, featureRow.getLeft(), featureRow.getRight().toByteArray());
        try {
          producer.send(record).get(publishTimeoutSec, TimeUnit.SECONDS);
        } catch (InterruptedException | ExecutionException | TimeoutException e) {
          e.printStackTrace();
        }
      });
}
 
Example 8
Source Project: siddhi-io-kafka   Source File: KafkaSource.java    License: Apache License 2.0
private static Properties createProducerConfig(String zkServerList, String optionalConfigs,
                                               boolean isBinaryMessage) {
    Properties configProperties = new Properties();
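    // Despite the parameter name, zkServerList is used as the Kafka bootstrap.servers list here,
    // not as a ZooKeeper address.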
    configProperties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, zkServerList);
    configProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
            "org.apache.kafka.common.serialization.ByteArraySerializer");
    KafkaIOUtils.splitHeaderValues(optionalConfigs, configProperties);

    if (!isBinaryMessage) {
        configProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
    } else {
        configProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.ByteArraySerializer");
    }
    return configProperties;
}
 
Example 9
@PostConstruct
public void init() {
  Map<String, Object> props = new HashMap<>();
  props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrap_servers);
  props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 50000);
  try (final AdminClient adminClient = KafkaAdminClient.create(props)) {
    try {
      final NewTopic newTopic = new NewTopic(topic, numPartitions, replicationFactor);
      final CreateTopicsResult createTopicsResult = adminClient
          .createTopics(Collections.singleton(newTopic));
      createTopicsResult.values().get(topic).get();
    } catch (InterruptedException | ExecutionException e) {
      if (e.getCause() instanceof InterruptedException) {
        Thread.currentThread().interrupt();
      }
      if (!(e.getCause() instanceof TopicExistsException)) {
        throw new RuntimeException(e.getMessage(), e);
      }
    }
  }
  LOG.info("Kafka Channel Init");
}
 
Example 10
@BeforeEach
void setUp() throws ExecutionException, InterruptedException {
    testBucketAccessor.clear(gcsPrefix);

    final Properties adminClientConfig = new Properties();
    adminClientConfig.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers());
    adminClient = AdminClient.create(adminClientConfig);

    final Map<String, Object> producerProps = new HashMap<>();
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers());
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
            "org.apache.kafka.common.serialization.ByteArraySerializer");
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
            "org.apache.kafka.common.serialization.ByteArraySerializer");
    producer = new KafkaProducer<>(producerProps);

    final NewTopic newTopic0 = new NewTopic(TEST_TOPIC_0, 4, (short) 1);
    final NewTopic newTopic1 = new NewTopic(TEST_TOPIC_1, 4, (short) 1);
    adminClient.createTopics(Arrays.asList(newTopic0, newTopic1)).all().get();

    connectRunner = new ConnectRunner(pluginDir, kafka.getBootstrapServers(), OFFSET_FLUSH_INTERVAL_MS);
    connectRunner.start();
}
 
Example 11
Source Project: client-examples   Source File: KafkaProducerExample.java    License: Apache License 2.0
public static void main(String[] args) throws InterruptedException {
    KafkaProducerConfig config = KafkaProducerConfig.fromEnv();
    Properties props = KafkaProducerConfig.createProperties(config);

    if (System.getenv("JAEGER_SERVICE_NAME") != null)   {
        Tracer tracer = Configuration.fromEnv().getTracer();
        GlobalTracer.registerIfAbsent(tracer);

        props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, TracingProducerInterceptor.class.getName());
    }

    KafkaProducer<String, String> producer = new KafkaProducer<>(props);
    log.info("Sending {} messages ...", config.getMessageCount());
    for (long i = 0; i < config.getMessageCount(); i++) {
        log.info("Sending message \"" + config.getMessage() + " - {}\"", i);
        producer.send(new ProducerRecord<>(config.getTopic(), "\"" + config.getMessage() + " - " + i + "\""));
        Thread.sleep(config.getDelay());
    }
    log.info("{} messages sent ...", config.getMessageCount());
    producer.close();
}
 
Example 12
Source Project: incubator-atlas   Source File: KafkaNotification.java    License: Apache License 2.0
private void startKafka() throws IOException, URISyntaxException {
    String kafkaValue = properties.getProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG);
    LOG.debug("Starting kafka at {}", kafkaValue);
    URL kafkaAddress = getURL(kafkaValue);

    Properties brokerConfig = properties;
    brokerConfig.setProperty("broker.id", "1");
    brokerConfig.setProperty("host.name", kafkaAddress.getHost());
    brokerConfig.setProperty("port", String.valueOf(kafkaAddress.getPort()));
    brokerConfig.setProperty("log.dirs", constructDir("kafka").getAbsolutePath());
    brokerConfig.setProperty("log.flush.interval.messages", String.valueOf(1));

    kafkaServer = new KafkaServer(KafkaConfig.fromProps(brokerConfig), new SystemTime(),
            Option.apply(this.getClass().getName()));
    kafkaServer.startup();
    LOG.debug("Embedded kafka server started with broker config {}", brokerConfig);
}
 
Example 13
public static Producer<String, String> buildProducer() {
	// 1. Specify the producer configuration
	Properties properties = new Properties();
	properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, HOST);
	properties.put(ProducerConfig.ACKS_CONFIG, "all");
	properties.put(ProducerConfig.RETRIES_CONFIG, 1);
	properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
	properties.put(ProducerConfig.LINGER_MS_CONFIG, 1);
	properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
	properties.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "first-transactional");
	properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
		"org.apache.kafka.common.serialization.StringSerializer");
	properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
		"org.apache.kafka.common.serialization.StringSerializer");

	// 2. Initialize the Kafka producer with the configuration
	Producer<String, String> producer = new KafkaProducer<>(properties);
	return producer;
}
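Because buildProducer() sets TRANSACTIONAL_ID_CONFIG, the returned producer must call initTransactions() before its first send; otherwise the client throws an IllegalStateException. A minimal usage sketch, assuming the placeholder topic demo-topic (ProducerFencedException is org.apache.kafka.common.errors.ProducerFencedException, KafkaException is org.apache.kafka.common.KafkaException):

Producer<String, String> producer = buildProducer();
producer.initTransactions();                  // registers "first-transactional" with the broker
try {
    producer.beginTransaction();
    producer.send(new ProducerRecord<>("demo-topic", "key", "value"));
    producer.commitTransaction();
} catch (ProducerFencedException e) {
    producer.close();                         // another producer took over this transactional.id
} catch (KafkaException e) {
    producer.abortTransaction();              // transient failure: abort, then the producer may be reused
}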
 
Example 14
/**
 * Constructor for KafkaTransportProvider.
 * @param datastreamTask the {@link DatastreamTask} to which this transport provider is being assigned
 * @param producers Kafka producers to use for producing data to destination Kafka cluster
 * @param props Kafka producer configuration
 * @param metricsNamesPrefix the prefix to use when emitting metrics
 * @throws IllegalArgumentException if either datastreamTask or producers is null
 * @throws com.linkedin.datastream.common.DatastreamRuntimeException if "bootstrap.servers" is not specified in the
 * supplied config
 * @see ProducerConfig
 */
public KafkaTransportProvider(DatastreamTask datastreamTask, List<KafkaProducerWrapper<byte[], byte[]>> producers,
    Properties props, String metricsNamesPrefix) {
  org.apache.commons.lang.Validate.notNull(datastreamTask, "null tasks");
  org.apache.commons.lang.Validate.notNull(producers, "null producer wrappers");
  _producers = producers;
  _datastreamTask = datastreamTask;
  LOG.info("Creating kafka transport provider with properties: {}", props);
  if (!props.containsKey(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
    String errorMessage = "Bootstrap servers are not set";
    ErrorLogger.logAndThrowDatastreamRuntimeException(LOG, errorMessage, null);
  }

  // initialize metrics
  _dynamicMetricsManager = DynamicMetricsManager.getInstance();
  _metricsNamesPrefix = metricsNamesPrefix == null ? CLASS_NAME : metricsNamesPrefix + CLASS_NAME;
  _eventWriteRate = new Meter();
  _eventByteWriteRate = new Meter();
  _eventTransportErrorRate = new Meter();
}
 
Example 15
Source Project: common-kafka   Source File: KafkaProducerWrapperTest.java    License: Apache License 2.0
@Test
public void test_messageSentSynchronouslySuccessfully() throws IOException {
    long previousSendCount = KafkaProducerWrapper.SEND_TIMER.count();
    long previousSyncSendCount = KafkaProducerWrapper.SYNC_SEND_TIMER.count();
    long previousFlushCount = KafkaProducerWrapper.FLUSH_TIMER.count();
    long previousBatchSizeCount = KafkaProducerWrapper.BATCH_SIZE_HISTOGRAM.count();
    double previousBatchSizeSum = KafkaProducerWrapper.BATCH_SIZE_HISTOGRAM.sum();

    kafkaAdminClient.createTopic(topic, 4, 1, new Properties());

    Properties props = KafkaTests.getProps();
    props.setProperty(KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    props.setProperty(VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    props.setProperty(ProducerConfig.BATCH_SIZE_CONFIG, "10000");
    props.setProperty(ProducerConfig.LINGER_MS_CONFIG, "60000");

    KafkaProducerWrapper<String, String> producer = new KafkaProducerWrapper<>(new KafkaProducer<>(props));

    producer.sendSynchronously(
            new ProducerRecord<>(topic, "key"+testName.getMethodName(), "value"+ UUID.randomUUID()));
    producer.close();

    assertThat(KafkaProducerWrapper.SEND_TIMER.count(), is(previousSendCount));
    assertThat(KafkaProducerWrapper.SYNC_SEND_TIMER.count(), is(previousSyncSendCount + 1));
    assertThat(KafkaProducerWrapper.FLUSH_TIMER.count(), is(previousFlushCount));
    assertThat(KafkaProducerWrapper.BATCH_SIZE_HISTOGRAM.count(), is(previousBatchSizeCount + 1));
    assertThat(KafkaProducerWrapper.BATCH_SIZE_HISTOGRAM.sum(), is(previousBatchSizeSum + 1));
}
 
Example 16
Source Project: mercury   Source File: EventProducer.java    License: Apache License 2.0
private synchronized void startProducer() {
    if (producer == null) {
        // create unique ID from origin ID by dropping date prefix and adding a sequence suffix
        String id = (Platform.getInstance().getOrigin()+"p"+(++seq)).substring(8);
        Properties properties = getProperties();
        properties.put(ProducerConfig.CLIENT_ID_CONFIG, id);
        producer = new KafkaProducer<>(properties);
        producerId = id;
        lastStarted = System.currentTimeMillis();
        log.info("Producer {} ready", id);
    }
}
 
Example 17
protected Producer<String, String> createProducerTopicMetadataClient(Map<String, Object> kafkaClientConfigs) {
  Properties props = new Properties();
  props.put(ProducerConfig.CLIENT_ID_CONFIG, "topicMetadataClient");
  props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ORG_APACHE_KAFKA_COMMON_SERIALIZATION_STRING_SERIALIZER);
  props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ORG_APACHE_KAFKA_COMMON_SERIALIZATION_STRING_SERIALIZER);

  // Check if user has configured 'max.block.ms' option, otherwise wait for 60 seconds to fetch metadata
  if (kafkaClientConfigs != null && kafkaClientConfigs.containsKey(STREAMS_RPC_TIMEOUT_MS)) {
    props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, kafkaClientConfigs.get(STREAMS_RPC_TIMEOUT_MS));
  } else {
    props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 60000);
  }
  return new KafkaProducer<>(props);
}
 
Example 18
@Bean
public ProducerFactory<String, MailContext> mailContextProducerFactory() {
    Map<String, Object> configProps = new HashMap<>();
    configProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, environment.getProperty("kafka.bootstrap.address"));
    configProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    configProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
    configProps.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 5000);
    return new DefaultKafkaProducerFactory<>(configProps);
}
 
Example 19
Source Project: javabase   Source File: KafkaProducerTest.java    License: Apache License 2.0
/**
 * Build a KafkaProducer
 *
 * @return KafkaProducer
 */
private KafkaProducer<Integer, String> getProducer() {
    Properties properties = new Properties();
    //bootstrap.servers
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "123.56.118.135:9092");
    //client.id
    properties.put(ProducerConfig.CLIENT_ID_CONFIG, "KafkaProducerTest");
    //key and value serializer classes
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class.getName());
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    return new KafkaProducer<>(properties);
}
 
Example 20
Source Project: staccato   Source File: TestProducer.java    License: Apache License 2.0
@PostConstruct
public void init() {
    Map<String, Object> props = new HashMap<>();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, configProps.getBootstrapServers());
    props.put(ProducerConfig.ACKS_CONFIG, "all");
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    SenderOptions<Integer, String> senderOptions = SenderOptions.create(props);

    sender = KafkaSender.create(senderOptions);
}
 
Example 21
Source Project: tutorials   Source File: KafkaProducerConfig.java    License: MIT License
@Bean
public ProducerFactory<String, Greeting> greetingProducerFactory() {
    Map<String, Object> configProps = new HashMap<>();
    configProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapAddress);
    configProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    configProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
    return new DefaultKafkaProducerFactory<>(configProps);
}
 
Example 22
/**
 * Constructor for KafkaTransportProviderAdmin.
 * @param transportProviderName transport provider name
 * @param props TransportProviderAdmin configuration properties, e.g. ZooKeeper connection string, bootstrap.servers.
 */
public KafkaTransportProviderAdmin(String transportProviderName, Properties props) {
  _transportProviderProperties = props;
  VerifiableProperties transportProviderProperties = new VerifiableProperties(_transportProviderProperties);

  // ZK connect string and bootstrap servers configs might not exist for connectors that manage their own destinations
  _zkAddress = Optional.ofNullable(_transportProviderProperties.getProperty(ZK_CONNECT_STRING_CONFIG))
      .filter(v -> !v.isEmpty());

  _zkUtils = _zkAddress.map(address -> new ZkUtils(new ZkClient(address), new ZkConnection(address), false));

  //Load default producer bootstrap server from config if available
  _brokersConfig =
      Optional.ofNullable(_transportProviderProperties.getProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG));

  _numProducersPerConnector =
      transportProviderProperties.getInt(CONFIG_NUM_PRODUCERS_PER_CONNECTOR, DEFAULT_PRODUCERS_PER_CONNECTOR);

  _defaultNumProducersPerTask = transportProviderProperties.getInt(CONFIG_PRODUCERS_PER_TASK, 1);
  org.apache.commons.lang3.Validate.isTrue(_defaultNumProducersPerTask > 0 && _defaultNumProducersPerTask <= _numProducersPerConnector,
      "Invalid value for " + CONFIG_PRODUCERS_PER_TASK);

  String metricsPrefix = transportProviderProperties.getString(CONFIG_METRICS_NAMES_PREFIX, null);
  if (metricsPrefix != null && !metricsPrefix.endsWith(".")) {
    _transportProviderMetricsNamesPrefix = metricsPrefix + ".";
  } else {
    _transportProviderMetricsNamesPrefix = metricsPrefix;
  }

  _topicProperties = transportProviderProperties.getDomainProperties(DOMAIN_TOPIC);
}
 
Example 23
Source Project: conductor   Source File: TestKafkaProducerManager.java    License: Apache License 2.0
@Test
public void testRequestTimeoutSetFromDefault() {

	KafkaProducerManager manager = new KafkaProducerManager(new SystemPropertiesConfiguration());
	KafkaPublishTask.Input input = getInput();
	Properties props = manager.getProducerProperties(input);
	Assert.assertEquals(props.getProperty(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG), "100");

}
 
Example 24
Source Project: nifi   Source File: PublishKafkaRecord_0_10.java    License: Apache License 2.0
@Override
protected PropertyDescriptor getSupportedDynamicPropertyDescriptor(final String propertyDescriptorName) {
    return new PropertyDescriptor.Builder()
        .description("Specifies the value for '" + propertyDescriptorName + "' Kafka Configuration.")
        .name(propertyDescriptorName)
        .addValidator(new KafkaProcessorUtils.KafkaConfigValidator(ProducerConfig.class))
        .dynamic(true)
        .build();
}
 
Example 25
@Test void canOverridesProperty_bootstrapServers() {
  TestPropertyValues.of(
      "zipkin.storage.type:kafka",
      "zipkin.storage.kafka.bootstrap-servers:host1:19092"
  ).applyTo(context);
  Access.registerKafka(context);
  context.refresh();

  assertThat(context.getBean(KafkaStorage.class)
      .producerConfig.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG))
      .isEqualTo("host1:19092");
}
 
Example 26
@Test void canOverridesProperty_producerConfigs() {
  TestPropertyValues.of(
      "zipkin.storage.type:kafka",
      "zipkin.storage.kafka.span-partitioning.overrides.acks:1"
  ).applyTo(context);
  Access.registerKafka(context);
  context.refresh();

  assertThat(context.getBean(KafkaStorage.class)
      .producerConfig.get(ProducerConfig.ACKS_CONFIG))
      .isEqualTo("1");
}
 
Example 27
Source Project: javabase   Source File: KafkaClientUtil.java    License: Apache License 2.0
/**
 * Build a KafkaProducer
 * @return the configured producer
 */
private KafkaProducer<String, String> getProducer() {
    Properties properties = new Properties();
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
    //key and value serializer classes
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    return new KafkaProducer<>(properties);
}
 
Example 28
Source Project: camel-quarkus   Source File: CamelKafkaSupport.java    License: Apache License 2.0
public static Producer<Integer, String> createProducer() {
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, System.getProperty("camel.component.kafka.brokers"));
    props.put(ProducerConfig.CLIENT_ID_CONFIG, "test-consumer");
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

    return new KafkaProducer<>(props);
}
 
Example 29
Source Project: zipkin-reporter-java   Source File: ITKafkaSender.java    License: Apache License 2.0
@Test
public void checkFalseWhenKafkaIsDown() throws Exception {
  broker.stop();

  // Make a new sender that fails faster than the default 60 seconds
  sender.close();
  Map<String, String> overrides = new LinkedHashMap<>();
  overrides.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "100");
  sender = sender.toBuilder().overrides(overrides).build();

  CheckResult check = sender.check();
  assertThat(check.ok()).isFalse();
  assertThat(check.error()).isInstanceOf(TimeoutException.class);
}
 
Example 30
Source Project: Flink-CEPplus   Source File: FlinkKafkaProducer011.java    License: Apache License 2.0
private static Properties getPropertiesFromBrokerList(String brokerList) {
	String[] elements = brokerList.split(",");

	// validate the broker addresses
	for (String broker: elements) {
		NetUtils.getCorrectHostnamePort(broker);
	}

	Properties props = new Properties();
	props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
	return props;
}