Java Code Examples for org.apache.kafka.clients.producer.ProducerRecord

The following examples show how to use org.apache.kafka.clients.producer.ProducerRecord. These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may want to check out the right sidebar which shows the related API usage.
Example 1
Source Project: redpipe   Source File: MetricsVerticle.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public void start() throws Exception {
  // Platform MXBean used to sample process CPU load and physical memory.
  systemMBean = ManagementFactory.getPlatformMXBean(OperatingSystemMXBean.class);

  // A random identifier for this verticle instance, used as the record key.
  String pid = UUID.randomUUID().toString();

  // Get the kafka producer config
  JsonObject config = config();

  // Create the producer
  producer = KafkaWriteStream.create(vertx.getDelegate(), config.getMap(), String.class, JsonObject.class);

  // Publish the metrics to Kafka once per second.
  vertx.setPeriodic(1000, id -> {
    JsonObject metrics = new JsonObject();
    metrics.put("CPU", systemMBean.getProcessCpuLoad());
    // Used memory = total physical memory minus free physical memory.
    metrics.put("Mem", systemMBean.getTotalPhysicalMemorySize() - systemMBean.getFreePhysicalMemorySize());
    producer.write(new ProducerRecord<>("the_topic", new JsonObject().put(pid, metrics)));
  });
}
 
Example 2
Source Project: SkaETL   Source File: ESErrorRetryWriter.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Records produce/error metrics for a failed message and publishes the error
 * payload to the configured error topic.
 *
 * @param applicationId the consumer/process name used for metric tagging
 * @param errorData     the error payload to publish
 */
public void sendToErrorTopic(String applicationId, ErrorData errorData) {
    JsonNode parsed = JSONUtils.getInstance().parse(errorData.message);
    String messageType = "unknown";
    if (parsed.has("type")) {
        messageType = parsed.get("type").asText();
    }
    String errorTopic = kafkaConfiguration.getErrorTopic();
    // Count the message as produced to Kafka.
    Metrics.counter("skaetl_nb_produce_message_kafka_count",
            Lists.newArrayList(
                    Tag.of("processConsumerName", applicationId),
                    Tag.of("topic", errorTopic),
                    Tag.of("type", messageType)
            )
    ).increment();
    // Count the message as an error, tagged with its reason.
    Metrics.counter("skaetl_nb_produce_error_kafka_count",
            Lists.newArrayList(
                    Tag.of("processConsumerName", applicationId),
                    Tag.of("type", messageType),
                    Tag.of("reason", errorData.getErrorReason())
            )
    ).increment();
    errorProducer.send(new ProducerRecord<>(errorTopic, errorData));
}
 
Example 3
Source Project: java-specialagent   Source File: KafkaTest.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void clients(final MockTracer tracer) throws Exception {
  // Produce two messages; the instrumentation should create one producer
  // span per send and one consumer span per received message.
  try (final Producer<Integer,String> producer = createProducer()) {
    // Send 1
    producer.send(new ProducerRecord<>("messages", 1, "test"));

    // Send 2
    producer.send(new ProducerRecord<>("messages", 1, "test"));

    // Latch counts down once per consumed message; the consumer helper is
    // expected to wait for both — see createConsumer elsewhere in the file.
    final CountDownLatch latch = new CountDownLatch(2);
    createConsumer(latch, 1, tracer);
  }

  // 2 producer spans + 2 consumer spans = 4 finished spans, none active.
  final List<MockSpan> mockSpans = tracer.finishedSpans();
  assertEquals(4, mockSpans.size());
  assertNull(tracer.activeSpan());
}
 
Example 4
Source Project: ranger   Source File: KafkaRangerAuthorizerGSSTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that an unauthorized principal cannot write to the "dev" topic:
 * the send must fail with an authorization error.
 */
@Test
public void testUnauthorizedWrite() throws Exception {
    // Create the Producer
    Properties producerProps = new Properties();
    producerProps.put("bootstrap.servers", "localhost:" + port);
    producerProps.put("acks", "all");
    producerProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    producerProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    producerProps.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
    producerProps.put("sasl.mechanism", "GSSAPI");
    producerProps.put("sasl.kerberos.service.name", "kafka");

    // try-with-resources: previously the producer leaked when the assertion
    // threw (AssertionError bypassed the trailing close()).
    try (Producer<String, String> producer = new KafkaProducer<>(producerProps)) {
        // Send a message
        try {
            Future<RecordMetadata> record =
                producer.send(new ProducerRecord<String, String>("dev", "somekey", "somevalue"));
            producer.flush();
            record.get();
            // BUG FIX: without this, a successful (i.e. wrongly authorized)
            // write would have let the test pass silently.
            Assert.fail("Expected the write to fail with an authorization error");
        } catch (Exception ex) {
            Assert.assertTrue(ex.getMessage().contains("Not authorized to access topics"));
        }
    }
}
 
Example 5
Source Project: smallrye-reactive-messaging   Source File: KafkaUsage.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Asynchronously produces {@code messageCount} messages on a dedicated thread
 * and writes them to the cluster, invoking the completion callback when done.
 *
 * @param producerName the name of the producer; may not be null
 * @param messageCount the number of messages to produce; must be positive
 * @param keySerializer the serializer for the keys; may not be null
 * @param valueSerializer the serializer for the values; may not be null
 * @param completionCallback callback invoked when producing finishes; may be null
 * @param messageSupplier supplies each record to send; may not be null
 */
public <K, V> void produce(String producerName, int messageCount,
        Serializer<K> keySerializer, Serializer<V> valueSerializer,
        Runnable completionCallback,
        Supplier<ProducerRecord<K, V>> messageSupplier) {
    Properties props = getProducerProperties(producerName);
    Runnable produceAll = () -> {
        LOGGER.infof("Starting producer %s to write %s messages", producerName, messageCount);
        try (KafkaProducer<K, V> producer = new KafkaProducer<>(props, keySerializer, valueSerializer)) {
            int sent = 0;
            while (sent != messageCount) {
                ProducerRecord<K, V> next = messageSupplier.get();
                producer.send(next);
                // Flush after every send so each record reaches the broker promptly.
                producer.flush();
                LOGGER.infof("Producer %s: sent message %s", producerName, next);
                sent++;
            }
        } finally {
            // Always notify the caller, even when producing failed.
            if (completionCallback != null) {
                completionCallback.run();
            }
            LOGGER.infof("Stopping producer %s", producerName);
        }
    };
    Thread producerThread = new Thread(produceAll);
    producerThread.setName(producerName + "-thread");
    producerThread.start();
}
 
Example 6
Source Project: ad   Source File: MyProducer.java    License: Apache License 2.0 6 votes vote down vote up
/** Asynchronous send with a callback — a middle ground between fire-and-forget and blocking. */
private static void sendMessageAsync() {
    ProducerRecord<String, String> asyncRecord =
            new ProducerRecord<>("kafka-topic", "key", "async");
    producer.send(asyncRecord, (metadata, e) -> {
        log.info("coming into callback");
        if (e == null) {
            // Success: log where the record landed.
            log.debug("topic: {}", metadata.topic());
            log.debug("partition: {}", metadata.partition());
            log.debug("offset: {}", metadata.offset());
        } else {
            log.error("kafka exception: {}", e.getMessage());
        }
    });
}
 
Example 7
Source Project: mercury   Source File: EventProducer.java    License: Apache License 2.0 6 votes vote down vote up
// Starts the producer and announces this app instance to the service registry
// so the event consumer starts; exits the JVM if the handshake fails.
private void initializeConsumer() {
    startProducer();
    try {
        String origin = Platform.getInstance().getOrigin();
        String uuid = Utility.getInstance().getUuid();
        // INIT control event addressed to the service registry.
        EventEnvelope direct = new EventEnvelope().setTo(ServiceDiscovery.SERVICE_REGISTRY).setHeader(TYPE, INIT);
        // Block up to 20s for broker acknowledgement of the announcement.
        producer.send(new ProducerRecord<>(origin, uuid, direct.toBytes())).get(20, TimeUnit.SECONDS);
        totalEvents++;
        lastActive = System.currentTimeMillis();
        log.info("Tell event consumer to start with {}", origin);
    } catch (Exception e) {
        /*
         * Unrecoverable error. Shut down and let the infrastructure restart this app instance.
         */
        log.error("Unable to initialize consumer - {}", e.getMessage());
        System.exit(20);
    }
}
 
Example 8
Source Project: jMetalSP   Source File: CounterProviderString.java    License: MIT License 6 votes vote down vote up
/**
 * Publishes an increasing counter value to {@code topic} every 10 seconds
 * until the thread is interrupted.
 */
public void run(){
    int count = 0;
    long startTime = System.currentTimeMillis();
    while (true){
        String auxCount = count+"";
        Future<RecordMetadata> send =
                producer.send(new ProducerRecord<String, String>
                        (topic,auxCount,auxCount), new ProducerCallBack(startTime, count, "Count ->" + count) );

        try {
            Thread.sleep(10000);
        } catch (InterruptedException e) {
            // BUG FIX: restore the interrupt flag and stop the loop instead of
            // printing the stack trace and spinning forever.
            Thread.currentThread().interrupt();
            return;
        }
        count++;
    }
}
 
Example 9
Source Project: DBus   Source File: BoltCommandHandlerHelper.java    License: Apache License 2.0 6 votes vote down vote up
// Builds a COMMON_EMAIL_MESSAGE control event and publishes it to the global
// event topic. NOTE(review): the finally block closes the caller-supplied
// producer after a single send — confirm callers expect to hand over ownership.
public static void writeEmailMessage(String subject, String contents, String dataSchema, Producer<String, String> producer) {
    try {
        // Send the email as a control message.
        ControlMessage gm = new ControlMessage(System.currentTimeMillis(), ControlType.COMMON_EMAIL_MESSAGE.toString(), BoltCommandHandlerHelper.class.getName());

        gm.addPayload("subject", subject);
        gm.addPayload("contents", contents);
        gm.addPayload("datasource_schema", Utils.getDatasource().getDsName() + "/" + dataSchema);

        // Topic name comes from the global configuration.
        String topic = PropertiesHolder.getProperties(Constants.Properties.CONFIGURE, Constants.ConfigureKey.GLOBAL_EVENT_TOPIC);
        ProducerRecord<String, String> record = new ProducerRecord<>(topic, gm.getType(), gm.toJSONString());
        producer.send(record, (metadata, exception) -> {
            if (exception != null) {
                logger.error("Send global event error.{}", exception.getMessage());
            }
        });
    } catch (Exception e) {
        logger.error("send email error. schema:{}, subject:{}, content:{}", dataSchema, subject, contents, e);
    } finally {
        if (producer != null) producer.close();
    }
}
 
Example 10
Source Project: singer   Source File: TestKafkaWriter.java    License: Apache License 2.0 6 votes vote down vote up
// Builds a successful KafkaWritingTaskResult from the worker's buffered
// records: total payload bytes, elapsed time, the first record's partition,
// and one synthetic RecordMetadata per record.
public KafkaWritingTaskResult createResult(KafkaWritingTask worker, List<PartitionInfo> sortedPartitions){
    long startMillis = System.currentTimeMillis();
    List<ProducerRecord<byte[], byte[]>> records = worker.getMessages();
    List<RecordMetadata> metadataList = new ArrayList<>();
    int totalBytes = 0;
    for (ProducerRecord<byte[], byte[]> record : records) {
      int keySize = record.key().length;
      int valSize = record.value().length;
      totalBytes += keySize + valSize;
      // Resolve the topic for this record's partition from the sorted list.
      int partition = record.partition();
      TopicPartition topicPartition =
          new TopicPartition(sortedPartitions.get(partition).topic(), partition);
      metadataList.add(new RecordMetadata(topicPartition, 0, 0, 0, 0L, keySize, valSize));
    }
    long endMillis = System.currentTimeMillis();
    KafkaWritingTaskResult result = new KafkaWritingTaskResult(true, totalBytes, (int) (endMillis - startMillis));
    result.setPartition(records.get(0).partition());
    result.setRecordMetadataList(metadataList);
    return result;
}
 
Example 11
Source Project: atlas   Source File: KafkaNotificationMockTest.java    License: Apache License 2.0 6 votes vote down vote up
@Test
@SuppressWarnings("unchecked")
public void shouldThrowExceptionIfProducerFails() throws NotificationException,
        ExecutionException, InterruptedException {
    Properties configProperties = mock(Properties.class);
    KafkaNotification kafkaNotification = new KafkaNotification(configProperties);

    // Mocked producer whose send() future throws, simulating a broker failure.
    Producer producer = mock(Producer.class);
    String topicName = kafkaNotification.getProducerTopicName(NotificationInterface.NotificationType.HOOK);
    String message = "This is a test message";
    Future returnValue = mock(Future.class);
    when(returnValue.get()).thenThrow(new RuntimeException("Simulating exception"));
    ProducerRecord expectedRecord = new ProducerRecord(topicName, message);
    when(producer.send(expectedRecord)).thenReturn(returnValue);

    try {
        kafkaNotification.sendInternalToProducer(producer,
            NotificationInterface.NotificationType.HOOK, Arrays.asList(new String[]{message}));
        fail("Should have thrown NotificationException");
    } catch (NotificationException e) {
        // The failed message must be reported back to the caller.
        assertEquals(e.getFailedMessages().size(), 1);
        assertEquals(e.getFailedMessages().get(0), "This is a test message");
    }
}
 
Example 12
Source Project: incubator-atlas   Source File: KafkaNotificationMockTest.java    License: Apache License 2.0 6 votes vote down vote up
@Test
@SuppressWarnings("unchecked")
public void shouldSendMessagesSuccessfully() throws NotificationException,
        ExecutionException, InterruptedException {
    Properties configProperties = mock(Properties.class);
    KafkaNotification kafkaNotification = new KafkaNotification(configProperties);

    // Mocked producer that acknowledges the expected record successfully.
    Producer producer = mock(Producer.class);
    String topicName = kafkaNotification.getTopicName(NotificationInterface.NotificationType.HOOK);
    String message = "This is a test message";
    Future returnValue = mock(Future.class);
    when(returnValue.get()).thenReturn(new RecordMetadata(new TopicPartition(topicName, 0), 0, 0));
    ProducerRecord expectedRecord = new ProducerRecord(topicName, message);
    when(producer.send(expectedRecord)).thenReturn(returnValue);

    kafkaNotification.sendInternalToProducer(producer,
            NotificationInterface.NotificationType.HOOK, new String[]{message});

    // The notification layer must delegate exactly this record to the producer.
    verify(producer).send(expectedRecord);
}
 
Example 13
@Override
public ProducerRecord<K, V> record() {
  // With headers present, convert each stored header into a Kafka
  // RecordHeader (value as bytes) and use the header-aware constructor.
  if (!headers.isEmpty()) {
    return new ProducerRecord<>(
      topic,
      partition,
      timestamp,
      key,
      value,
      headers.stream()
        .map(h -> new RecordHeader(h.key(), h.value().getBytes()))
        .collect(Collectors.toList()));
  }
  // No headers: delegate to the shorter constructor.
  return new ProducerRecord<>(topic, partition, timestamp, key, value);
}
 
Example 14
Source Project: java-kafka-client   Source File: TracingKafkaProducer.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Sends the record with tracing: builds a producer span (child of
 * {@code parent} when given), injects its context into the record, and wraps
 * the user callback so the span is handled when the send completes.
 *
 * @param record the record to publish
 * @param callback user callback invoked on completion; may be null
 * @param parent explicit parent span context; may be null
 * @return the future returned by the underlying producer
 */
public Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback,
    SpanContext parent) {
  /*
  // Create wrappedRecord because headers can be read only in record (if record is sent second time)
  ProducerRecord<K, V> wrappedRecord = new ProducerRecord<>(record.topic(),
      record.partition(),
      record.timestamp(),
      record.key(),
      record.value(),
      record.headers());
  */

  Span span = TracingKafkaUtils
      .buildAndInjectSpan(record, tracer, producerSpanNameProvider, parent, spanDecorators);
  // Activate the span around the send so nested instrumentation sees it.
  try (Scope ignored = tracer.activateSpan(span)) {
    Callback wrappedCallback = new TracingCallback(callback, span, tracer, spanDecorators);
    return producer.send(record, wrappedCallback);
  }
}
 
Example 15
Source Project: atlas   Source File: KafkaNotificationMockTest.java    License: Apache License 2.0 6 votes vote down vote up
@Test
@SuppressWarnings("unchecked")
public void shouldSendMessagesSuccessfully() throws NotificationException,
        ExecutionException, InterruptedException {
    Properties configProperties = mock(Properties.class);
    KafkaNotification kafkaNotification = new KafkaNotification(configProperties);

    // Mocked producer that acknowledges the expected record successfully.
    Producer producer = mock(Producer.class);
    String topicName = kafkaNotification.getProducerTopicName(NotificationInterface.NotificationType.HOOK);
    String message = "This is a test message";
    Future returnValue = mock(Future.class);
    TopicPartition topicPartition = new TopicPartition(topicName, 0);
    // 0L instead of Long.valueOf(0): same value, no explicit boxing call.
    when(returnValue.get()).thenReturn(new RecordMetadata(topicPartition, 0, 0, 0, 0L, 0, 0));
    ProducerRecord expectedRecord = new ProducerRecord(topicName, message);
    when(producer.send(expectedRecord)).thenReturn(returnValue);

    // Arrays.asList(message) via varargs instead of an explicit String[] wrapper.
    kafkaNotification.sendInternalToProducer(producer,
            NotificationInterface.NotificationType.HOOK, Arrays.asList(message));

    // The notification layer must delegate exactly this record to the producer.
    verify(producer).send(expectedRecord);
}
 
Example 16
Source Project: java-course-ee   Source File: KafkaMessageWriter.java    License: MIT License 6 votes vote down vote up
/**
 * Demo writer: produces 100 string records ("0".."99") to "test.topic"
 * against a local broker and closes the producer.
 */
public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092");
    props.put("acks", "all");
    props.put("delivery.timeout.ms", 30000);
    props.put("request.timeout.ms", 20000);
    props.put("batch.size", 16384);
    props.put("linger.ms", 1);
    props.put("buffer.memory", 33554432);
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

    // try-with-resources: previously the producer leaked (and buffered records
    // could be lost) if any send threw before the explicit close().
    try (Producer<String, String> producer = new KafkaProducer<>(props)) {
        for (int i = 0; i < 100; i++) {
            producer.send(new ProducerRecord<String, String>("test.topic", Integer.toString(i), Integer.toString(i)));
        }
    }
}
 
Example 17
Source Project: smallrye-reactive-messaging   Source File: KafkaSink.java    License: Apache License 2.0 6 votes vote down vote up
// Bridges a Kafka write result to the reactive-messaging emitter: on success,
// ack the message and complete; on failure, fail the emitter (retry follows).
private void handleWriteResult(AsyncResult<?> ar, Message<?> message, ProducerRecord<?, ?> record,
        UniEmitter<? super Void> emitter) {
    String actualTopic = record.topic();
    if (!ar.succeeded()) {
        // Fail, there will be retry.
        emitter.fail(ar.cause());
        return;
    }
    log.successfullyToTopic(message, actualTopic);
    // Complete only after the message acknowledgement itself succeeds.
    message.ack().whenComplete((ignoredValue, ackFailure) -> {
        if (ackFailure == null) {
            emitter.complete(null);
        } else {
            emitter.fail(ackFailure);
        }
    });
}
 
Example 18
@Test
public void testFromKafkaToAppWithMetadata() {
    KafkaUsage usage = new KafkaUsage();
    deploy(getKafkaSinkConfigForMyAppWithKafkaMetadata(), MyAppWithKafkaMetadata.class);

    // Produce 100 integers (0..99) keyed "a-key" onto the metadata topic.
    AtomicInteger value = new AtomicInteger();
    usage.produceIntegers(100, null,
            () -> new ProducerRecord<>("metadata-topic", "a-key", value.getAndIncrement()));

    // Wait until the bean has consumed at least the first 10 values.
    MyAppWithKafkaMetadata bean = container.getBeanManager().createInstance().select(MyAppWithKafkaMetadata.class).get();
    await().atMost(2, TimeUnit.MINUTES).until(() -> bean.list().size() >= 10);
    assertThat(bean.list()).contains(0, 1, 2, 3, 4, 5, 6, 7, 8, 9);

    // The incoming Kafka metadata must expose the original key and topic.
    assertThat(bean.getMetadata()).isNotNull();
    assertThat(bean.getMetadata()).contains(bean.getOriginal());
    AtomicBoolean foundMetadata = new AtomicBoolean(false);
    for (Object object : bean.getMetadata()) {
        if (object instanceof IncomingKafkaRecordMetadata) {
            IncomingKafkaRecordMetadata incomingMetadata = (IncomingKafkaRecordMetadata) object;
            assertThat(incomingMetadata.getKey()).isEqualTo("a-key");
            assertThat(incomingMetadata.getTopic()).isEqualTo("metadata-topic");
            foundMetadata.compareAndSet(false, true);
        }
    }
    assertThat(foundMetadata.get()).isTrue();
}
 
Example 19
Source Project: metron   Source File: KafkaLoader.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Streams the sample file to Kafka line by line, pausing {@code delay} ms
 * between sends; repeats the whole file {@code iterations} times (-1 = forever).
 */
public void start() {
  Map<String, Object> producerConfig = new HashMap<>();
  producerConfig.put("bootstrap.servers", brokerUrl);
  producerConfig.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
  producerConfig.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
  kafkaProducer = new KafkaProducer<>(producerConfig);
  try {
    while (iterations == -1 || iterations-- > 0) {
      // try-with-resources: previously the reader leaked when readLine() or
      // sleep() threw, since close() was only reached on the happy path.
      try (BufferedReader reader = new BufferedReader(
          new InputStreamReader(new FileInputStream(samplePath), StandardCharsets.UTF_8))) {
        String line;
        while ((line = reader.readLine()) != null) {
          kafkaProducer.send(new ProducerRecord<String, String>(topic, line));
          Thread.sleep(delay);
        }
      }
    }
  } catch (InterruptedException e) {
    // Restore the interrupt flag instead of swallowing the interruption.
    Thread.currentThread().interrupt();
  } catch (Exception e) {
    e.printStackTrace();
  }
}
 
Example 20
Source Project: ranger   Source File: KafkaRangerAuthorizerTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that an authorized (SSL-authenticated) principal can write to the
 * "dev" topic: the send must be acknowledged without error.
 */
@Test
public void testAuthorizedWrite() throws Exception {
    // Create the Producer
    Properties producerProps = new Properties();
    producerProps.put("bootstrap.servers", "localhost:" + port);
    producerProps.put("acks", "all");
    producerProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    producerProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    producerProps.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL");
    producerProps.put(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, "JKS");
    producerProps.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, serviceKeystorePath);
    producerProps.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, "sspass");
    producerProps.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, "skpass");
    producerProps.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, truststorePath);
    producerProps.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "security");

    // try-with-resources: previously the producer leaked when record.get()
    // threw, since close() was only reached on the happy path.
    try (Producer<String, String> producer = new KafkaProducer<>(producerProps)) {
        // Send a message and block until it is acknowledged.
        Future<RecordMetadata> record =
            producer.send(new ProducerRecord<String, String>("dev", "somekey", "somevalue"));
        producer.flush();
        record.get();
    }
}
 
Example 21
/**
 * Drains the output topic into a map of word to count, ignoring window
 * start/end dates. A word that is incremented can appear in the output more
 * than once; later rows overwrite earlier ones in the map.
 *
 * @return map of word to its latest count
 */
private Map<String, Long> getOutputList() {
    final Map<String, Long> wordCounts = new HashMap<>();
    for (ProducerRecord<Bytes, KafkaStreamsWordCountApplication.WordCount> row = readOutput();
            row != null;
            row = readOutput()) {
        KafkaStreamsWordCountApplication.WordCount count = row.value();
        wordCounts.put(count.getWord(), count.getCount());
    }
    return wordCounts;
}
 
Example 22
@Test
public void healthIndicatorDownTest() throws Exception {
	// Feed two JSON records into the "in" topic and expect the stream's
	// health indicator to report DOWN for the "out" destination.
	try (ConfigurableApplicationContext context = singleStream("ApplicationHealthTest-xyzabc")) {
		receive(context,
				Lists.newArrayList(new ProducerRecord<>("in", "{\"id\":\"123\"}"),
						new ProducerRecord<>("in", "{\"id\":\"124\"}")),
				Status.DOWN, "out");
	}
}
 
Example 23
@Test
public void testReadingMessageWithoutTypeShouldReturnEmptyMessage() {
    EventMessage<?> event = eventMessage();
    ProducerRecord<String, byte[]> msg = testSubject.createKafkaMessage(event, SOME_TOPIC);
    // Strip the type header to simulate a message produced without one.
    msg.headers().remove(MESSAGE_TYPE);

    // Without the type header the reader should yield an empty result
    // rather than fail.
    assertThat(testSubject.readKafkaMessage(toReceiverRecord(msg)).isPresent()).isFalse();
}
 
Example 24
Source Project: SO   Source File: AProducerHandler.java    License: BSD 2-Clause "Simplified" License 5 votes vote down vote up
/**
 * Sends a key/value pair to the topic, lazily creating the producer on first
 * use.<BR/>
 *
 * @param key key
 * @param value value
 */
@Override
public Future<RecordMetadata> send(K key, V value) {
    // Lazy initialization: build the producer the first time send is called.
    if (producer == null) {
        producer = createProducer();
    }
    ProducerRecord<K, V> record = new ProducerRecord<>(getTopic(), key, value);
    return producer.send(record);
}
 
Example 25
/**
 * Sends every JSON payload in {@code dataList} to the topic under a fixed key,
 * logging the number of messages sent when done.
 */
@Override
public void run() {
    // BUG FIX: the counter started at 1, so the final trace overstated the
    // number of sent messages by one. Start at 0 and count each send.
    int messageCounter = 0;
    String producerName = Thread.currentThread().getName();

    logger.trace("Starting Producer thread" + producerName);
    for (JsonNode data : dataList) {
        producer.send(new ProducerRecord<>(topicName, messageKey, data));
        messageCounter++;
    }
    logger.trace("Finished Producer thread" + producerName + " sent " + messageCounter + " messages");
}
 
Example 26
Source Project: brave   Source File: ITKafkaTracing.java    License: Apache License 2.0 5 votes vote down vote up
@Test
public void nextSpan_makes_child() {
  producer = createTracingProducer();
  consumer = createTracingConsumer();

  send(new ProducerRecord<>(testName.getMethodName(), TEST_KEY, TEST_VALUE));

  ConsumerRecords<String, String> records = consumer.poll(10000);

  assertThat(records).hasSize(1);
  MutableSpan producerSpan = takeProducerSpan();
  MutableSpan consumerSpan = takeConsumerSpan();

  for (ConsumerRecord<String, String> record : records) {
    // A span derived from the record must join the consumer span's trace
    // and be its direct child.
    brave.Span processor = kafkaTracing.nextSpan(record);

    assertThat(consumerSpan.tags())
      .containsEntry(KAFKA_TOPIC_TAG, record.topic());

    assertThat(processor.context().traceIdString()).isEqualTo(consumerSpan.traceId());
    assertThat(processor.context().parentIdString()).isEqualTo(consumerSpan.id());

    processor.start().name("processor").finish();

    // The processor doesn't taint the consumer span which has already finished
    MutableSpan processorSpan = testSpanHandler.takeLocalSpan();
    assertThat(processorSpan.id())
      .isNotEqualTo(consumerSpan.id());
  }
}
 
Example 27
Source Project: kafka-tutorials   Source File: WindowFinalResultTest.java    License: Apache License 2.0 5 votes vote down vote up
// Reads the next windowed word-count record from the output topic, or null
// when the topic is exhausted.
private ProducerRecord<Windowed<String>, Long> readNext() {
    return topologyTestDriver.readOutput(
            this.outputTopic,
            this.keyResultSerde.deserializer(),
            Serdes.Long().deserializer()
    );
}
 
Example 28
Source Project: pulsar   Source File: KafkaApiTest.java    License: Apache License 2.0 5 votes vote down vote up
@Test
public void testSimpleProducer() throws Exception {
    String topic = "testSimpleProducer";

    // Pulsar consumer that will receive what the Kafka-API producer sends.
    @Cleanup
    PulsarClient pulsarClient = PulsarClient.builder().serviceUrl(getPlainTextServiceUrl()).build();
    org.apache.pulsar.client.api.Consumer<byte[]> pulsarConsumer = pulsarClient.newConsumer().topic(topic)
            .subscriptionName("my-subscription")
            .subscribe();

    Properties props = new Properties();
    props.put("bootstrap.servers", getPlainTextServiceUrl());

    props.put("key.serializer", IntegerSerializer.class.getName());
    props.put("value.serializer", StringSerializer.class.getName());

    Producer<Integer, String> producer = new KafkaProducer<>(props);

    // Publish ten messages through the Kafka-compatible producer API.
    for (int i = 0; i < 10; i++) {
        producer.send(new ProducerRecord<Integer, String>(topic, i, "hello-" + i));
    }

    producer.flush();
    producer.close();

    // Verify the same ten payloads arrive in order on the Pulsar side.
    // NOTE(review): receive() may return null on timeout, which would NPE at
    // msg.getData() — consider asserting non-null first.
    for (int i = 0; i < 10; i++) {
        Message<byte[]> msg = pulsarConsumer.receive(1, TimeUnit.SECONDS);
        assertEquals(new String(msg.getData()), "hello-" + i);
        pulsarConsumer.acknowledge(msg);
    }
}
 
Example 29
// Advice applied after the instrumented method returns: replaces the returned
// ConsumerRecord list with an instrumented wrapper so iteration can be traced.
@Advice.OnMethodExit(onThrowable = Throwable.class, suppress = Throwable.class)
public static void wrapRecordList(@Nullable @Advice.Return(readOnly = false) List<ConsumerRecord> list) {
    // Skip when the tracer is absent/stopped or a transaction is already active.
    if (tracer == null || !tracer.isRunning() || tracer.currentTransaction() != null) {
        return;
    }

    //noinspection ConstantConditions,rawtypes
    KafkaInstrumentationHeadersHelper<ConsumerRecord, ProducerRecord> kafkaInstrumentationHelper =
        kafkaInstrHeadersHelperManager.getForClassLoaderOfClass(KafkaProducer.class);
    if (list != null && kafkaInstrumentationHelper != null) {
        list = kafkaInstrumentationHelper.wrapConsumerRecordList(list);
    }
}
 
Example 30
Source Project: kafka-encryption   Source File: SampleProducer.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Produces an encrypted string message every second, forever, until the
 * thread is interrupted.
 */
@Override
public void run() {

    // tag::produce[]

    Encryptor encryptor = new DefaultEncryptor(keyProvider, cryptoAlgorithm);

    // Wrap base LongSerializer and StringSerializer with encrypted wrappers
    CryptoSerializerPairFactory cryptoSerializerPairFactory = new CryptoSerializerPairFactory(encryptor, keyReferenceExtractor);
    SerializerPair<Long, String> serializerPair = cryptoSerializerPairFactory.build(new LongSerializer(), new StringSerializer());

    Properties producerProperties = new Properties();
    producerProperties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

    try (KafkaProducer<Long, String> producer =
                 new KafkaProducer<>(producerProperties, serializerPair.getKeySerializer(), serializerPair.getValueSerializer())) {

        for (long i = 0L; i < Long.MAX_VALUE; i++) {
            producer.send(new ProducerRecord<>("sampletopic", i, "test number " + i));
            try {
                Thread.sleep(1000L);
            }
            catch (InterruptedException e) {
                // BUG FIX: restore the thread's interrupt status before exiting
                // so callers can observe the interruption.
                Thread.currentThread().interrupt();
                return;
            }
        }
    }
    // end::produce[]
}