Java Code Examples for org.apache.kafka.clients.producer.ProducerRecord#topic()

The following examples show how to use org.apache.kafka.clients.producer.ProducerRecord#topic(). They are drawn from open-source projects; the source file, project, and license are noted above each example.
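
As a quick orientation before the project examples, here is a minimal, self-contained sketch of the accessor itself; the class name and the topic/key/value literals are illustrative only, and the snippet assumes kafka-clients is on the classpath.

import org.apache.kafka.clients.producer.ProducerRecord;

public class TopicAccessorDemo {
    public static void main(String[] args) {
        // topic() simply returns the topic name the record was constructed with
        ProducerRecord<String, String> record =
                new ProducerRecord<>("demo-topic", "key-1", "value-1");
        System.out.println(record.topic());   // prints "demo-topic"
    }
}

The stock constructors reject a null topic, so topic() is normally non-null; the defensive null checks in some of the examples below guard against records produced by mocks, coders, or instrumentation hooks.
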
Example 1
Source File: KafkaSink.java    From smallrye-reactive-messaging with Apache License 2.0
private void handleWriteResult(AsyncResult<?> ar, Message<?> message, ProducerRecord<?, ?> record,
        UniEmitter<? super Void> emitter) {
    String actualTopic = record.topic();
    if (ar.succeeded()) {
        log.successfullyToTopic(message, actualTopic);
        message.ack().whenComplete((x, f) -> {
            if (f != null) {
                emitter.fail(f);
            } else {
                emitter.complete(null);
            }
        });
    } else {
        // Fail; the failure will trigger a retry.
        emitter.fail(ar.cause());
    }
}
 
Example 2
Source File: ProducerAspect.java    From glowroot with Apache License 2.0
@OnBefore
public static @Nullable AsyncTraceEntry onBefore(ThreadContext context,
        @BindParameter @Nullable ProducerRecord<?, ?> record,
        @BindParameter ParameterHolder<Callback> callbackHolder) {
    if (record == null) {
        return null;
    }
    String topic = record.topic();
    if (topic == null) {
        topic = "";
    }
    AsyncTraceEntry asyncTraceEntry = context.startAsyncServiceCallEntry("Kafka", topic,
            MessageSupplier.create("kafka send: {}", topic), timerName);
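    // wrap (or replace) the callback so the async trace entry is completed when the send finishes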
    Callback callback = callbackHolder.get();
    if (callback == null) {
        callbackHolder.set(new CallbackWrapperForNullDelegate(asyncTraceEntry));
    } else {
        callbackHolder.set(new CallbackWrapper(callback, asyncTraceEntry,
                context.createAuxThreadContext()));
    }
    return asyncTraceEntry;
}
 
Example 3
Source File: ProducerRecordCoder.java    From beam with Apache License 2.0
@SuppressWarnings("unchecked")
@Override
public Object structuralValue(ProducerRecord<K, V> value) {
  if (consistentWithEquals()) {
    return value;
  } else {
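    // ConsumerSpEL checks whether the kafka-clients version in use supports record headers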
    if (!ConsumerSpEL.hasHeaders()) {
      return new ProducerRecord<>(
          value.topic(), value.partition(), value.timestamp(), value.key(), value.value());
    } else {
      return new ProducerRecord<>(
          value.topic(),
          value.partition(),
          value.timestamp(),
          value.key(),
          value.value(),
          value.headers());
    }
  }
}
 
Example 4
Source File: KafkaIT.java    From uavstack with Apache License 2.0
@SuppressWarnings("unchecked")
public void syncSendStart(String appid, String methodName, Object[] args) {
    getSendHost(args);
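    // args[1] is the ProducerRecord handed to the intercepted send call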
    ProducerRecord<String, String> record = (ProducerRecord<String, String>) args[1];
    String kafkaUrl = sendHost + "/" + record.topic();
    Map<String, Object> params = new HashMap<String, Object>();
    params.put(CaptureConstants.INFO_CLIENT_REQUEST_URL, kafkaUrl);
    params.put(CaptureConstants.INFO_CLIENT_REQUEST_ACTION, "Producer." + methodName);
    params.put(CaptureConstants.INFO_CLIENT_APPID, appid);
    params.put(CaptureConstants.INFO_CLIENT_TYPE, "kafka.client");
    if (logger.isDebugable()) {
        logger.debug("KAFKA " + methodName + " START: " + kafkaUrl, null);
    }
    UAVServer.instance().runMonitorCaptureOnServerCapPoint(CaptureConstants.CAPPOINT_APP_CLIENT,
            Monitor.CapturePhase.PRECAP, params);

    // register adapter
    UAVServer.instance().runSupporter("com.creditease.uav.apm.supporters.InvokeChainSupporter", "registerAdapter",
            KafkaProducerAdapter.class);

    ivcContextParams = (Map<String, Object>) UAVServer.instance().runSupporter(
            "com.creditease.uav.apm.supporters.InvokeChainSupporter", "runCap",
            InvokeChainConstants.CHAIN_APP_CLIENT, InvokeChainConstants.CapturePhase.PRECAP, params,
            KafkaProducerAdapter.class, args);
}
 
Example 5
Source File: KafkaIT.java    From uavstack with Apache License 2.0
@SuppressWarnings("unchecked")
public Object asyncSend(String appid, String methodName, Object[] args) {
    ProducerRecord<String, String> record = (ProducerRecord<String, String>) args[1];
    String kafkaUrl = getSendHost(args) + "/" + record.topic();
    Map<String, Object> params = new HashMap<String, Object>();
    params.put(CaptureConstants.INFO_CLIENT_REQUEST_URL, kafkaUrl);
    params.put(CaptureConstants.INFO_CLIENT_REQUEST_ACTION, "Producer." + methodName);
    params.put(CaptureConstants.INFO_CLIENT_APPID, appid);
    params.put(CaptureConstants.INFO_CLIENT_TYPE, "kafka.client");
    if (logger.isDebugable()) {
        logger.debug("KAFKA DOSEND START: " + kafkaUrl, null);
    }

    ccMap = UAVServer.instance().runMonitorAsyncCaptureOnServerCapPoint(CaptureConstants.CAPPOINT_APP_CLIENT,
            Monitor.CapturePhase.PRECAP, params, null);
    ivcContextParams = (Map<String, Object>) UAVServer.instance().runSupporter(
            "com.creditease.uav.apm.supporters.InvokeChainSupporter", "runCap",
            InvokeChainConstants.CHAIN_APP_CLIENT, InvokeChainConstants.CapturePhase.PRECAP, params,
            KafkaProducerAdapter.class, args);
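    // wrap the caller's Callback in a dynamic proxy so its completion can be captured as well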
    return JDKProxyInvokeUtil.newProxyInstance(Callback.class.getClassLoader(), new Class<?>[] { Callback.class },
            new JDKProxyInvokeHandler<Callback>((Callback) args[2], new KafkaCallbackProxyInvokeProcessor()));
}
 
Example 6
Source File: KafkaWriter.java    From beam with Apache License 2.0
@ProcessElement
@SuppressWarnings("FutureReturnValueIgnored")
public void processElement(ProcessContext ctx) throws Exception {
  checkForFailures();

  ProducerRecord<K, V> record = ctx.element();
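  // prefer the record's own timestamp; else the configured publish-timestamp function; else null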
  Long timestampMillis =
      record.timestamp() != null
          ? record.timestamp()
          : (spec.getPublishTimestampFunction() != null
              ? spec.getPublishTimestampFunction()
                  .getTimestamp(record, ctx.timestamp())
                  .getMillis()
              : null);
  String topicName = record.topic() != null ? record.topic() : spec.getTopic();

  producer.send(
      new ProducerRecord<>(topicName, null, timestampMillis, record.key(), record.value()),
      new SendCallback());

  elementsWritten.inc();
}
 
Example 7
Source File: KafkaDistributedLogProducer.java    From distributedlog with Apache License 2.0
@Override
public Future<RecordMetadata> send(ProducerRecord<K, V> producerRecord, Callback callback) {
    com.twitter.util.Future<DLSN> dlsnFuture;
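    // keyless records go through the unpartitioned writer; keyed records through the partitioned one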
    if (null == producerRecord.key()) {
        dlsnFuture = getUnpartitionedMultiWriter(producerRecord.topic()).write(producerRecord.value());
    } else {
        // TODO: be able to publish to a specific partition
        dlsnFuture = getPartitionedMultiWriter(producerRecord.topic()).write(producerRecord.key(),
                producerRecord.value());
    }
    return new DLFutureRecordMetadata(producerRecord.topic(), dlsnFuture, callback);
}
 
Example 8
Source File: TracingProducerBenchmarks.java    From brave with Apache License 2.0
@Override
public Future<RecordMetadata> send(ProducerRecord<String, String> record, Callback callback) {
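  // fabricate fixed metadata and complete the callback inline: the benchmark measures tracing overhead, not broker I/O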
  TopicPartition tp = new TopicPartition(record.topic(), 0);
  RecordMetadata rm = new RecordMetadata(tp, -1L, -1L, 1L, 2L, 3, 4);
  if (callback != null) callback.onCompletion(rm, null);
  return Futures.immediateFuture(rm);
}
 
Example 9
Source File: ProducerSendInterceptor.java    From pinpoint with Apache License 2.0
@Override
public void after(Object target, Object[] args, Object result, Throwable throwable) {
    if (logger.isDebugEnabled()) {
        logger.afterInterceptor(target, args, result, throwable);
    }

    ProducerRecord record = getProducerRecord(args);
    if (record == null) {
        return;
    }

    final Trace trace = traceContext.currentTraceObject();
    if (trace == null) {
        return;
    }

    if (!trace.canSampled()) {
        return;
    }

    try {
        SpanEventRecorder recorder = trace.currentSpanEventRecorder();
        recorder.recordApi(descriptor);

        String remoteAddress = getRemoteAddress(target);
        recorder.recordEndPoint(remoteAddress);
        recorder.recordDestinationId(remoteAddress);

        String topic = record.topic();
        recorder.recordAttribute(KafkaConstants.KAFKA_TOPIC_ANNOTATION_KEY, topic);

        if (throwable != null) {
            recorder.recordException(throwable);
        }
    } finally {
        trace.traceBlockEnd();
    }
}
 
Example 10
Source File: TracingKafkaUtils.java    From java-kafka-client with Apache License 2.0
static <K, V> Span buildAndInjectSpan(ProducerRecord<K, V> record, Tracer tracer,
    BiFunction<String, ProducerRecord, String> producerSpanNameProvider,
    SpanContext parent, Collection<SpanDecorator> spanDecorators) {
  String producerOper =
      TO_PREFIX + record.topic(); // including the topic makes the span name more readable in the UI
  Tracer.SpanBuilder spanBuilder = tracer
      .buildSpan(producerSpanNameProvider.apply(producerOper, record))
      .withTag(Tags.SPAN_KIND.getKey(), Tags.SPAN_KIND_PRODUCER);

  SpanContext spanContext = TracingKafkaUtils.extractSpanContext(record.headers(), tracer);

  if (spanContext != null) {
    spanBuilder.asChildOf(spanContext);
  } else if (parent != null) {
    spanBuilder.asChildOf(parent);
  }

  Span span = spanBuilder.start();

  for (SpanDecorator decorator : spanDecorators) {
    decorator.onSend(record, span);
  }

  try {
    TracingKafkaUtils.inject(span.context(), record.headers(), tracer);
  } catch (Exception e) {
    // this can happen if the headers are read-only (when the record is sent a second time)
    logger.error("failed to inject span context. sending record second time?", e);
  }

  return span;
}
 
Example 11
Source File: DemoProducerInterceptor.java    From BigData-In-Practice with Apache License 2.0
/**
 * Called before the message is serialized and its target partition is computed.
 *
 * @param record the record as handed to KafkaProducer#send
 * @return the (possibly modified) record that will actually be sent
 */
@Override
public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
    // the interceptor just prepends a prefix to the value
    String modifiedValue = "prefix1-" + record.value();
    return new ProducerRecord<>(record.topic(), record.partition(), record.timestamp(),
            record.key(), modifiedValue, record.headers());
}
 
Example 12
Source File: KafkaInstrumentationHelperImpl.java    From apm-agent-java with Apache License 2.0
@Nullable
@Override
public Span onSendStart(ProducerRecord record) {

    String topic = record.topic();
    if (ignoreTopic(topic)) {
        return null;
    }

    final AbstractSpan<?> activeSpan = tracer.getActive();
    if (activeSpan == null) {
        return null;
    }

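    // createExitSpan() may return null (e.g. when the active span is already an exit span)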
    Span span = activeSpan.createExitSpan();
    if (span == null) {
        return null;
    }

    span.withType("messaging").withSubtype("kafka").withAction("send");
    span.withName("KafkaProducer#send to ").appendToName(topic);
    span.getContext().getMessage().withQueue(topic);
    span.getContext().getDestination().getService().withType("messaging").withName("kafka")
        .getResource().append("kafka/").append(topic);
    span.activate();
    return span;
}
 
Example 13
Source File: ProducerInterceptorPrefix.java    From kafka_book_demo with Apache License 2.0
@Override
public ProducerRecord<String, String> onSend(
        ProducerRecord<String, String> record) {
    String modifiedValue = "prefix1-" + record.value();
    return new ProducerRecord<>(record.topic(),
            record.partition(), record.timestamp(),
            record.key(), modifiedValue, record.headers());
//    if (record.value().length() < 5) {
//        throw new RuntimeException();
//    }
//    return record;
}
 
Example 14
Source File: TestKafkaWriter.java    From singer with Apache License 2.0
@Test
public void testWriteLogMessagesWithCrcPartitioning() throws Exception {
  KafkaMessagePartitioner partitioner = new Crc32ByteArrayPartitioner();
  KafkaProducerConfig config = new KafkaProducerConfig();
  SingerSettings.setSingerConfig(new SingerConfig());
  KafkaProducerManager.injectTestProducer(config, producer);
  // the default for "skip no-leader partitions" is false
  KafkaWriter writer = new KafkaWriter(config, partitioner, "topicx", false, Executors.newCachedThreadPool());

  List<PartitionInfo> partitions = ImmutableList.copyOf(Arrays.asList(
      new PartitionInfo("topicx", 1, new Node(2, "broker2", 9092, "us-east-1b"), null, null),
      new PartitionInfo("topicx", 0, new Node(1, "broker1", 9092, "us-east-1a"), null, null),
      new PartitionInfo("topicx", 2, new Node(3, "broker3", 9092, "us-east-1c"), null, null),
      new PartitionInfo("topicx", 6, new Node(2, "broker2", 9092, "us-east-1b"), null, null),
      new PartitionInfo("topicx", 3, new Node(4, "broker4", 9092, "us-east-1a"), null, null),
      new PartitionInfo("topicx", 5, new Node(1, "broker1", 9092, "us-east-1a"), null, null),
      new PartitionInfo("topicx", 7, new Node(3, "broker3", 9092, "us-east-1c"), null, null),
      new PartitionInfo("topicx", 4, new Node(5, "broker5", 9092, "us-east-1b"), null, null),
      new PartitionInfo("topicx", 8, new Node(4, "broker4", 9092, "us-east-1a"), null, null),
      new PartitionInfo("topicx", 9, new Node(5, "broker5", 9092, "us-east-1b"), null, null),
      new PartitionInfo("topicx", 10, new Node(1, "broker1", 9092, "us-east-1a"), null, null)));

  when(producer.partitionsFor("topicx")).thenReturn(partitions);

  // messages with the same key end up in the same bucket (same partition)
  List<String> keys = IntStream.range(0, NUM_KEYS).mapToObj(i->"key"+i).collect(Collectors.toList());
  Map<Integer, List<LogMessage>> msgPartitionMap = new HashMap<>();
  Map<Integer, List<ProducerRecord<byte[], byte[]>>> recordPartitionMap = new HashMap<>();
  Map<Integer, List<RecordMetadata>> metadataPartitionMap = new HashMap<>();
  HashFunction crc32 = Hashing.crc32();
  List<LogMessage> logMessages = new ArrayList<>();
  for(int i = 0; i < NUM_KEYS; i++){
    for(int j = 0; j < NUM_EVENTS / NUM_KEYS; j++){
       LogMessage logMessage = new LogMessage();
       logMessage.setKey(keys.get(i).getBytes());
       logMessage.setMessage(ByteBuffer.allocate(100).put(String.valueOf(i).getBytes()));
       logMessages.add(logMessage);
       int partitionId = Math.abs(crc32.hashBytes(logMessage.getKey()).asInt() % partitions.size());
       ProducerRecord<byte[], byte[]> record = new ProducerRecord<byte[], byte[]>(
           "topicx", partitionId, logMessage.getKey(), logMessage.getMessage());
       RecordMetadata recordMetadata = new RecordMetadata(new TopicPartition(record.topic(),
           record.partition()), 0, 0, 0, 0L, record.key().length, record.value().length);
       when(producer.send(record)).thenReturn(ConcurrentUtils.constantFuture(recordMetadata));

       if (msgPartitionMap.containsKey(partitionId)){
         msgPartitionMap.get(partitionId).add(logMessage);
         recordPartitionMap.get(partitionId).add(record);
         metadataPartitionMap.get(partitionId).add(recordMetadata);
       } else {
         msgPartitionMap.put(partitionId, new ArrayList<>());
         recordPartitionMap.put(partitionId, new ArrayList<>());
         metadataPartitionMap.put(partitionId, new ArrayList<>());
         msgPartitionMap.get(partitionId).add(logMessage);
         recordPartitionMap.get(partitionId).add(record);
         metadataPartitionMap.get(partitionId).add(recordMetadata);
       }
    }
  }

  List<PartitionInfo> sortedPartitions = new ArrayList<>(partitions);
  Collections.sort(sortedPartitions, new PartitionComparator());

  Map<Integer, Map<Integer, LoggingAuditHeaders>> mapOfHeadersMap = new HashMap<>();
  Map<Integer, List<ProducerRecord<byte[], byte[]>>> messageCollation = writer.messageCollation(partitions, "topicx", logMessages, mapOfHeadersMap);

  for(int partitionId = 0; partitionId < messageCollation.keySet().size(); partitionId++) {
    if (messageCollation.get(partitionId).size() == 0) {
      continue;
    }
    List<ProducerRecord<byte[], byte[]>> writerOutput = messageCollation.get(partitionId);

    // verify the message order is what is expected by calling messageCollation()
    List<ProducerRecord<byte[], byte[]>> expectedRecords = recordPartitionMap.get(partitionId);
    assertEquals(expectedRecords.size(), writerOutput.size());
    for(int j = 0; j < writerOutput.size(); j++){
      assertEquals(expectedRecords.get(j), writerOutput.get(j));
    }

    // verify the content of LogMessage and the content of ProducerRecord match
    List<LogMessage> originalData = msgPartitionMap.get(partitionId);
    assertEquals(originalData.size(), writerOutput.size());
    for (int j = 0; j < writerOutput.size(); j++) {
      assertTrue(Arrays.equals(originalData.get(j).getKey(), writerOutput.get(j).key()));
      assertTrue(Arrays.equals(originalData.get(j).getMessage(), writerOutput.get(j).value()));
    }

    // verify the RecordMetadata that corresponds to record send to certain partitions are put
    // together into a list and the order of the RecordMetadata is same as the original message order
    List<RecordMetadata> expectedRecordMetadata = metadataPartitionMap.get(partitionId);
    KafkaWritingTaskResult kafkaWritingTaskResult  = writer.getClusterThreadPool().submit(new
        KafkaWritingTask(producer, writerOutput, 0, sortedPartitions)).get();
    assertEquals(expectedRecordMetadata.size(), kafkaWritingTaskResult.getRecordMetadataList().size());
    for(int j = 0; j < expectedRecordMetadata.size(); j++){
      assertEquals(expectedRecordMetadata.get(j), kafkaWritingTaskResult.getRecordMetadataList().get(j));
    }
  }

  // validate if writes are throwing any error
  writer.writeLogMessages(logMessages);
  writer.close();
}
 
Example 15
Source File: ClientSpanNameProvider.java    From java-kafka-client with Apache License 2.0
private static String replaceIfNull(ProducerRecord input, String replacement) {
  return ((input == null) ? replacement : input.topic());
}
 
Example 16
Source File: MockKafkaProducer.java    From samza with Apache License 2.0
private RecordMetadata getRecordMetadata(ProducerRecord record) {
  return new RecordMetadata(new TopicPartition(record.topic(), record.partition() == null ? 0 : record.partition()), 0, this.msgsSent.get(), -1L, -1L, -1, -1);
}
 
Example 17
Source File: MockKafkaProducer.java    From samza with Apache License 2.0
public FutureSuccess(ProducerRecord record, int offset) {
  this.record = record;
  this.metadata = new RecordMetadata(new TopicPartition(record.topic(), record.partition() == null ? 0 : record.partition()), 0, offset, RecordBatch.NO_TIMESTAMP, -1L, -1, -1);
}