org.apache.kafka.clients.producer.Callback Java Examples

The following examples show how to use org.apache.kafka.clients.producer.Callback. They are taken from open source projects; the originating project, source file, and license are noted above each example.
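Before the project examples, a minimal sketch of the contract they all implement may help: KafkaProducer.send() accepts an optional Callback whose onCompletion(RecordMetadata, Exception) is invoked once the send is acknowledged or fails; on success the metadata describes the destination partition and offset, and on failure the exception is non-null. The broker address and topic name below are placeholders.

import java.util.Properties;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class CallbackSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("example-topic", "key", "value"), new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    // exactly one of the two arguments carries the result
                    if (exception != null) {
                        System.err.println("Send failed: " + exception.getMessage());
                    } else {
                        System.out.printf("Sent to %s-%d at offset %d%n",
                                metadata.topic(), metadata.partition(), metadata.offset());
                    }
                }
            });
        }
    }
}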
Example #1
Source File: TestKafkaRecordSink_2_0.java    From nifi with Apache License 2.0
@SuppressWarnings("unchecked")
@Override
protected Producer<byte[], byte[]> createProducer(Map<String, Object> kafkaProperties) {
    final Producer<byte[], byte[]> mockProducer = (Producer<byte[], byte[]>) mock(Producer.class);
    when(mockProducer.send(Mockito.argThat(new ByteProducerRecordMatcher()), any(Callback.class))).then(
            (Answer<Future<RecordMetadata>>) invocationOnMock -> {
                ProducerRecord<byte[], byte[]> producerRecord = invocationOnMock.getArgument(0);
                final byte[] data = producerRecord.value();
                dataSent.add(data);
                Callback callback = invocationOnMock.getArgument(1);
                RecordMetadata recordMetadata = new RecordMetadata(
                        new TopicPartition(producerRecord.topic(), producerRecord.partition() != null ? producerRecord.partition() : 0),
                        0,
                        data.length,
                        producerRecord.timestamp() != null ? producerRecord.timestamp() : System.currentTimeMillis(),
                        0L,
                        producerRecord.key() != null ? producerRecord.key().length : 0,
                        data.length);
                callback.onCompletion(recordMetadata, null);
                // the callback has already completed synchronously above; the returned Future is only a placeholder
                return new FutureTask<>(() -> { }, recordMetadata);
            });
    return mockProducer;
}
 
Example #2
Source File: CruiseControlMetricsReporterTest.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Set up the unit test.
 */
@Before
public void setUp() {
  super.setUp();
  Properties props = new Properties();
  props.setProperty(ProducerConfig.ACKS_CONFIG, "-1");
  AtomicInteger failed = new AtomicInteger(0);
  try (Producer<String, String> producer = createProducer(props)) {
    for (int i = 0; i < 10; i++) {
      producer.send(new ProducerRecord<>("TestTopic", Integer.toString(i)), new Callback() {
        @Override
        public void onCompletion(RecordMetadata recordMetadata, Exception e) {
          if (e != null) {
            failed.incrementAndGet();
          }
        }
      });
    }
  }
  assertEquals(0, failed.get());
}
 
Example #3
Source File: OnrampImplTest.java    From data-highway with Apache License 2.0
@SuppressWarnings("unchecked")
@Test
public void sendFails()
  throws InvalidEventException, InterruptedException, ExecutionException, JsonProcessingException, IOException {
  when(kafkaProducer.send(any(ProducerRecord.class), any(Callback.class))).thenReturn(future);
  doThrow(new ExecutionException(new BufferExhaustedException("exhausted"))).when(future).get();

  Future<Boolean> result = underTest.sendEvent(mapper.readTree("{\"f\": \"f16\"}"));

  try {
    result.get();
  } catch (ExecutionException e) {
    assertThat(e.getCause(), instanceOf(BufferExhaustedException.class));
    return;
  }
  fail("Expected ExecutionException");
}
 
Example #4
Source File: FlinkKafkaProducerBaseTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * This test is meant to assure that testAtLeastOnceProducer is valid by testing that, if flushing is disabled,
 * the snapshot method does indeed finish without waiting for pending records.
 * We set a timeout because the test will not finish if the logic is broken.
 */
@SuppressWarnings("unchecked")
@Test(timeout = 5000)
public void testDoesNotWaitForPendingRecordsIfFlushingDisabled() throws Throwable {
	final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>(
		FakeStandardProducerConfig.get(), new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), null);
	producer.setFlushOnCheckpoint(false);

	final KafkaProducer<?, ?> mockProducer = producer.getMockKafkaProducer();

	final OneInputStreamOperatorTestHarness<String, Object> testHarness =
		new OneInputStreamOperatorTestHarness<>(new StreamSink<>(producer));

	testHarness.open();

	testHarness.processElement(new StreamRecord<>("msg"));

	// make sure the record was sent, while none of the callbacks has completed yet
	verify(mockProducer, times(1)).send(any(ProducerRecord.class), any(Callback.class));

	// should return even if there are pending records
	testHarness.snapshot(123L, 123L);

	testHarness.close();
}
 
Example #5
Source File: FlinkKafkaProducerBaseTest.java    From flink with Apache License 2.0
/**
 * This test is meant to assure that testAtLeastOnceProducer is valid by testing that, if flushing is disabled,
 * the snapshot method does indeed finish without waiting for pending records.
 * We set a timeout because the test will not finish if the logic is broken.
 */
@SuppressWarnings("unchecked")
@Test(timeout = 5000)
public void testDoesNotWaitForPendingRecordsIfFlushingDisabled() throws Throwable {
	final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>(
		FakeStandardProducerConfig.get(), new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), null);
	producer.setFlushOnCheckpoint(false);

	final KafkaProducer<?, ?> mockProducer = producer.getMockKafkaProducer();

	final OneInputStreamOperatorTestHarness<String, Object> testHarness =
		new OneInputStreamOperatorTestHarness<>(new StreamSink<>(producer));

	testHarness.open();

	testHarness.processElement(new StreamRecord<>("msg"));

	// make sure the record was sent, while none of the callbacks has completed yet
	verify(mockProducer, times(1)).send(any(ProducerRecord.class), any(Callback.class));

	// should return even if there are pending records
	testHarness.snapshot(123L, 123L);

	testHarness.close();
}
 
Example #6
Source File: DLFutureRecordMetadata.java    From distributedlog with Apache License 2.0
DLFutureRecordMetadata(final String topic,
                       com.twitter.util.Future<DLSN> dlsnFuture,
                       final Callback callback) {
    this.topic = topic;
    this.dlsnFuture = dlsnFuture;
    this.callback = callback;

    this.dlsnFuture.addEventListener(new FutureEventListener<DLSN>() {
        @Override
        public void onFailure(Throwable cause) {
            callback.onCompletion(null, new IOException(cause));
        }

        @Override
        public void onSuccess(DLSN value) {
            callback.onCompletion(new RecordMetadata(new TopicPartition(topic, 0), -1L, -1L), null);
        }
    });
}
 
Example #7
Source File: KafkaIT.java    From uavstack with Apache License 2.0
@SuppressWarnings("unchecked")
public Object asyncSend(String appid, String methodName, Object[] args) {
    ProducerRecord<String, String> record = (ProducerRecord<String, String>) args[1];
    String kafkaUrl = getSendHost(args) + "/" + record.topic();
    Map<String, Object> params = new HashMap<String, Object>();
    params.put(CaptureConstants.INFO_CLIENT_REQUEST_URL, kafkaUrl);
    params.put(CaptureConstants.INFO_CLIENT_REQUEST_ACTION, "Producer." + methodName);
    params.put(CaptureConstants.INFO_CLIENT_APPID, appid);
    params.put(CaptureConstants.INFO_CLIENT_TYPE, "kafka.client");
    if (logger.isDebugable()) {
        logger.debug("KAFKA DOSEND START: " + kafkaUrl, null);
    }

    ccMap = UAVServer.instance().runMonitorAsyncCaptureOnServerCapPoint(CaptureConstants.CAPPOINT_APP_CLIENT,
            Monitor.CapturePhase.PRECAP, params, null);
    ivcContextParams = (Map<String, Object>) UAVServer.instance().runSupporter(
            "com.creditease.uav.apm.supporters.InvokeChainSupporter", "runCap",
            InvokeChainConstants.CHAIN_APP_CLIENT, InvokeChainConstants.CapturePhase.PRECAP, params,
            KafkaProducerAdapter.class, args);
    return JDKProxyInvokeUtil.newProxyInstance(Callback.class.getClassLoader(), new Class<?>[] { Callback.class },
            new JDKProxyInvokeHandler<Callback>((Callback) args[2], new KafkaCallbackProxyInvokeProcessor()));
}
 
Example #8
Source File: KafkaAvroSerDesWithKafkaServerTest.java    From registry with Apache License 2.0
private String produceMessage(String topicName, Object msg, Boolean storeSchemaInHeader) {
    String bootstrapServers = CLUSTER.bootstrapServers();
    Map<String, Object> config = new HashMap<>();
    config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    config.putAll(SCHEMA_REGISTRY_TEST_SERVER_CLIENT_WRAPPER.exportClientConf(true));
    config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, KafkaAvroSerializer.class.getName());
    config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, KafkaAvroSerializer.class.getName());
    config.put(KafkaAvroSerializer.STORE_SCHEMA_VERSION_ID_IN_HEADER, storeSchemaInHeader.toString());

    final Producer<String, Object> producer = new KafkaProducer<>(config);
    final Callback callback = new ProducerCallback();
    LOG.info("Sending message: [{}] to topic: [{}]", msg, topicName);
    ProducerRecord<String, Object> producerRecord = new ProducerRecord<>(topicName, getKey(msg), msg);
    producer.send(producerRecord, callback);
    producer.flush();
    LOG.info("Message successfully sent to topic: [{}]", topicName);
    producer.close(5, TimeUnit.SECONDS);

    return bootstrapServers;
}
 
Example #9
Source File: PublisherLease.java    From localization_nifi with Apache License 2.0
private void publish(final FlowFile flowFile, final byte[] messageKey, final byte[] messageContent, final String topic, final InFlightMessageTracker tracker) {
    final ProducerRecord<byte[], byte[]> record = new ProducerRecord<>(topic, null, messageKey, messageContent);
    producer.send(record, new Callback() {
        @Override
        public void onCompletion(final RecordMetadata metadata, final Exception exception) {
            if (exception == null) {
                tracker.incrementAcknowledgedCount(flowFile);
            } else {
                tracker.fail(flowFile, exception);
                poison();
            }
        }
    });

    tracker.incrementSentCount(flowFile);
}
 
Example #10
Source File: TestKafkaRecordSink_1_0.java    From nifi with Apache License 2.0
@SuppressWarnings("unchecked")
@Override
protected Producer<byte[], byte[]> createProducer(Map<String, Object> kafkaProperties) {
    final Producer<byte[], byte[]> mockProducer = (Producer<byte[], byte[]>) mock(Producer.class);
    when(mockProducer.send(Mockito.argThat(new ByteProducerRecordMatcher()), any(Callback.class))).then(
            (Answer<Future<RecordMetadata>>) invocationOnMock -> {
                ProducerRecord<byte[], byte[]> producerRecord = invocationOnMock.getArgument(0);
                final byte[] data = producerRecord.value();
                dataSent.add(data);
                Callback callback = invocationOnMock.getArgument(1);
                RecordMetadata recordMetadata = new RecordMetadata(
                        new TopicPartition(producerRecord.topic(), producerRecord.partition() != null ? producerRecord.partition() : 0),
                        0,
                        data.length,
                        producerRecord.timestamp() != null ? producerRecord.timestamp() : System.currentTimeMillis(),
                        0L,
                        producerRecord.key() != null ? producerRecord.key().length : 0,
                        data.length);
                callback.onCompletion(recordMetadata, null);
                // the callback has already completed synchronously above; the returned Future is only a placeholder
                return new FutureTask<>(() -> { }, recordMetadata);
            });
    return mockProducer;
}
 
Example #11
Source File: ProjectTopologyService.java    From DBus with Apache License 2.0
public void rerunTopology(String topologyCode, String ctrlMsg) {
    KafkaProducer<String, byte[]> producer = null;
    try {
        String topic = StringUtils.joinWith("_", topologyCode, "ctrl");
        Properties props = zkService.getProperties(KeeperConstants.KEEPER_CTLMSG_PRODUCER_CONF);
        Properties globalConf = zkService.getProperties(KeeperConstants.GLOBAL_CONF);
        props.setProperty(GLOBAL_CONF_KEY_BOOTSTRAP_SERVERS, globalConf.getProperty(GLOBAL_CONF_KEY_BOOTSTRAP_SERVERS));
        if (StringUtils.equals(SecurityConfProvider.getSecurityConf(zkService), Constants.SECURITY_CONFIG_TRUE_VALUE)) {
            props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
        }
        producer = new KafkaProducer<>(props);
        producer.send(new ProducerRecord<String, byte[]>(topic, ctrlMsg.getBytes()), new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                // fire-and-forget: the delivery outcome of the control message is intentionally ignored
            }
        });
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        if (producer != null) producer.close();
    }
}
 
Example #12
Source File: KafkaSinglePortExactlyOnceOutputOperator.java    From attic-apex-malhar with Apache License 2.0
protected void sendTuple(T tuple)
{
  if (alreadyInKafka(tuple)) {
    return;
  }

  getProducer().send(new ProducerRecord<>(getTopic(), key, tuple), new Callback()
  {
    @Override
    public void onCompletion(RecordMetadata metadata, Exception e)
    {
      if (e != null) {
        logger.error("Writing to Kafka failed with an exception: {}", e.getMessage());
        throw new RuntimeException(e);
      }
    }
  });
}
 
Example #13
Source File: TracingKafkaProducer.java    From java-kafka-client with Apache License 2.0
public Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback,
    SpanContext parent) {
  /*
  // Create wrappedRecord because headers can be read only in record (if record is sent second time)
  ProducerRecord<K, V> wrappedRecord = new ProducerRecord<>(record.topic(),
      record.partition(),
      record.timestamp(),
      record.key(),
      record.value(),
      record.headers());
  */

  Span span = TracingKafkaUtils
      .buildAndInjectSpan(record, tracer, producerSpanNameProvider, parent, spanDecorators);
  try (Scope ignored = tracer.activateSpan(span)) {
    Callback wrappedCallback = new TracingCallback(callback, span, tracer, spanDecorators);
    return producer.send(record, wrappedCallback);
  }
}
 
Example #14
Source File: TestPublisherLease.java    From nifi with Apache License 2.0
@Test
@SuppressWarnings("unchecked")
public void testPoisonOnFailure() throws IOException {
    final PoisonCountingLease lease = new PoisonCountingLease();
    final FlowFile flowFile = new MockFlowFile(1L);
    final String topic = "unit-test";
    final byte[] messageKey = null;
    final byte[] demarcatorBytes = null;

    doAnswer(new Answer<Object>() {
        @Override
        public Object answer(final InvocationOnMock invocation) {
            final Callback callback = invocation.getArgument(1);
            callback.onCompletion(null, new RuntimeException("Unit Test Intentional Exception"));
            return null;
        }
    }).when(producer).send(any(ProducerRecord.class), any(Callback.class));

    lease.publish(flowFile, new ByteArrayInputStream(new byte[1]), messageKey, demarcatorBytes, topic, null);

    assertEquals(1, lease.getPoisonCount());

    final PublishResult result = lease.complete();
    assertTrue(result.isFailure());
}
 
Example #15
Source File: Kafka08DataWriter.java    From incubator-gobblin with Apache License 2.0
public Future<WriteResponse> write(Pair<K, V> keyValuePair, final WriteCallback callback) {
  try {
    return new WriteResponseFuture<>(this.producer
        .send(new ProducerRecord<>(topic, keyValuePair.getKey(), keyValuePair.getValue()), new Callback() {
          @Override
          public void onCompletion(final RecordMetadata metadata, Exception exception) {
            if (exception != null) {
              callback.onFailure(exception);
            } else {
              callback.onSuccess(WRITE_RESPONSE_WRAPPER.wrap(metadata));
            }
          }
        }), WRITE_RESPONSE_WRAPPER);
  } catch (Exception e) {
    throw new RuntimeException("Failed to create a Kafka write request", e);
  }
}
 
Example #16
Source File: AsynchronousDeliveryStrategy.java    From logback-kafka-appender with Apache License 2.0
@Override
public <K, V, E> boolean send(Producer<K, V> producer, ProducerRecord<K, V> record, final E event,
                              final FailedDeliveryCallback<E> failedDeliveryCallback) {
    try {
        producer.send(record, new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception != null) {
                    failedDeliveryCallback.onFailedDelivery(event, exception);
                }
            }
        });
        return true;
    } catch (BufferExhaustedException | TimeoutException e) {
        failedDeliveryCallback.onFailedDelivery(event, e);
        return false;
    }
}
 
Example #17
Source File: PregelComputation.java    From kafka-graphs with Apache License 2.0
@Override
public void process(final K readOnlyKey, final Tuple2<Integer, Map<K, List<Message>>> value) {
    try {
        int superstep = value._1 - 1;
        for (Map.Entry<K, List<Message>> entry : value._2.entrySet()) {
            // List of messages may be empty in case of sending to self
            Tuple3<Integer, K, List<Message>> tuple = new Tuple3<>(superstep + 1, readOnlyKey, entry.getValue());
            ProducerRecord<K, Tuple3<Integer, K, List<Message>>> producerRecord =
                new ProducerRecord<>(workSetTopic, entry.getKey(), tuple);
            Callback cb = callback(superstep, readOnlyKey, entry.getKey(), entry.getValue());
            producer.send(producerRecord, cb);
        }
        producer.flush();
        // Deactivate this vertex
        deactivateVertex(superstep, readOnlyKey);
    } catch (Exception e) {
        throw toRuntimeException(e);
    }
}
 
Example #18
Source File: TracingCallbackTest.java    From brave with Apache License 2.0
@Test public void on_completion_should_tag_if_exception() {
  Span span = tracing.tracer().nextSpan().start();

  Callback tracingCallback = TracingCallback.create(null, span, currentTraceContext);
  tracingCallback.onCompletion(null, error);

  assertThat(spans.get(0).finishTimestamp()).isNotZero();
  assertThat(spans.get(0).error()).isEqualTo(error);
}
 
Example #19
Source File: KafkaPublisherTest.java    From ja-micro with Apache License 2.0
@Test
public void sendFailsReturnsFalse() {
    KafkaProducer producer = mock(KafkaProducer.class);
    publisher.realProducer = producer;
    RecordMetadata metadata = new RecordMetadata(null, 0, 0,
            0, 0L, 0, 0);
    ArgumentCaptor<Callback> captor = ArgumentCaptor.forClass(Callback.class);
    when(producer.send(any(), captor.capture())).then(
        invocation -> {
            captor.getValue().onCompletion(metadata, new TimeoutException("error"));
            return new CompletableFuture<>();
        });
    String[] events = { "test" };
    assertThat(publisher.publishEvents(false, null, events)).isFalse();
}
 
Example #20
Source File: TestPublisherLease.java    From nifi with Apache License 2.0
@Test
@SuppressWarnings("unchecked")
public void testPoisonOnFailure() throws IOException {
    final AtomicInteger poisonCount = new AtomicInteger(0);

    final PublisherLease lease = new PublisherLease(producer, 1024 * 1024, 1000L, logger) {
        @Override
        public void poison() {
            poisonCount.incrementAndGet();
            super.poison();
        }
    };

    final FlowFile flowFile = new MockFlowFile(1L);
    final String topic = "unit-test";
    final byte[] messageKey = null;
    final byte[] demarcatorBytes = null;

    doAnswer(new Answer<Object>() {
        @Override
        public Object answer(final InvocationOnMock invocation) throws Throwable {
            final Callback callback = invocation.getArgument(1);
            callback.onCompletion(null, new RuntimeException("Unit Test Intentional Exception"));
            return null;
        }
    }).when(producer).send(any(ProducerRecord.class), any(Callback.class));

    lease.publish(flowFile, new ByteArrayInputStream(new byte[1]), messageKey, demarcatorBytes, topic);

    assertEquals(1, poisonCount.get());

    final PublishResult result = lease.complete();
    assertTrue(result.getFailedFlowFiles().contains(flowFile));
    assertFalse(result.getSuccessfulFlowFiles().contains(flowFile));
}
 
Example #21
Source File: TestPublisherLease.java    From localization_nifi with Apache License 2.0
@Test
@SuppressWarnings("unchecked")
public void testPoisonOnFailure() throws IOException {
    final AtomicInteger poisonCount = new AtomicInteger(0);

    final PublisherLease lease = new PublisherLease(producer, 1024 * 1024, 1000L, logger) {
        @Override
        public void poison() {
            poisonCount.incrementAndGet();
            super.poison();
        }
    };

    final FlowFile flowFile = new MockFlowFile(1L);
    final String topic = "unit-test";
    final byte[] messageKey = null;
    final byte[] demarcatorBytes = null;

    doAnswer(new Answer<Object>() {
        @Override
        public Object answer(final InvocationOnMock invocation) throws Throwable {
            final Callback callback = invocation.getArgumentAt(1, Callback.class);
            callback.onCompletion(null, new RuntimeException("Unit Test Intentional Exception"));
            return null;
        }
    }).when(producer).send(any(ProducerRecord.class), any(Callback.class));

    lease.publish(flowFile, new ByteArrayInputStream(new byte[1]), messageKey, demarcatorBytes, topic);

    assertEquals(1, poisonCount.get());

    final PublishResult result = lease.complete();
    assertTrue(result.getFailedFlowFiles().contains(flowFile));
    assertFalse(result.getSuccessfulFlowFiles().contains(flowFile));
}
 
Example #22
Source File: DBusRouterKafkaWriteBolt.java    From DBus with Apache License 2.0
private void sendKafka(String key, String data, String url, String topic, String ns, Tuple input, long offset) {
    if (StringUtils.isBlank(topic)) {
        logger.warn("namespace: {}, not obtain topic. ums: {}", ns, data);
        logger.info("kafka write bolt fail {}, topic {}", offset, topic);
        collector.fail(input);
        return;
    }
    // KafkaProducer<String, byte[]> producer = producerMap.get(url);
    Pair<String, KafkaProducer> pair = kafkaProducerManager.getKafkaClient(url);
    KafkaProducer<String, byte[]> producer = pair.getValue();
    if (producer != null) {
        producer.send(new ProducerRecord<String, byte[]>(topic, key, data.getBytes()), new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception == null) {
                    logger.info("kafka write bolt ack {}, topic {}, offset {}", offset, metadata.topic(), metadata.offset());
                    collector.ack(input);
                } else {
                    logger.error("kafka write bolt fail {}, topic {}", offset, metadata.topic());
                    logger.error("kafka write bolt fail {}", exception.getMessage());
                    collector.fail(input);
                }
            }
        });
    } else {
        collector.fail(input);
        logger.warn("namespace: {}, not obtain producer. sink:{} ums: {}", ns, url, data);
    }
}
 
Example #23
Source File: KafkaBoltTest.java    From incubator-heron with Apache License 2.0
@Test
public void executeATMOSTONCE() {
  kafkaBolt.prepare(Collections.singletonMap(Config.TOPOLOGY_RELIABILITY_MODE, ATMOST_ONCE),
      null, outputCollector);
  when(tupleTransformer.transformToKey(tuple)).thenReturn("key");
  byte[] value = new byte[]{1, 2, 3};
  when(tupleTransformer.transformToValue(tuple)).thenReturn(value);
  when(tupleTransformer.getTopicName(tuple)).thenReturn("topic");

  ProducerRecord<String, byte[]> producerRecord = new ProducerRecord<>("topic", "key", value);
  kafkaBolt.execute(tuple);
  verify(producer).send(eq(producerRecord), any(Callback.class));
}
 
Example #24
Source File: AsynchronousDeliveryStrategyTest.java    From logback-kafka-appender with Apache License 2.0
@Test
public void testCallbackWillTriggerOnFailedDeliveryOnProducerSendTimeout() {
    final TimeoutException exception = new TimeoutException("miau");
    final ProducerRecord<String,String> record = new ProducerRecord<String,String>("topic", 0, null, "msg");

    when(producer.send(same(record), any(Callback.class))).thenThrow(exception);

    unit.send(producer, record, "msg", failedDeliveryCallback);

    verify(failedDeliveryCallback).onFailedDelivery(eq("msg"), same(exception));
}
 
Example #25
Source File: LiKafkaProducerImpl.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
public ErrorLoggingCallback(UUID messageId,
                            Object auditToken,
                            String topic,
                            Long timestamp,
                            Integer serializedSize,
                            Auditor<K, V> auditor,
                            Callback userCallback) {
  _messageId = messageId;
  _topic = topic;
  _timestamp = timestamp;
  _serializedSize = serializedSize;
  _auditor = auditor;
  _auditToken = auditToken;
  _userCallback = userCallback;
}
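Only the constructor of this error-logging callback is shown above. For completeness, the sketch below shows the shape its onCompletion counterpart typically takes: surface the failure, then delegate to the wrapped user callback. This is an illustration of the wrapping pattern, not the verbatim li-apache-kafka-clients code; LOG is an assumed logger field, and the auditing step that would use _auditor and _auditToken is elided.

@Override
public void onCompletion(RecordMetadata recordMetadata, Exception e) {
  // Illustrative sketch only. LOG is an assumed SLF4J logger on the enclosing class.
  if (e != null) {
    LOG.error("Failed to send message {} to topic {}", _messageId, _topic, e);
  }
  // The real implementation also reports the outcome to _auditor here (elided).
  if (_userCallback != null) {
    _userCallback.onCompletion(recordMetadata, e);
  }
}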
 
Example #26
Source File: KafkaConfiguration.java    From hbase-connect-kafka with Apache License 2.0
/**
 * Returns the default callback handler when running in async mode.
 * @return a new instance of the configured {@link Callback} implementation
 */
@SuppressWarnings("unchecked")
public Callback getCallbackHandler() {
    try {
        final String handlerClassName = this.configuration.get(KAFKA_PRODUCER_CALLBACK_HANDLER_PARAM,
                KAFKA_DEFAULT_CALLBACK_HANDLER);
        final Class<? extends Callback> handlerClass = (Class<? extends Callback>) Class.forName(handlerClassName);
        return handlerClass.getDeclaredConstructor().newInstance();
    } catch (ReflectiveOperationException e) {
        throw new ReplicationException(e);
    }
}
 
Example #27
Source File: CallbackConstructorInterceptor.java    From skywalking with Apache License 2.0
@Override
public void onConstruct(EnhancedInstance objInst, Object[] allArguments) {
    Callback callback = (Callback) allArguments[0];
    CallbackCache cache;
    if (null != objInst.getSkyWalkingDynamicField()) {
        cache = (CallbackCache) objInst.getSkyWalkingDynamicField();
    } else {
        cache = new CallbackCache();
    }
    cache.setCallback(callback);
    objInst.setSkyWalkingDynamicField(cache);
}
 
Example #28
Source File: TracingCallback.java    From java-kafka-client with Apache License 2.0
TracingCallback(Callback callback, Span span, Tracer tracer,
    Collection<SpanDecorator> spanDecorators) {
  this.callback = callback;
  this.span = span;
  this.tracer = tracer;
  this.spanDecorators = spanDecorators;
}
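As in the previous example, only the constructor appears. A plausible onCompletion for such a tracing callback, sketched under the assumption that SpanDecorator exposes an onError(Exception, Span) hook, would record the outcome on the span, finish it, and invoke the wrapped callback inside the span's scope; this is not confirmed to be the project's exact code.

@Override
public void onCompletion(RecordMetadata metadata, Exception exception) {
  // Sketch only; the decorator hook name is assumed, not taken from the source.
  if (exception != null) {
    for (SpanDecorator decorator : spanDecorators) {
      decorator.onError(exception, span);
    }
  }
  span.finish();
  if (callback != null) {
    // run the user callback with the span as the active span
    try (Scope ignored = tracer.scopeManager().activate(span)) {
      callback.onCompletion(metadata, exception);
    }
  }
}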
 
Example #29
Source File: KafkaAccessLogWriterTest.java    From armeria with Apache License 2.0
@Test
void withKeyExtractor() {
    final KafkaAccessLogWriter<String, String> service =
            new KafkaAccessLogWriter<>(producer, TOPIC_NAME,
                                       log -> log.context().decodedPath(),
                                       log -> log.requestHeaders().authority());

    service.log(log);

    verify(producer, times(1)).send(captor.capture(), any(Callback.class));

    final ProducerRecord<String, String> record = captor.getValue();
    assertThat(record.key()).isEqualTo("/kyuto");
    assertThat(record.value()).isEqualTo("kawamuray");
}