org.apache.kafka.clients.producer.Callback Java Examples
The following examples show how to use org.apache.kafka.clients.producer.Callback, the completion handler that KafkaProducer.send() invokes asynchronously once a record has been acknowledged or the send has failed. Each example is taken from an open-source project; the source project, author, file, and license are listed above the code.
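Before the project-specific examples, here is a minimal, self-contained sketch of the interface in use: one record is sent and the Callback reports either the resulting RecordMetadata or the exception. The broker address, topic name, and serializer settings below are illustrative assumptions, not values taken from any of the projects listed.

import java.util.Properties;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class CallbackSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Assumed local broker; replace with your own bootstrap servers.
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // "example-topic" is a hypothetical topic used only for illustration.
            ProducerRecord<String, String> record = new ProducerRecord<>("example-topic", "key", "value");
            // The Callback runs asynchronously once the send is acknowledged or fails;
            // exactly one of (metadata, exception) is non-null.
            producer.send(record, new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    if (exception != null) {
                        System.err.println("Send failed: " + exception.getMessage());
                    } else {
                        System.out.println("Sent to " + metadata.topic() + "-" + metadata.partition()
                                + " at offset " + metadata.offset());
                    }
                }
            });
        }
    }
}

Note that send() also returns a Future<RecordMetadata>, so a caller can block on get() instead of, or in addition to, supplying a Callback; most of the examples below exercise one of these two paths.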
Example #1
Source Project: nifi Author: apache File: TestKafkaRecordSink_2_0.java License: Apache License 2.0
@SuppressWarnings("unchecked") @Override protected Producer<byte[], byte[]> createProducer(Map<String, Object> kafkaProperties) { final Producer<byte[], byte[]> mockProducer = (Producer<byte[], byte[]>) mock(Producer.class); when(mockProducer.send(Mockito.argThat(new ByteProducerRecordMatcher()), any(Callback.class))).then( (Answer<Future<RecordMetadata>>) invocationOnMock -> { ProducerRecord<byte[], byte[]> producerRecord = invocationOnMock.getArgument(0); final byte[] data = producerRecord.value(); dataSent.add(data); Callback callback = invocationOnMock.getArgument(1); RecordMetadata recordMetadata = new RecordMetadata( new TopicPartition(producerRecord.topic(), producerRecord.partition() != null ? producerRecord.partition() : 0), 0, data.length, producerRecord.timestamp() != null ? producerRecord.timestamp() : System.currentTimeMillis(), 0L, producerRecord.key() != null ? producerRecord.key().length : 0, data.length); callback.onCompletion(recordMetadata, null); return new FutureTask(() -> {}, recordMetadata); }); return mockProducer; }
Example #2
Source Project: data-highway Author: HotelsDotCom File: OnrampImplTest.java License: Apache License 2.0
@SuppressWarnings("unchecked") @Test public void sendFails() throws InvalidEventException, InterruptedException, ExecutionException, JsonProcessingException, IOException { when(kafkaProducer.send(any(ProducerRecord.class), any(Callback.class))).thenReturn(future); doThrow(new ExecutionException(new BufferExhaustedException("exhausted"))).when(future).get(); Future<Boolean> result = underTest.sendEvent(mapper.readTree("{\"f\": \"f16\"}")); try { result.get(); } catch (ExecutionException e) { assertThat(e.getCause(), instanceOf(BufferExhaustedException.class)); return; } fail("Expected ExecutionException"); }
Example #3
Source Project: attic-apex-malhar Author: apache File: KafkaSinglePortExactlyOnceOutputOperator.java License: Apache License 2.0
protected void sendTuple(T tuple) {
    if (alreadyInKafka(tuple)) {
        return;
    }
    getProducer().send(new ProducerRecord<>(getTopic(), key, tuple), new Callback() {
        @Override
        public void onCompletion(RecordMetadata metadata, Exception e) {
            if (e != null) {
                logger.info("Writing to Kafka failed with an exception {}", e.getMessage());
                throw new RuntimeException(e);
            }
        }
    });
}
Example #4
Source Project: Flink-CEPplus Author: ljygz File: FlinkKafkaProducerBaseTest.java License: Apache License 2.0
/**
 * This test is meant to assure that testAtLeastOnceProducer is valid by testing that if flushing is disabled,
 * the snapshot method does indeed finish without waiting for pending records;
 * we set a timeout because the test will not finish if the logic is broken.
 */
@SuppressWarnings("unchecked")
@Test(timeout = 5000)
public void testDoesNotWaitForPendingRecordsIfFlushingDisabled() throws Throwable {
    final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>(
            FakeStandardProducerConfig.get(), new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), null);
    producer.setFlushOnCheckpoint(false);

    final KafkaProducer<?, ?> mockProducer = producer.getMockKafkaProducer();
    final OneInputStreamOperatorTestHarness<String, Object> testHarness =
            new OneInputStreamOperatorTestHarness<>(new StreamSink<>(producer));

    testHarness.open();

    testHarness.processElement(new StreamRecord<>("msg"));

    // make sure that all callbacks have not been completed
    verify(mockProducer, times(1)).send(any(ProducerRecord.class), any(Callback.class));

    // should return even if there are pending records
    testHarness.snapshot(123L, 123L);

    testHarness.close();
}
Example #5
Source Project: flink Author: flink-tpc-ds File: FlinkKafkaProducerBaseTest.java License: Apache License 2.0
/**
 * This test is meant to assure that testAtLeastOnceProducer is valid by testing that if flushing is disabled,
 * the snapshot method does indeed finish without waiting for pending records;
 * we set a timeout because the test will not finish if the logic is broken.
 */
@SuppressWarnings("unchecked")
@Test(timeout = 5000)
public void testDoesNotWaitForPendingRecordsIfFlushingDisabled() throws Throwable {
    final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>(
            FakeStandardProducerConfig.get(), new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), null);
    producer.setFlushOnCheckpoint(false);

    final KafkaProducer<?, ?> mockProducer = producer.getMockKafkaProducer();
    final OneInputStreamOperatorTestHarness<String, Object> testHarness =
            new OneInputStreamOperatorTestHarness<>(new StreamSink<>(producer));

    testHarness.open();

    testHarness.processElement(new StreamRecord<>("msg"));

    // make sure that all callbacks have not been completed
    verify(mockProducer, times(1)).send(any(ProducerRecord.class), any(Callback.class));

    // should return even if there are pending records
    testHarness.snapshot(123L, 123L);

    testHarness.close();
}
Example #6
Source Project: distributedlog Author: apache File: DLFutureRecordMetadata.java License: Apache License 2.0
DLFutureRecordMetadata(final String topic,
                       com.twitter.util.Future<DLSN> dlsnFuture,
                       final Callback callback) {
    this.topic = topic;
    this.dlsnFuture = dlsnFuture;
    this.callback = callback;

    this.dlsnFuture.addEventListener(new FutureEventListener<DLSN>() {
        @Override
        public void onFailure(Throwable cause) {
            callback.onCompletion(null, new IOException(cause));
        }

        @Override
        public void onSuccess(DLSN value) {
            callback.onCompletion(new RecordMetadata(new TopicPartition(topic, 0), -1L, -1L), null);
        }
    });
}
Example #7
Source Project: registry Author: hortonworks File: KafkaAvroSerDesWithKafkaServerTest.java License: Apache License 2.0
private String produceMessage(String topicName, Object msg, Boolean storeSchemaInHeader) {
    String bootstrapServers = CLUSTER.bootstrapServers();
    Map<String, Object> config = new HashMap<>();
    config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    config.putAll(SCHEMA_REGISTRY_TEST_SERVER_CLIENT_WRAPPER.exportClientConf(true));
    config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, KafkaAvroSerializer.class.getName());
    config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, KafkaAvroSerializer.class.getName());
    config.put(KafkaAvroSerializer.STORE_SCHEMA_VERSION_ID_IN_HEADER, storeSchemaInHeader.toString());

    final Producer<String, Object> producer = new KafkaProducer<>(config);
    final Callback callback = new ProducerCallback();

    LOG.info("Sending message: [{}] to topic: [{}]", msg, topicName);
    ProducerRecord<String, Object> producerRecord = new ProducerRecord<>(topicName, getKey(msg), msg);
    producer.send(producerRecord, callback);
    producer.flush();
    LOG.info("Message successfully sent to topic: [{}]", topicName);
    producer.close(5, TimeUnit.SECONDS);

    return bootstrapServers;
}
Example #8
Source Project: localization_nifi Author: wangrenlei File: PublisherLease.java License: Apache License 2.0
private void publish(final FlowFile flowFile, final byte[] messageKey, final byte[] messageContent,
                     final String topic, final InFlightMessageTracker tracker) {
    final ProducerRecord<byte[], byte[]> record = new ProducerRecord<>(topic, null, messageKey, messageContent);
    producer.send(record, new Callback() {
        @Override
        public void onCompletion(final RecordMetadata metadata, final Exception exception) {
            if (exception == null) {
                tracker.incrementAcknowledgedCount(flowFile);
            } else {
                tracker.fail(flowFile, exception);
                poison();
            }
        }
    });

    tracker.incrementSentCount(flowFile);
}
Example #9
Source Project: kafka-graphs Author: rayokota File: PregelComputation.java License: Apache License 2.0
@Override
public void process(final K readOnlyKey, final Tuple2<Integer, Map<K, List<Message>>> value) {
    try {
        int superstep = value._1 - 1;
        for (Map.Entry<K, List<Message>> entry : value._2.entrySet()) {
            // List of messages may be empty in case of sending to self
            Tuple3<Integer, K, List<Message>> tuple = new Tuple3<>(superstep + 1, readOnlyKey, entry.getValue());
            ProducerRecord<K, Tuple3<Integer, K, List<Message>>> producerRecord =
                new ProducerRecord<>(workSetTopic, entry.getKey(), tuple);
            Callback cb = callback(superstep, readOnlyKey, entry.getKey(), entry.getValue());
            producer.send(producerRecord, cb);
        }
        producer.flush();
        // Deactivate this vertex
        deactivateVertex(superstep, readOnlyKey);
    } catch (Exception e) {
        throw toRuntimeException(e);
    }
}
Example #10
Source Project: logback-kafka-appender Author: danielwegener File: AsynchronousDeliveryStrategy.java License: Apache License 2.0
@Override
public <K, V, E> boolean send(Producer<K, V> producer, ProducerRecord<K, V> record, final E event,
                              final FailedDeliveryCallback<E> failedDeliveryCallback) {
    try {
        producer.send(record, new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception != null) {
                    failedDeliveryCallback.onFailedDelivery(event, exception);
                }
            }
        });
        return true;
    } catch (BufferExhaustedException | TimeoutException e) {
        failedDeliveryCallback.onFailedDelivery(event, e);
        return false;
    }
}
Example #11
Source Project: incubator-gobblin Author: apache File: Kafka08DataWriter.java License: Apache License 2.0
public Future<WriteResponse> write(Pair<K, V> keyValuePair, final WriteCallback callback) {
    try {
        return new WriteResponseFuture<>(this.producer
            .send(new ProducerRecord<>(topic, keyValuePair.getKey(), keyValuePair.getValue()), new Callback() {
                @Override
                public void onCompletion(final RecordMetadata metadata, Exception exception) {
                    if (exception != null) {
                        callback.onFailure(exception);
                    } else {
                        callback.onSuccess(WRITE_RESPONSE_WRAPPER.wrap(metadata));
                    }
                }
            }), WRITE_RESPONSE_WRAPPER);
    } catch (Exception e) {
        throw new RuntimeException("Failed to create a Kafka write request", e);
    }
}
Example #12
Source Project: java-kafka-client Author: opentracing-contrib File: TracingKafkaProducer.java License: Apache License 2.0
public Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback, SpanContext parent) {
    /*
    // Create wrappedRecord because headers can be read only in record (if record is sent second time)
    ProducerRecord<K, V> wrappedRecord = new ProducerRecord<>(record.topic(), record.partition(),
        record.timestamp(), record.key(), record.value(), record.headers());
    */
    Span span = TracingKafkaUtils
        .buildAndInjectSpan(record, tracer, producerSpanNameProvider, parent, spanDecorators);
    try (Scope ignored = tracer.activateSpan(span)) {
        Callback wrappedCallback = new TracingCallback(callback, span, tracer, spanDecorators);
        return producer.send(record, wrappedCallback);
    }
}
Example #13
Source Project: cruise-control Author: linkedin File: CruiseControlMetricsReporterTest.java License: BSD 2-Clause "Simplified" License
/**
 * Set up the unit test.
 */
@Before
public void setUp() {
    super.setUp();
    Properties props = new Properties();
    props.setProperty(ProducerConfig.ACKS_CONFIG, "-1");
    AtomicInteger failed = new AtomicInteger(0);
    try (Producer<String, String> producer = createProducer(props)) {
        for (int i = 0; i < 10; i++) {
            producer.send(new ProducerRecord<>("TestTopic", Integer.toString(i)), new Callback() {
                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (e != null) {
                        failed.incrementAndGet();
                    }
                }
            });
        }
    }
    assertEquals(0, failed.get());
}
Example #14
Source Project: nifi Author: apache File: TestPublisherLease.java License: Apache License 2.0
@Test @SuppressWarnings("unchecked") public void testPoisonOnFailure() throws IOException { final PoisonCountingLease lease = new PoisonCountingLease(); final FlowFile flowFile = new MockFlowFile(1L); final String topic = "unit-test"; final byte[] messageKey = null; final byte[] demarcatorBytes = null; doAnswer(new Answer<Object>() { @Override public Object answer(final InvocationOnMock invocation) { final Callback callback = invocation.getArgument(1); callback.onCompletion(null, new RuntimeException("Unit Test Intentional Exception")); return null; } }).when(producer).send(any(ProducerRecord.class), any(Callback.class)); lease.publish(flowFile, new ByteArrayInputStream(new byte[1]), messageKey, demarcatorBytes, topic, null); assertEquals(1, lease.getPoisonCount()); final PublishResult result = lease.complete(); assertTrue(result.isFailure()); }
Example #15
Source Project: DBus Author: BriData File: ProjectTopologyService.java License: Apache License 2.0
public void rerunTopology(String topologyCode, String ctrlMsg) {
    KafkaProducer<String, byte[]> producer = null;
    try {
        String topic = StringUtils.joinWith("_", topologyCode, "ctrl");
        Properties props = zkService.getProperties(KeeperConstants.KEEPER_CTLMSG_PRODUCER_CONF);
        Properties globalConf = zkService.getProperties(KeeperConstants.GLOBAL_CONF);
        props.setProperty(GLOBAL_CONF_KEY_BOOTSTRAP_SERVERS, globalConf.getProperty(GLOBAL_CONF_KEY_BOOTSTRAP_SERVERS));
        if (StringUtils.equals(SecurityConfProvider.getSecurityConf(zkService), Constants.SECURITY_CONFIG_TRUE_VALUE)) {
            props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
        }
        producer = new KafkaProducer<>(props);
        producer.send(new ProducerRecord<String, byte[]>(topic, ctrlMsg.getBytes()), new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
            }
        });
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        if (producer != null) {
            producer.close();
        }
    }
}
Example #16
Source Project: nifi Author: apache File: TestKafkaRecordSink_1_0.java License: Apache License 2.0
@SuppressWarnings("unchecked") @Override protected Producer<byte[], byte[]> createProducer(Map<String, Object> kafkaProperties) { final Producer<byte[], byte[]> mockProducer = (Producer<byte[], byte[]>) mock(Producer.class); when(mockProducer.send(Mockito.argThat(new ByteProducerRecordMatcher()), any(Callback.class))).then( (Answer<Future<RecordMetadata>>) invocationOnMock -> { ProducerRecord<byte[], byte[]> producerRecord = invocationOnMock.getArgument(0); final byte[] data = producerRecord.value(); dataSent.add(data); Callback callback = invocationOnMock.getArgument(1); RecordMetadata recordMetadata = new RecordMetadata( new TopicPartition(producerRecord.topic(), producerRecord.partition() != null ? producerRecord.partition() : 0), 0, data.length, producerRecord.timestamp() != null ? producerRecord.timestamp() : System.currentTimeMillis(), new Long(0L), producerRecord.key() != null ? producerRecord.key().length : 0, data.length); callback.onCompletion(recordMetadata, null); return new FutureTask(() -> {}, recordMetadata); }); return mockProducer; }
Example #17
Source Project: uavstack Author: uavorg File: KafkaIT.java License: Apache License 2.0
@SuppressWarnings("unchecked") public Object asyncSend(String appid, String methodName, Object[] args) { ProducerRecord<String, String> record = (ProducerRecord<String, String>) args[1]; String kafkaUrl = getSendHost(args) + "/" + record.topic(); Map<String, Object> params = new HashMap<String, Object>(); params.put(CaptureConstants.INFO_CLIENT_REQUEST_URL, kafkaUrl); params.put(CaptureConstants.INFO_CLIENT_REQUEST_ACTION, "Producer." + methodName); params.put(CaptureConstants.INFO_CLIENT_APPID, appid); params.put(CaptureConstants.INFO_CLIENT_TYPE, "kafka.client"); if (logger.isDebugable()) { logger.debug("KAFKA DOSEND START: " + kafkaUrl, null); } ccMap = UAVServer.instance().runMonitorAsyncCaptureOnServerCapPoint(CaptureConstants.CAPPOINT_APP_CLIENT, Monitor.CapturePhase.PRECAP, params, null); ivcContextParams = (Map<String, Object>) UAVServer.instance().runSupporter( "com.creditease.uav.apm.supporters.InvokeChainSupporter", "runCap", InvokeChainConstants.CHAIN_APP_CLIENT, InvokeChainConstants.CapturePhase.PRECAP, params, KafkaProducerAdapter.class, args); return JDKProxyInvokeUtil.newProxyInstance(Callback.class.getClassLoader(), new Class<?>[] { Callback.class }, new JDKProxyInvokeHandler<Callback>((Callback) args[2], new KafkaCallbackProxyInvokeProcessor())); }
Example #18
Source Project: data-highway Author: HotelsDotCom File: OnrampImplTest.java License: Apache License 2.0
@SuppressWarnings("unchecked") @Test public void sendEncodedEvent_UpdateMetrics_Success() throws InvalidKeyException { RecordMetadata metadata = new RecordMetadata(null, 0, 0, 0, Long.valueOf(0), 0, 1); Exception exception = null; when(kafkaProducer.send(any(ProducerRecord.class), any(Callback.class))).thenAnswer(invocation -> { ((Callback) invocation.getArgument(1)).onCompletion(metadata, exception); return future; }); underTest.sendEncodedEvent(new Event<>(null, null), null); verify(metrics).markSuccessMetrics(ROAD_NAME, 1); }
Example #19
Source Project: data-highway Author: HotelsDotCom File: OnrampImplTest.java License: Apache License 2.0
@SuppressWarnings("unchecked") @Test public void sendEncodedEvent_UpdateMetrics_Failure() throws InvalidKeyException { RecordMetadata metadata = null; Exception exception = new Exception(); when(kafkaProducer.send(any(ProducerRecord.class), any(Callback.class))).thenAnswer(invocation -> { ((Callback) invocation.getArgument(1)).onCompletion(metadata, exception); return future; }); underTest.sendEncodedEvent(new Event<>(null, null), null); verify(metrics).markFailureMetrics(ROAD_NAME); }
Example #20
Source Project: brave Author: openzipkin File: TracingCallbackTest.java License: Apache License 2.0
@Test
public void on_completion_should_tag_if_exception() {
    Span span = tracing.tracer().nextSpan().start();

    Callback tracingCallback = TracingCallback.create(null, span, currentTraceContext);
    tracingCallback.onCompletion(null, error);

    assertThat(spans.get(0).finishTimestamp()).isNotZero();
    assertThat(spans.get(0).error()).isEqualTo(error);
}
Example #21
Source Project: incubator-heron Author: apache File: KafkaBoltTest.java License: Apache License 2.0
@Test
public void executeATMOSTONCE() {
    kafkaBolt.prepare(Collections.singletonMap(Config.TOPOLOGY_RELIABILITY_MODE, ATMOST_ONCE), null,
        outputCollector);
    when(tupleTransformer.transformToKey(tuple)).thenReturn("key");
    byte[] value = new byte[]{1, 2, 3};
    when(tupleTransformer.transformToValue(tuple)).thenReturn(value);
    when(tupleTransformer.getTopicName(tuple)).thenReturn("topic");
    ProducerRecord<String, byte[]> producerRecord = new ProducerRecord<>("topic", "key", value);
    kafkaBolt.execute(tuple);
    verify(producer).send(eq(producerRecord), any(Callback.class));
}
Example #22
Source Project: nifi Author: apache File: TestPublisherLease.java License: Apache License 2.0
@Test @SuppressWarnings("unchecked") public void testPoisonOnFailure() throws IOException { final AtomicInteger poisonCount = new AtomicInteger(0); final PublisherLease lease = new PublisherLease(producer, 1024 * 1024, 1000L, logger) { @Override public void poison() { poisonCount.incrementAndGet(); super.poison(); } }; final FlowFile flowFile = new MockFlowFile(1L); final String topic = "unit-test"; final byte[] messageKey = null; final byte[] demarcatorBytes = null; doAnswer(new Answer<Object>() { @Override public Object answer(final InvocationOnMock invocation) throws Throwable { final Callback callback = invocation.getArgument(1); callback.onCompletion(null, new RuntimeException("Unit Test Intentional Exception")); return null; } }).when(producer).send(any(ProducerRecord.class), any(Callback.class)); lease.publish(flowFile, new ByteArrayInputStream(new byte[1]), messageKey, demarcatorBytes, topic); assertEquals(1, poisonCount.get()); final PublishResult result = lease.complete(); assertTrue(result.getFailedFlowFiles().contains(flowFile)); assertFalse(result.getSuccessfulFlowFiles().contains(flowFile)); }
Example #23
Source Project: skywalking Author: apache File: KafkaProducerInterceptorTest.java License: Apache License 2.0
@Before
public void setUp() {
    producerInterceptor = new KafkaProducerInterceptor();

    // Use a lambda expression so that no inner class is generated and no class definition is triggered.
    Callback callback = (metadata, exception) -> {
        if (null != metadata) {
        }
    };

    arguments = new Object[] {
        messageInstance,
        callback
    };
    argumentType = new Class[] {ProducerRecord.class};
}
Example #24
Source Project: brave Author: openzipkin File: TracingProducerBenchmarks.java License: Apache License 2.0
@Override
public Future<RecordMetadata> send(ProducerRecord<String, String> record, Callback callback) {
    TopicPartition tp = new TopicPartition(record.topic(), 0);
    RecordMetadata rm = new RecordMetadata(tp, -1L, -1L, 1L, 2L, 3, 4);
    if (callback != null) callback.onCompletion(rm, null);
    return Futures.immediateFuture(rm);
}
Example #25
Source Project: brave Author: openzipkin File: TracingCallbackTest.java License: Apache License 2.0
@Test
public void on_completion_should_forward_then_finish_span() {
    Span span = tracing.tracer().nextSpan().start();

    Callback delegate = mock(Callback.class);
    Callback tracingCallback = TracingCallback.create(delegate, span, currentTraceContext);
    RecordMetadata md = createRecordMetadata();
    tracingCallback.onCompletion(md, null);

    verify(delegate).onCompletion(md, null);
    assertThat(spans.get(0).finishTimestamp()).isNotZero();
}
Example #26
Source Project: brave Author: openzipkin File: TracingCallbackTest.java License: Apache License 2.0
@Test
public void on_completion_should_forward_then_tag_if_exception() {
    Span span = tracing.tracer().nextSpan().start();

    Callback delegate = mock(Callback.class);
    Callback tracingCallback = TracingCallback.create(delegate, span, currentTraceContext);
    RecordMetadata md = createRecordMetadata();
    tracingCallback.onCompletion(md, error);

    verify(delegate).onCompletion(md, error);
    assertThat(spans.get(0).finishTimestamp()).isNotZero();
    assertThat(spans.get(0).error()).isEqualTo(error);
}
Example #27
Source Project: kylin-on-parquet-v2 Author: Kyligence File: KafkaActiveReserviorListener.java License: Apache License 2.0
protected void sendWrapper(String topic, Record record, Callback callback) {
    try {
        send(topic, record, callback);
    } catch (org.apache.kafka.common.errors.TimeoutException e) {
        setUnAvailable(topic);
        throw e;
    }
}
Example #28
Source Project: logback-kafka-appender Author: danielwegener File: AsynchronousDeliveryStrategyTest.java License: Apache License 2.0
@Test
public void testCallbackWillNotTriggerOnFailedDeliveryOnNoException() {
    final ProducerRecord<String, String> record = new ProducerRecord<String, String>("topic", 0, null, "msg");
    unit.send(producer, record, "msg", failedDeliveryCallback);

    final ArgumentCaptor<Callback> callbackCaptor = ArgumentCaptor.forClass(Callback.class);
    verify(producer).send(Mockito.refEq(record), callbackCaptor.capture());
    final Callback callback = callbackCaptor.getValue();
    callback.onCompletion(recordMetadata, null);

    verify(failedDeliveryCallback, never()).onFailedDelivery(anyString(), any(Throwable.class));
}
Example #29
Source Project: qcon-microservices Author: confluentinc File: OrderService.java License: Apache License 2.0
private Callback callback(final AsyncResponse response, final String orderId) {
    return (recordMetadata, e) -> {
        if (e != null) {
            response.resume(e);
        } else {
            try {
                // Return the location of the newly created resource
                Response uri = Response.created(new URI("/v1/orders/" + orderId)).build();
                response.resume(uri);
            } catch (URISyntaxException e2) {
                e2.printStackTrace();
            }
        }
    };
}