org.apache.kafka.clients.consumer.ConsumerRecord Java Examples
The following examples show how to use
org.apache.kafka.clients.consumer.ConsumerRecord.
Each example is taken from an open-source project; the source file, originating project, and license are noted above the code. A minimal stand-alone sketch of the common poll-and-read pattern comes first, followed by the project examples.
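Before the project examples, here is a minimal, self-contained sketch of the pattern most of them share: polling a KafkaConsumer and reading the topic, partition, offset, key, and value from each ConsumerRecord. The broker address, group id, and topic name are placeholders, not values taken from any of the projects below.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ConsumerRecordSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic"));   // placeholder topic
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records) {
                // Each ConsumerRecord carries the message plus its position in the log.
                System.out.printf("topic=%s partition=%d offset=%d key=%s value=%s%n",
                        record.topic(), record.partition(), record.offset(), record.key(), record.value());
            }
        }
    }
}

The examples that follow show the same ConsumerRecord accessors used in listeners, test harnesses, deserialization schemas, and instrumentation code.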
Example #1
Source File: SettleListenerOfPayment.java From reliable with Apache License 2.0
@ReliableOnConsumed(svc = "cat-settle", nextTopic = "CAT_SETTLE_CREATED", nextRetryMax = 2, nextSvcs = {"cat-statement"})
@KafkaListener(topics = "CAT_PAID_TCC_TRY")
public CatStatement onCatPaid_TCC_TRY(ConsumerRecord<String, String> record) {
    ReliableDto dto = dtoConverter.convertOnConsumed(record);
    //--------------
    CatSettle catSettle = new CatSettle();
    catSettle.setId("CAT_SETTLE_TEST");
    catSettle.setName("TRY");
    settleController.create(catSettle);
    CatStatement catStatement = new CatStatement();
    catStatement.setId("TEST_STATEMENT");
    catStatement.setTest("CAT_SETTLE_CREATED");
    return catStatement;
}
Example #2
Source File: KafkaHelper.java From kafka-junit with Apache License 2.0
@Override
public List<ConsumerRecord<K, V>> call() throws Exception {
    try {
        Map<TopicPartition, OffsetAndMetadata> commitBuffer = Maps.newHashMap();
        List<ConsumerRecord<K, V>> polledMessages = new ArrayList<>(numRecordsToPoll);
        while ((polledMessages.size() < numRecordsToPoll) && (!Thread.currentThread().isInterrupted())) {
            ConsumerRecords<K, V> records = consumer.poll(0);
            for (ConsumerRecord<K, V> rec : records) {
                polledMessages.add(rec);
                commitBuffer.put(
                        new TopicPartition(rec.topic(), rec.partition()),
                        new OffsetAndMetadata(rec.offset() + 1)
                );
                if (polledMessages.size() == numRecordsToPoll) {
                    consumer.commitSync(commitBuffer);
                    break;
                }
            }
        }
        return polledMessages;
    } finally {
        consumer.close();
    }
}
Example #3
Source File: ConsumerLease.java From nifi with Apache License 2.0
private void processRecords(final ConsumerRecords<byte[], byte[]> records) {
    records.partitions().stream().forEach(partition -> {
        List<ConsumerRecord<byte[], byte[]>> messages = records.records(partition);
        if (!messages.isEmpty()) {
            //update maximum offset map for this topic partition
            long maxOffset = messages.stream()
                    .mapToLong(record -> record.offset())
                    .max()
                    .getAsLong();

            //write records to content repository and session
            if (demarcatorBytes != null) {
                writeDemarcatedData(getProcessSession(), messages, partition);
            } else if (readerFactory != null && writerFactory != null) {
                writeRecordData(getProcessSession(), messages, partition);
            } else {
                messages.stream().forEach(message -> {
                    writeData(getProcessSession(), message, partition);
                });
            }

            totalMessages += messages.size();
            uncommittedOffsetsMap.put(partition, new OffsetAndMetadata(maxOffset + 1L));
        }
    });
}
Example #4
Source File: EarliestNativeTest.java From vertx-kafka-client with Apache License 2.0
public static void main(String[] args) {
    Map<String, String> config = new HashMap<>();
    config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    config.put(ConsumerConfig.GROUP_ID_CONFIG, "my-group");
    config.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
    config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

    KafkaConsumer consumer = new KafkaConsumer(config);
    consumer.subscribe(Collections.singleton("my-topic"));

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
        for (ConsumerRecord<String, String> record : records) {
            System.out.println(record);
        }
    }
}
Example #5
Source File: OffsetCommitSync.java From kafka_book_demo with Apache License 2.0
public static void main(String[] args) {
    Properties props = initConfig();
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(topic));

    try {
        while (running.get()) {
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                //do some logical processing.
            }
            consumer.commitSync();
        }
    } finally {
        consumer.close();
    }
}
Example #6
Source File: ProcessingServiceBackend.java From java-11-examples with Apache License 2.0
public void start() {
    Collection<String> topics = Collections.singletonList(TOPIC_SERVICE_REQUESTS);
    this.consumer.subscribe(topics);
    LOG.info("Waiting for requests {} ...", serviceId);
    this.running = true;
    while (running) {
        ConsumerRecords<String, Bytes> records = consumer.poll(Duration.ofMillis(10));
        if (!records.isEmpty()) {
            for (ConsumerRecord<String, Bytes> record : records) {
                try {
                    ServiceRequest request = dataMapper.deserialize(record.value(), ServiceRequest.class);
                    LOG.info("Received Request: {}:{}:{}", record.key(), request.getClientId(), request.getTaskId());
                    ServiceResponse response = new ServiceResponse(request.getTaskId(), request.getClientId(),
                            request.getData(), "response:" + request.getData());
                    Bytes bytes = dataMapper.serialize(response);
                    ProducerRecord<String, Bytes> recordReply =
                            new ProducerRecord<>(TOPIC_SERVICE_RESPONSES, response.getTaskId(), bytes);
                    producer.send(recordReply);
                    LOG.info("Response has been send !");
                } catch (IOException e) {
                    LOG.error("Exception: ", e);
                }
            }
        }
    }
    LOG.info("done {}.", serviceId);
}
Example #7
Source File: AbstractKafkaInputOperator.java From attic-apex-malhar with Apache License 2.0
@Override
public void emitTuples() {
    int count = consumerWrapper.messageSize();
    if (maxTuplesPerWindow > 0) {
        count = Math.min(count, maxTuplesPerWindow - emitCount);
    }
    for (int i = 0; i < count; i++) {
        Pair<String, ConsumerRecord<byte[], byte[]>> tuple = consumerWrapper.pollMessage();
        ConsumerRecord<byte[], byte[]> msg = tuple.getRight();
        emitTuple(tuple.getLeft(), msg);
        AbstractKafkaPartitioner.PartitionMeta pm =
                new AbstractKafkaPartitioner.PartitionMeta(tuple.getLeft(), msg.topic(), msg.partition());
        offsetTrack.put(pm, msg.offset() + 1);
        if (isIdempotent() && !windowStartOffset.containsKey(pm)) {
            windowStartOffset.put(pm, msg.offset());
        }
    }
    emitCount += count;
    processConsumerError();
}
Example #8
Source File: ConsumerRecordsDesc.java From pinpoint with Apache License 2.0
static ConsumerRecordsDesc create(Iterator consumerRecordIterator) {
    Set<String> topicSet = new HashSet<String>(1);
    String remoteAddress = null;
    int count = 0;

    while (consumerRecordIterator.hasNext()) {
        Object consumerRecord = consumerRecordIterator.next();
        if (consumerRecord instanceof ConsumerRecord) {
            if (remoteAddress == null) {
                remoteAddress = getRemoteAddress(consumerRecord);
            }
            String topic = ((ConsumerRecord) consumerRecord).topic();
            topicSet.add(topic);
            count++;
        }
    }

    if (count > 0) {
        return new ConsumerRecordsDesc(topicSet, remoteAddress, count);
    }
    return null;
}
Example #9
Source File: FirstMultiConsumerThreadDemo.java From BigData-In-Practice with Apache License 2.0
@Override
public void run() {
    try {
        while (true) {
            ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : records) {
                //process record.
                System.out.println(getName() + " -> " + record.value());
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        kafkaConsumer.close();
    }
}
Example #10
Source File: KafkaDecoderTest.java From synapse with Apache License 2.0
@Test
public void shouldDecodeBrokenCompoundKeysAsMessageKey() {
    final KafkaDecoder decoder = new KafkaDecoder();
    final ConsumerRecord<String, String> record = new ConsumerRecord<>(
            "ch01",
            0,
            42L,
            1234L, TimestampType.CREATE_TIME,
            -1L, -1, -1,
            "record-key",
            null,
            new RecordHeaders(asList(
                    new RecordHeader("_synapse_msg_partitionKey", "1234".getBytes(UTF_8)),
                    new RecordHeader("_synapse_msg_compactionKey", "key-1234".getBytes(UTF_8))
            ))
    );

    // when
    final TextMessage decodedMessage = decoder.apply(record);

    // then
    assertThat(decodedMessage.getKey().isCompoundKey(), is(false));
    assertThat(decodedMessage.getKey().compactionKey(), is("record-key"));
}
Example #11
Source File: KafkaShortRetentionTestBase.java From Flink-CEPplus with Apache License 2.0
@Override
public String deserialize(ConsumerRecord<byte[], byte[]> record) {
    final long offset = record.offset();
    if (offset != nextExpected) {
        numJumps++;
        nextExpected = offset;
        LOG.info("Registered now jump at offset {}", offset);
    }
    nextExpected++;
    try {
        Thread.sleep(10); // slow down data consumption to trigger log eviction
    } catch (InterruptedException e) {
        throw new RuntimeException("Stopping it");
    }
    return "";
}
Example #12
Source File: JSONKeyValueDeserializationSchemaTest.java From flink with Apache License 2.0
@Test
public void testDeserializeWithMetadata() throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    ObjectNode initialKey = mapper.createObjectNode();
    initialKey.put("index", 4);
    byte[] serializedKey = mapper.writeValueAsBytes(initialKey);

    ObjectNode initialValue = mapper.createObjectNode();
    initialValue.put("word", "world");
    byte[] serializedValue = mapper.writeValueAsBytes(initialValue);

    JSONKeyValueDeserializationSchema schema = new JSONKeyValueDeserializationSchema(true);
    final ConsumerRecord<byte[], byte[]> consumerRecord = newConsumerRecord("topic#1", 3, 4L, serializedKey, serializedValue);
    ObjectNode deserializedValue = schema.deserialize(consumerRecord);

    Assert.assertEquals(4, deserializedValue.get("key").get("index").asInt());
    Assert.assertEquals("world", deserializedValue.get("value").get("word").asText());
    Assert.assertEquals("topic#1", deserializedValue.get("metadata").get("topic").asText());
    Assert.assertEquals(4, deserializedValue.get("metadata").get("offset").asInt());
    Assert.assertEquals(3, deserializedValue.get("metadata").get("partition").asInt());
}
Example #13
Source File: GeoLocationConsumer.java From Microservices-Deployment-Cookbook with MIT License
public void run() {
    Properties props = new Properties();
    props.put("bootstrap.servers", "192.168.99.100:9092");
    props.put("group.id", "geolocationConsumer");
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
        consumer.subscribe(Arrays.asList("geolocations"));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset = %d, key = %s, value = %s%n",
                        record.offset(), record.key(), record.value());
                REPO.addGeoLocation(GSON.fromJson(record.value(), GeoLocation.class));
            }
        }
    } catch (Exception e) {
        System.err.println("Error while consuming geolocations. Details: " + e.getMessage());
    }
}
Example #14
Source File: EventListener.java From eventapis with Apache License 2.0
public void onEventMessage(ConsumerRecord<String, Serializable> record, PublishedEventWrapper eventWrapper) {
    try {
        String topic = record.topic();
        if (topic.equals("operation-events")) {
            log.warn("Topic must not be operation-events");
            return;
        }
        String opId = eventWrapper.getContext().getOpId();
        log.info("opId:" + opId + " EventMessage -> Topic: " + topic + " - Sender: " + eventWrapper.getSender()
                + " - aggregateId: " + eventWrapper.getContext().getCommandContext());
        topicsMap.submitToKey(topic, new EndOffsetSetter(record.partition(), record.offset() + 1));
    } catch (Exception e) {
        log.error("Error While Handling Event:" + e.getMessage(), e);
    }
}
Example #15
Source File: KafkaRecordsConsumerTest.java From synapse with Apache License 2.0
@Test
public void shouldInterceptMessage() {
    // given
    final KafkaRecordsConsumer consumer = someKafkaRecordsConsumer(fromHorizon());
    final ConsumerRecord<String, String> record = someRecord(0, 42L, Clock.systemDefaultZone());

    // when
    registry.register(allChannelsWith((m) -> {
        return TextMessage.of(m.getKey(), m.getHeader(), "intercepted");
    }));
    final ConsumerRecords<String, String> records = new ConsumerRecords<>(ImmutableMap.of(
            new TopicPartition("foo", 0), singletonList(record))
    );
    consumer.apply(records);

    // then
    verify(dispatcher).accept(of(Key.of("key"), of(fromPosition("0", "42")), "intercepted"));
}
Example #16
Source File: KafkaShortRetentionTestBase.java From flink with Apache License 2.0
@Override
public String deserialize(ConsumerRecord<byte[], byte[]> record) {
    final long offset = record.offset();
    if (offset != nextExpected) {
        numJumps++;
        nextExpected = offset;
        LOG.info("Registered now jump at offset {}", offset);
    }
    nextExpected++;
    try {
        Thread.sleep(10); // slow down data consumption to trigger log eviction
    } catch (InterruptedException e) {
        throw new RuntimeException("Stopping it");
    }
    return "";
}
Example #17
Source File: KafkaTestUtil.java From rya with Apache License 2.0
/**
 * Polls a {@link Consumer} until it has either polled too many times without hitting the target number
 * of results, or it hits the target number of results.
 *
 * @param pollMs - How long each poll could take.
 * @param pollIterations - The maximum number of polls that will be attempted.
 * @param targetSize - The number of results to read before stopping.
 * @param consumer - The consumer that will be polled.
 * @return The results that were read from the consumer.
 * @throws Exception If the poll failed.
 */
public static <K, V> List<V> pollForResults(
        final int pollMs,
        final int pollIterations,
        final int targetSize,
        final Consumer<K, V> consumer) throws Exception {
    requireNonNull(consumer);

    final List<V> values = new ArrayList<>();

    int i = 0;
    while (values.size() < targetSize && i < pollIterations) {
        for (final ConsumerRecord<K, V> record : consumer.poll(pollMs)) {
            values.add(record.value());
        }
        i++;
    }

    return values;
}
Example #18
Source File: StreamsSelectAndProjectIntTest.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void shouldUseTimestampExtractedFromDDLStatement() throws Exception {
    final String outputStream = "DDL_TIMESTAMP";
    ksqlContext.sql("CREATE STREAM " + outputStream
            + " WITH(timestamp='ordertime')"
            + " AS SELECT ORDERID, ORDERTIME FROM " + avroTimestampStreamName
            + " WHERE ITEMID='ITEM_4';");

    final List<ConsumerRecord> records =
            testHarness.consumerRecords(outputStream, 1, IntegrationTestHarness.RESULTS_POLL_MAX_TIME_MS);

    final SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
    final long timestamp = records.get(0).timestamp();
    assertThat(timestamp, equalTo(dateFormat.parse("2018-01-04").getTime()));
}
Example #19
Source File: ConsumerMultiRecordEntryPointInterceptorTest.java From pinpoint with Apache License 2.0
@Test
public void createTraceTest1() {
    List<ConsumerRecord> consumerRecordList = new ArrayList<ConsumerRecord>();
    consumerRecordList.add(new ConsumerRecord("Test", 1, 1, "hello", "hello too"));

    doReturn(trace).when(traceContext).newTraceObject();
    doReturn(true).when(trace).canSampled();
    doReturn(recorder).when(trace).getSpanRecorder();
    doReturn(consumerRecordList.iterator()).when(consumerRecords).iterator();

    ConsumerMultiRecordEntryPointInterceptor interceptor =
            new ConsumerMultiRecordEntryPointInterceptor(traceContext, descriptor, 0);
    interceptor.createTrace(new Object(), new Object[]{consumerRecords});

    verify(recorder).recordAcceptorHost("Unknown");
    verify(recorder).recordAttribute(KafkaConstants.KAFKA_TOPIC_ANNOTATION_KEY, "Test");
    verify(recorder).recordAttribute(KafkaConstants.KAFKA_BATCH_ANNOTATION_KEY, 1);
    verify(recorder).recordRpcName("kafka://topic=Test?batch=1");
}
Example #20
Source File: SimpleKafkaConsumer.java From joyqueue with Apache License 2.0
public static void main(String[] args) {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:50088");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "test_app");
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "test_app");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList("test_topic_0"));

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000 * 1));
        for (ConsumerRecord<String, String> record : records) {
            System.out.println(String.format("record, key: %s, value: %s, offset: %s",
                    record.key(), record.value(), record.offset()));
        }
    }
}
Example #21
Source File: TestStreamApplicationIntegrationTestHarness.java From samza with Apache License 2.0
@Test
public void testTheTestHarness() {
    List<String> inputMessages = Arrays.asList("1", "2", "3", "4", "5", "6", "7", "8", "9", "10");

    // create input topic and produce the first batch of input messages
    boolean topicCreated = createTopic(INPUT_TOPIC, 1);
    if (!topicCreated) {
        fail("Could not create input topic.");
    }
    inputMessages.forEach(m -> produceMessage(INPUT_TOPIC, 0, m, m));

    // verify that the input messages were produced successfully
    if (inputMessages.size() > 0) {
        List<ConsumerRecord<String, String>> inputRecords =
                consumeMessages(Collections.singletonList(INPUT_TOPIC), inputMessages.size());
        List<String> readInputMessages = inputRecords.stream().map(ConsumerRecord::value).collect(Collectors.toList());
        Assert.assertEquals(inputMessages, readInputMessages);
    }
}
Example #22
Source File: TracingKafkaTest.java From java-kafka-client with Apache License 2.0
@Test
public void testConsumerBuilderWithStandardSpanNameProvider() throws InterruptedException {
    Producer<Integer, String> producer = createTracingProducer();
    producer.send(new ProducerRecord<>("messages", 1, "test"));
    producer.close();

    assertEquals(1, mockTracer.finishedSpans().size());

    ExecutorService executorService = Executors.newSingleThreadExecutor();
    final CountDownLatch latch = new CountDownLatch(1);

    executorService.execute(() -> {
        Consumer<Integer, String> consumer = createConsumerWithSpanNameProvider(null);
        while (latch.getCount() > 0) {
            ConsumerRecords<Integer, String> records = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<Integer, String> record : records) {
                SpanContext spanContext = TracingKafkaUtils
                        .extractSpanContext(record.headers(), mockTracer);
                assertNotNull(spanContext);
                assertEquals("test", record.value());
                assertEquals((Integer) 1, record.key());
                consumer.commitSync();
                latch.countDown();
            }
        }
        consumer.close();
    });

    assertTrue(latch.await(30, TimeUnit.SECONDS));
    assertEquals("From_messages", mockTracer.finishedSpans().get(1).operationName());
}
Example #23
Source File: ConsumerRecordsRecordsInstrumentation.java From apm-agent-java with Apache License 2.0
@Advice.OnMethodExit(onThrowable = Throwable.class, suppress = Throwable.class)
public static void wrapIterable(@Nullable @Advice.Return(readOnly = false) Iterable<ConsumerRecord> iterable) {
    if (tracer == null || !tracer.isRunning() || tracer.currentTransaction() != null) {
        return;
    }
    //noinspection ConstantConditions,rawtypes
    KafkaInstrumentationHeadersHelper<ConsumerRecord, ProducerRecord> kafkaInstrumentationHelper =
            kafkaInstrHeadersHelperManager.getForClassLoaderOfClass(KafkaProducer.class);
    if (iterable != null && kafkaInstrumentationHelper != null) {
        iterable = kafkaInstrumentationHelper.wrapConsumerRecordIterable(iterable);
    }
}
Example #24
Source File: FeatureSetSpecReadAndWriteTest.java From feast with Apache License 2.0
private List<IngestionJobProto.FeatureSetSpecAck> getFeatureSetSpecAcks() {
    ConsumerRecords<String, IngestionJobProto.FeatureSetSpecAck> consumerRecords =
            consumer.poll(java.time.Duration.ofSeconds(KAFKA_POLL_TIMEOUT_SEC));

    return Lists.newArrayList(consumerRecords.records(KAFKA_SPECS_ACK_TOPIC)).stream()
            .map(ConsumerRecord::value)
            .collect(Collectors.toList());
}
Example #25
Source File: SubpartitionSupplierTest.java From kafka-workers with Apache License 2.0
@Test
public void shouldReturnSubpartitions() {
    // given
    SubpartitionSupplier<byte[], byte[]> subpartitionSupplier =
            new SubpartitionSupplier<>(new WorkerPartitioner<byte[], byte[]>() {

                @Override
                public int subpartition(ConsumerRecord<byte[], byte[]> consumerRecord) {
                    return 1;
                }

                @Override
                public int count(TopicPartition topicPartition) {
                    return topicPartition.partition();
                }
            });

    // when
    List<WorkerSubpartition> subpartitions = subpartitionSupplier
            .subpartitions(Arrays.asList(new TopicPartition("topic", 2), new TopicPartition("topic", 3)));

    // then
    assertThat(subpartitions.size()).isEqualTo(5);
    assertThat(subpartitions.get(0)).isEqualTo(WorkerSubpartition.getInstance("topic", 2, 0));
    assertThat(subpartitions.get(1)).isEqualTo(WorkerSubpartition.getInstance("topic", 2, 1));
    assertThat(subpartitions.get(2)).isEqualTo(WorkerSubpartition.getInstance("topic", 3, 0));
    assertThat(subpartitions.get(3)).isEqualTo(WorkerSubpartition.getInstance("topic", 3, 1));
    assertThat(subpartitions.get(4)).isEqualTo(WorkerSubpartition.getInstance("topic", 3, 2));
}
Example #26
Source File: UserDeserializer.java From stateful-functions with Apache License 2.0
@Override
public User deserialize(ConsumerRecord<byte[], byte[]> input) {
    try {
        return mapper.readValue(input.value(), User.class);
    } catch (IOException e) {
        LOG.debug("Failed to deserialize record", e);
        return null;
    }
}
Example #27
Source File: DataPullingSpout.java From DBus with Apache License 2.0
@Override
public void fail(Object msgId) {
    try {
        if (msgId != null && ConsumerRecord.class.isInstance(msgId)) {
            ConsumerRecord<String, byte[]> record = getMessageId(msgId);
            JSONObject wrapperJson = JSONObject.parseObject(new String(record.value()));
            String reqString = wrapperJson.getString(FullPullConstants.FULLPULL_REQ_PARAM);
            Long id = FullPullHelper.getSeqNo(reqString);
            String splitIndex = wrapperJson.getString(FullPullConstants.DATA_CHUNK_SPLIT_INDEX);
            logger.error("[pull fail] topic: {}, offset: {}, split index:{}, key: {}",
                    record.topic(), record.offset(), splitIndex, record.key());
            shardsProcessManager.failAndClearShardElementQueue(id, record.offset());
            //write to the monitor and send the error response, but report the error only once
            if (!failAndBreakTuplesSet.contains(reqString)) {
                FullPullHelper.finishPullReport(reqString, FullPullConstants.FULL_PULL_STATUS_ABORT, null);
            }
            sendFinishMsgToBolt(JSONObject.parseObject(reqString));
            failAndBreakTuplesSet.add(reqString);
        }
        super.fail(msgId);
    } catch (Exception e) {
        logger.error("[pull fail] exception!", e);
    }
}
Example #28
Source File: DBusRouterEncodeBolt.java From DBus with Apache License 2.0
private String processUmsMsg(EmitWarp<ConsumerRecord<String, byte[]>> data) throws Exception {
    DbusMessage ums = obtainUms(data);
    updateSchemaChangeFlag(data.getTableId());
    if (encodeConfigMap != null && encodeConfigMap.get(data.getTableId()) != null) {
        UmsEncoder encoder = new PluggableMessageEncoder(PluginManagerProvider.getManager(), (e, column, message) -> {
        });
        encoder.encode(ums, encodeConfigMap.get(data.getTableId()));
    } else {
        // original log message translated from Chinese: masking configuration is empty, so masking is skipped
        logger.debug("table id:{}, name space:{}, masking configuration is empty, so masking is skipped.",
                data.getTableId(), data.getNameSpace());
    }
    return ums.toString();
}
Example #29
Source File: KafkaIO.java From flink-statefun with Apache License 2.0
@Override
public VerificationMessages.Command deserialize(ConsumerRecord<byte[], byte[]> input) {
    try {
        return VerificationMessages.Command.parseFrom(input.value());
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
Example #30
Source File: OffsetBlockingMessageQueueTest.java From ja-micro with Apache License 2.0
@Test
public void queue_addRecord_executed() {
    ConsumerRecord record = new ConsumerRecord<>(topic, 0, 0, defaultKey, defaultValue);
    messageQueue.add(record);

    ArgumentCaptor<ConsumerRecord> captor = ArgumentCaptor.forClass(ConsumerRecord.class);
    verify(messageExecutor).execute(captor.capture());
    assertThat(captor.getValue()).isEqualTo(record);
}