org.apache.kafka.clients.consumer.ConsumerRecord Java Examples

The following examples show how to use org.apache.kafka.clients.consumer.ConsumerRecord. Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
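Before looking at the project-specific examples, here is a minimal, self-contained sketch of the pattern most of them share: poll a KafkaConsumer and read the topic, partition, offset, key, and value off each ConsumerRecord. The broker address, group id, and topic name below are placeholders, not values from any of the projects listed here.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ConsumerRecordQuickStart {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic"));   // placeholder topic
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records) {
                // Each ConsumerRecord carries the coordinates and payload of one message.
                System.out.printf("topic=%s partition=%d offset=%d key=%s value=%s%n",
                        record.topic(), record.partition(), record.offset(),
                        record.key(), record.value());
            }
        }
    }
}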
Example #1
Source File: TestStreamApplicationIntegrationTestHarness.java    From samza with Apache License 2.0
@Test
public void testTheTestHarness() {
  List<String> inputMessages = Arrays.asList("1", "2", "3", "4", "5", "6", "7", "8", "9", "10");
  // create input topic and produce the first batch of input messages
  boolean topicCreated = createTopic(INPUT_TOPIC, 1);
  if (!topicCreated) {
    fail("Could not create input topic.");
  }
  inputMessages.forEach(m -> produceMessage(INPUT_TOPIC, 0, m, m));

  // verify that the input messages were produced successfully
  if (inputMessages.size() > 0) {
    List<ConsumerRecord<String, String>> inputRecords =
        consumeMessages(Collections.singletonList(INPUT_TOPIC), inputMessages.size());
    List<String> readInputMessages = inputRecords.stream().map(ConsumerRecord::value).collect(Collectors.toList());
    Assert.assertEquals(inputMessages, readInputMessages);
  }
}
 
Example #2
Source File: EventListener.java    From eventapis with Apache License 2.0
public void onEventMessage(ConsumerRecord<String, Serializable> record, PublishedEventWrapper eventWrapper) {
    try {
        String topic = record.topic();
        if (topic.equals("operation-events")) {
            log.warn("Topic must not be operation-events");
            return;
        }
        String opId = eventWrapper.getContext().getOpId();
        log.info("opId:" + opId + " EventMessage -> Topic: " + topic
                + " - Sender: " + eventWrapper.getSender() + " - aggregateId: " + eventWrapper.getContext().getCommandContext());

        topicsMap.submitToKey(topic, new EndOffsetSetter(record.partition(), record.offset() + 1));

    } catch (Exception e) {
        log.error("Error While Handling Event:" + e.getMessage(), e);
    }
}
 
Example #3
Source File: ConsumerRecordsDesc.java    From pinpoint with Apache License 2.0
static ConsumerRecordsDesc create(Iterator consumerRecordIterator) {
    Set<String> topicSet = new HashSet<String>(1);
    String remoteAddress = null;
    int count = 0;

    while (consumerRecordIterator.hasNext()) {
        Object consumerRecord = consumerRecordIterator.next();
        if (consumerRecord instanceof ConsumerRecord) {
            if (remoteAddress == null) {
                remoteAddress = getRemoteAddress(consumerRecord);
            }

            String topic = ((ConsumerRecord) consumerRecord).topic();
            topicSet.add(topic);
            count++;
        }
    }

    if (count > 0) {
        return new ConsumerRecordsDesc(topicSet, remoteAddress, count);
    }

    return null;
}
 
Example #4
Source File: GeoLocationConsumer.java    From Microservices-Deployment-Cookbook with MIT License
public void run() {
	Properties props = new Properties();
	props.put("bootstrap.servers", "192.168.99.100:9092");
	props.put("group.id", "geolocationConsumer");
	props.put("key.deserializer", StringDeserializer.class.getName());
	props.put("value.deserializer", StringDeserializer.class.getName());

	try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
		consumer.subscribe(Arrays.asList("geolocations"));
		while (true) {
			ConsumerRecords<String, String> records = consumer.poll(100);
			for (ConsumerRecord<String, String> record : records) {
				System.out.printf("offset = %d, key = %s, value = %s%n", 
						record.offset(), 
						record.key(), 
						record.value());

				REPO.addGeoLocation(GSON.fromJson(record.value(), GeoLocation.class));
			}
		}
	} catch (Exception e) {
		System.err.println("Error while consuming geolocations. Details: " + e.getMessage());
	}
}
 
Example #5
Source File: JSONKeyValueDeserializationSchemaTest.java    From flink with Apache License 2.0
@Test
public void testDeserializeWithMetadata() throws Exception {
	ObjectMapper mapper = new ObjectMapper();
	ObjectNode initialKey = mapper.createObjectNode();
	initialKey.put("index", 4);
	byte[] serializedKey = mapper.writeValueAsBytes(initialKey);

	ObjectNode initialValue = mapper.createObjectNode();
	initialValue.put("word", "world");
	byte[] serializedValue = mapper.writeValueAsBytes(initialValue);

	JSONKeyValueDeserializationSchema schema = new JSONKeyValueDeserializationSchema(true);
	final ConsumerRecord<byte[], byte[]> consumerRecord =
			newConsumerRecord("topic#1", 3, 4L, serializedKey, serializedValue);
	ObjectNode deserializedValue = schema.deserialize(consumerRecord);

	Assert.assertEquals(4, deserializedValue.get("key").get("index").asInt());
	Assert.assertEquals("world", deserializedValue.get("value").get("word").asText());
	Assert.assertEquals("topic#1", deserializedValue.get("metadata").get("topic").asText());
	Assert.assertEquals(4, deserializedValue.get("metadata").get("offset").asInt());
	Assert.assertEquals(3, deserializedValue.get("metadata").get("partition").asInt());
}
 
Example #6
Source File: KafkaRecordsConsumerTest.java    From synapse with Apache License 2.0
@Test
public void shouldInterceptMessage() {
    // given
    final KafkaRecordsConsumer consumer = someKafkaRecordsConsumer(fromHorizon());

    final ConsumerRecord<String, String> record = someRecord(0, 42L, Clock.systemDefaultZone());

    // when
    registry.register(allChannelsWith((m) -> {
        return TextMessage.of(m.getKey(), m.getHeader(), "intercepted");
    }));

    final ConsumerRecords<String,String> records = new ConsumerRecords<>(ImmutableMap.of(
            new TopicPartition("foo", 0),
            singletonList(record))
    );
    consumer.apply(records);

    // then
    verify(dispatcher).accept(of(Key.of("key"), of(fromPosition("0", "42")), "intercepted"));
}
 
Example #7
Source File: AbstractKafkaInputOperator.java    From attic-apex-malhar with Apache License 2.0
@Override
public void emitTuples()
{
  int count = consumerWrapper.messageSize();
  if (maxTuplesPerWindow > 0) {
    count = Math.min(count, maxTuplesPerWindow - emitCount);
  }
  for (int i = 0; i < count; i++) {
    Pair<String, ConsumerRecord<byte[], byte[]>> tuple = consumerWrapper.pollMessage();
    ConsumerRecord<byte[], byte[]> msg = tuple.getRight();
    emitTuple(tuple.getLeft(), msg);
    AbstractKafkaPartitioner.PartitionMeta pm = new AbstractKafkaPartitioner.PartitionMeta(tuple.getLeft(),
        msg.topic(), msg.partition());
    offsetTrack.put(pm, msg.offset() + 1);
    if (isIdempotent() && !windowStartOffset.containsKey(pm)) {
      windowStartOffset.put(pm, msg.offset());
    }
  }
  emitCount += count;
  processConsumerError();
}
 
Example #8
Source File: ProcessingServiceBackend.java    From java-11-examples with Apache License 2.0
public void start() {
    Collection<String> topics = Collections.singletonList(TOPIC_SERVICE_REQUESTS);
    this.consumer.subscribe(topics);
    LOG.info("Waiting for requests {} ...", serviceId);
    this.running = true;
    while (running) {
        ConsumerRecords<String, Bytes> records = consumer.poll(Duration.ofMillis(10));
        if (!records.isEmpty()) {
            for (ConsumerRecord<String, Bytes> record: records) {
                try {
                    ServiceRequest request = dataMapper.deserialize(record.value(), ServiceRequest.class);
                    LOG.info("Received Request: {}:{}:{}", record.key(), request.getClientId(), request.getTaskId());
                    ServiceResponse response =
                            new ServiceResponse(request.getTaskId(), request.getClientId(), request.getData(), "response:" + request.getData());
                    Bytes bytes = dataMapper.serialize(response);
                    ProducerRecord<String, Bytes> recordReply = new ProducerRecord<>(TOPIC_SERVICE_RESPONSES, response.getTaskId(), bytes);
                    producer.send(recordReply);
                    LOG.info("Response has been send !");
                } catch (IOException e) {
                    LOG.error("Exception: ", e);
                }
            }
        }
    }
    LOG.info("done {}.", serviceId);
}
 
Example #9
Source File: FirstMultiConsumerThreadDemo.java    From BigData-In-Practice with Apache License 2.0
@Override
public void run() {
    try {
        while (true) {
            ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : records) {
                //process record.
                System.out.println(getName() + " -> " + record.value());
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        kafkaConsumer.close();
    }
}
 
Example #10
Source File: OffsetCommitSync.java    From kafka_book_demo with Apache License 2.0
public static void main(String[] args) {
    Properties props = initConfig();
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(topic));

    try {
        while (running.get()) {
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                //do some logical processing.
            }
            consumer.commitSync();
        }
    } finally {
        consumer.close();
    }
}
 
Example #11
Source File: KafkaShortRetentionTestBase.java    From flink with Apache License 2.0
@Override
public String deserialize(ConsumerRecord<byte[], byte[]> record) {
	final long offset = record.offset();
	if (offset != nextExpected) {
		numJumps++;
		nextExpected = offset;
		LOG.info("Registered now jump at offset {}", offset);
	}
	nextExpected++;
	try {
		Thread.sleep(10); // slow down data consumption to trigger log eviction
	} catch (InterruptedException e) {
		throw new RuntimeException("Stopping it");
	}
	return "";
}
 
Example #12
Source File: EarliestNativeTest.java    From vertx-kafka-client with Apache License 2.0
public static void main(String[] args) {

  Map<String, String> config = new HashMap<>();
  config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
  config.put(ConsumerConfig.GROUP_ID_CONFIG, "my-group");
  config.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
  config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

  KafkaConsumer<String, String> consumer = new KafkaConsumer<>(config);
  consumer.subscribe(Collections.singleton("my-topic"));

  while (true) {
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
    for (ConsumerRecord<String, String> record : records) {
      System.out.println(record);
    }
  }
}
 
Example #13
Source File: KafkaTestUtil.java    From rya with Apache License 2.0
/**
 * Polls a {@link Consumer} until it has either polled too many times without hitting the target number
 * of results, or it hits the target number of results.
 *
 * @param pollMs - How long each poll could take.
 * @param pollIterations - The maximum number of polls that will be attempted.
 * @param targetSize - The number of results to read before stopping.
 * @param consumer - The consumer that will be polled.
 * @return The results that were read from the consumer.
 * @throws Exception If the poll failed.
 */
public static <K, V> List<V> pollForResults(
        final int pollMs,
        final int pollIterations,
        final int targetSize,
        final Consumer<K, V> consumer) throws Exception {
    requireNonNull(consumer);

    final List<V> values = new ArrayList<>();

    int i = 0;
    while(values.size() < targetSize && i < pollIterations) {
        for(final ConsumerRecord<K, V> record : consumer.poll(pollMs)) {
            values.add( record.value() );
        }
        i++;
    }

    return values;
}
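For context, here is a minimal usage sketch of the helper above. The topic name, prior consumer setup, and expected count are illustrative assumptions, not part of the original file.

// Hypothetical usage: the consumer is assumed to already be configured and subscribed.
consumer.subscribe(Collections.singletonList("results-topic"));        // illustrative topic name
final List<String> results = KafkaTestUtil.pollForResults(500, 10, 3, consumer);
assertEquals(3, results.size());                                        // expect three values within ten polls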
 
Example #14
Source File: ConsumerLease.java    From nifi with Apache License 2.0
private void processRecords(final ConsumerRecords<byte[], byte[]> records) {
    records.partitions().stream().forEach(partition -> {
        List<ConsumerRecord<byte[], byte[]>> messages = records.records(partition);
        if (!messages.isEmpty()) {
            //update maximum offset map for this topic partition
            long maxOffset = messages.stream()
                    .mapToLong(record -> record.offset())
                    .max()
                    .getAsLong();

            //write records to content repository and session
            if (demarcatorBytes != null) {
                writeDemarcatedData(getProcessSession(), messages, partition);
            } else if (readerFactory != null && writerFactory != null) {
                writeRecordData(getProcessSession(), messages, partition);
            } else {
                messages.stream().forEach(message -> {
                    writeData(getProcessSession(), message, partition);
                });
            }

            totalMessages += messages.size();
            uncommittedOffsetsMap.put(partition, new OffsetAndMetadata(maxOffset + 1L));
        }
    });
}
 
Example #15
Source File: SimpleKafkaConsumer.java    From joyqueue with Apache License 2.0
public static void main(String[] args) {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:50088");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "test_app");
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "test_app");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList("test_topic_0"));

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000 * 1));
        for (ConsumerRecord<String, String> record : records) {
            System.out.println(String.format("record, key: %s, value: %s, offset: %s", record.key(), record.value(), record.offset()));
        }
    }
}
 
Example #16
Source File: KafkaShortRetentionTestBase.java    From Flink-CEPplus with Apache License 2.0
@Override
public String deserialize(ConsumerRecord<byte[], byte[]> record) {
	final long offset = record.offset();
	if (offset != nextExpected) {
		numJumps++;
		nextExpected = offset;
		LOG.info("Registered now jump at offset {}", offset);
	}
	nextExpected++;
	try {
		Thread.sleep(10); // slow down data consumption to trigger log eviction
	} catch (InterruptedException e) {
		throw new RuntimeException("Stopping it");
	}
	return "";
}
 
Example #17
Source File: KafkaHelper.java    From kafka-junit with Apache License 2.0
@Override
public List<ConsumerRecord<K, V>> call() throws Exception {
    try {
        Map<TopicPartition, OffsetAndMetadata> commitBuffer = Maps.newHashMap();
        List<ConsumerRecord<K, V>> polledMessages = new ArrayList<>(numRecordsToPoll);
        while ((polledMessages.size() < numRecordsToPoll) && (!Thread.currentThread().isInterrupted())) {
            ConsumerRecords<K, V> records = consumer.poll(0);
            for (ConsumerRecord<K, V> rec : records) {
                polledMessages.add(rec);
                commitBuffer.put(
                        new TopicPartition(rec.topic(), rec.partition()),
                        new OffsetAndMetadata(rec.offset() + 1)
                );

                if (polledMessages.size() == numRecordsToPoll) {
                    consumer.commitSync(commitBuffer);
                    break;
                }
            }
        }
        return polledMessages;
    } finally {
        consumer.close();
    }
}
 
Example #18
Source File: KafkaDecoderTest.java    From synapse with Apache License 2.0
@Test
public void shouldDecodeBrokenCompoundKeysAsMessageKey() {
    final KafkaDecoder decoder = new KafkaDecoder();
    final ConsumerRecord<String,String> record = new ConsumerRecord<>(
            "ch01",
            0,
            42L,
            1234L, TimestampType.CREATE_TIME,
            -1L, -1, -1,
            "record-key",
            null,
            new RecordHeaders(asList(
                    new RecordHeader("_synapse_msg_partitionKey", "1234".getBytes(UTF_8)),
                    new RecordHeader("_synapse_msg_compactionKey", "key-1234".getBytes(UTF_8))
            ))
    );

    // when
    final TextMessage decodedMessage = decoder.apply(record);

    // then
    assertThat(decodedMessage.getKey().isCompoundKey(), is(false));
    assertThat(decodedMessage.getKey().compactionKey(), is("record-key"));
}
 
Example #19
Source File: StreamsSelectAndProjectIntTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void shouldUseTimestampExtractedFromDDLStatement() throws Exception {
  final String outputStream = "DDL_TIMESTAMP";
  ksqlContext.sql("CREATE STREAM "+  outputStream
      + " WITH(timestamp='ordertime')"
      + " AS SELECT ORDERID, ORDERTIME FROM "
      + avroTimestampStreamName
      + " WHERE ITEMID='ITEM_4';");

  final List<ConsumerRecord> records = testHarness.consumerRecords(outputStream,
      1,
      IntegrationTestHarness.RESULTS_POLL_MAX_TIME_MS);

  final SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
  final long timestamp = records.get(0).timestamp();
  assertThat(timestamp, equalTo(dateFormat.parse("2018-01-04").getTime()));
}
 
Example #20
Source File: ConsumerMultiRecordEntryPointInterceptorTest.java    From pinpoint with Apache License 2.0
@Test
public void createTraceTest1() {
    List<ConsumerRecord> consumerRecordList = new ArrayList<ConsumerRecord>();
    consumerRecordList.add(new ConsumerRecord("Test", 1, 1, "hello", "hello too"));

    doReturn(trace).when(traceContext).newTraceObject();
    doReturn(true).when(trace).canSampled();
    doReturn(recorder).when(trace).getSpanRecorder();
    doReturn(consumerRecordList.iterator()).when(consumerRecords).iterator();

    ConsumerMultiRecordEntryPointInterceptor interceptor = new ConsumerMultiRecordEntryPointInterceptor(traceContext, descriptor, 0);
    interceptor.createTrace(new Object(), new Object[]{consumerRecords});

    verify(recorder).recordAcceptorHost("Unknown");
    verify(recorder).recordAttribute(KafkaConstants.KAFKA_TOPIC_ANNOTATION_KEY, "Test");
    verify(recorder).recordAttribute(KafkaConstants.KAFKA_BATCH_ANNOTATION_KEY, 1);
    verify(recorder).recordRpcName("kafka://topic=Test?batch=1");
}
 
Example #21
Source File: SettleListenerOfPayment.java    From reliable with Apache License 2.0
@ReliableOnConsumed(svc = "cat-settle", nextTopic = "CAT_SETTLE_CREATED", nextRetryMax = 2, nextSvcs = {"cat-statement"})
@KafkaListener(topics = "CAT_PAID_TCC_TRY")
public CatStatement onCatPaid_TCC_TRY(ConsumerRecord<String, String> record) {

    ReliableDto dto = dtoConverter.convertOnConsumed(record);
    //--------------

    CatSettle catSettle = new CatSettle();
    catSettle.setId("CAT_SETTLE_TEST");
    catSettle.setName("TRY");

    settleController.create(catSettle);

    CatStatement catStatement = new CatStatement();
    catStatement.setId("TEST_STATEMENT");
    catStatement.setTest("CAT_SETTLE_CREATED");

    return catStatement;

}
 
Example #22
Source File: BqIntegrationTest.java    From beast with Apache License 2.0
@Ignore
@Test
public void shouldPushTestNestedRepeatedMessages() throws InvalidProtocolBufferException {
    Instant now = Instant.now();
    long second = now.getEpochSecond();
    ProtoParser protoParser = new ProtoParser(StencilClientFactory.getClient(), TestNestedRepeatedMessage.class.getName());
    TestNestedRepeatedMessage protoMessage = TestNestedRepeatedMessage.newBuilder()
            .addRepeatedMessage(ProtoUtil.generateTestMessage(now))
            .addRepeatedMessage(ProtoUtil.generateTestMessage(now))
            .build();

    TableId tableId = TableId.of("bqsinktest", "nested_messages");
    BqSink bqSink = new BqSink(authenticatedBQ(), tableId, new BQResponseParser(), gcsSinkHandler, bqRow);

    ColumnMapping columnMapping = new ColumnMapping();
    ColumnMapping nested = new ColumnMapping();
    nested.put("record_name", "messsages");
    nested.put("1", "order_number");
    nested.put("2", "order_url");
    columnMapping.put("2", nested);
    ConsumerRecordConverter customConverter = new ConsumerRecordConverter(new RowMapper(columnMapping), protoParser, clock);


    ConsumerRecord<byte[], byte[]> consumerRecord = new ConsumerRecord<>("topic", 1, 1, second, TimestampType.CREATE_TIME,
            0, 0, 1, null, protoMessage.toByteArray());

    List<Record> records = customConverter.convert(Collections.singleton(consumerRecord));
    Status push = bqSink.push(new Records(records));
    assertTrue(push.isSuccess());
}
 
Example #23
Source File: KafkaConsumerUtil.java    From beast with Apache License 2.0
public ConsumerRecord<byte[], byte[]> createConsumerRecord(String orderNumber, String orderUrl, String orderDetails) {
    TestKey key = TestKey.newBuilder()
            .setOrderNumber(orderNumber)
            .setOrderUrl(orderUrl)
            .build();
    TestMessage message = TestMessage.newBuilder()
            .setOrderNumber(orderNumber)
            .setOrderUrl(orderUrl)
            .setOrderDetails(orderDetails)
            .build();
    return new ConsumerRecord<>(topic, partition, offset++, timestamp, TimestampType.CREATE_TIME, 0, 0, 0, key.toByteArray(), message.toByteArray());
}
 
Example #24
Source File: FrontServiceKafka.java    From MicroCommunity with Apache License 2.0
@KafkaListener(topics = {"webSentMessageTopic"})
public void listen(ConsumerRecord<?, ?> record) {
    logger.info("kafka的key: " + record.key());
    logger.info("kafka的value: " + record.value().toString());

    JSONObject param = null;
    try {
        param = JSONObject.parseObject(record.value().toString());
        MessageWebsocket.sendInfo(param.toJSONString(), param.getString("userId"));
    } catch (Exception e) {
        logger.error("Failed to send message", e);
    } finally {

    }
}
 
Example #25
Source File: KafkaProducerTest.java    From quarkus with Apache License 2.0
@Test
public void test() throws Exception {
    KafkaConsumer<Integer, String> consumer = createConsumer();
    RestAssured.with().body("hello").post("/kafka");
    ConsumerRecord<Integer, String> records = consumer.poll(Duration.ofMillis(10000)).iterator().next();
    Assertions.assertEquals(records.key(), (Integer) 0);
    Assertions.assertEquals(records.value(), "hello");
}
 
Example #26
Source File: ConsumerPoolTest.java    From localization_nifi with Apache License 2.0
@SuppressWarnings({"rawtypes", "unchecked"})
static ConsumerRecords<byte[], byte[]> createConsumerRecords(final String topic, final int partition, final long startingOffset, final byte[][] rawRecords) {
    final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> map = new HashMap<>();
    final TopicPartition tPart = new TopicPartition(topic, partition);
    final List<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>();
    long offset = startingOffset;
    for (final byte[] rawRecord : rawRecords) {
        final ConsumerRecord<byte[], byte[]> rec = new ConsumerRecord(topic, partition, offset++, UUID.randomUUID().toString().getBytes(), rawRecord);
        records.add(rec);
    }
    map.put(tPart, records);
    return new ConsumerRecords(map);
}
 
Example #27
Source File: MobilePraseLog.java    From javabase with Apache License 2.0
@Override
public void doWork() {
    consumer.subscribe(Collections.singletonList(TOPIC));
    ConsumerRecords<String, String> records = consumer.poll(1000);
    for (ConsumerRecord<String, String> record : records) {
        // TODO: 2016/5/24 the log could be processed/enriched here
        System.out.println("Received log: " + record.value());
    }
}
 
Example #28
Source File: ProcessingKafkaConsumerTest.java    From common-kafka with Apache License 2.0
@Test
public void ack() {
    long previousAckCount = ProcessingKafkaConsumer.ACK_METER.count();

    Optional<ConsumerRecord<String, String>> optional = processingConsumer.nextRecord(POLL_TIME);
    assertThat("optional is not present", optional.isPresent(), is(true));

    assertThat(processingConsumer.ack(topicPartition, offset), is(true));
    assertThat(processingConsumer.getCommittableOffsets().get(topicPartition), is(new OffsetAndMetadata(offset + 1)));
    assertThat(ProcessingKafkaConsumer.ACK_METER.count(), is(previousAckCount + 1));
}
 
Example #29
Source File: KafkaConsumer.java    From myth with Apache License 2.0
/**
 * Kafka listener.
 *
 * @param record the record
 */
@KafkaListener(topics = {"inventory"})
public void kafkaListener(ConsumerRecord<?, byte[]> record) {
    Optional<?> messages = Optional.ofNullable(record.value());
    if (messages.isPresent()) {
        byte[] msg = (byte[]) messages.get();
        LogUtil.debug(LOGGER, "Received Myth distributed framework message object: {}", () -> msg);
        mythMqReceiveService.processMessage(msg);

    }
}
 
Example #30
Source File: ChangelogListenerImpl.java    From mapr-music with Apache License 2.0
@Override
public void listen() {

    if (this.onInsert == null && this.onUpdate == null && this.onDelete == null) {
        log.warn("There is no callbacks set. Listening change data records without callbacks has no effect.");
    }

    this.consumer.subscribe(Collections.singletonList(this.changelog));
    log.info("Start listening changelog '{}'", this.changelog);

    new Thread(() -> {
        while (true) {

            ConsumerRecords<byte[], ChangeDataRecord> changeRecords = consumer.poll(KAFKA_CONSUMER_POLL_TIMEOUT);
            for (ConsumerRecord<byte[], ChangeDataRecord> consumerRecord : changeRecords) {

                // The ChangeDataRecord contains all the changes made to a document
                ChangeDataRecord changeDataRecord = consumerRecord.value();
                ChangeDataRecordType recordType = changeDataRecord.getType();
                switch (recordType) {
                    case RECORD_INSERT:
                        handleInsert(changeDataRecord);
                        break;
                    case RECORD_UPDATE:
                        handleUpdate(changeDataRecord);
                        break;
                    case RECORD_DELETE:
                        handleDelete(changeDataRecord);
                        break;
                    default:
                        log.warn("Get record of unknown type '{}'. Ignoring ...", recordType);
                }
            }

        }
    }).start();
}