org.apache.kafka.clients.consumer.ConsumerConfig Java Examples

The following examples show how to use org.apache.kafka.clients.consumer.ConsumerConfig, drawn from a range of open-source projects. The originating project, source file, and license are noted above each example.
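Before diving into the individual examples, here is a minimal, self-contained sketch of the pattern they all share: ConsumerConfig supplies the canonical string constants for consumer property keys (bootstrap.servers, group.id, the deserializer classes, and so on), which are collected into a Properties or Map and handed to a KafkaConsumer. The broker address, group id, and topic below are placeholders, not values from any of the projects listed.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ConsumerConfigQuickstart {
    public static void main(String[] args) {
        Properties props = new Properties();
        // ConsumerConfig constants are just the canonical property-key strings,
        // e.g. ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG is "bootstrap.servers".
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "quickstart-group");        // placeholder group
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("quickstart-topic")); // placeholder topic
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("%d:%d %s%n", record.partition(), record.offset(), record.value());
            }
        }
    }
}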
Example #1
Source File: DeserializtionErrorHandlerByBinderTests.java    From spring-cloud-stream-binder-kafka with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
	System.setProperty("spring.cloud.stream.kafka.streams.binder.brokers",
			embeddedKafka.getBrokersAsString());
	System.setProperty("server.port", "0");
	System.setProperty("spring.jmx.enabled", "false");

	Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("kafka-streams-dlq-tests", "false",
			embeddedKafka);
	consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
	DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(
			consumerProps);
	consumer = cf.createConsumer();
	embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts-id");
}
 
Example #2
Source File: KafkaConfig.java    From Mastering-Distributed-Tracing with MIT License
private ConsumerFactory<String, Message> consumerFactory() throws Exception {
    Map<String, Object> props = new HashMap<>();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, clientId());
    props.put(ConsumerConfig.GROUP_ID_CONFIG, app.name);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 1000);
    props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "100");
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
    props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");

    return new TracingConsumerFactory<>( //
            new DefaultKafkaConsumerFactory<String, Message>( //
                    props, //
                    new StringDeserializer(), //
                    new JsonDeserializer<>(Message.class)));
}
 
Example #3
Source File: KafkaAdminClientTest.java    From common-kafka with Apache License 2.0
@Test
public void getConsumerGroupSummary() {
    client.createTopic(testName.getMethodName(), 1, 1);

    Properties properties = new Properties();
    properties.putAll(KafkaTests.getProps());
    properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class.getName());
    properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class.getName());
    properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testName.getMethodName());
    properties.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testName.getMethodName() + "-client-id");

    try (Consumer<Object, Object> consumer = new KafkaConsumer<>(properties)) {
        consumer.subscribe(Arrays.asList(testName.getMethodName()));
        consumer.poll(Duration.ofSeconds(5L));

        AdminClient.ConsumerGroupSummary summary = client.getConsumerGroupSummary(testName.getMethodName());
        assertThat("Expected only 1 consumer summary when getConsumerGroupSummaries(" + testName.getMethodName() + ")",
                convertToJavaSet(summary.consumers().get().iterator()).size(), is(1));

        assertThat(summary.state(), is(notNullValue()));
        assertThat(summary.coordinator(), is(notNullValue()));
        assertThat(summary.assignmentStrategy(), is(notNullValue()));
    }
}
 
Example #4
Source File: KafkaRyaStreamsClientFactory.java    From rya with Apache License 2.0
/**
 * Create a {@link Consumer} that has a unique group ID and reads everything from a topic in Kafka
 * starting at the earliest point by default.
 *
 * @param kafkaHostname - The Kafka broker hostname. (not null)
 * @param kafkaPort - The Kafka broker port.
 * @param keyDeserializerClass - Deserializes the keys. (not null)
 * @param valueDeserializerClass - Deserializes the values. (not null)
 * @return A {@link Consumer} that can be used to read records from a topic.
 */
private static <K, V> Consumer<K, V> fromStartConsumer(
        final String kafkaHostname,
        final int kafkaPort,
        final Class<? extends Deserializer<K>> keyDeserializerClass,
        final Class<? extends Deserializer<V>> valueDeserializerClass) {
    requireNonNull(kafkaHostname);
    requireNonNull(keyDeserializerClass);
    requireNonNull(valueDeserializerClass);

    final Properties consumerProps = new Properties();
    consumerProps.setProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, kafkaHostname + ":" + kafkaPort);
    consumerProps.setProperty(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    consumerProps.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, UUID.randomUUID().toString());
    consumerProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    consumerProps.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass.getName());
    consumerProps.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass.getName());
    return new KafkaConsumer<>(consumerProps);
}
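A call site for this factory might look like the following sketch. It is hypothetical, since the method's callers are not shown on this page; the hostname and port are illustrative.

// Hypothetical call site: a consumer of String keys/values that reads from the start.
final Consumer<String, String> consumer = fromStartConsumer(
        "localhost", 9092, StringDeserializer.class, StringDeserializer.class);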
 
Example #5
Source File: Consumer.java    From joyqueue with Apache License 2.0
public Consumer(String topic) {
    //super("KafkaConsumerExample", false);
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KafkaConfigs.BOOTSTRAP);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, KafkaConfigs.GROUP_ID);
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, KafkaConfigs.GROUP_ID);
    //props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
    //props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    //props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
    props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");

    consumer = new KafkaConsumer<>(props);
    this.topic = topic;
}
 
Example #6
Source File: KeycloakClientCredentialsWithJwtValidationAuthzTest.java    From strimzi-kafka-oauth with Apache License 2.0
static Properties buildConsumerConfig(String accessToken) {
    Properties p = new Properties();
    p.setProperty("security.protocol", "SASL_PLAINTEXT");
    p.setProperty("sasl.mechanism", "OAUTHBEARER");
    p.setProperty("sasl.jaas.config", "org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required " +
            " oauth.access.token=\"" + accessToken + "\";");
    p.setProperty("sasl.login.callback.handler.class", "io.strimzi.kafka.oauth.client.JaasClientOauthLoginCallbackHandler");

    p.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka:9092");
    p.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    p.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

    p.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "consumer-group");
    p.setProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "10");
    p.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");

    return p;
}
 
Example #7
Source File: ConsumerTTL.java    From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) {
    Properties props = new Properties();
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
            StringDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
            StringDeserializer.class.getName());
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    props.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG,
            ConsumerInterceptorTTL.class.getName());

    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Collections.singletonList(topic));

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
        for (ConsumerRecord<String, String> record : records) {
            System.out.println(record.partition() + ":" + record.offset() + ":" + record.value());
        }
    }
}
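The ConsumerInterceptorTTL class registered above via INTERCEPTOR_CLASSES_CONFIG is not shown on this page. As a rough sketch of what such an interceptor might look like (the 10-second TTL and the filtering logic are assumptions, not the project's actual code), a ConsumerInterceptor can drop records whose timestamps have exceeded a time-to-live before they reach the application:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerInterceptor;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class ConsumerInterceptorTTL implements ConsumerInterceptor<String, String> {

    // Assumed TTL: records older than 10 seconds are silently dropped.
    private static final long EXPIRE_INTERVAL_MS = 10 * 1000;

    @Override
    public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) {
        long now = System.currentTimeMillis();
        Map<TopicPartition, List<ConsumerRecord<String, String>>> fresh = new HashMap<>();
        for (TopicPartition tp : records.partitions()) {
            List<ConsumerRecord<String, String>> kept = new ArrayList<>();
            for (ConsumerRecord<String, String> record : records.records(tp)) {
                // Keep only records whose timestamp is still within the TTL window.
                if (now - record.timestamp() < EXPIRE_INTERVAL_MS) {
                    kept.add(record);
                }
            }
            if (!kept.isEmpty()) {
                fresh.put(tp, kept);
            }
        }
        return new ConsumerRecords<>(fresh);
    }

    @Override
    public void onCommit(Map<TopicPartition, OffsetAndMetadata> offsets) {
        // No-op: nothing to do on offset commit.
    }

    @Override
    public void close() {
        // No resources to release.
    }

    @Override
    public void configure(Map<String, ?> configs) {
        // No configuration needed for this sketch.
    }
}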
 
Example #8
Source File: Kafka08PartitionDiscoverer.java    From Flink-CEPplus with Apache License 2.0
public Kafka08PartitionDiscoverer(
		KafkaTopicsDescriptor topicsDescriptor,
		int indexOfThisSubtask,
		int numParallelSubtasks,
		Properties kafkaProperties) {

	super(topicsDescriptor, indexOfThisSubtask, numParallelSubtasks);

	checkNotNull(kafkaProperties);

	String seedBrokersConfString = kafkaProperties.getProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
	checkArgument(seedBrokersConfString != null && !seedBrokersConfString.isEmpty(),
			"Configuration property %s not set", ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);

	this.seedBrokerAddresses = seedBrokersConfString.split(",");

	// evenly distribute seed brokers across subtasks, to
	// avoid too much pressure on a single broker on startup
	this.currentContactSeedBrokerIndex = indexOfThisSubtask % seedBrokerAddresses.length;

	this.numRetries = getInt(kafkaProperties, GET_PARTITIONS_RETRIES_KEY, DEFAULT_GET_PARTITIONS_RETRIES);
	this.soTimeout = getInt(kafkaProperties, "socket.timeout.ms", 30000);
	this.bufferSize = getInt(kafkaProperties, "socket.receive.buffer.bytes", 65536);
}
 
Example #9
Source File: ConsumerManual.java    From javatech with Creative Commons Attribution Share Alike 4.0 International
public static void main(String[] args) {
	Properties props = new Properties();
	props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, HOST);
	props.put(ConsumerConfig.GROUP_ID_CONFIG, "test");
	props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
	props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
		"org.apache.kafka.common.serialization.StringDeserializer");
	props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
		"org.apache.kafka.common.serialization.StringDeserializer");

	KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
	consumer.subscribe(Arrays.asList("t1", "t2"));
	final int minBatchSize = 200;
	List<ConsumerRecord<String, String>> buffer = new ArrayList<>();
	while (true) {
		ConsumerRecords<String, String> records = consumer.poll(100);
		for (ConsumerRecord<String, String> record : records) {
			buffer.add(record);
		}
		if (buffer.size() >= minBatchSize) {
			// Process the batch here, e.g. persist it to a database
			consumer.commitSync();
			buffer.clear();
		}
	}
}
 
Example #10
Source File: ConsumerTestBase.java    From vertx-kafka-client with Apache License 2.0
@Test
public void testPollTimeout(TestContext ctx) throws Exception {
  Async async = ctx.async();
  String topicName = "testPollTimeout";
  Properties config = kafkaCluster.useTo().getConsumerProperties(topicName, topicName, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

  io.vertx.kafka.client.common.TopicPartition topicPartition = new io.vertx.kafka.client.common.TopicPartition(topicName, 0);
  KafkaConsumer<Object, Object> consumerWithCustomTimeout = KafkaConsumer.create(vertx, config);

  int pollingTimeout = 1500;
  // Set the polling timeout to 1500 ms (default is 1000)
  consumerWithCustomTimeout.pollTimeout(Duration.ofMillis(pollingTimeout));
  // Subscribe to the empty topic (we want the poll() call to timeout!)
  consumerWithCustomTimeout.subscribe(topicName, subscribeRes -> {
    consumerWithCustomTimeout.handler(rec -> {}); // Consumer will now immediately poll once
    long beforeSeek = System.currentTimeMillis();
    consumerWithCustomTimeout.seekToBeginning(topicPartition, seekRes -> {
      long durationWShortTimeout = System.currentTimeMillis() - beforeSeek;
      ctx.assertTrue(durationWShortTimeout >= pollingTimeout, "Operation must take at least as long as the polling timeout");
      consumerWithCustomTimeout.close();
      async.countDown();
    });
  });
}
 
Example #11
Source File: PhysicalPlanBuilderTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
private PhysicalPlanBuilder buildPhysicalPlanBuilder(Map<String, Object> overrideProperties) {
  final StreamsBuilder streamsBuilder = new StreamsBuilder();
  final FunctionRegistry functionRegistry = new FunctionRegistry();
  Map<String, Object> configMap = new HashMap<>();
  configMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
  configMap.put("application.id", "KSQL");
  configMap.put("commit.interval.ms", 0);
  configMap.put("cache.max.bytes.buffering", 0);
  configMap.put("auto.offset.reset", "earliest");
  ksqlConfig = new KsqlConfig(configMap);
  return new PhysicalPlanBuilder(streamsBuilder,
      ksqlConfig,
      new FakeKafkaTopicClient(),
      functionRegistry,
      overrideProperties,
      false,
      metaStore,
      new MockSchemaRegistryClient(),
      testKafkaStreamsBuilder
  );
}
 
Example #12
Source File: KafkaBinderUnitTests.java    From spring-cloud-stream-binder-kafka with Apache License 2.0
@Test
public void testMergedConsumerProperties() {
	KafkaProperties bootProps = new TestKafkaProperties();
	bootProps.getConsumer().getProperties()
			.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "bar");
	KafkaBinderConfigurationProperties props = new KafkaBinderConfigurationProperties(
			bootProps);
	assertThat(props.mergedConsumerConfiguration()
			.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)).isEqualTo("bar");
	props.getConfiguration().put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "baz");
	assertThat(props.mergedConsumerConfiguration()
			.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)).isEqualTo("baz");
	props.getConsumerProperties().put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "qux");
	assertThat(props.mergedConsumerConfiguration()
			.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)).isEqualTo("qux");
}
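Read together, the three assertions trace the merge precedence: values from Boot's KafkaProperties are overridden by the binder's configuration map, which is in turn overridden by the binder's consumer-specific properties.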
 
Example #13
Source File: KafkaStreamsTests.java    From synapse with Apache License 2.0
@Test
public void someTest() throws ExecutionException, InterruptedException {
    Map<String, Object> consumerProps = consumerProps("someTestGroup", "true", this.embeddedKafka);
    consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

    ConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);

    Consumer<String, String> consumer = cf.createConsumer();
    this.embeddedKafka.consumeFromAnEmbeddedTopic(consumer, STREAMING_TOPIC2);

    template.send(STREAMING_TOPIC2, "someTestMessage", "foo").get();

    ConsumerRecord<String, String> replies = getSingleRecord(consumer, STREAMING_TOPIC2, 250L);
    assertThat(replies.key(), is("someTestMessage"));
}
 
Example #14
Source File: JoinNode.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
SchemaKTable tableForJoin(
    final StreamsBuilder builder,
    final KsqlConfig ksqlConfig,
    final KafkaTopicClient kafkaTopicClient,
    final FunctionRegistry functionRegistry,
    final Map<String, Object> props,
    final SchemaRegistryClient schemaRegistryClient) {

  Map<String, Object> joinTableProps = new HashMap<>(props);
  joinTableProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

  final SchemaKStream schemaKStream = right.buildStream(
      builder,
      ksqlConfig,
      kafkaTopicClient,
      functionRegistry,
      joinTableProps, schemaRegistryClient);
  if (!(schemaKStream instanceof SchemaKTable)) {
    throw new KsqlException("Unsupported Join. Only stream-table joins are supported, but was "
        + getLeft() + "-" + getRight());
  }

  return (SchemaKTable) schemaKStream;
}
 
Example #15
Source File: KafkaAbstractSourceTest.java    From pulsar with Apache License 2.0
@Test
public final void loadFromYamlFileTest() throws IOException {
    File yamlFile = getFile("kafkaSourceConfig.yaml");
    KafkaSourceConfig config = KafkaSourceConfig.load(yamlFile.getAbsolutePath());
    assertNotNull(config);
    assertEquals("localhost:6667", config.getBootstrapServers());
    assertEquals("test", config.getTopic());
    assertEquals(Long.parseLong("10000"), config.getSessionTimeoutMs());
    assertEquals(Boolean.parseBoolean("false"), config.isAutoCommitEnabled());
    assertNotNull(config.getConsumerConfigProperties());
    Properties props = new Properties();
    props.putAll(config.getConsumerConfigProperties());
    props.put(ConsumerConfig.GROUP_ID_CONFIG, config.getGroupId());
    assertEquals("test-pulsar-consumer", props.getProperty("client.id"));
    assertEquals("SASL_PLAINTEXT", props.getProperty("security.protocol"));
    assertEquals("test-pulsar-io", props.getProperty(ConsumerConfig.GROUP_ID_CONFIG));
}
 
Example #16
Source File: CleanupTest.java    From vertx-kafka-client with Apache License 2.0
@Test
// Regression test for ISS-73: undeployment of a verticle with unassigned consumer fails
public void testUndeployUnassignedConsumer(TestContext ctx) {
  Properties config = kafkaCluster.useTo().getConsumerProperties("testUndeployUnassignedConsumer_consumer",
    "testUndeployUnassignedConsumer_consumer", OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

  Async async = ctx.async(1);
  vertx.deployVerticle(new AbstractVerticle() {
    @Override
    public void start() {
      KafkaConsumer<String, String> consumer = KafkaConsumer.create(vertx, config);
    }
  }, ctx.asyncAssertSuccess(id -> {
    vertx.undeploy(id, ctx.asyncAssertSuccess(v2 -> async.complete()));
  }));

  async.awaitSuccess(10000);
  waitUntil("Expected " + countThreads("vert.x-kafka-consumer-thread") + " == " + numVertxKafkaConsumerThread, () -> countThreads("vert.x-kafka-consumer-thread") == numKafkaConsumerNetworkThread);
}
 
Example #17
Source File: MetricCollectorsTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void shouldAggregateErrorRatesAcrossProducersAndConsumers() {
  ConsumerCollector consumerCollector = new ConsumerCollector();
  consumerCollector.configure(ImmutableMap.of(ConsumerConfig.GROUP_ID_CONFIG, "groupfoo1"));

  ProducerCollector producerCollector = new ProducerCollector();
  producerCollector.configure(ImmutableMap.of(ProducerConfig.CLIENT_ID_CONFIG, "clientfoo2"));

  for (int i = 0; i < 1000; i++) {
    consumerCollector.recordError(TEST_TOPIC);
    producerCollector.recordError(TEST_TOPIC);
  }

  // We recorded 2000 errors (1000 consumer + 1000 producer) in one sample out of 100,
  // so the effective error rate computed should be 20 for this run.
  assertEquals(20.0, Math.floor(MetricCollectors.currentErrorRate()), 0.1);
}
 
Example #18
Source File: AlarmConfigLogger.java    From phoebus with Eclipse Public License 1.0
public AlarmConfigLogger(String topic, String location, String remoteLocation) {
    super();
    this.topic = topic;
    this.remoteLocation = remoteLocation;

    group_id = "Alarm-" + UUID.randomUUID();

    props = PropertiesHelper.getProperties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "AlarmConfigLogger-streams-" + this.topic);
    if (!props.containsKey(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG)) {
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    }
    props.put("group.id", group_id);
    // make sure to consume the complete topic via "auto.offset.reset = earliest"
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    root = new File(location, this.topic);
    root.mkdirs();

    model = new AlarmClient(props.getProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG), this.topic);
    model.start();

    initialize();
}
 
Example #19
Source File: ConsumeKafkaTest.java    From nifi with Apache License 2.0
@Test
public void validateCustomValidatorSettings() throws Exception {
    ConsumeKafka_1_0 consumeKafka = new ConsumeKafka_1_0();
    TestRunner runner = TestRunners.newTestRunner(consumeKafka);
    runner.setProperty(KafkaProcessorUtils.BOOTSTRAP_SERVERS, "okeydokey:1234");
    runner.setProperty(ConsumeKafka_1_0.TOPICS, "foo");
    runner.setProperty(ConsumeKafka_1_0.GROUP_ID, "foo");
    runner.setProperty(ConsumeKafka_1_0.AUTO_OFFSET_RESET, ConsumeKafka_1_0.OFFSET_EARLIEST);
    runner.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    runner.assertValid();
    runner.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "Foo");
    runner.assertNotValid();
    runner.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    runner.assertValid();
    runner.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    runner.assertValid();
    runner.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
    runner.assertNotValid();
}
 
Example #20
Source File: ConsumeKafka_0_10.java    From localization_nifi with Apache License 2.0
@Override
protected PropertyDescriptor getSupportedDynamicPropertyDescriptor(final String propertyDescriptorName) {
    return new PropertyDescriptor.Builder()
            .description("Specifies the value for '" + propertyDescriptorName + "' Kafka Configuration.")
            .name(propertyDescriptorName)
            .addValidator(new KafkaProcessorUtils.KafkaConfigValidator(ConsumerConfig.class))
            .dynamic(true)
            .build();
}
 
Example #21
Source File: KafkaUtils.java    From doctorkafka with Apache License 2.0
public static KafkaConsumer<byte[], byte[]> getKafkaConsumer(String zkUrl,
                                             String keyDeserializer,
                                             String valueDeserializer,
                                             int maxPollRecords,
                                             SecurityProtocol securityProtocol,
                                             Map<String, String> otherConsumerConfigs) {
  String key = zkUrl;
  if (!kafkaConsumers.containsKey(key)) {
    String brokers = getBrokers(zkUrl, securityProtocol);
    LOG.info("ZkUrl: {}, Brokers: {}", zkUrl, brokers);
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "doctorkafka");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer);
    props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
    props.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, 1048576 * 4);

    if (otherConsumerConfigs != null) {
      for (Map.Entry<String, String> entry : otherConsumerConfigs.entrySet()) {
        props.put(entry.getKey(), entry.getValue());
      }
    }
    kafkaConsumers.put(key, new KafkaConsumer<>(props));
  }
  return kafkaConsumers.get(key);
}
 
Example #22
Source File: UnionListStateExample.java    From flink-learning with Apache License 2.0
public static void main(String[] args) throws Exception {

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    // Checkpoint every 15 seconds
    env.enableCheckpointing(TimeUnit.SECONDS.toMillis(15));
    env.setParallelism(3);

    CheckpointConfig checkpointConf = env.getCheckpointConfig();
    // Use EXACTLY_ONCE checkpoint semantics
    checkpointConf.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
    checkpointConf.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, UnionListStateUtil.broker_list);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "app-pv-stat");

    FlinkKafkaConsumer011<String> kafkaConsumer011 = new FlinkKafkaConsumer011<>(
            // Kafka topic, deserialized as plain strings
            UnionListStateUtil.topic, new SimpleStringSchema(), props);

    env.addSource(kafkaConsumer011)
            .uid(UnionListStateUtil.topic)
            .addSink(new MySink())
            .uid("MySink")
            .name("MySink");

    env.execute("Flink unionListState");
}
 
Example #23
Source File: KafkaRecordsStorage.java    From liiklus with MIT License
@Override
public CompletionStage<Map<Integer, Long>> getEndOffsets(String topic) {
    return Mono.fromCallable(() -> {
        var properties = new HashMap<String, Object>();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "0");
        properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1");

        try (
                var consumer = new KafkaConsumer<ByteBuffer, ByteBuffer>(
                        properties,
                        new ByteBufferDeserializer(),
                        new ByteBufferDeserializer()
                )
        ) {
            consumer.subscribe(List.of(topic));

            var endOffsets = consumer.endOffsets(
                    consumer.partitionsFor(topic).stream()
                            .map(it -> new TopicPartition(topic, it.partition()))
                            .collect(Collectors.toSet())
            );

            return endOffsets.entrySet().stream().collect(Collectors.toMap(
                    it -> it.getKey().partition(),
                    it -> it.getValue() - 1
            ));
        }
    }).subscribeOn(Schedulers.elastic()).toFuture();
}
 
Example #24
Source File: KafkaConsumerConfig.java    From samza with Apache License 2.0
public String getClientId() {
  String clientId = (String) get(ConsumerConfig.CLIENT_ID_CONFIG);
  if (StringUtils.isBlank(clientId)) {
    throw new SamzaException("client Id is not set for consumer for system=" + systemName);
  }
  return clientId;
}
 
Example #25
Source File: OptimizationStream.java    From micronaut-kafka with Apache License 2.0
@Singleton
@Named(STREAM_OPTIMIZATION_ON)
KStream<String, String> optimizationOn(
        @Named(STREAM_OPTIMIZATION_ON) ConfiguredStreamBuilder builder) {
    // set default serdes
    Properties props = builder.getConfiguration();
    props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    KTable<String, String> table = builder
            .table(OPTIMIZATION_ON_INPUT, Materialized.as(OPTIMIZATION_ON_STORE));

    return table.toStream();
}
 
Example #26
Source File: KafkaEventsHandler.java    From apicurio-studio with Apache License 2.0
public synchronized void start() {
    if (consumer != null) {
        return;
    }

    executorService = new ScheduledThreadPoolExecutor(configuration.getKafkaThreads());

    String bootstrapServers = configuration.getKafkaBootstrapServers();
    Properties properties = new Properties();
    properties.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);

    JsonSerde serde = new JsonSerde();

    Properties producerProperties = new Properties();
    producerProperties.putAll(properties);
    producer = new AsyncProducer<>(
        producerProperties,
        new StringSerializer(),
        serde
    );

    String groupId = configuration.getKafkaGroupId();
    Properties consumerProperties = new Properties();
    consumerProperties.putAll(properties);
    // each consumer has its own UNIQUE group, so they all consume all messages;
    // if possible, use a repeatable (but UNIQUE) string, e.g. for a restarted node
    consumerProperties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    consumer = new ConsumerContainer.DynamicPool<>(
        consumerProperties,
        new StringDeserializer(),
        serde,
        configuration.getKafkaTopic(),
        configuration.getKafkaConsumers(),
        Oneof2.first(this::consume)
    );
    consumer.start();
}
 
Example #27
Source File: KafkaExactlyOnceSink.java    From beam with Apache License 2.0
/**
 * Opens a generic consumer that is mainly meant for metadata operations like fetching number of
 * partitions for a topic rather than for fetching messages.
 */
private static Consumer<?, ?> openConsumer(WriteRecords<?, ?> spec) {
  return spec.getConsumerFactoryFn()
      .apply(
          ImmutableMap.of(
              ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
              spec.getProducerConfig().get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG),
              ConsumerConfig.GROUP_ID_CONFIG,
              spec.getSinkGroupId(),
              ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
              ByteArrayDeserializer.class,
              ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
              ByteArrayDeserializer.class));
}
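Given the javadoc's note that this consumer exists mainly for metadata lookups, a call site might look like the following sketch. It is hypothetical: spec and the topic name stand in for values supplied by the surrounding transform, which is not shown here.

// Hypothetical metadata-only use of the consumer returned above.
try (Consumer<?, ?> consumer = openConsumer(spec)) {
    int numPartitions = consumer.partitionsFor("output-topic").size(); // placeholder topic
}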
 
Example #28
Source File: OffsetFetcher.java    From mirus with BSD 3-Clause "New" or "Revised" License
OffsetFetcher(final WorkerConfig config, Converter internalConverter) {
  String topic = config.getString(DistributedConfig.OFFSET_STORAGE_TOPIC_CONFIG);
  if ("".equals(topic)) {
    throw new ConfigException("Offset storage topic must be specified");
  }

  Map<String, Object> producerProps = new HashMap<>(config.originals());
  producerProps.put(
      ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
  producerProps.put(
      ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
  producerProps.put(ProducerConfig.RETRIES_CONFIG, Integer.MAX_VALUE);

  Map<String, Object> consumerProps = new HashMap<>(config.originals());
  consumerProps.put(
      ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
  consumerProps.put(
      ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

  Callback<ConsumerRecord<byte[], byte[]>> consumedCallback =
      (error, record) -> {
        ByteBuffer key = record.key() != null ? ByteBuffer.wrap(record.key()) : null;
        ByteBuffer value = record.value() != null ? ByteBuffer.wrap(record.value()) : null;
        data.put(key, value);
      };
  this.offsetLog =
      new KafkaBasedLog<>(
          topic, producerProps, consumerProps, consumedCallback, Time.SYSTEM, null);
  this.internalConverter = internalConverter;
}
 
Example #29
Source File: LiKafkaConsumerIntegrationTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Test
public void testPosition() throws Exception {
  String topic = "testSeek";
  createTopic(topic);
  TopicPartition tp = new TopicPartition(topic, 0);
  TopicPartition tp1 = new TopicPartition(topic, 1);
  produceSyntheticMessages(topic);

  // Reset to earliest
  Properties props = new Properties();
  props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
  props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "testPosition1");
  try (LiKafkaConsumer<String, String> consumer = createConsumer(props)) {
    consumer.assign(Arrays.asList(tp, tp1));
    assertEquals(0, consumer.position(tp));
  }

  // Reset to latest
  props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
  props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "testPosition2");
  try (LiKafkaConsumer<String, String> consumer = createConsumer(props)) {
    consumer.assign(Arrays.asList(tp, tp1));
    assertEquals(consumer.position(tp), 10);
  }

  props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");
  props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "testPosition3");
  try (LiKafkaConsumer<String, String> consumer = createConsumer(props)) {
    consumer.assign(Arrays.asList(tp, tp1));
    consumer.position(tp);
    fail("Should have thrown NoOffsetForPartitionException");
  } catch (NoOffsetForPartitionException nofpe) {
    // let it go.
  }
}
 
Example #30
Source File: KafkaIO.java    From DataflowTemplates with Apache License 2.0
@SuppressWarnings("unchecked")
@Override
public void populateDisplayData(DisplayData.Builder builder) {
  super.populateDisplayData(builder);
  ValueProvider<List<String>> topics = getTopics();
  List<TopicPartition> topicPartitions = getTopicPartitions();
  if (topics != null) {
    if (topics.isAccessible()) {
      builder.add(DisplayData.item("topics", Joiner.on(",").join(topics.get()))
                      .withLabel("Topic/s"));
    } else {
      builder.add(DisplayData.item("topics", topics).withLabel("Topic/s"));
    }
  } else if (topicPartitions.size() > 0) {
    builder.add(
        DisplayData.item("topicPartitions", Joiner.on(",").join(topicPartitions))
            .withLabel("Topic Partition/s"));
  }
  builder.add(DisplayData.item(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, getBootstrapServers()));
  Set<String> ignoredConsumerPropertiesKeys = IGNORED_CONSUMER_PROPERTIES.keySet();
  for (Map.Entry<String, Object> conf : getConsumerConfig().entrySet()) {
    String key = conf.getKey();
    if (!ignoredConsumerPropertiesKeys.contains(key)) {
      Object value =
          DisplayData.inferType(conf.getValue()) != null
              ? conf.getValue()
              : String.valueOf(conf.getValue());
      builder.add(DisplayData.item(key, ValueProvider.StaticValueProvider.of(value)));
    }
  }
}