Java Code Examples for org.apache.kafka.clients.consumer.ConsumerConfig

The following examples show how to use org.apache.kafka.clients.consumer.ConsumerConfig. They are extracted from open source projects; where available, the Source Project, Source File, and License lines above an example identify its origin.
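As a quick orientation before the examples: ConsumerConfig exposes the consumer's configuration keys as String constants (for instance, ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG is "bootstrap.servers"), so configuration code avoids hard-coded key strings. A minimal sketch, assuming a local broker at localhost:9092 and a placeholder group id and topic name:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class MinimalConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker address, group id, and topic below are placeholders for this sketch.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic"));
            consumer.poll(Duration.ofSeconds(1))
                    .forEach(r -> System.out.println(r.key() + " = " + r.value()));
        }
    }
}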
Example 1
Source Project: flink-statefun   Source File: KafkaIngressBuilderTest.java    License: Apache License 2.0
@Test
public void namedMethodConfigValuesOverwriteProperties() {
  Properties properties = new Properties();
  properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "should-be-overwritten");

  KafkaIngressBuilder<String> builder =
      KafkaIngressBuilder.forIdentifier(DUMMY_ID)
          .withKafkaAddress("localhost:8082")
          .withTopic("topic")
          .withConsumerGroupId("test-group")
          .withDeserializer(NoOpDeserializer.class)
          .withProperties(properties);

  KafkaIngressSpec<String> spec = builder.build();

  assertThat(
      spec.properties(), hasProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:8082"));
}
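As the test name says, the named builder methods win: although the Properties passed to withProperties set bootstrap.servers to "should-be-overwritten", the built spec carries the address given to withKafkaAddress.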
 
Example 2
static Properties buildConsumerConfig(String accessToken) {
    Properties p = new Properties();
    p.setProperty("security.protocol", "SASL_PLAINTEXT");
    p.setProperty("sasl.mechanism", "OAUTHBEARER");
    p.setProperty("sasl.jaas.config", "org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required " +
            " oauth.access.token=\"" + accessToken + "\";");
    p.setProperty("sasl.login.callback.handler.class", "io.strimzi.kafka.oauth.client.JaasClientOauthLoginCallbackHandler");

    p.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka:9092");
    p.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    p.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

    p.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "consumer-group");
    p.setProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "10");
    p.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");

    return p;
}
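A hedged usage sketch for the helper above (the token value and topic name are illustrative placeholders, not from the original project):

// Illustrative only: the token and topic are assumptions.
Properties config = buildConsumerConfig("<access-token>");
try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(config)) {
    consumer.subscribe(Collections.singletonList("my-topic"));
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
    records.forEach(r -> System.out.println(r.value()));
}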
 
Example 3
Source Project: Flink-CEPplus   Source File: Kafka08PartitionDiscoverer.java    License: Apache License 2.0
public Kafka08PartitionDiscoverer(
		KafkaTopicsDescriptor topicsDescriptor,
		int indexOfThisSubtask,
		int numParallelSubtasks,
		Properties kafkaProperties) {

	super(topicsDescriptor, indexOfThisSubtask, numParallelSubtasks);

	checkNotNull(kafkaProperties);

	String seedBrokersConfString = kafkaProperties.getProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
	checkArgument(seedBrokersConfString != null && !seedBrokersConfString.isEmpty(),
			"Configuration property %s not set", ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);

	this.seedBrokerAddresses = seedBrokersConfString.split(",");

	// evenly distribute seed brokers across subtasks, to
	// avoid too much pressure on a single broker on startup
	this.currentContactSeedBrokerIndex = indexOfThisSubtask % seedBrokerAddresses.length;

	this.numRetries = getInt(kafkaProperties, GET_PARTITIONS_RETRIES_KEY, DEFAULT_GET_PARTITIONS_RETRIES);
	this.soTimeout = getInt(kafkaProperties, "socket.timeout.ms", 30000);
	this.bufferSize = getInt(kafkaProperties, "socket.receive.buffer.bytes", 65536);
}
 
Example 4
private PhysicalPlanBuilder buildPhysicalPlanBuilder(Map<String, Object> overrideProperties) {
  final StreamsBuilder streamsBuilder = new StreamsBuilder();
  final FunctionRegistry functionRegistry = new FunctionRegistry();
  Map<String, Object> configMap = new HashMap<>();
  configMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
  configMap.put("application.id", "KSQL");
  configMap.put("commit.interval.ms", 0);
  configMap.put("cache.max.bytes.buffering", 0);
  configMap.put("auto.offset.reset", "earliest");
  ksqlConfig = new KsqlConfig(configMap);
  return new PhysicalPlanBuilder(streamsBuilder,
      ksqlConfig,
      new FakeKafkaTopicClient(),
      functionRegistry,
      overrideProperties,
      false,
      metaStore,
      new MockSchemaRegistryClient(),
      testKafkaStreamsBuilder
  );
}
 
Example 5
Source Project: pulsar   Source File: KafkaAbstractSourceTest.java    License: Apache License 2.0
@Test
public final void loadFromYamlFileTest() throws IOException {
    File yamlFile = getFile("kafkaSourceConfig.yaml");
    KafkaSourceConfig config = KafkaSourceConfig.load(yamlFile.getAbsolutePath());
    assertNotNull(config);
    assertEquals("localhost:6667", config.getBootstrapServers());
    assertEquals("test", config.getTopic());
    assertEquals(10000L, config.getSessionTimeoutMs());
    assertFalse(config.isAutoCommitEnabled());
    assertNotNull(config.getConsumerConfigProperties());
    Properties props = new Properties();
    props.putAll(config.getConsumerConfigProperties());
    props.put(ConsumerConfig.GROUP_ID_CONFIG, config.getGroupId());
    assertEquals("test-pulsar-consumer", props.getProperty("client.id"));
    assertEquals("SASL_PLAINTEXT", props.getProperty("security.protocol"));
    assertEquals("test-pulsar-io", props.getProperty(ConsumerConfig.GROUP_ID_CONFIG));
}
 
Example 6
Source Project: phoebus   Source File: AlarmConfigLogger.java    License: Eclipse Public License 1.0
public AlarmConfigLogger(String topic, String location, String remoteLocation) {
    super();
    this.topic = topic;
    this.remoteLocation = remoteLocation;

    group_id = "Alarm-" + UUID.randomUUID();

    props = PropertiesHelper.getProperties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "AlarmConfigLogger-streams-" + this.topic);
    if (!props.containsKey(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG)) {
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    }
    props.put("group.id", group_id);
    // make sure to consume the complete topic via "auto.offset.reset = earliest"
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    root = new File(location, this.topic);
    root.mkdirs();

    model = new AlarmClient(props.getProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG), this.topic);
    model.start();

    initialize();
}
 
Example 7
Source Project: Mastering-Distributed-Tracing   Source File: KafkaConfig.java    License: MIT License
private ConsumerFactory<String, Message> consumerFactory() throws Exception {
    Map<String, Object> props = new HashMap<>();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, clientId());
    props.put(ConsumerConfig.GROUP_ID_CONFIG, app.name);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 1000);
    props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "100");
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
    props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");

    return new TracingConsumerFactory<>( //
            new DefaultKafkaConsumerFactory<String, Message>( //
                    props, //
                    new StringDeserializer(), //
                    new JsonDeserializer<>(Message.class)));
}
 
Example 8
Source Project: nifi   Source File: ConsumeKafkaTest.java    License: Apache License 2.0
@Test
public void validateCustomValidatorSettings() throws Exception {
    ConsumeKafka_1_0 consumeKafka = new ConsumeKafka_1_0();
    TestRunner runner = TestRunners.newTestRunner(consumeKafka);
    runner.setProperty(KafkaProcessorUtils.BOOTSTRAP_SERVERS, "okeydokey:1234");
    runner.setProperty(ConsumeKafka_1_0.TOPICS, "foo");
    runner.setProperty(ConsumeKafka_1_0.GROUP_ID, "foo");
    runner.setProperty(ConsumeKafka_1_0.AUTO_OFFSET_RESET, ConsumeKafka_1_0.OFFSET_EARLIEST);
    runner.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    runner.assertValid();
    runner.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "Foo");
    runner.assertNotValid();
    runner.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    runner.assertValid();
    runner.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    runner.assertValid();
    runner.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
    runner.assertNotValid();
}
 
Example 9
Source Project: common-kafka   Source File: KafkaAdminClientTest.java    License: Apache License 2.0
@Test
public void getConsumerGroupSummary() {
    client.createTopic(testName.getMethodName(), 1, 1);

    Properties properties = new Properties();
    properties.putAll(KafkaTests.getProps());
    properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class.getName());
    properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class.getName());
    properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testName.getMethodName());
    properties.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testName.getMethodName() + "-client-id");

    try (Consumer<Object, Object> consumer = new KafkaConsumer<>(properties)) {
        consumer.subscribe(Arrays.asList(testName.getMethodName()));
        consumer.poll(Duration.ofSeconds(5L));

        AdminClient.ConsumerGroupSummary summary = client.getConsumerGroupSummary(testName.getMethodName());
        assertThat("Expected only 1 consumer summary when getConsumerGroupSummaries(" + testName.getMethodName() + ")",
                convertToJavaSet(summary.consumers().get().iterator()).size(), is(1));

        assertThat(summary.state(), is(notNullValue()));
        assertThat(summary.coordinator(), is(notNullValue()));
        assertThat(summary.assignmentStrategy(), is(notNullValue()));
    }
}
 
Example 10
@Test
public void shouldAggregateErrorRatesAcrossProducersAndConsumers() {
  ConsumerCollector consumerCollector = new ConsumerCollector();
  consumerCollector.configure(ImmutableMap.of(ConsumerConfig.GROUP_ID_CONFIG, "groupfoo1"));

  ProducerCollector producerCollector = new ProducerCollector();
  producerCollector.configure(ImmutableMap.of(ProducerConfig.CLIENT_ID_CONFIG, "clientfoo2"));

  for (int i = 0; i < 1000; i++) {
    consumerCollector.recordError(TEST_TOPIC);
    producerCollector.recordError(TEST_TOPIC);
  }

  // The 2000 recorded errors land in one sample out of 100, so the effective
  // error rate for this run should compute to 2000 / 100 = 20.
  assertEquals(20.0, Math.floor(MetricCollectors.currentErrorRate()), 0.1);
}
 
Example 11
SchemaKTable tableForJoin(
    final StreamsBuilder builder,
    final KsqlConfig ksqlConfig,
    final KafkaTopicClient kafkaTopicClient,
    final FunctionRegistry functionRegistry,
    final Map<String, Object> props,
    final SchemaRegistryClient schemaRegistryClient) {

  Map<String, Object> joinTableProps = new HashMap<>(props);
  joinTableProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

  final SchemaKStream schemaKStream = right.buildStream(
      builder,
      ksqlConfig,
      kafkaTopicClient,
      functionRegistry,
      joinTableProps, schemaRegistryClient);
  if (!(schemaKStream instanceof SchemaKTable)) {
    throw new KsqlException("Unsupported Join. Only stream-table joins are supported, but was "
        + getLeft() + "-" + getRight());
  }

  return (SchemaKTable) schemaKStream;
}
 
Example 12
@Test
public void testMergedConsumerProperties() {
	KafkaProperties bootProps = new TestKafkaProperties();
	bootProps.getConsumer().getProperties()
			.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "bar");
	KafkaBinderConfigurationProperties props = new KafkaBinderConfigurationProperties(
			bootProps);
	assertThat(props.mergedConsumerConfiguration()
			.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)).isEqualTo("bar");
	props.getConfiguration().put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "baz");
	assertThat(props.mergedConsumerConfiguration()
			.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)).isEqualTo("baz");
	props.getConsumerProperties().put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "qux");
	assertThat(props.mergedConsumerConfiguration()
			.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)).isEqualTo("qux");
}
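The test pins down the merge precedence: Boot's consumer properties are overridden by the binder's configuration map, which is in turn overridden by the binder's consumer properties.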
 
Example 13
Source Project: synapse   Source File: KafkaStreamsTests.java    License: Apache License 2.0
@Test
public void someTest() throws ExecutionException, InterruptedException {
    Map<String, Object> consumerProps = consumerProps("someTestGroup", "true", this.embeddedKafka);
    consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

    ConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);

    Consumer<String, String> consumer = cf.createConsumer();
    this.embeddedKafka.consumeFromAnEmbeddedTopic(consumer, STREAMING_TOPIC2);

    template.send(STREAMING_TOPIC2, "someTestMessage", "foo").get();

    ConsumerRecord<String, String> replies = getSingleRecord(consumer, STREAMING_TOPIC2, 250L);
    assertThat(replies.key(), is("someTestMessage"));
}
 
Example 14
Source Project: vertx-kafka-client   Source File: ConsumerTestBase.java    License: Apache License 2.0
@Test
public void testPollTimeout(TestContext ctx) throws Exception {
  Async async = ctx.async();
  String topicName = "testPollTimeout";
  Properties config = kafkaCluster.useTo().getConsumerProperties(topicName, topicName, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

  io.vertx.kafka.client.common.TopicPartition topicPartition = new io.vertx.kafka.client.common.TopicPartition(topicName, 0);
  KafkaConsumer<Object, Object> consumerWithCustomTimeout = KafkaConsumer.create(vertx, config);

  int pollingTimeout = 1500;
  // Set the polling timeout to 1500 ms (default is 1000)
  consumerWithCustomTimeout.pollTimeout(Duration.ofMillis(pollingTimeout));
  // Subscribe to the empty topic (we want the poll() call to timeout!)
  consumerWithCustomTimeout.subscribe(topicName, subscribeRes -> {
    consumerWithCustomTimeout.handler(rec -> {}); // Consumer will now immediately poll once
    long beforeSeek = System.currentTimeMillis();
    consumerWithCustomTimeout.seekToBeginning(topicPartition, seekRes -> {
      long durationWShortTimeout = System.currentTimeMillis() - beforeSeek;
      ctx.assertTrue(durationWShortTimeout >= pollingTimeout, "Operation must take at least as long as the polling timeout");
      consumerWithCustomTimeout.close();
      async.countDown();
    });
  });
}
 
Example 15
public static void main(String[] args) {
	Properties props = new Properties();
	props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, HOST);
	props.put(ConsumerConfig.GROUP_ID_CONFIG, "test");
	props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
	props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
		"org.apache.kafka.common.serialization.StringDeserializer");
	props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
		"org.apache.kafka.common.serialization.StringDeserializer");

	KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
	consumer.subscribe(Arrays.asList("t1", "t2"));
	final int minBatchSize = 200;
	List<ConsumerRecord<String, String>> buffer = new ArrayList<>();
	while (true) {
		ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
		for (ConsumerRecord<String, String> record : records) {
			buffer.add(record);
		}
		if (buffer.size() >= minBatchSize) {
			// process the batch here, e.g. persist it to a database
			consumer.commitSync();
			buffer.clear();
		}
	}
}
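Design note: disabling enable.auto.commit and calling commitSync() only after a batch has been processed gives at-least-once semantics; if the consumer dies before the commit, the uncommitted records are redelivered after restart.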
 
Example 16
Source Project: rya   Source File: KafkaRyaStreamsClientFactory.java    License: Apache License 2.0
/**
 * Create a {@link Consumer} that has a unique group ID and reads everything from a topic in Kafka
 * starting at the earliest point by default.
 *
 * @param kafkaHostname - The Kafka broker hostname. (not null)
 * @param kafkaPort - The Kafka broker port.
 * @param keyDeserializerClass - Deserializes the keys. (not null)
 * @param valueDeserializerClass - Deserializes the values. (not null)
 * @return A {@link Consumer} that can be used to read records from a topic.
 */
private static <K, V> Consumer<K, V> fromStartConsumer(
        final String kafkaHostname,
        final int kafkaPort,
        final Class<? extends Deserializer<K>> keyDeserializerClass,
        final Class<? extends Deserializer<V>> valueDeserializerClass) {
    requireNonNull(kafkaHostname);
    requireNonNull(keyDeserializerClass);
    requireNonNull(valueDeserializerClass);

    final Properties consumerProps = new Properties();
    consumerProps.setProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, kafkaHostname + ":" + kafkaPort);
    consumerProps.setProperty(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
    consumerProps.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, UUID.randomUUID().toString());
    consumerProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    consumerProps.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass.getName());
    consumerProps.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass.getName());
    return new KafkaConsumer<>(consumerProps);
}
 
Example 17
Source Project: joyqueue   Source File: Consumer.java    License: Apache License 2.0
public Consumer(String topic) {
    //super("KafkaConsumerExample", false);
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KafkaConfigs.BOOTSTRAP);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, KafkaConfigs.GROUP_ID);
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, KafkaConfigs.GROUP_ID);
//    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
//    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
//    props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
    props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");

    consumer = new KafkaConsumer<>(props);
    this.topic = topic;
}
 
Example 18
Source Project: BigData-In-Practice   Source File: ConsumerTTL.java    License: Apache License 2.0
public static void main(String[] args) {
    Properties props = new Properties();
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
            StringDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
            StringDeserializer.class.getName());
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    props.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG,
            ConsumerInterceptorTTL.class.getName());

    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Collections.singletonList(topic));

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
        for (ConsumerRecord<String, String> record : records) {
            System.out.println(record.partition() + ":" + record.offset() + ":" + record.value());
        }
    }
}
 
Example 19
Source Project: vertx-kafka-client   Source File: CleanupTest.java    License: Apache License 2.0
@Test
// Regression test for ISS-73: undeployment of a verticle with unassigned consumer fails
public void testUndeployUnassignedConsumer(TestContext ctx) {
  Properties config = kafkaCluster.useTo().getConsumerProperties("testUndeployUnassignedConsumer_consumer",
    "testUndeployUnassignedConsumer_consumer", OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

  Async async = ctx.async(1);
  vertx.deployVerticle(new AbstractVerticle() {
    @Override
    public void start() {
      KafkaConsumer<String, String> consumer = KafkaConsumer.create(vertx, config);
    }
  }, ctx.asyncAssertSuccess(id -> {
    vertx.undeploy(id, ctx.asyncAssertSuccess(v2 -> async.complete()));
  }));

  async.awaitSuccess(10000);
  waitUntil("Expected " + countThreads("vert.x-kafka-consumer-thread") + " == " + numVertxKafkaConsumerThread, () -> countThreads("vert.x-kafka-consumer-thread") == numVertxKafkaConsumerThread);
}
 
Example 20
/**
 * Initializes the consumer properties, applying a default group id when none is configured.
 *
 * @return Properties
 */
Properties initProperties(Map propertiesMap) {
    Properties properties = PropertyUtils.getProperties(propertiesMap);
    //default properties
    if (!properties.containsKey(ConsumerConfig.GROUP_ID_CONFIG))
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, getGroupId());
    return properties;
}
 
Example 21
Source Project: DataflowTemplates   Source File: KafkaIO.java    License: Apache License 2.0
@SuppressWarnings("unchecked")
@Override
public void populateDisplayData(DisplayData.Builder builder) {
  super.populateDisplayData(builder);
  ValueProvider<List<String>> topics = getTopics();
  List<TopicPartition> topicPartitions = getTopicPartitions();
  if (topics != null) {
    if (topics.isAccessible()) {
      builder.add(DisplayData.item("topics", Joiner.on(",").join(topics.get()))
                      .withLabel("Topic/s"));
    } else {
      builder.add(DisplayData.item("topics", topics).withLabel("Topic/s"));
    }
  } else if (topicPartitions.size() > 0) {
    builder.add(
        DisplayData.item("topicPartitions", Joiner.on(",").join(topicPartitions))
            .withLabel("Topic Partition/s"));
  }
  builder.add(DisplayData.item(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, getBootstrapServers()));
  Set<String> ignoredConsumerPropertiesKeys = IGNORED_CONSUMER_PROPERTIES.keySet();
  for (Map.Entry<String, Object> conf : getConsumerConfig().entrySet()) {
    String key = conf.getKey();
    if (!ignoredConsumerPropertiesKeys.contains(key)) {
      Object value =
          DisplayData.inferType(conf.getValue()) != null
              ? conf.getValue()
              : String.valueOf(conf.getValue());
      builder.add(DisplayData.item(key, ValueProvider.StaticValueProvider.of(value)));
    }
  }
}
 
Example 22
@Test
public void shouldRunQueryAgainstKafkaClusterOverSsl() throws Exception {
  // Given:
  givenAllowAcl(ALL_USERS, ResourceType.CLUSTER, "kafka-cluster",
                ImmutableSet.of(AclOperation.DESCRIBE_CONFIGS, AclOperation.CREATE));

  givenAllowAcl(ALL_USERS, ResourceType.TOPIC, "*",
                ImmutableSet.of(AclOperation.DESCRIBE, AclOperation.READ,
                                AclOperation.WRITE, AclOperation.DELETE));

  givenAllowAcl(ALL_USERS, ResourceType.GROUP, "*",
                ImmutableSet.of(AclOperation.DESCRIBE, AclOperation.READ));

  final Map<String, Object> configs = getBaseKsqlConfig();
  configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
              SECURE_CLUSTER.bootstrapServers(SecurityProtocol.SSL));

  // Additional Properties required for KSQL to talk to cluster over SSL:
  configs.put("security.protocol", "SSL");
  configs.put("ssl.truststore.location", ClientTrustStore.trustStorePath());
  configs.put("ssl.truststore.password", ClientTrustStore.trustStorePassword());

  givenTestSetupWithConfig(configs);

  // Then:
  assertCanRunSimpleKsqlQuery();
}
 
Example 23
Source Project: quarkus   Source File: KafkaProducerTest.java    License: Apache License 2.0
public static KafkaConsumer<Integer, String> createConsumer() {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:19092");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "test");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    KafkaConsumer<Integer, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Collections.singletonList("test"));
    return consumer;
}
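A possible way to exercise the consumer returned above (the poll duration and output format are illustrative assumptions, not part of the original test):

KafkaConsumer<Integer, String> consumer = createConsumer();
ConsumerRecords<Integer, String> records = consumer.poll(Duration.ofSeconds(2));
records.forEach(r -> System.out.printf("key=%d value=%s%n", r.key(), r.value()));
consumer.close();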
 
Example 24
Source Project: uReplicator   Source File: KafkaClusterObserver.java    License: Apache License 2.0
public KafkaClusterObserver(String bootstrapServer) {
  Properties properties = new Properties();
  properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer);
  properties.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, String.valueOf(false));
  properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
      DESERIALIZER_CLASS);
  properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
      DESERIALIZER_CLASS);
  this.kafkaConsumer = new KafkaConsumer<>(properties);
}
 
Example 25
Source Project: twister2   Source File: KafkaExample.java    License: Apache License 2.0
@Override
public Properties getConsumerProperties() {
  Properties props = new Properties();
  props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, cfg.getStringValue(CLI_SERVER));
  props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
      "org.apache.kafka.common.serialization.StringDeserializer");
  props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
      "org.apache.kafka.common.serialization.StringDeserializer");
  return props;
}
 
Example 26
Source Project: kafka-examples   Source File: BasicConsumeLoop.java    License: Apache License 2.0
private static Properties getConsumerConfigs(Namespace result) {
	Properties configs = new Properties();
	configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, result.getString("bootstrap.servers"));
	configs.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, result.getString("auto.offset.reset"));
	configs.put(ConsumerConfig.GROUP_ID_CONFIG, result.getString("groupId"));
	configs.put(ConsumerConfig.CLIENT_ID_CONFIG, result.getString("clientId"));
	configs.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, result.getString("max.partition.fetch.bytes"));

	configs.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
	configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, CustomDeserializer.class.getName());
	configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, CustomDeserializer.class.getName());
	return configs;
}
 
Example 27
Source Project: blog   Source File: SchemaRegistryConsumer.java    License: MIT License
public static void main(String[] args) {

    // Set up the consumer properties.
    Properties properties = new Properties();
    // Kafka broker addresses.
    properties.put(
        ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "node-160:9092,node-161:9092,node-162:9092");
    // Key deserializer class.
    properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    // Value deserializer class.
    properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class);
    // Consumer group.
    properties.put(ConsumerConfig.GROUP_ID_CONFIG, "consumer_group_schema");

    // Schema registry endpoint.
    properties.put("schema.registry.url", "http://node-160:8081");

    // Create the consumer.
    KafkaConsumer<String, GenericRecord> consumer = new KafkaConsumer<>(properties);

    // Subscribe to the topic; Pattern.compile("...") can be used for regex subscriptions.
    consumer.subscribe(Arrays.asList("topic01"));

    // Poll the topic in a loop.
    try {
      while (true) {
        // Wait up to one second for new records.
        ConsumerRecords<String, GenericRecord> consumerRecords =
            consumer.poll(Duration.ofSeconds(1));
        consumerRecords.forEach(
            r ->
                System.out.printf(
                    "partition = %d, offset = %d, key = %s, value = %s%n",
                    r.partition(), r.offset(), r.key(), r.value()));
      }
    } finally {
      // Close the consumer.
      consumer.close();
    }
}
 
Example 28
Source Project: skywalking   Source File: ConsumerConstructorInterceptor.java    License: Apache License 2.0
@Override
public void onConstruct(EnhancedInstance objInst, Object[] allArguments) {
    ConsumerConfig config = (ConsumerConfig) allArguments[0];
    // set the bootstrap server address
    ConsumerEnhanceRequiredInfo requiredInfo = new ConsumerEnhanceRequiredInfo();
    requiredInfo.setBrokerServers(config.getList("bootstrap.servers"));
    requiredInfo.setGroupId(config.getString("group.id"));
    objInst.setSkyWalkingDynamicField(requiredInfo);
}
 
Example 29
Source Project: pitchfork   Source File: HaystackKafkaForwarderTest.java    License: Apache License 2.0
/**
 * Create consumer and subscribe to spans topic.
 */
private KafkaConsumer<String, byte[]> setupConsumer() {
    KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(
            ImmutableMap.of(
                    ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaContainer.getBootstrapServers(),
                    ConsumerConfig.GROUP_ID_CONFIG, "test-group",
                    ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"
            ),
            new StringDeserializer(),
            new ByteArrayDeserializer()
    );
    consumer.subscribe(singletonList("proto-spans"));

    return consumer;
}
 
Example 30
Source Project: pitchfork   Source File: KafkaConsumerLoop.java    License: Apache License 2.0
private KafkaConsumer<String, byte[]> kafkaConsumer(String kafkaBrokers, Map<String, String> propertiesOverrides) {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBrokers);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "pitchfork");

    props.putAll(propertiesOverrides);

    return new KafkaConsumer<>(props, new StringDeserializer(), new ByteArrayDeserializer());
}
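Because propertiesOverrides is applied with putAll after the defaults, any key it supplies (including bootstrap.servers or group.id) takes precedence over the hard-coded values.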