org.apache.kafka.common.utils.AppInfoParser Java Examples

The following examples show how to use org.apache.kafka.common.utils.AppInfoParser. Each example is taken from an open-source project; the source file, project, and license are noted above the code.
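
Before the project-specific examples, here is a minimal, self-contained sketch of the class's core API. It is not taken from any of the projects below; the JMX prefix and client id are placeholder values. registerAppInfo publishes an app-info MBean carrying the Kafka clients version and commit id, getVersion() returns that version string, and unregisterAppInfo removes the MBean on shutdown. The two-argument overloads used here match the older clients API seen in Examples #2 and #22; newer clients also take a Metrics instance, as in Examples #1 and #4.

import org.apache.kafka.common.utils.AppInfoParser;

public class AppInfoLifecycle {
    // Placeholder values; real clients derive these from their configuration.
    private static final String JMX_PREFIX = "example-app";
    private final String clientId = "example-client-1";

    public void start() {
        // Publishes an app-info MBean exposing the Kafka clients version and commit id.
        AppInfoParser.registerAppInfo(JMX_PREFIX, clientId);
        System.out.println("Kafka clients version: " + AppInfoParser.getVersion());
    }

    public void stop() {
        // Remove the MBean so repeated start/stop cycles do not leak registrations.
        AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId);
    }
}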
Example #1
Source File: KarelDbLeaderElector.java    From kareldb with Apache License 2.0
private void stop(boolean swallowException) {
    LOG.trace("Stopping the group member.");

    // Interrupt any outstanding poll calls
    if (client != null) {
        client.wakeup();
    }

    // Wait for processing thread to complete
    if (executor != null) {
        executor.shutdown();
        try {
            executor.awaitTermination(30, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(
                "Interrupted waiting for group processing thread to exit",
                e
            );
        }
    }

    // Do final cleanup
    AtomicReference<Throwable> firstException = new AtomicReference<>();
    this.stopped.set(true);
    closeQuietly(coordinator, "coordinator", firstException);
    closeQuietly(metrics, "consumer metrics", firstException);
    closeQuietly(client, "consumer network client", firstException);
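    // Unregister the app-info MBean that was registered when this member was created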
    AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId, metrics);
    if (firstException.get() != null && !swallowException) {
        throw new KafkaException(
            "Failed to stop the group member",
            firstException.get()
        );
    } else {
        LOG.debug("The group member has stopped.");
    }
}
 
Example #2
Source File: WorkerGroupMember.java    From DataLink with Apache License 2.0
private void stop(boolean swallowException) {
    log.trace("Stopping the Connect group member.");
    AtomicReference<Throwable> firstException = new AtomicReference<Throwable>();
    this.stopped = true;
    ClientUtils.closeQuietly(coordinator, "coordinator", firstException);
    ClientUtils.closeQuietly(metrics, "consumer metrics", firstException);
    ClientUtils.closeQuietly(client, "consumer network client", firstException);
    AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId);
    if (firstException.get() != null && !swallowException) {
        throw new KafkaException("Failed to stop the Connect group member", firstException.get());
    } else {
        log.debug("The Connect group member has stopped.");
    }
}
 
Example #3
Source File: IgniteSourceConnector.java    From ignite with Apache License 2.0
/** {@inheritDoc} */
@Override public String version() {
    return AppInfoParser.getVersion();
}
 
Example #4
Source File: KarelDbLeaderElector.java    From kareldb with Apache License 2.0
public KarelDbLeaderElector(KarelDbConfig config, KarelDbEngine engine) throws KarelDbElectionException {
    try {
        this.engine = engine;
        this.clientId = "kdb-" + KDB_CLIENT_ID_SEQUENCE.getAndIncrement();

        this.myIdentity = findIdentity(
            config.getList(KarelDbConfig.LISTENERS_CONFIG),
            config.getBoolean(KarelDbConfig.LEADER_ELIGIBILITY_CONFIG));

        Map<String, String> metricsTags = new LinkedHashMap<>();
        metricsTags.put("client-id", clientId);
        MetricConfig metricConfig = new MetricConfig().tags(metricsTags);
        List<MetricsReporter> reporters = Collections.singletonList(new JmxReporter(JMX_PREFIX));
        Time time = Time.SYSTEM;

        ClientConfig clientConfig = new ClientConfig(config.originalsWithPrefix("kafkacache."), false);

        this.metrics = new Metrics(metricConfig, reporters, time);
        this.retryBackoffMs = clientConfig.getLong(CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG);
        String groupId = config.getString(KarelDbConfig.CLUSTER_GROUP_ID_CONFIG);
        LogContext logContext = new LogContext("[KarelDB clientId=" + clientId + ", groupId="
            + groupId + "] ");
        this.metadata = new Metadata(
            retryBackoffMs,
            clientConfig.getLong(CommonClientConfigs.METADATA_MAX_AGE_CONFIG),
            logContext,
            new ClusterResourceListeners()
        );
        List<String> bootstrapServers
            = config.getList(KarelDbConfig.KAFKACACHE_BOOTSTRAP_SERVERS_CONFIG);
        List<InetSocketAddress> addresses = ClientUtils.parseAndValidateAddresses(bootstrapServers,
            clientConfig.getString(CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG));
        this.metadata.bootstrap(addresses);
        String metricGrpPrefix = "kareldb";

        ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(clientConfig, time);
        long maxIdleMs = clientConfig.getLong(CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG);

        NetworkClient netClient = new NetworkClient(
            new Selector(maxIdleMs, metrics, time, metricGrpPrefix, channelBuilder, logContext),
            this.metadata,
            clientId,
            100, // a fixed large enough value will suffice
            clientConfig.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG),
            clientConfig.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG),
            clientConfig.getInt(CommonClientConfigs.SEND_BUFFER_CONFIG),
            clientConfig.getInt(CommonClientConfigs.RECEIVE_BUFFER_CONFIG),
            clientConfig.getInt(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG),
            ClientDnsLookup.forConfig(clientConfig.getString(CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG)),
            time,
            true,
            new ApiVersions(),
            logContext);

        this.client = new ConsumerNetworkClient(
            logContext,
            netClient,
            metadata,
            time,
            retryBackoffMs,
            clientConfig.getInt(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG),
            Integer.MAX_VALUE
        );
        this.coordinator = new KarelDbCoordinator(
            logContext,
            this.client,
            groupId,
            300000, // Default MAX_POLL_INTERVAL_MS_CONFIG
            10000, // Default SESSION_TIMEOUT_MS_CONFIG
            3000, // Default HEARTBEAT_INTERVAL_MS_CONFIG
            metrics,
            metricGrpPrefix,
            time,
            retryBackoffMs,
            myIdentity,
            this
        );

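        // Register an app-info MBean for this client id; it exposes the Kafka clients version and commit id via JMX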
        AppInfoParser.registerAppInfo(JMX_PREFIX, clientId, metrics, time.milliseconds());

        initTimeout = config.getInt(KarelDbConfig.KAFKACACHE_INIT_TIMEOUT_CONFIG);

        LOG.debug("Group member created");
    } catch (Throwable t) {
        // call close methods if internal objects are already constructed
        // this is to prevent resource leak. see KAFKA-2121
        stop(true);
        // now propagate the exception
        throw new KarelDbElectionException("Failed to construct kafka consumer", t);
    }
}
 
Example #5
Source File: IgniteSinkConnector.java    From ignite with Apache License 2.0
/** {@inheritDoc} */
@Override public String version() {
    return AppInfoParser.getVersion();
}
 
Example #6
Source File: InfluxDbSinkConnector.java    From kafka-metrics with Apache License 2.0
@Override
public String version() {
    return AppInfoParser.getVersion();
}
 
Example #7
Source File: MySqlSourceConnector.java    From kafka-mysql-connector with Apache License 2.0
@Override
public String version() {
    return AppInfoParser.getVersion();
}
 
Example #8
Source File: FileStreamSinkConnector.java    From kafka-connector-skeleton with Apache License 2.0
@Override
public String version() {
    return AppInfoParser.getVersion();
}
 
Example #9
Source File: FileStreamSourceConnector.java    From kafka-connector-skeleton with Apache License 2.0
@Override
public String version() {
    return AppInfoParser.getVersion();
}
 
Example #10
Source File: KafkaIO.java    From beam with Apache License 2.0
@Override
public PCollection<KafkaRecord<K, V>> expand(PBegin input) {
  checkArgument(
      getConsumerConfig().get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG) != null,
      "withBootstrapServers() is required");
  checkArgument(
      getTopics().size() > 0 || getTopicPartitions().size() > 0,
      "Either withTopic(), withTopics() or withTopicPartitions() is required");
  checkArgument(getKeyDeserializerProvider() != null, "withKeyDeserializer() is required");
  checkArgument(getValueDeserializerProvider() != null, "withValueDeserializer() is required");

  ConsumerSpEL consumerSpEL = new ConsumerSpEL();
  if (!consumerSpEL.hasOffsetsForTimes()) {
    LOG.warn(
        "Kafka client version {} is too old. Versions before 0.10.1.0 are deprecated and "
            + "may not be supported in next release of Apache Beam. "
            + "Please upgrade your Kafka client version.",
        AppInfoParser.getVersion());
  }
  if (getStartReadTime() != null) {
    checkArgument(
        consumerSpEL.hasOffsetsForTimes(),
        "Consumer.offsetsForTimes is only supported by Kafka Client 0.10.1.0 onwards, "
            + "current version of Kafka Client is "
            + AppInfoParser.getVersion()
            + ". If you are building with maven, set \"kafka.clients.version\" "
            + "maven property to 0.10.1.0 or newer.");
  }
  if (isCommitOffsetsInFinalizeEnabled()) {
    checkArgument(
        getConsumerConfig().get(ConsumerConfig.GROUP_ID_CONFIG) != null,
        "commitOffsetsInFinalize() is enabled, but group.id in Kafka consumer config "
            + "is not set. Offset management requires group.id.");
    if (Boolean.TRUE.equals(
        getConsumerConfig().get(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG))) {
      LOG.warn(
          "'{}' in consumer config is enabled even though commitOffsetsInFinalize() "
              + "is set. You need only one of them.",
          ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG);
    }
  }

  // Infer key/value coders if not specified explicitly
  CoderRegistry coderRegistry = input.getPipeline().getCoderRegistry();

  Coder<K> keyCoder = getKeyCoder(coderRegistry);
  Coder<V> valueCoder = getValueCoder(coderRegistry);

  // Handles unbounded source to bounded conversion if maxNumRecords or maxReadTime is set.
  Unbounded<KafkaRecord<K, V>> unbounded =
      org.apache.beam.sdk.io.Read.from(
          toBuilder().setKeyCoder(keyCoder).setValueCoder(valueCoder).build().makeSource());

  PTransform<PBegin, PCollection<KafkaRecord<K, V>>> transform = unbounded;

  if (getMaxNumRecords() < Long.MAX_VALUE || getMaxReadTime() != null) {
    transform =
        unbounded.withMaxReadTime(getMaxReadTime()).withMaxNumRecords(getMaxNumRecords());
  }

  return input.getPipeline().apply(transform);
}
 
Example #11
Source File: CloudPubSubSourceConnector.java    From pubsub with Apache License 2.0
@Override
public String version() {
  return AppInfoParser.getVersion();
}
 
Example #12
Source File: CloudPubSubSinkConnector.java    From pubsub with Apache License 2.0
@Override
public String version() {
  return AppInfoParser.getVersion();
}
 
Example #13
Source File: ConsoleSourceConnect.java    From KafkaExample with Apache License 2.0
@Override
public String version() {
    return AppInfoParser.getVersion();
}
 
Example #14
Source File: ConsoleSinkTask.java    From KafkaExample with Apache License 2.0
@Override
public String version() {
    return AppInfoParser.getVersion();
}
 
Example #15
Source File: ConsoleSinkConnect.java    From KafkaExample with Apache License 2.0
@Override
public String version() {
    return AppInfoParser.getVersion();
}
 
Example #16
Source File: MongoDbSinkConnector.java    From MongoDb-Sink-Connector with Apache License 2.0
@Override
public String version() {
    return AppInfoParser.getVersion();
}
 
Example #17
Source File: IRCFeedConnector.java    From hello-kafka-streams with Apache License 2.0
@Override
public String version() {
    return AppInfoParser.getVersion();
}
 
Example #18
Source File: SchemaSourceConnector.java    From streamx with Apache License 2.0
@Override
public String version() {
  return AppInfoParser.getVersion();
}
 
Example #19
Source File: FirehoseSinkConnector.java    From kinesis-kafka-connector with Apache License 2.0
@Override
public String version() {
	// Currently using Kafka version, in future release use Kinesis-Kafka version
	return AppInfoParser.getVersion();
}
 
Example #20
Source File: AmazonKinesisSinkConnector.java    From kinesis-kafka-connector with Apache License 2.0
@Override
public String version() {
	// Currently using Kafka version, in future release use Kinesis-Kafka version
	return AppInfoParser.getVersion();
}
 
Example #21
Source File: KafkaIO.java    From DataflowTemplates with Apache License 2.0
@Override
public PCollection<KafkaRecord<K, V>> expand(PBegin input) {
  checkArgument(getBootstrapServers() != null,
                "withBootstrapServers() is required");
  checkArgument(
      getTopics() != null || getTopicPartitions().size() > 0,
      "Either withTopic(), withTopics() or withTopicPartitions() is required");
  checkArgument(getKeyDeserializer() != null, "withKeyDeserializer() is required");
  checkArgument(getValueDeserializer() != null, "withValueDeserializer() is required");
  ConsumerSpEL consumerSpEL = new ConsumerSpEL();

  if (!consumerSpEL.hasOffsetsForTimes()) {
    LOG.warn(
        "Kafka client version {} is too old. Versions before 0.10.1.0 are deprecated and "
            + "may not be supported in next release of Apache Beam. "
            + "Please upgrade your Kafka client version.",
        AppInfoParser.getVersion());
  }
  if (getStartReadTime() != null) {
    checkArgument(
        consumerSpEL.hasOffsetsForTimes(),
        "Consumer.offsetsForTimes is only supported by Kafka Client 0.10.1.0 onwards, "
            + "current version of Kafka Client is "
            + AppInfoParser.getVersion()
            + ". If you are building with maven, set \"kafka.clients.version\" "
            + "maven property to 0.10.1.0 or newer.");
  }
  if (isCommitOffsetsInFinalizeEnabled()) {
    checkArgument(
        getConsumerConfig().get(ConsumerConfig.GROUP_ID_CONFIG) != null,
        "commitOffsetsInFinalize() is enabled, but group.id in Kafka consumer config "
            + "is not set. Offset management requires group.id.");
    if (Boolean.TRUE.equals(
        getConsumerConfig().get(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG))) {
      LOG.warn(
          "'{}' in consumer config is enabled even though commitOffsetsInFinalize() "
              + "is set. You need only one of them.",
          ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG);
    }
  }

  // Infer key/value coders if not specified explicitly
  CoderRegistry registry = input.getPipeline().getCoderRegistry();

  Coder<K> keyCoder =
      getKeyCoder() != null ? getKeyCoder() : inferCoder(registry, getKeyDeserializer());
  checkArgument(
      keyCoder != null,
      "Key coder could not be inferred from key deserializer. Please provide"
          + "key coder explicitly using withKeyDeserializerAndCoder()");

  Coder<V> valueCoder =
      getValueCoder() != null ? getValueCoder() : inferCoder(registry, getValueDeserializer());
  checkArgument(
      valueCoder != null,
      "Value coder could not be inferred from value deserializer. Please provide"
          + "value coder explicitly using withValueDeserializerAndCoder()");

  // Handles unbounded source to bounded conversion if maxNumRecords or maxReadTime is set.
  Unbounded<KafkaRecord<K, V>> unbounded =
      org.apache.beam.sdk.io.Read.from(
          toBuilder().setKeyCoder(keyCoder).setValueCoder(valueCoder).build().makeSource());

  PTransform<PBegin, PCollection<KafkaRecord<K, V>>> transform = unbounded;

  if (getMaxNumRecords() < Long.MAX_VALUE || getMaxReadTime() != null) {
    transform =
        unbounded.withMaxReadTime(getMaxReadTime()).withMaxNumRecords(getMaxNumRecords());
  }

  return input.getPipeline().apply(transform);
}
 
Example #22
Source File: WorkerGroupMember.java    From DataLink with Apache License 2.0
public WorkerGroupMember(WorkerConfig config,
                         String restUrl,
                         TaskConfigManager jobTaskConfigManager,
                         WorkerRebalanceListener listener,
                         Time time) {
    try {
        this.time = time;

        String clientIdConfig = config.getString(CommonClientConfigs.CLIENT_ID_CONFIG);
        clientId = clientIdConfig.length() <= 0 ? "datalink-worker-" + DATALINK_CLIENT_ID_SEQUENCE.getAndIncrement() : clientIdConfig;
        Map<String, String> metricsTags = new LinkedHashMap<>();
        metricsTags.put("client-id", clientId);
        MetricConfig metricConfig = new MetricConfig().samples(config.getInt(CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG))
                .timeWindow(config.getLong(CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS)
                .tags(metricsTags);
        List<MetricsReporter> reporters = config.getConfiguredInstances(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG, MetricsReporter.class);
        reporters.add(new JmxReporter(JMX_PREFIX));
        this.metrics = new Metrics(metricConfig, reporters, time);
        this.retryBackoffMs = config.getLong(CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG);
        this.metadata = new Metadata(retryBackoffMs, config.getLong(CommonClientConfigs.METADATA_MAX_AGE_CONFIG));
        List<InetSocketAddress> addresses = ClientUtils.parseAndValidateAddresses(config.getList(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG));
        this.metadata.update(Cluster.bootstrap(addresses), 0);
        String metricGrpPrefix = "datalink.worker";
        ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(config.values());
        NetworkClient netClient = new NetworkClient(
                new Selector(config.getLong(CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG), metrics, time, metricGrpPrefix, channelBuilder),
                this.metadata,
                clientId,
                100, // a fixed large enough value will suffice
                config.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG),
                config.getInt(CommonClientConfigs.SEND_BUFFER_CONFIG),
                config.getInt(CommonClientConfigs.RECEIVE_BUFFER_CONFIG),
                config.getInt(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG), time);
        this.client = new ConsumerNetworkClient(netClient, metadata, time, retryBackoffMs,
                config.getInt(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG)){
            @Override
            public boolean awaitMetadataUpdate(long timeout) {
                metadata.update(Cluster.bootstrap(addresses),time.milliseconds());
                return super.awaitMetadataUpdate(timeout);
            }
        };
        this.coordinator = new WorkerCoordinator(this.client,
                config.getString(WorkerConfig.GROUP_ID_CONFIG),
                config.getInt(WorkerConfig.REBALANCE_TIMEOUT_MS_CONFIG),
                config.getInt(WorkerConfig.SESSION_TIMEOUT_MS_CONFIG),
                config.getInt(WorkerConfig.HEARTBEAT_INTERVAL_MS_CONFIG),
                metrics,
                metricGrpPrefix,
                this.time,
                retryBackoffMs,
                restUrl,
                jobTaskConfigManager,
                listener);

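        // Register the app-info MBean for this client id (older clients API without a Metrics argument)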
        AppInfoParser.registerAppInfo(JMX_PREFIX, clientId);
        log.debug("datalink worker group member created");
    } catch (Throwable t) {
        // call close methods if internal objects are already constructed
        // this is to prevent resource leak.
        stop(true);
        // now propagate the errors
        throw new DatalinkException("Failed to construct datalink worker", t);
    }
}
 
Example #23
Source File: MongodbSourceConnector.java    From kafka-connect-mongodb with Apache License 2.0
/**
 * Get the version of this connector.
 *
 * @return the version, formatted as a String
 */
@Override
public String version() {
    return AppInfoParser.getVersion();
}
 
Example #24
Source File: MongodbSinkConnector.java    From kafka-connect-mongodb with Apache License 2.0
/**
 * Get the version of this connector.
 *
 * @return the version, formatted as a string
 */
@Override
public String version() {
    return AppInfoParser.getVersion();
}