org.apache.kafka.clients.ClientUtils Java Examples

The following examples show how to use org.apache.kafka.clients.ClientUtils. Each example is taken from an open-source project; the originating project, source file, and license are noted above the code.
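
As a quick orientation before the project examples: the recurring calls are ClientUtils.parseAndValidateAddresses, which turns a bootstrap-server list into validated socket addresses, and ClientUtils.createChannelBuilder, which derives a network ChannelBuilder from an AbstractConfig. The snippet below is a minimal sketch rather than code from any project on this page, and it assumes a 2.x Kafka clients version where the ClientDnsLookup overload used in Example #2 exists.

import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.List;

import org.apache.kafka.clients.ClientDnsLookup;
import org.apache.kafka.clients.ClientUtils;

public class ClientUtilsSketch {
    public static void main(String[] args) {
        // Parse and validate the bootstrap list; an invalid or unresolvable list
        // raises a ConfigException.
        List<InetSocketAddress> addresses = ClientUtils.parseAndValidateAddresses(
                Arrays.asList("broker1:9092", "broker2:9092"), ClientDnsLookup.DEFAULT);
        addresses.forEach(a -> System.out.println(a.getHostString() + ":" + a.getPort()));
    }
}
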
Example #1
Source File: KafkaNodeClient.java    From feeyo-redisproxy with BSD 3-Clause "New" or "Revised" License
public KafkaNodeClient(int id, String host, int port) {
	node = new Node(id, host, port);
	
	//
	LogContext logContext = new LogContext("ctx");

	// Minimal ConfigDef: only the security protocol and SASL mechanism are defined,
	// so their defaults (PLAINTEXT and GSSAPI) apply.
	ConfigDef defConf = new ConfigDef();
	defConf.define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, ConfigDef.Type.STRING,
			CommonClientConfigs.DEFAULT_SECURITY_PROTOCOL, ConfigDef.Importance.MEDIUM,
			CommonClientConfigs.SECURITY_PROTOCOL_DOC);

	defConf.define(SaslConfigs.SASL_MECHANISM, ConfigDef.Type.STRING, SaslConfigs.DEFAULT_SASL_MECHANISM,
			ConfigDef.Importance.MEDIUM, SaslConfigs.SASL_MECHANISM_DOC);

	metrics = new Metrics(Time.SYSTEM);

	// An empty Properties object against that ConfigDef yields an all-defaults config;
	// ClientUtils derives the matching ChannelBuilder from it.
	AbstractConfig config = new AbstractConfig(defConf, new Properties());
	channelBuilder = ClientUtils.createChannelBuilder(config);
	selector = new Selector(1000L, metrics, Time.SYSTEM, "cc", channelBuilder, logContext);
	client = new NetworkClient(selector, new Metadata(0, Long.MAX_VALUE, false),
			CLIENT_ID, 10, 1000L, 1000L, 1, 1024, 1000, Time.SYSTEM, true, new ApiVersions(),
			null, logContext);
}
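
The constructor above only wires the pieces together. A hypothetical next step (not part of the feeyo-redisproxy source) is to drive the connection with the standard KafkaClient calls before issuing requests:

	// Hypothetical usage sketch for the fields built above.
	long now = Time.SYSTEM.milliseconds();
	while (!client.ready(node, now)) {
		client.poll(100, now);              // drives connection setup and I/O
		now = Time.SYSTEM.milliseconds();
	}
	// ... build requests with client.newClientRequest(...) and client.send(...),
	// then release resources:
	client.close();                         // also closes the underlying selector
	metrics.close();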
 
Example #2
Source File: MetadataClient.java    From cruise-control with BSD 2-Clause "Simplified" License
public MetadataClient(KafkaCruiseControlConfig config,
                      Metadata metadata,
                      long metadataTTL,
                      Time time) {
  _metadataGeneration = new AtomicInteger(0);
  _metadata = metadata;
  _refreshMetadataTimeout = config.getLong(MonitorConfig.METADATA_MAX_AGE_CONFIG);
  _time = time;
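  // Resolve the bootstrap servers and seed the metadata with a bootstrap cluster.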
  List<InetSocketAddress> addresses =
      ClientUtils.parseAndValidateAddresses(config.getList(MonitorConfig.BOOTSTRAP_SERVERS_CONFIG),
                                            ClientDnsLookup.DEFAULT);
  Cluster bootstrapCluster = Cluster.bootstrap(addresses);
  MetadataResponse metadataResponse = KafkaCruiseControlUtils.prepareMetadataResponse(bootstrapCluster.nodes(),
                                                                                      bootstrapCluster.clusterResource().clusterId(),
                                                                                      MetadataResponse.NO_CONTROLLER_ID,
                                                                                      Collections.emptyList());

  _metadata.update(KafkaCruiseControlUtils.REQUEST_VERSION_UPDATE, metadataResponse, time.milliseconds());
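  // ClientUtils builds the ChannelBuilder that matches the configured security protocol.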
  ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(config, time);
  NetworkClientProvider provider = config.getConfiguredInstance(MonitorConfig.NETWORK_CLIENT_PROVIDER_CLASS_CONFIG,
                                                                NetworkClientProvider.class);

  _networkClient = provider.createNetworkClient(config.getLong(MonitorConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG),
                                                new Metrics(),
                                                time,
                                                "load-monitor",
                                                channelBuilder,
                                                _metadata,
                                                config.getString(MonitorConfig.CLIENT_ID_CONFIG),
                                                DEFAULT_MAX_IN_FLIGHT_REQUEST,
                                                config.getLong(MonitorConfig.RECONNECT_BACKOFF_MS_CONFIG),
                                                config.getLong(MonitorConfig.RECONNECT_BACKOFF_MS_CONFIG),
                                                config.getInt(MonitorConfig.SEND_BUFFER_CONFIG),
                                                config.getInt(MonitorConfig.RECEIVE_BUFFER_CONFIG),
                                                config.getInt(MonitorConfig.REQUEST_TIMEOUT_MS_CONFIG),
                                                true,
                                                new ApiVersions());
  _metadataTTL = metadataTTL;
}
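
Once constructed, the MetadataClient is used to refresh the cluster metadata on demand. The sketch below is a rough approximation of that refresh pattern using only standard Kafka client calls on the fields built above; it assumes a client version whose Metadata class exposes requestUpdate() and updateVersion(), and it is not the actual cruise-control refresh logic.

  // Hypothetical refresh sketch: mark the cached metadata stale, then let the
  // network client send the MetadataRequest and apply the response.
  int lastUpdateVersion = _metadata.updateVersion();
  _metadata.requestUpdate();
  long deadlineMs = _time.milliseconds() + _refreshMetadataTimeout;
  while (_metadata.updateVersion() <= lastUpdateVersion && _time.milliseconds() < deadlineMs) {
    _networkClient.poll(100L, _time.milliseconds());
  }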
 
Example #3
Source File: KarelDbLeaderElector.java    From kareldb with Apache License 2.0
public KarelDbLeaderElector(KarelDbConfig config, KarelDbEngine engine) throws KarelDbElectionException {
    try {
        this.engine = engine;
        this.clientId = "kdb-" + KDB_CLIENT_ID_SEQUENCE.getAndIncrement();

        this.myIdentity = findIdentity(
            config.getList(KarelDbConfig.LISTENERS_CONFIG),
            config.getBoolean(KarelDbConfig.LEADER_ELIGIBILITY_CONFIG));

        Map<String, String> metricsTags = new LinkedHashMap<>();
        metricsTags.put("client-id", clientId);
        MetricConfig metricConfig = new MetricConfig().tags(metricsTags);
        List<MetricsReporter> reporters = Collections.singletonList(new JmxReporter(JMX_PREFIX));
        Time time = Time.SYSTEM;

        ClientConfig clientConfig = new ClientConfig(config.originalsWithPrefix("kafkacache."), false);

        this.metrics = new Metrics(metricConfig, reporters, time);
        this.retryBackoffMs = clientConfig.getLong(CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG);
        String groupId = config.getString(KarelDbConfig.CLUSTER_GROUP_ID_CONFIG);
        LogContext logContext = new LogContext("[KarelDB clientId=" + clientId + ", groupId="
            + groupId + "] ");
        this.metadata = new Metadata(
            retryBackoffMs,
            clientConfig.getLong(CommonClientConfigs.METADATA_MAX_AGE_CONFIG),
            logContext,
            new ClusterResourceListeners()
        );
        List<String> bootstrapServers
            = config.getList(KarelDbConfig.KAFKACACHE_BOOTSTRAP_SERVERS_CONFIG);
        List<InetSocketAddress> addresses = ClientUtils.parseAndValidateAddresses(bootstrapServers,
            clientConfig.getString(CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG));
        this.metadata.bootstrap(addresses);
        String metricGrpPrefix = "kareldb";

        // ClientUtils selects the ChannelBuilder (PLAINTEXT, SSL, or SASL) that matches the client security settings.
        ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(clientConfig, time);
        long maxIdleMs = clientConfig.getLong(CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG);

        NetworkClient netClient = new NetworkClient(
            new Selector(maxIdleMs, metrics, time, metricGrpPrefix, channelBuilder, logContext),
            this.metadata,
            clientId,
            100, // a fixed large enough value will suffice
            clientConfig.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG),
            clientConfig.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG),
            clientConfig.getInt(CommonClientConfigs.SEND_BUFFER_CONFIG),
            clientConfig.getInt(CommonClientConfigs.RECEIVE_BUFFER_CONFIG),
            clientConfig.getInt(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG),
            ClientDnsLookup.forConfig(clientConfig.getString(CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG)),
            time,
            true,
            new ApiVersions(),
            logContext);

        this.client = new ConsumerNetworkClient(
            logContext,
            netClient,
            metadata,
            time,
            retryBackoffMs,
            clientConfig.getInt(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG),
            Integer.MAX_VALUE
        );
        this.coordinator = new KarelDbCoordinator(
            logContext,
            this.client,
            groupId,
            300000, // Default MAX_POLL_INTERVAL_MS_CONFIG
            10000, // Default SESSION_TIMEOUT_MS_CONFIG
            3000, // Default HEARTBEAT_INTERVAL_MS_CONFIG
            metrics,
            metricGrpPrefix,
            time,
            retryBackoffMs,
            myIdentity,
            this
        );

        AppInfoParser.registerAppInfo(JMX_PREFIX, clientId, metrics, time.milliseconds());

        initTimeout = config.getInt(KarelDbConfig.KAFKACACHE_INIT_TIMEOUT_CONFIG);

        LOG.debug("Group member created");
    } catch (Throwable t) {
        // call close methods if internal objects are already constructed
        // this is to prevent resource leak. see KAFKA-2121
        stop(true);
        // now propagate the exception
        throw new KarelDbElectionException("Failed to construct kafka consumer", t);
    }
}
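
The catch block above relies on a stop() method to tear down whatever was already constructed. A hypothetical sketch of such a cleanup path (the real kareldb implementation may differ) closes the coordinator, the network client, and the metrics registry, and unregisters the JMX app info registered above:

// Hypothetical cleanup sketch; field names follow the constructor above and the
// exception type thrown here is illustrative, not the project's own.
private void stop(boolean swallowException) {
    try {
        if (coordinator != null) coordinator.close();   // leave the group cleanly
        if (client != null) client.close();             // ConsumerNetworkClient is Closeable
        AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId, metrics);
        if (metrics != null) metrics.close();
    } catch (Throwable t) {
        if (!swallowException) {
            throw new KafkaException("Failed to stop the group member", t);
        }
        LOG.warn("Error while stopping the group member", t);
    }
}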