Java Code Examples for org.apache.kafka.common.utils.Time#SYSTEM

The following examples show how to use org.apache.kafka.common.utils.Time#SYSTEM, drawn from open source projects. The source file, project, and license are noted above each example.
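Time is Kafka's clock abstraction, and Time.SYSTEM is its singleton wall-clock implementation; code that takes a Time instead of calling System.currentTimeMillis() directly can be tested against a mock clock. As a minimal sketch of the interface itself (not taken from any of the projects below):

import org.apache.kafka.common.utils.Time;

public class TimeSystemDemo {
    public static void main(String[] args) {
        Time time = Time.SYSTEM;              // wall-clock backed singleton
        long startMs = time.milliseconds();   // equivalent to System.currentTimeMillis()
        time.sleep(100);                      // sleeps through the abstraction
        System.out.println("elapsed ~" + (time.milliseconds() - startMs) + " ms");
    }
}
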
Example 1
Source File: KafkaMetricsTest.java    From micrometer with Apache License 2.0
@Test
void shouldKeepMetersWhenMetricsDoNotChange() {
    //Given
    Supplier<Map<MetricName, ? extends Metric>> supplier = () -> {
        MetricName metricName = new MetricName("a", "b", "c", new LinkedHashMap<>());
        KafkaMetric metric = new KafkaMetric(this, metricName, new Value(), new MetricConfig(), Time.SYSTEM);
        return Collections.singletonMap(metricName, metric);
    };
    kafkaMetrics = new KafkaMetrics(supplier);
    MeterRegistry registry = new SimpleMeterRegistry();

    kafkaMetrics.bindTo(registry);
    assertThat(registry.getMeters()).hasSize(1);

    kafkaMetrics.checkAndBindMetrics(registry);
    assertThat(registry.getMeters()).hasSize(1);
}
 
Example 2
Source File: ServiceKafkaClient.java    From ranger with Apache License 2.0
private List<String> getTopicList(List<String> ignoreTopicList) throws Exception {
	List<String> ret = new ArrayList<String>();

	int sessionTimeout = 5000;
	int connectionTimeout = 10000;
	ZooKeeperClient zookeeperClient = new ZooKeeperClient(zookeeperConnect, sessionTimeout, connectionTimeout,
			1, Time.SYSTEM, "kafka.server", "SessionExpireListener", Option.empty());
	try (KafkaZkClient kafkaZkClient = new KafkaZkClient(zookeeperClient, true, Time.SYSTEM)) {
		Iterator<String> iter = kafkaZkClient.getAllTopicsInCluster().iterator();
		while (iter.hasNext()) {
			String topic = iter.next();
			if (ignoreTopicList == null || !ignoreTopicList.contains(topic)) {
				ret.add(topic);
			}
		}
	}
	return ret;
}
 
Example 3
Source File: KafkaMetricsTest.java    From micrometer with Apache License 2.0
@Test
void shouldRemoveOlderMeterWithLessTags() {
    Map<String, String> tags = new LinkedHashMap<>();
    Supplier<Map<MetricName, ? extends Metric>> supplier = () -> {
        MetricName metricName = new MetricName("a", "b", "c", tags);
        KafkaMetric metric = new KafkaMetric(this, metricName, new Value(), new MetricConfig(), Time.SYSTEM);
        return Collections.singletonMap(metricName, metric);
    };
    kafkaMetrics = new KafkaMetrics(supplier);
    MeterRegistry registry = new SimpleMeterRegistry();

    kafkaMetrics.bindTo(registry);
    assertThat(registry.getMeters()).hasSize(1);
    assertThat(registry.getMeters().get(0).getId().getTags()).hasSize(1); // only the implicit kafka-version tag

    tags.put("key0", "value0");
    kafkaMetrics.checkAndBindMetrics(registry);
    assertThat(registry.getMeters()).hasSize(1);
    assertThat(registry.getMeters().get(0).getId().getTags()).hasSize(2);
}
 
Example 4
Source File: KafkaMetricsTest.java    From micrometer with Apache License 2.0
@Issue("#1968")
@Test
void shouldBindMetersWithDifferentClientIds() {
    Supplier<Map<MetricName, ? extends Metric>> supplier = () -> {
        Map<String, String> firstTags = new LinkedHashMap<>();
        firstTags.put("key0", "value0");
        firstTags.put("client-id", "client0");
        MetricName firstName = new MetricName("a", "b", "c", firstTags);
        KafkaMetric firstMetric = new KafkaMetric(this, firstName, new Value(), new MetricConfig(), Time.SYSTEM);
        return Collections.singletonMap(firstName, firstMetric);
    };

    kafkaMetrics = new KafkaMetrics(supplier);
    MeterRegistry registry = new SimpleMeterRegistry();
    registry.counter("kafka.b.a", "client-id", "client1", "key0", "value0");

    kafkaMetrics.bindTo(registry);
    assertThat(registry.getMeters()).hasSize(2);
}
 
Example 5
Source File: KafkaUnit.java    From SkaETL with Apache License 2.0
/**
 * Delete a topic.
 *
 * @param topicName The name of the topic to delete
 */
public void deleteTopic(String topicName) {
    String zookeeperHost = zookeeperString;
    boolean isSecure = false; // whether ZooKeeper ACLs are enabled
    int sessionTimeoutMs = 200000;
    int connectionTimeoutMs = 15000;
    int maxInFlightRequests = 10;
    Time time = Time.SYSTEM;
    String metricGroup = "myGroup";
    String metricType = "myType";
    KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperHost, isSecure, sessionTimeoutMs,
            connectionTimeoutMs, maxInFlightRequests, time, metricGroup, metricType);
    AdminZkClient adminZkClient = new AdminZkClient(zkClient);
    try {
        // run
        LOGGER.info("Executing: DeleteTopic " + topicName);
        adminZkClient.deleteTopic(topicName);
    } finally {
        zkClient.close();
    }
}
 
Example 6
Source File: KafkaUnit.java    From SkaETL with Apache License 2.0
/**
 * @return All topic names
 */
public List<String> listTopics() {
    String zookeeperHost = zookeeperString;
    boolean isSecure = false; // whether ZooKeeper ACLs are enabled
    int sessionTimeoutMs = 200000;
    int connectionTimeoutMs = 15000;
    int maxInFlightRequests = 10;
    Time time = Time.SYSTEM;
    String metricGroup = "myGroup";
    String metricType = "myType";
    KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperHost, isSecure, sessionTimeoutMs,
            connectionTimeoutMs, maxInFlightRequests, time, metricGroup, metricType);
    AdminZkClient adminZkClient = new AdminZkClient(zkClient);
    try {
        // run
        LOGGER.info("Executing: ListTopics ");

            return JavaConversions.asJavaCollection(adminZkClient.getAllTopicConfigs().keys())
                    .stream()
                    .collect(Collectors.toList());

    } finally {
        zkClient.close();
    }
}
 
Example 7
Source File: KafkaMetricsTest.java    From micrometer with Apache License 2.0
@Test
void shouldNotAddAppInfoMetrics() {
    Supplier<Map<MetricName, ? extends Metric>> supplier = () -> {
        Map<MetricName, KafkaMetric> metrics = new LinkedHashMap<>();
        MetricName metricName = new MetricName("a0", "b0", "c0", new LinkedHashMap<>());
        KafkaMetric metric = new KafkaMetric(this, metricName, new Value(), new MetricConfig(), Time.SYSTEM);
        metrics.put(metricName, metric);
        MetricName appInfoMetricName =
                new MetricName("a1", KafkaMetrics.METRIC_GROUP_APP_INFO, "c0",
                        new LinkedHashMap<>());
        KafkaMetric appInfoMetric =
                new KafkaMetric(this, appInfoMetricName, new Value(), new MetricConfig(), Time.SYSTEM);
        metrics.put(appInfoMetricName, appInfoMetric);
        return metrics;
    };
    kafkaMetrics = new KafkaMetrics(supplier);
    MeterRegistry registry = new SimpleMeterRegistry();

    kafkaMetrics.bindTo(registry);
    assertThat(registry.getMeters()).hasSize(1);

    kafkaMetrics.checkAndBindMetrics(registry);
    assertThat(registry.getMeters()).hasSize(1);
}
 
Example 8
Source File: ConnectRunner.java    From aiven-kafka-connect-gcs with GNU Affero General Public License v3.0
void start() {
    final Map<String, String> workerProps = new HashMap<>();
    workerProps.put("bootstrap.servers", bootstrapServers);

    workerProps.put("offset.flush.interval.ms", Integer.toString(offsetFlushInterval));

    // These don't matter much (each connector sets its own converters), but need to be filled with valid classes.
    workerProps.put("key.converter", "org.apache.kafka.connect.converters.ByteArrayConverter");
    workerProps.put("value.converter", "org.apache.kafka.connect.converters.ByteArrayConverter");
    workerProps.put("internal.key.converter", "org.apache.kafka.connect.json.JsonConverter");
    workerProps.put("internal.key.converter.schemas.enable", "false");
    workerProps.put("internal.value.converter", "org.apache.kafka.connect.json.JsonConverter");
    workerProps.put("internal.value.converter.schemas.enable", "false");

    // Not needed, since we'll use MemoryOffsetBackingStore.
    workerProps.put("offset.storage.file.filename", "");

    workerProps.put("plugin.path", pluginDir.getPath());

    final Time time = Time.SYSTEM;
    final String workerId = "test-worker";

    final Plugins plugins = new Plugins(workerProps);
    final StandaloneConfig config = new StandaloneConfig(workerProps);

    final Worker worker = new Worker(
        workerId, time, plugins, config, new MemoryOffsetBackingStore());
    herder = new StandaloneHerder(worker);

    final RestServer rest = new RestServer(config);

    connect = new Connect(herder, rest);

    connect.start();
}
 
Example 9
Source File: ConnectStandalone.java    From mongo-kafka with Apache License 2.0
@SuppressWarnings("unchecked")
ConnectStandalone(final Properties workerProperties) {
  Time time = Time.SYSTEM;
  LOGGER.info("Kafka Connect standalone worker initializing ...");
  long initStart = time.hiResClockMs();
  WorkerInfo initInfo = new WorkerInfo();
  initInfo.logAll();

  Map<String, String> workerProps = (Map) workerProperties;

  LOGGER.info("Scanning for plugin classes. This might take a moment ...");
  Plugins plugins = new Plugins(workerProps);
  plugins.compareAndSwapWithDelegatingLoader();
  StandaloneConfig config = new StandaloneConfig(workerProps);

  String kafkaClusterId = ConnectUtils.lookupKafkaClusterId(config);
  LOGGER.debug("Kafka cluster ID: {}", kafkaClusterId);

  RestServer rest = new RestServer(config);
  URI advertisedUrl = rest.advertisedUrl();
  String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();

  Worker worker = new Worker(workerId, time, plugins, config, new FileOffsetBackingStore());
  this.herder = new StandaloneHerder(worker, kafkaClusterId);
  connectionString = advertisedUrl.toString() + herder.kafkaClusterId();

  this.connect = new Connect(herder, rest);
  LOGGER.info(
      "Kafka Connect standalone worker initialization took {}ms",
      time.hiResClockMs() - initStart);
}
 
Example 10
Source File: KafkaConnectRunner.java    From camel-kafka-connector with Apache License 2.0
/**
 * There does not seem to be a public interface for embedding a Kafka Connect runtime;
 * this code is therefore modeled on the behavior of
 * https://github.com/apache/kafka/blob/2.1/connect/runtime/src/main/java/org/apache/kafka/connect/cli/ConnectStandalone.java
 * and performs the initialization in a roughly similar manner.
 */
private void init() {
    LOG.info("Started worked initialization");

    Time time = Time.SYSTEM;

    // Initializes the system runtime information and logs some of the information
    WorkerInfo initInfo = new WorkerInfo();
    initInfo.logAll();

    Properties props = kafkaConnectPropertyFactory.getProperties();

    Map<String, String> standAloneProperties = Utils.propsToStringMap(props);

    // Not used directly here, but required to initialize the worker
    Plugins plugins = new Plugins(standAloneProperties);

    StandaloneConfig config = new StandaloneConfig(standAloneProperties);
    String kafkaClusterId = ConnectUtils.lookupKafkaClusterId(config);
    AllConnectorClientConfigOverridePolicy allConnectorClientConfigOverridePolicy = new AllConnectorClientConfigOverridePolicy();

    RestServer rest = new RestServer(config);
    rest.initializeServer();

    /*
     According to the Kafka source code "... Worker runs a (dynamic) set of tasks
     in a set of threads, doing the work of actually moving data to/from Kafka ..."
     */
    Worker worker = new Worker(bootstrapServer, time, plugins, config, new FileOffsetBackingStore(), allConnectorClientConfigOverridePolicy);

    /*
    From Kafka source code: " ... The herder interface tracks and manages workers
    and connectors ..."
     */
    herder = new StandaloneHerder(worker, kafkaClusterId, allConnectorClientConfigOverridePolicy);
    connect = new Connect(herder, rest);
    LOG.info("Finished initializing the worker");
}
 
Example 11
Source File: WorkersMetrics.java    From kafka-workers with Apache License 2.0
public WorkersMetrics(WorkersConfig config) {
    List<MetricsReporter> reporters = config.getConfiguredInstances(WorkersConfig.METRIC_REPORTER_CLASSES, MetricsReporter.class);
    reporters.add(new JmxReporter(JMX_PREFIX));
    this.metrics = new Metrics(new MetricConfig(), reporters, Time.SYSTEM);
}
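
The Metrics registry built above timestamps every sample it records with the supplied clock, which is why Time.SYSTEM is threaded through. As a hedged sketch of recording through such a registry (the sensor and metric names here are illustrative, not taken from kafka-workers):

import java.util.ArrayList;
import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.utils.Time;

public class MetricsDemo {
    public static void main(String[] args) {
        // Registry wired to the system clock, with no reporters attached.
        Metrics metrics = new Metrics(new MetricConfig(), new ArrayList<>(), Time.SYSTEM);
        Sensor sensor = metrics.sensor("request-latency");
        sensor.add(metrics.metricName("latency-avg", "demo-group"), new Avg());
        sensor.record(42.0); // the sample is timestamped via Time.SYSTEM
        metrics.close();
    }
}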
 
Example 12
Source File: KafkaTestEnvironmentImpl.java    From flink with Apache License 2.0
/**
 * Copied from com.github.sakserv.minicluster.KafkaLocalBrokerIntegrationTest (ASL licensed).
 */
protected KafkaServer getKafkaServer(int brokerId, File tmpFolder) throws Exception {
	Properties kafkaProperties = new Properties();

	// properties have to be Strings
	kafkaProperties.put("advertised.host.name", KAFKA_HOST);
	kafkaProperties.put("broker.id", Integer.toString(brokerId));
	kafkaProperties.put("log.dir", tmpFolder.toString());
	kafkaProperties.put("zookeeper.connect", zookeeperConnectionString);
	kafkaProperties.put("message.max.bytes", String.valueOf(50 * 1024 * 1024));
	kafkaProperties.put("replica.fetch.max.bytes", String.valueOf(50 * 1024 * 1024));

	// for CI stability, increase zookeeper session timeout
	kafkaProperties.put("zookeeper.session.timeout.ms", zkTimeout);
	kafkaProperties.put("zookeeper.connection.timeout.ms", zkTimeout);
	if (config.getKafkaServerProperties() != null) {
		kafkaProperties.putAll(config.getKafkaServerProperties());
	}

	final int numTries = 5;

	for (int i = 1; i <= numTries; i++) {
		int kafkaPort = NetUtils.getAvailablePort();
		kafkaProperties.put("port", Integer.toString(kafkaPort));

		if (config.isHideKafkaBehindProxy()) {
			NetworkFailuresProxy proxy = createProxy(KAFKA_HOST, kafkaPort);
			kafkaProperties.put("advertised.port", proxy.getLocalPort());
		}

		//to support secure kafka cluster
		if (config.isSecureMode()) {
			LOG.info("Adding Kafka secure configurations");
			kafkaProperties.put("listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort);
			kafkaProperties.put("advertised.listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort);
			kafkaProperties.putAll(getSecureProperties());
		}

		KafkaConfig kafkaConfig = new KafkaConfig(kafkaProperties);

		try {
			scala.Option<String> stringNone = scala.Option.apply(null);
			KafkaServer server = new KafkaServer(kafkaConfig, Time.SYSTEM, stringNone, new ArraySeq<KafkaMetricsReporter>(0));
			server.startup();
			return server;
		}
		catch (KafkaException e) {
			if (e.getCause() instanceof BindException) {
				// port conflict, retry...
				LOG.info("Port conflict when starting Kafka Broker. Retrying...");
			}
			else {
				throw e;
			}
		}
	}

	throw new Exception("Could not start Kafka after " + numTries + " retries due to port conflicts.");
}
 
Example 13
Source File: InputFileDequeue.java    From kafka-connect-spooldir with Apache License 2.0
/**
 * @param minimumFileAgeMS Minimum time since last write in milliseconds.
 */
MinimumFileAgePredicate(long minimumFileAgeMS) {
  this(minimumFileAgeMS, Time.SYSTEM);
}
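
Only the delegating constructor is shown above; presumably the predicate then compares a file's last-modified timestamp against the injected clock. A hypothetical re-creation of that check (method and parameter names are illustrative, not the connector's actual code):

import java.io.File;
import org.apache.kafka.common.utils.Time;

// Hypothetical sketch: true once the file has gone unwritten for minimumFileAgeMS.
static boolean isOldEnough(File file, long minimumFileAgeMS, Time time) {
    long ageMs = time.milliseconds() - file.lastModified(); // ms since last write
    return ageMs >= minimumFileAgeMS;
}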
 
Example 14
Source File: KafkaTestEnvironmentImpl.java    From flink with Apache License 2.0
protected KafkaServer getKafkaServer(int brokerId, File tmpFolder) throws Exception {
	Properties kafkaProperties = new Properties();

	// properties have to be Strings
	kafkaProperties.put("advertised.host.name", KAFKA_HOST);
	kafkaProperties.put("broker.id", Integer.toString(brokerId));
	kafkaProperties.put("log.dir", tmpFolder.toString());
	kafkaProperties.put("zookeeper.connect", zookeeperConnectionString);
	kafkaProperties.put("message.max.bytes", String.valueOf(50 * 1024 * 1024));
	kafkaProperties.put("replica.fetch.max.bytes", String.valueOf(50 * 1024 * 1024));
	kafkaProperties.put("transaction.max.timeout.ms", Integer.toString(1000 * 60 * 60 * 2)); // 2hours

	// for CI stability, increase zookeeper session timeout
	kafkaProperties.put("zookeeper.session.timeout.ms", zkTimeout);
	kafkaProperties.put("zookeeper.connection.timeout.ms", zkTimeout);
	if (config.getKafkaServerProperties() != null) {
		kafkaProperties.putAll(config.getKafkaServerProperties());
	}

	final int numTries = 5;

	for (int i = 1; i <= numTries; i++) {
		int kafkaPort = NetUtils.getAvailablePort();
		kafkaProperties.put("port", Integer.toString(kafkaPort));

		if (config.isHideKafkaBehindProxy()) {
			NetworkFailuresProxy proxy = createProxy(KAFKA_HOST, kafkaPort);
			kafkaProperties.put("advertised.port", proxy.getLocalPort());
		}

		//to support secure kafka cluster
		if (config.isSecureMode()) {
			LOG.info("Adding Kafka secure configurations");
			kafkaProperties.put("listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort);
			kafkaProperties.put("advertised.listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort);
			kafkaProperties.putAll(getSecureProperties());
		}

		KafkaConfig kafkaConfig = new KafkaConfig(kafkaProperties);

		try {
			scala.Option<String> stringNone = scala.Option.apply(null);
			KafkaServer server = new KafkaServer(kafkaConfig, Time.SYSTEM, stringNone, new ArraySeq<KafkaMetricsReporter>(0));
			server.startup();
			return server;
		}
		catch (KafkaException e) {
			if (e.getCause() instanceof BindException) {
				// port conflict, retry...
				LOG.info("Port conflict when starting Kafka Broker. Retrying...");
			}
			else {
				throw e;
			}
		}
	}

	throw new Exception("Could not start Kafka after " + numTries + " retries due to port conflicts.");
}
 
Example 15
Source File: KafkaTestEnvironmentImpl.java    From flink with Apache License 2.0
/**
 * Copied from com.github.sakserv.minicluster.KafkaLocalBrokerIntegrationTest (ASL licensed).
 */
protected KafkaServer getKafkaServer(int brokerId, File tmpFolder) throws Exception {
	Properties kafkaProperties = new Properties();

	// properties have to be Strings
	kafkaProperties.put("advertised.host.name", KAFKA_HOST);
	kafkaProperties.put("broker.id", Integer.toString(brokerId));
	kafkaProperties.put("log.dir", tmpFolder.toString());
	kafkaProperties.put("zookeeper.connect", zookeeperConnectionString);
	kafkaProperties.put("message.max.bytes", String.valueOf(50 * 1024 * 1024));
	kafkaProperties.put("replica.fetch.max.bytes", String.valueOf(50 * 1024 * 1024));
	kafkaProperties.put("transaction.max.timeout.ms", Integer.toString(1000 * 60 * 60 * 2)); // 2hours

	// for CI stability, increase zookeeper session timeout
	kafkaProperties.put("zookeeper.session.timeout.ms", zkTimeout);
	kafkaProperties.put("zookeeper.connection.timeout.ms", zkTimeout);
	if (config.getKafkaServerProperties() != null) {
		kafkaProperties.putAll(config.getKafkaServerProperties());
	}

	final int numTries = 5;

	for (int i = 1; i <= numTries; i++) {
		int kafkaPort = NetUtils.getAvailablePort();
		kafkaProperties.put("port", Integer.toString(kafkaPort));

		if (config.isHideKafkaBehindProxy()) {
			NetworkFailuresProxy proxy = createProxy(KAFKA_HOST, kafkaPort);
			kafkaProperties.put("advertised.port", proxy.getLocalPort());
		}

		//to support secure kafka cluster
		if (config.isSecureMode()) {
			LOG.info("Adding Kafka secure configurations");
			kafkaProperties.put("listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort);
			kafkaProperties.put("advertised.listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort);
			kafkaProperties.putAll(getSecureProperties());
		}

		KafkaConfig kafkaConfig = new KafkaConfig(kafkaProperties);

		try {
			scala.Option<String> stringNone = scala.Option.apply(null);
			KafkaServer server = new KafkaServer(kafkaConfig, Time.SYSTEM, stringNone, new ArraySeq<KafkaMetricsReporter>(0));
			server.startup();
			return server;
		}
		catch (KafkaException e) {
			if (e.getCause() instanceof BindException) {
				// port conflict, retry...
				LOG.info("Port conflict when starting Kafka Broker. Retrying...");
			}
			else {
				throw e;
			}
		}
	}

	throw new Exception("Could not start Kafka after " + numTries + " retries due to port conflicts.");
}
 
Example 16
Source File: KafkaTestEnvironmentImpl.java    From Flink-CEPplus with Apache License 2.0
/**
 * Copied from com.github.sakserv.minicluster.KafkaLocalBrokerIntegrationTest (ASL licensed).
 */
protected KafkaServer getKafkaServer(int brokerId, File tmpFolder) throws Exception {
	Properties kafkaProperties = new Properties();

	// properties have to be Strings
	kafkaProperties.put("advertised.host.name", KAFKA_HOST);
	kafkaProperties.put("broker.id", Integer.toString(brokerId));
	kafkaProperties.put("log.dir", tmpFolder.toString());
	kafkaProperties.put("zookeeper.connect", zookeeperConnectionString);
	kafkaProperties.put("message.max.bytes", String.valueOf(50 * 1024 * 1024));
	kafkaProperties.put("replica.fetch.max.bytes", String.valueOf(50 * 1024 * 1024));

	// for CI stability, increase zookeeper session timeout
	kafkaProperties.put("zookeeper.session.timeout.ms", zkTimeout);
	kafkaProperties.put("zookeeper.connection.timeout.ms", zkTimeout);
	if (config.getKafkaServerProperties() != null) {
		kafkaProperties.putAll(config.getKafkaServerProperties());
	}

	final int numTries = 5;

	for (int i = 1; i <= numTries; i++) {
		int kafkaPort = NetUtils.getAvailablePort();
		kafkaProperties.put("port", Integer.toString(kafkaPort));

		if (config.isHideKafkaBehindProxy()) {
			NetworkFailuresProxy proxy = createProxy(KAFKA_HOST, kafkaPort);
			kafkaProperties.put("advertised.port", proxy.getLocalPort());
		}

		//to support secure kafka cluster
		if (config.isSecureMode()) {
			LOG.info("Adding Kafka secure configurations");
			kafkaProperties.put("listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort);
			kafkaProperties.put("advertised.listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort);
			kafkaProperties.putAll(getSecureProperties());
		}

		KafkaConfig kafkaConfig = new KafkaConfig(kafkaProperties);

		try {
			scala.Option<String> stringNone = scala.Option.apply(null);
			KafkaServer server = new KafkaServer(kafkaConfig, Time.SYSTEM, stringNone, new ArraySeq<KafkaMetricsReporter>(0));
			server.startup();
			return server;
		}
		catch (KafkaException e) {
			if (e.getCause() instanceof BindException) {
				// port conflict, retry...
				LOG.info("Port conflict when starting Kafka Broker. Retrying...");
			}
			else {
				throw e;
			}
		}
	}

	throw new Exception("Could not start Kafka after " + numTries + " retries due to port conflicts.");
}
 
Example 17
Source File: KafkaTestEnvironmentImpl.java    From Flink-CEPplus with Apache License 2.0
/**
 * Copied from com.github.sakserv.minicluster.KafkaLocalBrokerIntegrationTest (ASL licensed).
 */
protected KafkaServer getKafkaServer(int brokerId, File tmpFolder) throws Exception {
	Properties kafkaProperties = new Properties();

	// properties have to be Strings
	kafkaProperties.put("advertised.host.name", KAFKA_HOST);
	kafkaProperties.put("broker.id", Integer.toString(brokerId));
	kafkaProperties.put("log.dir", tmpFolder.toString());
	kafkaProperties.put("zookeeper.connect", zookeeperConnectionString);
	kafkaProperties.put("message.max.bytes", String.valueOf(50 * 1024 * 1024));
	kafkaProperties.put("replica.fetch.max.bytes", String.valueOf(50 * 1024 * 1024));
	kafkaProperties.put("transaction.max.timeout.ms", Integer.toString(1000 * 60 * 60 * 2)); // 2hours

	// for CI stability, increase zookeeper session timeout
	kafkaProperties.put("zookeeper.session.timeout.ms", zkTimeout);
	kafkaProperties.put("zookeeper.connection.timeout.ms", zkTimeout);
	if (config.getKafkaServerProperties() != null) {
		kafkaProperties.putAll(config.getKafkaServerProperties());
	}

	final int numTries = 5;

	for (int i = 1; i <= numTries; i++) {
		int kafkaPort = NetUtils.getAvailablePort();
		kafkaProperties.put("port", Integer.toString(kafkaPort));

		if (config.isHideKafkaBehindProxy()) {
			NetworkFailuresProxy proxy = createProxy(KAFKA_HOST, kafkaPort);
			kafkaProperties.put("advertised.port", proxy.getLocalPort());
		}

		//to support secure kafka cluster
		if (config.isSecureMode()) {
			LOG.info("Adding Kafka secure configurations");
			kafkaProperties.put("listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort);
			kafkaProperties.put("advertised.listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort);
			kafkaProperties.putAll(getSecureProperties());
		}

		KafkaConfig kafkaConfig = new KafkaConfig(kafkaProperties);

		try {
			scala.Option<String> stringNone = scala.Option.apply(null);
			KafkaServer server = new KafkaServer(kafkaConfig, Time.SYSTEM, stringNone, new ArraySeq<KafkaMetricsReporter>(0));
			server.startup();
			return server;
		}
		catch (KafkaException e) {
			if (e.getCause() instanceof BindException) {
				// port conflict, retry...
				LOG.info("Port conflict when starting Kafka Broker. Retrying...");
			}
			else {
				throw e;
			}
		}
	}

	throw new Exception("Could not start Kafka after " + numTries + " retries due to port conflicts.");
}
 
Example 18
Source File: KafkaTestEnvironmentImpl.java    From flink with Apache License 2.0
/**
 * Copied from com.github.sakserv.minicluster.KafkaLocalBrokerIntegrationTest (ASL licensed).
 */
protected KafkaServer getKafkaServer(int brokerId, File tmpFolder) throws Exception {
	Properties kafkaProperties = new Properties();

	// properties have to be Strings
	kafkaProperties.put("advertised.host.name", KAFKA_HOST);
	kafkaProperties.put("broker.id", Integer.toString(brokerId));
	kafkaProperties.put("log.dir", tmpFolder.toString());
	kafkaProperties.put("message.max.bytes", String.valueOf(50 * 1024 * 1024));
	kafkaProperties.put("zookeeper.connect", zookeeperConnectionString);
	kafkaProperties.put("replica.fetch.max.bytes", String.valueOf(50 * 1024 * 1024));
	kafkaProperties.put("transaction.max.timeout.ms", Integer.toString(1000 * 60 * 60 * 2)); // 2hours

	// for CI stability, increase zookeeper session timeout
	kafkaProperties.put("zookeeper.session.timeout.ms", zkTimeout);
	kafkaProperties.put("zookeeper.connection.timeout.ms", zkTimeout);
	if (config.getKafkaServerProperties() != null) {
		kafkaProperties.putAll(config.getKafkaServerProperties());
	}

	final int numTries = 5;

	for (int i = 1; i <= numTries; i++) {
		int kafkaPort = NetUtils.getAvailablePort();
		kafkaProperties.put("port", Integer.toString(kafkaPort));

		if (config.isHideKafkaBehindProxy()) {
			NetworkFailuresProxy proxy = createProxy(KAFKA_HOST, kafkaPort);
			kafkaProperties.put("advertised.port", proxy.getLocalPort());
		}

		//to support secure kafka cluster
		if (config.isSecureMode()) {
			LOG.info("Adding Kafka secure configurations");
			kafkaProperties.put("listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort);
			kafkaProperties.put("advertised.listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort);
			kafkaProperties.putAll(getSecureProperties());
		}

		KafkaConfig kafkaConfig = new KafkaConfig(kafkaProperties);

		try {
			scala.Option<String> stringNone = scala.Option.apply(null);
			KafkaServer server = new KafkaServer(kafkaConfig, Time.SYSTEM, stringNone, new ArraySeq<KafkaMetricsReporter>(0));
			server.startup();
			return server;
		}
		catch (KafkaException e) {
			if (e.getCause() instanceof BindException) {
				// port conflict, retry...
				LOG.info("Port conflict when starting Kafka Broker. Retrying...");
			}
			else {
				throw e;
			}
		}
	}

	throw new Exception("Could not start Kafka after " + numTries + " retries due to port conflicts.");
}