kafka.zk.KafkaZkClient Java Examples

The following examples show how to use kafka.zk.KafkaZkClient. They are taken from open-source projects; the originating project and source file are noted above each example.
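Before walking through the examples, it helps to know the lifecycle they all share: construct a KafkaZkClient (either via the KafkaZkClient.apply factory, as in Examples #1, #16 and #17, or by wrapping a ZooKeeperClient, as in Examples #3 and #4), optionally layer an AdminZkClient on top for topic administration, and close the client when finished. The following is a minimal sketch of that pattern; it assumes a Kafka 2.x client where KafkaZkClient.apply takes the eight arguments shown (later versions add optional trailing parameters, as Example #5 illustrates), and the connection string, timeouts, topic name, and metric group/type are placeholder values.

import java.util.Properties;

import kafka.admin.RackAwareMode;
import kafka.zk.AdminZkClient;
import kafka.zk.KafkaZkClient;
import org.apache.kafka.common.utils.Time;

public class KafkaZkClientSketch {

    public static void main(String[] args) {
        // Placeholder connection settings -- adjust for your environment.
        String zkConnect = "localhost:2181";
        boolean isSecure = false;        // set to true when ZooKeeper security is enabled
        int sessionTimeoutMs = 30000;
        int connectionTimeoutMs = 15000;
        int maxInFlightRequests = 10;

        // Build the client through the same factory method used in Examples #1, #16 and #17.
        KafkaZkClient zkClient = KafkaZkClient.apply(zkConnect, isSecure, sessionTimeoutMs,
                connectionTimeoutMs, maxInFlightRequests, Time.SYSTEM, "exampleMetricGroup", "exampleMetricType");
        try {
            // AdminZkClient layers topic administration on top of the raw ZooKeeper client.
            AdminZkClient adminZkClient = new AdminZkClient(zkClient);
            adminZkClient.createTopic("example-topic", 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
        } finally {
            // Always close the client to release the underlying ZooKeeper session.
            zkClient.close();
        }
    }
}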
Example #1
Source File: KafkaUnit.java    From SkaETL with Apache License 2.0
public void createTopic(String topicName, int numPartitions) {
    // setup
    String zookeeperHost = zookeeperString;
    boolean isSecure = false;
    int sessionTimeoutMs = 200000;
    int connectionTimeoutMs = 15000;
    int maxInFlightRequests = 10;
    Time time = Time.SYSTEM;
    String metricGroup = "myGroup";
    String metricType = "myType";
    KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperHost, isSecure, sessionTimeoutMs,
            connectionTimeoutMs, maxInFlightRequests, time, metricGroup, metricType);
    AdminZkClient adminZkClient = new AdminZkClient(zkClient);
    try {
        // run
        LOGGER.info("Executing: CreateTopic " + topicName);
        adminZkClient.createTopic(topicName, numPartitions, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
    } finally {
        zkClient.close();
    }
}
 
Example #2
Source File: ZkServiceImpl.java    From kafka-eagle with Apache License 2.0
/** Zookeeper get command. */
public String get(String clusterAlias, String cmd) {
	String ret = "";
	KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
	if (zkc.pathExists(cmd)) {
		Tuple2<Option<byte[]>, Stat> tuple2 = zkc.getDataAndStat(cmd);
		ret += new String(tuple2._1.get()) + "\n";
		ret += "cZxid = " + tuple2._2.getCzxid() + "\n";
		ret += "ctime = " + tuple2._2.getCtime() + "\n";
		ret += "mZxid = " + tuple2._2.getMzxid() + "\n";
		ret += "mtime = " + tuple2._2.getMtime() + "\n";
		ret += "pZxid = " + tuple2._2.getPzxid() + "\n";
		ret += "cversion = " + tuple2._2.getCversion() + "\n";
		ret += "dataVersion = " + tuple2._2.getVersion() + "\n";
		ret += "aclVersion = " + tuple2._2.getAversion() + "\n";
		ret += "ephemeralOwner = " + tuple2._2.getEphemeralOwner() + "\n";
		ret += "dataLength = " + tuple2._2.getDataLength() + "\n";
		ret += "numChildren = " + tuple2._2.getNumChildren() + "\n";
	}
	if (zkc != null) {
		kafkaZKPool.release(clusterAlias, zkc);
		zkc = null;
	}
	return ret;
}
 
Example #3
Source File: ServiceKafkaClient.java    From ranger with Apache License 2.0
private List<String> getTopicList(List<String> ignoreTopicList) throws Exception {
	List<String> ret = new ArrayList<String>();

	int sessionTimeout = 5000;
	int connectionTimeout = 10000;
	ZooKeeperClient zookeeperClient = new ZooKeeperClient(zookeeperConnect, sessionTimeout, connectionTimeout,
			1, Time.SYSTEM, "kafka.server", "SessionExpireListener", Option.empty());
	try (KafkaZkClient kafkaZkClient = new KafkaZkClient(zookeeperClient, true, Time.SYSTEM)) {
		Iterator<String> iter = kafkaZkClient.getAllTopicsInCluster().iterator();
		while (iter.hasNext()) {
			String topic = iter.next();
			if (ignoreTopicList == null || !ignoreTopicList.contains(topic)) {
				ret.add(topic);
			}
		}
	}
	return ret;
}
 
Example #4
Source File: KafkaTopicRepository.java    From nakadi with MIT License
private KafkaZkClient createZkClient() {
    // The calling method should make sure to close connection
    return new KafkaZkClient(
            new ZooKeeperClient(
                    kafkaZookeeper.getZookeeperConnectionString(),
                    zookeeperSettings.getZkSessionTimeoutMs(),
                    zookeeperSettings.getZkConnectionTimeoutMs(),
                    zookeeperSettings.getMaxInFlightRequests(),
                    Time.SYSTEM,
                    ZookeeperSettings.METRIC_GROUP,
                    ZookeeperSettings.METRIC_TYPE
            ),
            false,
            Time.SYSTEM
    );
}
 
Example #5
Source File: MultiClusterTopicManagementService.java    From kafka-monitor with Apache License 2.0
void maybeElectLeader() throws Exception {
  if (!_preferredLeaderElectionRequested) {
    return;
  }

  try (KafkaZkClient zkClient = KafkaZkClient.apply(_zkConnect, JaasUtils.isZkSecurityEnabled(), com.linkedin.kmf.common.Utils.ZK_SESSION_TIMEOUT_MS,
      com.linkedin.kmf.common.Utils.ZK_CONNECTION_TIMEOUT_MS, Integer.MAX_VALUE, Time.SYSTEM, METRIC_GROUP_NAME, "SessionExpireListener", null)) {
    if (!zkClient.reassignPartitionsInProgress()) {
      List<TopicPartitionInfo> partitionInfoList = _adminClient
          .describeTopics(Collections.singleton(_topic)).all().get().get(_topic).partitions();
      LOGGER.info(
          "MultiClusterTopicManagementService will trigger requested preferred leader election for the"
              + " topic {} in cluster.", _topic);
      triggerPreferredLeaderElection(partitionInfoList, _topic);
      _preferredLeaderElectionRequested = false;
    }
  }
}
 
Example #6
Source File: KafkaServiceImpl.java    From kafka-eagle with Apache License 2.0
/**
 * Check whether the ZooKeeper client is active.
 */
public JSONObject zkCliStatus(String clusterAlias) {
	JSONObject target = new JSONObject();
	KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
	if (zkc != null) {
		target.put("live", true);
		target.put("list", SystemConfigUtils.getProperty(clusterAlias + ".zk.list"));
	} else {
		target.put("live", false);
		target.put("list", SystemConfigUtils.getProperty(clusterAlias + ".zk.list"));
	}
	if (zkc != null) {
		kafkaZKPool.release(clusterAlias, zkc);
		zkc = null;
	}
	return target;
}
 
Example #7
Source File: KafkaServiceImpl.java    From kafka-eagle with Apache License 2.0
/**
 * Get the active topics for a Kafka consumer group.
 */
public Set<String> getActiveTopic(String clusterAlias, String group) {
	KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
	Set<String> activeTopics = new HashSet<>();
	try {
		Seq<String> topics = zkc.getChildren(CONSUMERS_PATH + "/" + group + OWNERS);
		for (String topic : JavaConversions.seqAsJavaList(topics)) {
			activeTopics.add(topic);
		}
	} catch (Exception ex) {
		LOG.error("Get kafka active topic has error, msg is " + ex.getMessage());
		LOG.error(ex.getMessage());
	} finally {
		if (zkc != null) {
			kafkaZKPool.release(clusterAlias, zkc);
			zkc = null;
		}
	}
	return activeTopics;
}
 
Example #8
Source File: ZkServiceImpl.java    From kafka-eagle with Apache License 2.0
/** Check whether the ZooKeeper client is active. */
public JSONObject zkCliStatus(String clusterAlias) {
	JSONObject target = new JSONObject();
	KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
	if (zkc != null) {
		target.put("live", true);
		target.put("list", SystemConfigUtils.getProperty(clusterAlias + ".zk.list"));
	} else {
		target.put("live", false);
		target.put("list", SystemConfigUtils.getProperty(clusterAlias + ".zk.list"));
	}
	if (zkc != null) {
		kafkaZKPool.release(clusterAlias, zkc);
		zkc = null;
	}
	return target;
}
 
Example #9
Source File: ZkServiceImpl.java    From kafka-eagle with Apache License 2.0
/** Zookeeper delete command. */
public String delete(String clusterAlias, String cmd) {
	String ret = "";
	KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
	boolean status = zkc.pathExists(cmd);
	if (status) {
		if (zkc.deleteRecursive(cmd)) {
			ret = "[" + cmd + "] has delete success";
		} else {
			ret = "[" + cmd + "] has delete failed";
		}
	}
	if (zkc != null) {
		kafkaZKPool.release(clusterAlias, zkc);
		zkc = null;
	}
	return ret;
}
 
Example #10
Source File: BrokerServiceImpl.java    From kafka-eagle with Apache License 2.0
/** Get broker id list. */
public List<Object> getBrokerIdList(String clusterAlias) {
	List<Object> brokerIds = new ArrayList<>();
	KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
	try {
		if (zkc.pathExists(BROKER_IDS_PATH)) {
			Seq<String> subBrokerIdsPaths = zkc.getChildren(BROKER_IDS_PATH);
			for (String id : JavaConversions.seqAsJavaList(subBrokerIdsPaths)) {
				brokerIds.add(Integer.parseInt(id));
			}
		}
	} catch (Exception e) {
		LOG.error("Get kafka broker id has error, msg is ", e);
		e.printStackTrace();
	}
	if (zkc != null) {
		kafkaZKPool.release(clusterAlias, zkc);
		zkc = null;
	}
	return brokerIds;
}
 
Example #11
Source File: BrokerServiceImpl.java    From kafka-eagle with Apache License 2.0
/** Get the topic list, from cgroup configuration when enabled, otherwise from ZooKeeper. */
public List<String> topicList(String clusterAlias) {
	List<String> topics = new ArrayList<>();
	if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.sasl.cgroup.enable")) {
		topics = SystemConfigUtils.getPropertyArrayList(clusterAlias + ".kafka.eagle.sasl.cgroup.topics", ",");
	} else if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.ssl.cgroup.enable")) {
		topics = SystemConfigUtils.getPropertyArrayList(clusterAlias + ".kafka.eagle.ssl.cgroup.topics", ",");
	} else {
		KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
		try {
			if (zkc.pathExists(BROKER_TOPICS_PATH)) {
				Seq<String> subBrokerTopicsPaths = zkc.getChildren(BROKER_TOPICS_PATH);
				topics = JavaConversions.seqAsJavaList(subBrokerTopicsPaths);
				excludeTopic(topics);
			}
		} catch (Exception e) {
			LOG.error("Get topic list has error, msg is " + e.getCause().getMessage());
			e.printStackTrace();
		}
		if (zkc != null) {
			kafkaZKPool.release(clusterAlias, zkc);
			zkc = null;
		}
	}
	return topics;
}
 
Example #12
Source File: BrokerServiceImpl.java    From kafka-eagle with Apache License 2.0
/** Get kafka broker numbers from zookeeper. */
public long brokerNumbers(String clusterAlias) {
	long count = 0;
	KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
	try {
		if (zkc.pathExists(BROKER_IDS_PATH)) {
			Seq<String> subBrokerIdsPaths = zkc.getChildren(BROKER_IDS_PATH);
			count = JavaConversions.seqAsJavaList(subBrokerIdsPaths).size();
		}
	} catch (Exception e) {
		LOG.error("Get kafka broker numbers has error, msg is " + e.getCause().getMessage());
		e.printStackTrace();
	}
	if (zkc != null) {
		kafkaZKPool.release(clusterAlias, zkc);
		zkc = null;
	}
	return count;
}
 
Example #13
Source File: BrokerServiceImpl.java    From kafka-eagle with Apache License 2.0
/** Get the total number of partitions for a topic, used for paging. */
public long partitionNumbers(String clusterAlias, String topic) {
	long count = 0L;
	if (Kafka.CONSUMER_OFFSET_TOPIC.equals(topic)) {
		return count;
	}
	KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
	try {
		if (zkc.pathExists(BROKER_TOPICS_PATH + "/" + topic + "/partitions")) {
			Seq<String> subBrokerTopicsPaths = zkc.getChildren(BROKER_TOPICS_PATH + "/" + topic + "/partitions");
			count = JavaConversions.seqAsJavaList(subBrokerTopicsPaths).size();
		}
	} catch (Exception e) {
		LOG.error("Get topic partition numbers has error, msg is " + e.getCause().getMessage());
		e.printStackTrace();
	}
	if (zkc != null) {
		kafkaZKPool.release(clusterAlias, zkc);
		zkc = null;
	}
	return count;
}
 
Example #14
Source File: KafkaMetricsServiceImpl.java    From kafka-eagle with Apache License 2.0
private String describeTopicConfig(String clusterAlias, String topic) {
	String target = "";
	try {
		KafkaZkClient kafkaZkCli = kafkaZKPool.getZkClient(clusterAlias);
		if (kafkaZkCli.pathExists(CONFIG_TOPIC_PATH + topic)) {
			Tuple2<Option<byte[]>, Stat> tuple = kafkaZkCli.getDataAndStat(CONFIG_TOPIC_PATH + topic);
			target = new String(tuple._1.get());
		}
		if (kafkaZkCli != null) {
			kafkaZKPool.release(clusterAlias, kafkaZkCli);
			kafkaZkCli = null;
		}
	} catch (Exception e) {
		e.printStackTrace();
		LOG.error("Describe topic[" + topic + "] config has error, msg is " + e.getMessage());
	}
	return target;
}
 
Example #15
Source File: ExecutorTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testReplicaReassignmentProgressWithThrottle() throws InterruptedException, OngoingExecutionException {
  KafkaZkClient kafkaZkClient = KafkaCruiseControlUtils.createKafkaZkClient(zookeeper().connectionString(),
                                                                            "ExecutorTestMetricGroup",
                                                                            "ReplicaReassignmentProgressWithThrottle",
                                                                            false);
  try {
    List<ExecutionProposal> proposalsToExecute = new ArrayList<>();
    List<ExecutionProposal> proposalsToCheck = new ArrayList<>();
    populateProposals(proposalsToExecute, proposalsToCheck, PRODUCE_SIZE_IN_BYTES);
    // Throttle rate is set to the half of the produce size.
    executeAndVerifyProposals(kafkaZkClient, proposalsToExecute, proposalsToCheck, false, PRODUCE_SIZE_IN_BYTES / 2, true);
  } finally {
    KafkaCruiseControlUtils.closeKafkaZkClientWithTimeout(kafkaZkClient);
  }
}
 
Example #16
Source File: KafkaUnit.java    From SkaETL with Apache License 2.0
/**
 * @return All topic names
 */
public List<String> listTopics() {
    String zookeeperHost = zookeeperString;
    boolean isSecure = false;
    int sessionTimeoutMs = 200000;
    int connectionTimeoutMs = 15000;
    int maxInFlightRequests = 10;
    Time time = Time.SYSTEM;
    String metricGroup = "myGroup";
    String metricType = "myType";
    KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperHost, isSecure, sessionTimeoutMs,
            connectionTimeoutMs, maxInFlightRequests, time, metricGroup, metricType);
    AdminZkClient adminZkClient = new AdminZkClient(zkClient);
    try {
        // run
        LOGGER.info("Executing: ListTopics");
        return JavaConversions.asJavaCollection(adminZkClient.getAllTopicConfigs().keys())
                .stream()
                .collect(Collectors.toList());
    } finally {
        zkClient.close();
    }
}
 
Example #17
Source File: KafkaUnit.java    From SkaETL with Apache License 2.0
/**
 * Delete a topic.
 *
 * @param topicName The name of the topic to delete
 */
public void deleteTopic(String topicName) {
    String zookeeperHost = zookeeperString;
    boolean isSecure = false;
    int sessionTimeoutMs = 200000;
    int connectionTimeoutMs = 15000;
    int maxInFlightRequests = 10;
    Time time = Time.SYSTEM;
    String metricGroup = "myGroup";
    String metricType = "myType";
    KafkaZkClient zkClient = KafkaZkClient.apply(zookeeperHost, isSecure, sessionTimeoutMs,
            connectionTimeoutMs, maxInFlightRequests, time, metricGroup, metricType);
    AdminZkClient adminZkClient = new AdminZkClient(zkClient);
    try {
        // run
        LOGGER.info("Executing: DeleteTopic " + topicName);
        adminZkClient.deleteTopic(topicName);
    } finally {
        zkClient.close();
    }
}
 
Example #18
Source File: ReplicationThrottleHelperTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testIsNoOpWhenThrottleIsNull() {
  KafkaZkClient mockKafkaZkClient = EasyMock.strictMock(KafkaZkClient.class);
  EasyMock.replay(mockKafkaZkClient);

  // Test would fail on any unexpected interactions with the kafkaZkClient
  ReplicationThrottleHelper throttleHelper = new ReplicationThrottleHelper(mockKafkaZkClient, null);
  ExecutionProposal proposal = new ExecutionProposal(
      new TopicPartition("topic", 0),
      100,
      new ReplicaPlacementInfo(0),
      Arrays.asList(new ReplicaPlacementInfo(0), new ReplicaPlacementInfo(1)),
      Arrays.asList(new ReplicaPlacementInfo(0), new ReplicaPlacementInfo(2)));

  ExecutionTask task = completedTaskForProposal(0, proposal);

  throttleHelper.setThrottles(Collections.singletonList(proposal));
  throttleHelper.clearThrottles(Collections.singletonList(task), Collections.emptyList());
}
 
Example #19
Source File: KafkaTopicRepository.java    From nakadi with MIT License
@Override
public void deleteTopic(final String topic) throws TopicDeletionException {
    try (KafkaZkClient zkClient = createZkClient()) {
        // this will only trigger topic deletion, but the actual deletion is asynchronous
        final AdminZkClient adminZkClient = new AdminZkClient(zkClient);
        adminZkClient.deleteTopic(topic);
    } catch (final Exception e) {
        throw new TopicDeletionException("Unable to delete topic " + topic, e);
    }
}
 
Example #20
Source File: KafkaServiceImpl.java    From kafka-eagle with Apache License 2.0
/**
 * Obtain the replicas and ISR for the given topic and partition.
 */
public String getReplicasIsr(String clusterAlias, String topic, int partitionid) {
	KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
	TopicPartition tp = new TopicPartition(topic, partitionid);
	Seq<Object> replis = zkc.getReplicasForPartition(tp);
	List<Object> targets = JavaConversions.seqAsJavaList(replis);
	if (zkc != null) {
		kafkaZKPool.release(clusterAlias, zkc);
		zkc = null;
	}
	return targets.toString();
}
 
Example #21
Source File: KafkaTopicConfigProvider.java    From cruise-control with BSD 2-Clause "Simplified" License
@Override
public Map<String, Properties> allTopicConfigs() {
  KafkaZkClient kafkaZkClient = KafkaCruiseControlUtils.createKafkaZkClient(_connectString,
                                                                            ZK_KAFKA_TOPIC_CONFIG_PROVIDER_METRIC_GROUP,
                                                                            ZK_KAFKA_TOPIC_CONFIG_PROVIDER_METRIC_TYPE,
                                                                            _zkSecurityEnabled);
  try {
    AdminZkClient adminZkClient = new AdminZkClient(kafkaZkClient);
    return JavaConversions.mapAsJavaMap(adminZkClient.getAllTopicConfigs());
  } finally {
    KafkaCruiseControlUtils.closeKafkaZkClientWithTimeout(kafkaZkClient);
  }
}
 
Example #22
Source File: KafkaServiceImpl.java    From kafka-eagle with Apache License 2.0
/**
 * Get kafka 0.10.x sasl topic metadata.
 */
public List<MetadataInfo> findKafkaLeader(String clusterAlias, String topic) {
	List<MetadataInfo> targets = new ArrayList<>();
	KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
	if (zkc.pathExists(BROKER_TOPICS_PATH + "/" + topic)) {
		Tuple2<Option<byte[]>, Stat> tuple = zkc.getDataAndStat(BROKER_TOPICS_PATH + "/" + topic);
		String tupleString = new String(tuple._1.get());
		JSONObject partitionObject = JSON.parseObject(tupleString).getJSONObject("partitions");
		for (String partition : partitionObject.keySet()) {
			String path = String.format(TOPIC_ISR, topic, Integer.valueOf(partition));
			Tuple2<Option<byte[]>, Stat> tuple2 = zkc.getDataAndStat(path);
			String tupleString2 = new String(tuple2._1.get());
			JSONObject topicMetadata = JSON.parseObject(tupleString2);
			MetadataInfo metadata = new MetadataInfo();
			try {
				metadata.setLeader(topicMetadata.getInteger("leader"));
			} catch (Exception e) {
				LOG.error("Parse string brokerid to int has error, brokerid[" + topicMetadata.getString("leader") + "]");
				e.printStackTrace();
			}
			metadata.setPartitionId(Integer.valueOf(partition));
			targets.add(metadata);
		}
	}
	if (zkc != null) {
		kafkaZKPool.release(clusterAlias, zkc);
		zkc = null;
	}
	return targets;
}
 
Example #23
Source File: KafkaServiceImpl.java    From kafka-eagle with Apache License 2.0
/**
 * Get broker host and jmx_port info from ids.
 */
public String getBrokerJMXFromIds(String clusterAlias, int ids) {
	String jni = "";
	KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
	if (zkc.pathExists(BROKER_IDS_PATH)) {
		try {
			Tuple2<Option<byte[]>, Stat> tuple = zkc.getDataAndStat(BROKER_IDS_PATH + "/" + ids);
			String tupleString = new String(tuple._1.get());
			String host = "";
			if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.sasl.enable") || SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.ssl.enable")) {
				String endpoints = JSON.parseObject(tupleString).getString("endpoints");
				String tmp = endpoints.split("//")[1];
				host = tmp.substring(0, tmp.length() - 2).split(":")[0];
			} else {
				host = JSON.parseObject(tupleString).getString("host");
			}
			int jmxPort = JSON.parseObject(tupleString).getInteger("jmx_port");
			jni = host + ":" + jmxPort;
		} catch (Exception ex) {
			LOG.error("Get broker from ids has error, msg is " + ex.getCause().getMessage());
			ex.printStackTrace();
		}
	}
	if (zkc != null) {
		kafkaZKPool.release(clusterAlias, zkc);
		zkc = null;
	}
	return jni;
}
 
Example #24
Source File: TestKafkaServiceImpl.java    From kafka-eagle with Apache License 2.0
public List<String> findTopicPartition(String clusterAlias, String topic) {
	KafkaZkClient zkc = zkPool.getZkClient(clusterAlias);
	Seq<String> brokerTopicsPaths = zkc.getChildren(BROKER_TOPICS_PATH + "/" + topic + "/partitions");
	List<String> topicAndPartitions = JavaConversions.seqAsJavaList(brokerTopicsPaths);
	if (zkc != null) {
		zkPool.release(clusterAlias, zkc);
		zkc = null;
	}
	return topicAndPartitions;
}
 
Example #25
Source File: KafkaStarterUtils.java    From uReplicator with Apache License 2.0
public static void createTopic(String kafkaTopic, int numOfPartitions, String zkStr, String replicationFactor) {
  // TopicCommand.main() will call System.exit() finally, which will break maven-surefire-plugin
  try {
    String[] args = new String[]{"--create", "--zookeeper", zkStr, "--replication-factor", replicationFactor,
        "--partitions", String.valueOf(numOfPartitions), "--topic", kafkaTopic};
    KafkaZkClient zkClient = KafkaZkClient
        .apply(zkStr, false, 3000, 3000, Integer.MAX_VALUE, Time.SYSTEM, "kafka.server",
            "SessionExpireListener");
    TopicCommand.TopicCommandOptions opts = new TopicCommand.TopicCommandOptions(args);
    TopicCommand.createTopic(zkClient, opts);
  } catch (TopicExistsException e) {
    // Catch TopicExistsException otherwise it will break maven-surefire-plugin
    System.out.println("Topic already existed");
  }
}
 
Example #26
Source File: KafkaTopicConfigProvider.java    From cruise-control with BSD 2-Clause "Simplified" License
@Override
public Properties topicConfigs(String topic) {
  KafkaZkClient kafkaZkClient = KafkaCruiseControlUtils.createKafkaZkClient(_connectString,
                                                                            ZK_KAFKA_TOPIC_CONFIG_PROVIDER_METRIC_GROUP,
                                                                            ZK_KAFKA_TOPIC_CONFIG_PROVIDER_METRIC_TYPE,
                                                                            _zkSecurityEnabled);
  try {
    AdminZkClient adminZkClient = new AdminZkClient(kafkaZkClient);
    return adminZkClient.fetchEntityConfig(ConfigType.Topic(), topic);
  } finally {
    KafkaCruiseControlUtils.closeKafkaZkClientWithTimeout(kafkaZkClient);
  }
}
 
Example #27
Source File: MultiClusterTopicManagementService.java    From kafka-monitor with Apache License 2.0
private static void reassignPartitions(KafkaZkClient zkClient, Collection<Node> brokers, String topic, int partitionCount, int replicationFactor) {
  scala.collection.mutable.ArrayBuffer<BrokerMetadata> brokersMetadata = new scala.collection.mutable.ArrayBuffer<>(brokers.size());
  for (Node broker : brokers) {
    brokersMetadata.$plus$eq(new BrokerMetadata(broker.id(), Option$.MODULE$.apply(broker.rack())));
  }
  scala.collection.Map<Object, Seq<Object>> assignedReplicas =
      AdminUtils.assignReplicasToBrokers(brokersMetadata, partitionCount, replicationFactor, 0, 0);

  scala.collection.immutable.Map<TopicPartition, Seq<Object>> newAssignment = new scala.collection.immutable.HashMap<>();
  scala.collection.Iterator<scala.Tuple2<Object, scala.collection.Seq<Object>>> it = assignedReplicas.iterator();
  while (it.hasNext()) {
    scala.Tuple2<Object, scala.collection.Seq<Object>> scalaTuple = it.next();
    TopicPartition tp = new TopicPartition(topic, (Integer) scalaTuple._1);
    newAssignment = newAssignment.$plus(new scala.Tuple2<>(tp, scalaTuple._2));
  }

  scala.collection.immutable.Set<String> topicList = new scala.collection.immutable.Set.Set1<>(topic);
  scala.collection.Map<Object, scala.collection.Seq<Object>> currentAssignment =
      zkClient.getPartitionAssignmentForTopics(topicList).apply(topic);
  String currentAssignmentJson = formatAsNewReassignmentJson(topic, currentAssignment);
  String newAssignmentJson = formatAsNewReassignmentJson(topic, assignedReplicas);

  LOGGER.info("Reassign partitions for topic " + topic);
  LOGGER.info("Current partition replica assignment " + currentAssignmentJson);
  LOGGER.info("New partition replica assignment " + newAssignmentJson);
  zkClient.createPartitionReassignment(newAssignment);
}
 
Example #28
Source File: KafkaJunitExtensionTest.java    From kafka-junit with Apache License 2.0
@Test
void testKafkaServerIsUp(KafkaHelper kafkaHelper) {
    // Setup Zookeeper client
    final String zkConnectionString = kafkaHelper.zookeeperConnectionString();
    final ZooKeeperClient zooKeeperClient = new ZooKeeperClient(zkConnectionString, 2000, 8000, Integer.MAX_VALUE, Time.SYSTEM, "kafka.server", "SessionExpireListener");
    final KafkaZkClient zkClient = new KafkaZkClient(zooKeeperClient, JaasUtils.isZkSaslEnabled(), Time.SYSTEM);
    final AdminZkClient adminZkClient = new AdminZkClient(zkClient);

    // Create topic
    adminZkClient.createTopic(TOPIC, 1, 1, new Properties(), null);

    // Produce/consume test
    try (KafkaProducer<String, String> producer = kafkaHelper.createStringProducer()) {
        producer.send(new ProducerRecord<>(TOPIC, "keyA", "valueA"));
    }

    try (KafkaConsumer<String, String> consumer = kafkaHelper.createStringConsumer()) {
        consumer.subscribe(Lists.newArrayList(TOPIC));
        ConsumerRecords<String, String> records = consumer.poll(10000);
        Assertions.assertAll(() -> assertThat(records).isNotNull(),
                             () -> assertThat(records.isEmpty()).isFalse());

        ConsumerRecord<String, String> msg = records.iterator().next();
        Assertions.assertAll(() -> assertThat(msg).isNotNull(),
                             () -> assertThat(msg.key()).isEqualTo("keyA"),
                             () -> assertThat(msg.value()).isEqualTo("valueA"));
    }
}
 
Example #29
Source File: KafkaJunitRuleTest.java    From kafka-junit with Apache License 2.0
@Test
public void testKafkaServerIsUp() {
    // Setup Zookeeper client
    final String zkConnectionString = kafkaRule.helper().zookeeperConnectionString();
    final ZooKeeperClient zooKeeperClient = new ZooKeeperClient(zkConnectionString, 2000, 8000, Integer.MAX_VALUE, Time.SYSTEM, "kafka.server", "SessionExpireListener");
    final KafkaZkClient zkClient = new KafkaZkClient(zooKeeperClient, JaasUtils.isZkSaslEnabled(), Time.SYSTEM);
    final AdminZkClient adminZkClient = new AdminZkClient(zkClient);

    // Create topic
    adminZkClient.createTopic(TOPIC, 1, 1, new Properties(), null);

    // Produce/consume test
    try (KafkaProducer<String, String> producer = kafkaRule.helper().createStringProducer()) {
        producer.send(new ProducerRecord<>(TOPIC, "keyA", "valueA"));
    }

    try (KafkaConsumer<String, String> consumer = kafkaRule.helper().createStringConsumer()) {
        consumer.subscribe(Lists.newArrayList(TOPIC));
        ConsumerRecords<String, String> records = consumer.poll(TEN_SECONDS);
        assertThat(records).isNotNull();
        assertThat(records.isEmpty()).isFalse();

        ConsumerRecord<String, String> msg = records.iterator().next();
        assertThat(msg).isNotNull();
        assertThat(msg.key()).isEqualTo("keyA");
        assertThat(msg.value()).isEqualTo("valueA");
    }
}
 
Example #30
Source File: KafkaServiceImpl.java    From kafka-eagle with Apache License 2.0
/**
 * Obtaining kafka consumer information from zookeeper.
 */
public Map<String, List<String>> getConsumers(String clusterAlias) {
	KafkaZkClient zkc = kafkaZKPool.getZkClient(clusterAlias);
	Map<String, List<String>> consumers = new HashMap<String, List<String>>();
	try {
		Seq<String> subConsumerPaths = zkc.getChildren(CONSUMERS_PATH);
		List<String> groups = JavaConversions.seqAsJavaList(subConsumerPaths);
		for (String group : groups) {
			String path = CONSUMERS_PATH + "/" + group + "/owners";
			if (zkc.pathExists(path)) {
				Seq<String> owners = zkc.getChildren(path);
				List<String> ownersSerialize = JavaConversions.seqAsJavaList(owners);
				consumers.put(group, ownersSerialize);
			} else {
				LOG.error("Consumer Path[" + path + "] is not exist.");
			}
		}
	} catch (Exception ex) {
		LOG.error(ex.getMessage());
	} finally {
		if (zkc != null) {
			kafkaZKPool.release(clusterAlias, zkc);
			zkc = null;
		}
	}
	return consumers;
}