kafka.utils.ZkUtils Java Examples

The following examples show how to use kafka.utils.ZkUtils. Each example is taken from an open source project; the source file and the project it comes from are noted above it.
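Most of the examples below obtain a ZkUtils instance either through the ZkUtils.apply(...) factory or by wiring up a ZkClient and ZkConnection by hand, and close it when done. A minimal sketch of the factory style, assuming a local ZooKeeper at localhost:2181 (connection string and timeouts are placeholders, not taken from any example below):

import java.util.List;

import kafka.utils.ZkUtils;
import scala.collection.JavaConversions;

public class ZkUtilsExample {
    public static void main(String[] args) {
        // Placeholder connection string and timeouts; the last argument is isZkSecurityEnabled.
        ZkUtils zkUtils = ZkUtils.apply("localhost:2181", 30000, 30000, false);
        try {
            // List every topic currently registered in ZooKeeper.
            List<String> topics = JavaConversions.seqAsJavaList(zkUtils.getAllTopics());
            topics.forEach(System.out::println);
        } finally {
            zkUtils.close();
        }
    }
}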
Example #1
Source File: KafkaEmbedded.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
/**
 * Create a Kafka topic with the given parameters.
 *
 * @param topic       The name of the topic.
 * @param partitions  The number of partitions for this topic.
 * @param replication The replication factor for (partitions of) this topic.
 * @param topicConfig Additional topic-level configuration settings.
 */
public void createTopic(String topic,
                        int partitions,
                        int replication,
                        Properties topicConfig) {
  log.debug("Creating topic { name: {}, partitions: {}, replication: {}, config: {} }",
      topic, partitions, replication, topicConfig);
  // Note: You must initialize the ZkClient with ZKStringSerializer.  If you don't, then
  // createTopic() will only seem to work (it will return without error).  The topic will exist
  // only in ZooKeeper and will be returned when listing topics, but Kafka itself does not create
  // the topic.
  ZkClient zkClient = new ZkClient(
      zookeeperConnect(),
      DEFAULT_ZK_SESSION_TIMEOUT_MS,
      DEFAULT_ZK_CONNECTION_TIMEOUT_MS,
      ZKStringSerializer$.MODULE$);
  ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect()), false);
  AdminUtils.createTopic(zkUtils, topic, partitions, replication, topicConfig, RackAwareMode.Enforced$.MODULE$);
  zkClient.close();
}
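A hypothetical call of the method above (instance name assumed), passing a topic-level override through the topicConfig parameter:

Properties topicConfig = new Properties();
topicConfig.put("cleanup.policy", "compact"); // optional topic-level setting
kafkaEmbedded.createTopic("test-topic", 3, 1, topicConfig);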
 
Example #2
Source File: KafkaStoreUtils.java    From data-highway with Apache License 2.0
@SuppressWarnings({ "rawtypes", "unchecked" })
private static void verifyTopic(ZkUtils zkUtils, String topic) {
  Set topics = new HashSet();
  topics.add(topic);

  // check that the topic has exactly one partition
  scala.collection.mutable.Map partitionAssignmentForTopics = zkUtils
      .getPartitionAssignmentForTopics(JavaConversions.asScalaSet(topics).toSeq());
  scala.collection.Map partitionAssignment = (scala.collection.Map) partitionAssignmentForTopics.get(topic).get();

  if (partitionAssignment.size() != 1) {
    throw new RuntimeException(String.format("The schema topic %s should have only 1 partition.", topic));
  }

  // check the retention policy
  Properties prop = AdminUtils.fetchEntityConfig(zkUtils, ConfigType.Topic(), topic);
  String retentionPolicy = prop.getProperty(LogConfig.CleanupPolicyProp());
  if (retentionPolicy == null || "compact".compareTo(retentionPolicy) != 0) {
    throw new RuntimeException(String.format("The retention policy of the schema topic %s must be compact.", topic));
  }
}
 
Example #3
Source File: TrafficControlIntegrationTest.java    From data-highway with Apache License 2.0
@Test
public void topic_created_with_throttle_props() throws Exception {
  JsonNode model = mapper
      .readTree(
          "{\"topicName\":\"test_topic5\",\"status\":{\"topicCreated\":true,\"partitions\":4,\"replicationFactor\":1}}");
  KafkaModelReader modelReader = context.getBean(KafkaModelReader.class);
  TrafficControl agent = context.getBean(TrafficControl.class);
  agent.inspectModel("test", modelReader.read(model));

  ZkUtils zkUtils = ZkUtils.apply(kafka.zKConnectString(), 60000, 60000, false);
  Properties config = AdminUtils.fetchEntityConfig(zkUtils, ConfigType.Topic(), "test_topic5");
  zkUtils.close();

  assertThat(config.getProperty("leader.replication.throttled.replicas"), is("*"));
  assertThat(config.getProperty("follower.replication.throttled.replicas"), is("*"));
}
 
Example #4
Source File: KafkaTestEnvironmentImpl.java    From Flink-CEPplus with Apache License 2.0
@Override
public int getLeaderToShutDown(String topic) throws Exception {
	ZkUtils zkUtils = getZkUtils();
	try {
		MetadataResponse.PartitionMetadata firstPart = null;
		do {
			if (firstPart != null) {
				LOG.info("Unable to find leader. error code {}", firstPart.error().code());
				// not the first try. Sleep a bit
				Thread.sleep(150);
			}

			List<MetadataResponse.PartitionMetadata> partitionMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils).partitionMetadata();
			firstPart = partitionMetadata.get(0);
		}
		while (firstPart.error().code() != 0);

		return firstPart.leader().id();
	} finally {
		zkUtils.close();
	}
}
 
Example #5
Source File: KafkaTestEnvironmentImpl.java    From Flink-CEPplus with Apache License 2.0
@Override
public void deleteTestTopic(String topic) {
	ZkUtils zkUtils = getZkUtils();
	try {
		LOG.info("Deleting topic {}", topic);

		ZkClient zk = new ZkClient(zookeeperConnectionString, Integer.valueOf(standardProps.getProperty("zookeeper.session.timeout.ms")),
			Integer.valueOf(standardProps.getProperty("zookeeper.connection.timeout.ms")), new ZooKeeperStringSerializer());

		AdminUtils.deleteTopic(zkUtils, topic);

		zk.close();
	} finally {
		zkUtils.close();
	}
}
 
Example #6
Source File: KafkaUtilsTest.java    From incubator-samoa with Apache License 2.0
@BeforeClass
public static void setUpClass() throws IOException {
    // setup Zookeeper
    zkServer = new EmbeddedZookeeper();
    zkConnect = ZKHOST + ":" + zkServer.port();
    zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    ZkUtils zkUtils = ZkUtils.apply(zkClient, false);

    // setup Broker
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafkaUtils-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
    KafkaConfig config = new KafkaConfig(brokerProps);
    Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);

    // create topics
    AdminUtils.createTopic(zkUtils, TOPIC_R, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
    AdminUtils.createTopic(zkUtils, TOPIC_S, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);

}
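The embedded broker and ZooKeeper started above are normally shut down once the test class finishes. A minimal teardown sketch, assuming JUnit 4 and the static fields initialized in setUpClass (kafkaServer, zkClient, zkServer):

@AfterClass
public static void tearDownClass() {
    // Shut down in the reverse order of creation.
    if (kafkaServer != null) {
        kafkaServer.shutdown();
        kafkaServer.awaitShutdown();
    }
    if (zkClient != null) {
        zkClient.close();
    }
    if (zkServer != null) {
        zkServer.shutdown();
    }
}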
 
Example #7
Source File: KafkaComponent.java    From metron with Apache License 2.0
public void createTopic(String name, int numPartitions, long waitThisLongForMetadataToPropagate) throws InterruptedException {
  ZkUtils zkUtils = null;
  Level oldLevel = UnitTestHelper.getJavaLoggingLevel();
  try {
    UnitTestHelper.setJavaLoggingLevel(Level.OFF);
    zkUtils = ZkUtils.apply(zookeeperConnectString, 30000, 30000, false);
    AdminUtilsWrapper.createTopic(zkUtils, name, numPartitions, 1, new Properties());
    if (waitThisLongForMetadataToPropagate > 0) {
      waitUntilMetadataIsPropagated(name, numPartitions, waitThisLongForMetadataToPropagate);
    }
  } catch (TopicExistsException tee) {
    // Topic already exists; nothing to do.
  } finally {
    if(zkUtils != null){
      zkUtils.close();
    }
    UnitTestHelper.setJavaLoggingLevel(oldLevel);
  }
}
 
Example #8
Source File: KafkaNodeListener.java    From kmanager with Apache License 2.0
public void startListener() {
	LOG.info("Starting Kafka ZK node listener...");
	exec.execute(new Runnable() {

		@Override
		public void run() {
			ZKUtils.getZKClient().subscribeChildChanges(ZkUtils.BrokerIdsPath(), new IZkChildListener() {

				@Override
				public void handleChildChange(String parentPath, List<String> currentChilds) throws Exception {
					// No-op in this example: react to broker id changes here.
				}
				
			});
		}

	});
}
 
Example #9
Source File: KafkaTestEnvironmentImpl.java    From flink with Apache License 2.0
@Override
public void deleteTestTopic(String topic) {
	ZkUtils zkUtils = getZkUtils();
	try {
		LOG.info("Deleting topic {}", topic);

		ZkClient zk = new ZkClient(zookeeperConnectionString, Integer.valueOf(standardProps.getProperty("zookeeper.session.timeout.ms")),
			Integer.valueOf(standardProps.getProperty("zookeeper.connection.timeout.ms")), new ZooKeeperStringSerializer());

		AdminUtils.deleteTopic(zkUtils, topic);

		zk.close();
	} finally {
		zkUtils.close();
	}
}
 
Example #10
Source File: KafkaTestEnvironmentImpl.java    From flink with Apache License 2.0
@Override
public int getLeaderToShutDown(String topic) throws Exception {
	ZkUtils zkUtils = getZkUtils();
	try {
		PartitionMetadata firstPart = null;
		do {
			if (firstPart != null) {
				LOG.info("Unable to find leader. error code {}", firstPart.errorCode());
				// not the first try. Sleep a bit
				Thread.sleep(150);
			}

			Seq<PartitionMetadata> partitionMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils).partitionsMetadata();
			firstPart = partitionMetadata.head();
		}
		while (firstPart.errorCode() != 0);

		return firstPart.leader().get().id();
	} finally {
		zkUtils.close();
	}
}
 
Example #11
Source File: SamzaExecutor.java    From samza with Apache License 2.0
@Override
public List<String> listTables(ExecutionContext context) throws ExecutorException {
  String address = environmentVariableHandler.getEnvironmentVariable(SAMZA_SQL_SYSTEM_KAFKA_ADDRESS);
  if (address == null || address.isEmpty()) {
    address = DEFAULT_SERVER_ADDRESS;
  }
  try {
    ZkUtils zkUtils = new ZkUtils(new ZkClient(address, DEFAULT_ZOOKEEPER_CLIENT_TIMEOUT),
        new ZkConnection(address), false);
    return JavaConversions.seqAsJavaList(zkUtils.getAllTopics())
      .stream()
      .map(x -> SAMZA_SYSTEM_KAFKA + "." + x)
      .collect(Collectors.toList());
  } catch (ZkTimeoutException ex) {
    throw new ExecutorException(ex);
  }
}
 
Example #12
Source File: KafkaCommand.java    From message-queue-client-framework with Apache License 2.0
/**
 * <p>Title: alterTopic</p>
 * <p>Description: Alters an existing topic's configuration.</p>
 *
 * @param zookeeperStr ZooKeeper connection string
 * @param topic        topic name
 * @param config       config entries; "key=value" entries are set, bare keys are deleted
 */
public static void alterTopic(String zookeeperStr, String topic,
                              String... config) {

    StringBuffer updateOptions = new StringBuffer();

    updateOptions.append("--alter").append(space)
            .append("--topic").append(space).append(topic);

    for (int i = 0; i < config.length; i++) {

        if (config[i].indexOf("=") > 0)

            updateOptions.append(space).append("--config").append(space)
                    .append(config[i]);
        else
            updateOptions.append(space).append("--delete-config")
                    .append(space).append(config[i]);
    }

    TopicCommand.alterTopic(ZkUtils.apply(zookeeperStr,
            sessionTimeout, connectionTimeout,
            JaasUtils.isZkSecurityEnabled()), new TopicCommandOptions(
            updateOptions.toString().split(space)));
}
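A hypothetical invocation with placeholder values: arguments containing "=" are applied with --config, bare keys are removed with --delete-config:

// Sets retention.ms and removes a previously set cleanup.policy override.
KafkaCommand.alterTopic("localhost:2181", "my-topic",
        "retention.ms=86400000", // contains '=' -> --config
        "cleanup.policy");       // no '='       -> --delete-config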
 
Example #13
Source File: KafkaProduceOffsetFetcher.java    From DDMQ with Apache License 2.0
public Map<String, Long> getConsumeOffset(String group, String topic) throws Exception {
    Map<String/* qid */, Long/* consume offset */> ret = new HashMap<>();
    StringBuilder sbConsumeOffsetDir = new StringBuilder();
    sbConsumeOffsetDir.append(ZkUtils.ConsumersPath()).append("/").append(group).append("/offsets/").append(topic);
    String consumeOffsetDir = sbConsumeOffsetDir.toString();

    if (!ZkUtils.pathExists(zkClient, consumeOffsetDir)) {
        return ret;
    }

    for (String id : JavaConverters.asJavaListConverter(ZkUtils.getChildren(zkClient, consumeOffsetDir)).asJava()) {
        try {
            ret.put(id, Long.parseLong(ZkUtils.readData(zkClient, consumeOffsetDir + "/" + id)._1()));
        } catch (Exception e) {
            ret.put(id, -1L);
        }
    }

    return ret;
}
 
Example #14
Source File: KafkaResourceController.java    From pubsub with Apache License 2.0
@Override
protected void startAction() throws Exception {
  ZkClient zookeeperClient =
      new ZkClient(
          KafkaFlags.getInstance().zookeeperIp, 15000, 10000, ZKStringSerializer$.MODULE$);
  ZkUtils zookeeperUtils =
      new ZkUtils(zookeeperClient, new ZkConnection(KafkaFlags.getInstance().zookeeperIp), false);
  try {
    deleteTopic(zookeeperUtils);
    AdminUtils.createTopic(
        zookeeperUtils,
        topic,
        KafkaFlags.getInstance().partitions,
        KafkaFlags.getInstance().replicationFactor,
        AdminUtils.createTopic$default$5(),
        AdminUtils.createTopic$default$6());
    log.info("Created topic " + topic + ".");
  } finally {
    zookeeperClient.close();
  }
}
 
Example #15
Source File: KafkaTestEnvironmentImpl.java    From flink with Apache License 2.0
@Override
public int getLeaderToShutDown(String topic) throws Exception {
	ZkUtils zkUtils = getZkUtils();
	try {
		MetadataResponse.PartitionMetadata firstPart = null;
		do {
			if (firstPart != null) {
				LOG.info("Unable to find leader. error code {}", firstPart.error().code());
				// not the first try. Sleep a bit
				Thread.sleep(150);
			}

			List<MetadataResponse.PartitionMetadata> partitionMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils).partitionMetadata();
			firstPart = partitionMetadata.get(0);
		}
		while (firstPart.error().code() != 0);

		return firstPart.leader().id();
	} finally {
		zkUtils.close();
	}
}
 
Example #16
Source File: Topicutil.java    From dk-fitting with Apache License 2.0
/**
     * Queries all topics, including topics that have been marked for deletion but not yet removed.
     * @return list of topic names
     */
    public static List<String> queryAllTopic() {
        ZkUtils zkUtils = ZkUtils.apply(zkUrl, sessionTimeout, connectionTimeout, JaasUtils.isZkSecurityEnabled());
        ArrayList<String> topics = new ArrayList<String>();
        scala.collection.Map<String, Properties> stringPropertiesMap = AdminUtils.fetchAllTopicConfigs(zkUtils);
        Map<String, Properties> javaMap = JavaConversions.mapAsJavaMap(stringPropertiesMap);
        Iterator<String> iterator = javaMap.keySet().iterator();
        while (iterator.hasNext()) {
            String key = iterator.next();
            topics.add(key);
        }
        zkUtils.close();
        return topics;
    }
 
Example #17
Source File: KafkaStormIntegrationTest.java    From incubator-retired-pirk with Apache License 2.0
private void startKafka() throws Exception
{
  FileUtils.deleteDirectory(new File(kafkaTmpDir));

  Properties props = new Properties();
  props.setProperty("zookeeper.session.timeout.ms", "100000");
  props.put("advertised.host.name", "localhost");
  props.put("port", 11111);
  // props.put("broker.id", "0");
  props.put("log.dir", kafkaTmpDir);
  props.put("enable.zookeeper", "true");
  props.put("zookeeper.connect", zookeeperLocalCluster.getConnectString());
  KafkaConfig kafkaConfig = KafkaConfig.fromProps(props);
  kafkaLocalBroker = new KafkaServer(kafkaConfig, new SystemTime(), scala.Option.apply("kafkaThread"));
  kafkaLocalBroker.startup();

  zkClient = new ZkClient(zookeeperLocalCluster.getConnectString(), 60000, 60000, ZKStringSerializer$.MODULE$);
  ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(zookeeperLocalCluster.getConnectString()), false);
  // ZkUtils zkUtils = ZkUtils.apply(zookeeperLocalCluster.getConnectString(), 60000, 60000, false);
  AdminUtils.createTopic(zkUtils, topic, 1, 1, new Properties());
}
 
Example #18
Source File: KafkaBrokerTopicObserver.java    From uReplicator with Apache License 2.0
public KafkaBrokerTopicObserver(String brokerClusterName, String zkString, long refreshTimeIntervalInMillis) {
  LOGGER.info("Trying to init KafkaBrokerTopicObserver {} with ZK: {}", brokerClusterName,
      zkString);
  _kakfaClusterName = brokerClusterName;
  _refreshTimeIntervalInMillis = refreshTimeIntervalInMillis;
  _zkClient = new ZkClient(zkString, 30000, 30000, ZKStringSerializer$.MODULE$);
  _zkClient.subscribeChildChanges(KAFKA_TOPICS_PATH, this);
  _zkUtils = ZkUtils.apply(_zkClient, false);
  executorService.scheduleAtFixedRate(new Runnable() {
    @Override
    public void run() {
      tryToRefreshCache();
    }
  }, 0, refreshTimeIntervalInMillis, TimeUnit.MILLISECONDS);
  registerMetric();
}
 
Example #19
Source File: TopicTest.java    From hermes with Apache License 2.0
@Test
public void createTopicInTestEnv() {
	String ZOOKEEPER_CONNECT = ""; // fill in the ZooKeeper connect string for the test environment
	ZkClient zkClient = new ZkClient(new ZkConnection(ZOOKEEPER_CONNECT));
	ZkUtils zkUtils = new ZkUtils(zkClient, new ZkConnection(ZOOKEEPER_CONNECT), false);
	zkClient.setZkSerializer(new ZKStringSerializer());
	int partition = 1;
	int replication = 1;
	String topic = String.format("kafka.test_create_topic_p%s_r%s", partition, replication);
	if (AdminUtils.topicExists(zkUtils, topic)) {
		TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils);
		System.out.println(topicMetadata);
		AdminUtils.deleteTopic(zkUtils, topic);
	}
	AdminUtils.createTopic(zkUtils, topic, partition, replication, new Properties());
}
 
Example #20
Source File: KafkaExportITBase.java    From rya with Apache License 2.0
/**
 * setup mini kafka and call the super to setup mini fluo
 */
@Before
public void setupKafka() throws Exception {
    // Install an instance of Rya on the Accumulo cluster.
    installRyaInstance();

    // Setup Kafka.
    zkServer = new EmbeddedZookeeper();
    final String zkConnect = ZKHOST + ":" + zkServer.port();
    zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    zkUtils = ZkUtils.apply(zkClient, false);

    // setup Broker
    final Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT);
    final KafkaConfig config = new KafkaConfig(brokerProps);
    final Time mock = new MockTime();
    kafkaServer = TestUtils.createServer(config, mock);
}
 
Example #21
Source File: KafkaProducerServiceIntegrationTest.java    From vertx-kafka-service with Apache License 2.0
private void createTopic(String topic) {
    final ZkClient zkClient = new ZkClient(zookeeper.getConnectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
    final ZkConnection connection = new ZkConnection(zookeeper.getConnectString());
    final ZkUtils zkUtils = new ZkUtils(zkClient, connection, false);
    AdminUtils.createTopic(zkUtils, topic, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);
    TestUtils.waitUntilMetadataIsPropagated(JavaConversions.asScalaBuffer(Lists.newArrayList(kafkaServer)), topic, 0, 10000);
    zkClient.close();
}
 
Example #22
Source File: KafkaAssignmentGenerator.java    From kafka-assigner with Apache License 2.0
private Map<Integer, String> getRackAssignment(ZkUtils zkUtils) {
    List<Broker> brokers = JavaConversions.seqAsJavaList(zkUtils.getAllBrokersInCluster());
    Map<Integer, String> rackAssignment = Maps.newHashMap();
    if (!disableRackAwareness) {
        for (Broker broker : brokers) {
            scala.Option<String> rack = broker.rack();
            if (rack.isDefined()) {
                rackAssignment.put(broker.id(), rack.get());
            }
        }
    }
    return rackAssignment;
}
 
Example #23
Source File: ZookeeperTestHarness.java    From common-kafka with Apache License 2.0
/**
 * Startup Zookeeper.
 *
 * @throws IOException if an error occurs during Zookeeper initialization.
 */
public void setUp() throws IOException {
    zookeeper = new EmbeddedZookeeper(zookeeperConnect);
    ZkClient zkClient = new ZkClient(zookeeperConnect, zkSessionTimeout, zkConnectionTimeout, ZKStringSerializer$.MODULE$);
    ZkConnection connection = new ZkConnection(zookeeperConnect, zkSessionTimeout);
    zkUtils = new ZkUtils(zkClient, connection, false);
}
 
Example #24
Source File: MockKafka.java    From kylin with Apache License 2.0
/**
 * Deletes a topic. Deletion may not take effect if the broker does not
 * allow topic deletion.
 *
 * @param topic the topic to delete
 */
public void deleteTopic(String topic) {
    ZkClient zkClient = new ZkClient(zkConnection);
    ZkUtils zkUtils = new ZkUtils(zkClient, zkConnection, false);
    zkClient.setZkSerializer(new ZKStringSerializer());
    AdminUtils.deleteTopic(zkUtils, topic);
    zkClient.close();
}
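Topic deletion only takes effect if the broker permits it. A broker started for tests (as in the embedded-broker setups above) would typically enable it through a broker property, for example:

// Hypothetical broker setting; without it the topic is only marked for deletion.
brokerProps.setProperty("delete.topic.enable", "true");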
 
Example #25
Source File: URPChecker.java    From doctorkafka with Apache License 2.0
public static void main(String[] args) throws Exception {
  CommandLine commandLine = parseCommandLine(args);
  String zookeeper = commandLine.getOptionValue(ZOOKEEPER);

  ZkUtils zkUtils = KafkaUtils.getZkUtils(zookeeper);
  Seq<String> topicsSeq = zkUtils.getAllTopics();
  List<String> topics = scala.collection.JavaConverters.seqAsJavaList(topicsSeq);

  scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>>
      partitionAssignments = zkUtils.getPartitionAssignmentForTopics(topicsSeq);

  Map<String, Integer> replicationFactors = new HashMap<>();
  Map<String, Integer> partitionCounts = new HashMap<>();

  topics.stream().forEach(topic -> {
    int partitionCount = partitionAssignments.get(topic).get().size();
    int factor = partitionAssignments.get(topic).get().head()._2().size();
    partitionCounts.put(topic, partitionCount);
    replicationFactors.put(topic, factor);
  });

  List<PartitionInfo> urps = KafkaClusterManager.getUnderReplicatedPartitions(
      zookeeper, SecurityProtocol.PLAINTEXT, null, topics, partitionAssignments, replicationFactors, partitionCounts);

  for (PartitionInfo partitionInfo : urps) {
    LOG.info("under-replicated : {}", partitionInfo);
  }
}
 
Example #26
Source File: Utils.java    From kafka-monitor with Apache License 2.0
public static int getPartitionNumByTopic(String zk, String topic) {
    ZkUtils zkUtils = ZkUtils.apply(zk, ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS, JaasUtils.isZkSecurityEnabled());

    try {
        return zkUtils.getPartitionsForTopics(JavaConversions.asScalaBuffer(Arrays.asList(topic))).apply(topic).size();
    } finally {
        zkUtils.close();
    }
}