org.apache.kafka.common.security.JaasUtils Java Examples

The following examples show how to use org.apache.kafka.common.security.JaasUtils. They are drawn from open-source projects; the source file, originating project, and license are noted above each example.
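
Most of these examples follow the same basic pattern: the result of JaasUtils.isZkSecurityEnabled() is passed to a ZooKeeper client factory so that the connection honours any JAAS/SASL configuration supplied via the java.security.auth.login.config system property. The sketch below condenses that pattern using the older ZkUtils/AdminUtils admin API seen throughout these examples; the connection string, timeouts, and topic name are placeholder assumptions, not values taken from any of the projects listed here.

import java.util.Properties;

import kafka.admin.AdminUtils;
import kafka.admin.RackAwareMode;
import kafka.utils.ZkUtils;
import org.apache.kafka.common.security.JaasUtils;

public class JaasUtilsUsageSketch {

    public static void main(String[] args) {
        // Placeholder connection settings (assumptions); adjust for your environment.
        String zkConnect = "127.0.0.1:2181";
        int sessionTimeoutMs = 30000;
        int connectionTimeoutMs = 30000;

        // isZkSecurityEnabled() reports whether a JAAS login configuration for the
        // ZooKeeper client is present, so the connection uses SASL only when required.
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, sessionTimeoutMs, connectionTimeoutMs,
                JaasUtils.isZkSecurityEnabled());
        try {
            // Create an example topic if it does not exist yet (1 partition, 1 replica).
            if (!AdminUtils.topicExists(zkUtils, "example-topic")) {
                AdminUtils.createTopic(zkUtils, "example-topic", 1, 1,
                        new Properties(), RackAwareMode.Enforced$.MODULE$);
            }
        } finally {
            zkUtils.close();
        }
    }
}
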
Example #1
Source File: LocalKafkaServer.java    From Krackle with Apache License 2.0
public LocalKafkaServer() throws IOException {

		while (new File(logDir).exists()) {
			FileUtils.deleteDirectory(new File(logDir));
		}

		Properties props = new Properties();
		props.put("broker.id", nodeId);
		props.put("port", port);
		props.put("log.dir", logDir);
		props.put("zookeeper.connect", zkConnect);
		props.put("host.name", "127.0.0.1");
		KafkaConfig conf = new KafkaConfig(props);

		zkUtils = ZkUtils.apply(props.getProperty("zookeeper.connect"),
				30000,
				30000,
				JaasUtils.isZkSecurityEnabled());

		server = new KafkaServerStartable(conf);
		server.startup();
	}
 
Example #2
Source File: Topicutil.java    From dk-fitting with Apache License 2.0
/**
     * Query all topics, including topics that have been marked for deletion but not yet removed.
     * @return list of topic names
     */
    public static List<String> queryAllTopic(){
        ZkUtils zkUtils = ZkUtils.apply(zkUrl, sessionTimeout, connectionTimeout, JaasUtils.isZkSecurityEnabled());
        ArrayList<String> topics = new ArrayList<String>();
        scala.collection.Map<String, Properties> stringPropertiesMap = AdminUtils.fetchAllTopicConfigs(zkUtils);
        Map<String, Properties> javaMap = JavaConversions.mapAsJavaMap(stringPropertiesMap);
        // Only the topic names are needed; the per-topic Properties are ignored.
        topics.addAll(javaMap.keySet());
        zkUtils.close();
        return topics;
    }
 
Example #3
Source File: KerberosLogin.java    From datacollector with Apache License 2.0
private String getServiceName(Map<String, ?> configs, String loginContext) {
  String jaasServiceName = null;
  try {
    jaasServiceName = JaasUtils.jaasConfig(loginContext, JaasUtils.SERVICE_NAME);
  } catch (IOException e) {
    //throw new KafkaException("Jaas configuration not found", e);
    log.warn("Jaas configuration not found", e);
  }
  String configServiceName = (String) configs.get(SaslConfigs.SASL_KERBEROS_SERVICE_NAME);
  if (jaasServiceName != null && configServiceName != null && !jaasServiceName.equals(configServiceName)) {
    String message = "Conflicting serviceName values found in JAAS and Kafka configs " +
        "value in JAAS file " + jaasServiceName + ", value in Kafka config " + configServiceName;
    throw new IllegalArgumentException(message);
  }

  if (jaasServiceName != null)
    return jaasServiceName;
  if (configServiceName != null)
    return configServiceName;

  throw new IllegalArgumentException("No serviceName defined in either JAAS or Kafka config");
}
 
Example #4
Source File: Login.java    From datacollector with Apache License 2.0
private synchronized LoginContext login(final String loginContextName) throws LoginException {
    String jaasConfigFile = System.getProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM);
    if (jaasConfigFile == null) {
        throw new IllegalArgumentException("You must pass " + JaasUtils.JAVA_LOGIN_CONFIG_PARAM + " in secure mode.");
    }
    AppConfigurationEntry[] configEntries = Configuration.getConfiguration().getAppConfigurationEntry(loginContextName);
    if (configEntries == null) {
        String errorMessage = "Could not find a '" + loginContextName + "' entry in `" + jaasConfigFile + "`.";
        throw new IllegalArgumentException(errorMessage);
    }

    LoginContext loginContext = new LoginContext(loginContextName, callbackHandler);
    loginContext.login();
    log.info("Successfully logged in.");
    return loginContext;
}
 
Example #5
Source File: KafkaProducer09IT.java    From datacollector with Apache License 2.0
@BeforeClass
public static void setUpClass() throws Exception {
  int zkConnectionTimeout = 6000;
  int zkSessionTimeout = 6000;

  zookeeper = new EmbeddedZookeeper();
  zkConnect = String.format("127.0.0.1:%d", zookeeper.port());
  zkUtils = ZkUtils.apply(
      zkConnect, zkSessionTimeout, zkConnectionTimeout,
      JaasUtils.isZkSecurityEnabled());

  port = NetworkUtils.getRandomPort();
  kafkaServer = TestUtil09.createKafkaServer(port, zkConnect);
  for (int i = 0; i < topics.length; i++) {
    topics[i] = UUID.randomUUID().toString();
    AdminUtils.createTopic(zkUtils, topics[i], 1, 1, new Properties());

    TestUtils.waitUntilMetadataIsPropagated(
        scala.collection.JavaConversions.asScalaBuffer(Arrays.asList(kafkaServer)),
        topics[i], 0, 5000);
  }
}
 
Example #6
Source File: EmbeddedKafkaBroker.java    From ameliant-tools with Apache License 2.0
@Override
protected void before() throws Throwable {
    logDirectory = tempDir(perTest("kafka-log"));
    Properties properties = brokerDefinition.getProperties();
    properties.setProperty(KafkaConfig.LogDirProp(), logDirectory.getCanonicalPath());
    kafkaServer = new KafkaServer(new KafkaConfig(properties),
            SystemTime$.MODULE$, Some$.MODULE$.apply("kafkaServer"));
    kafkaServer.startup();

    List<TopicDefinition> topicDefinitions = brokerDefinition.getTopicDefinitions();
    if (!topicDefinitions.isEmpty()) {
        ZkUtils zkUtils = ZkUtils.apply(brokerDefinition.getZookeeperConnect(), 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        for (TopicDefinition topicDefinition : topicDefinitions) {
            String name = topicDefinition.getName();
            log.info("Creating topic {}", name);
            AdminUtils.createTopic(zkUtils,
                    name,
                    topicDefinition.getPartitions(),
                    topicDefinition.getReplicationFactor(),
                    topicDefinition.getProperties());
        }
    }
}
 
Example #7
Source File: KafkaCommand.java    From message-queue-client-framework with Apache License 2.0
/**
 * <p>Title: alterTopic</p>
 * <p>Description: Alter a topic</p>
 *
 * @param zookeeperStr ZooKeeper connection string
 * @param topic        topic name
 * @param partitions   number of partitions
 * @param config       configuration parameters (key=value to set, bare key to delete)
 */
public static void alterTopic(String zookeeperStr, String topic,
                              int partitions, String... config) {

    StringBuffer updateOptions = new StringBuffer();

    updateOptions.append("--alter").append(space)
            .append("--topic").append(space).append(topic).append(space)
            .append("--partitions").append(space).append(partitions);

    for (int i = 0; i < config.length; i++) {
        if (config[i].indexOf("=") > 0) {
            updateOptions.append(space).append("--config").append(space)
                    .append(config[i]);
        } else {
            updateOptions.append(space).append("--delete-config")
                    .append(space).append(config[i]);
        }
    }

    TopicCommand.alterTopic(ZkUtils.apply(zookeeperStr,
            sessionTimeout, connectionTimeout,
            JaasUtils.isZkSecurityEnabled()), new TopicCommandOptions(
            updateOptions.toString().split(space)));
}
 
Example #8
Source File: KafkaCommand.java    From message-queue-client-framework with Apache License 2.0
/**
 * <p>Title: alterTopic</p>
 * <p>Description: Alter a topic</p>
 *
 * @param zookeeperStr ZooKeeper connection string
 * @param topic        topic name
 * @param config       configuration parameters (key=value to set, bare key to delete)
 */
public static void alterTopic(String zookeeperStr, String topic,
                              String... config) {

    StringBuffer updateOptions = new StringBuffer();

    updateOptions.append("--alter").append(space)
            .append("--topic").append(space).append(topic);

    for (int i = 0; i < config.length; i++) {
        if (config[i].indexOf("=") > 0) {
            updateOptions.append(space).append("--config").append(space)
                    .append(config[i]);
        } else {
            updateOptions.append(space).append("--delete-config")
                    .append(space).append(config[i]);
        }
    }

    TopicCommand.alterTopic(ZkUtils.apply(zookeeperStr,
            sessionTimeout, connectionTimeout,
            JaasUtils.isZkSecurityEnabled()), new TopicCommandOptions(
            updateOptions.toString().split(space)));
}
 
Example #9
Source File: MultiClusterTopicManagementService.java    From kafka-monitor with Apache License 2.0
void maybeElectLeader() throws Exception {
  if (!_preferredLeaderElectionRequested) {
    return;
  }

  try (KafkaZkClient zkClient = KafkaZkClient.apply(_zkConnect, JaasUtils.isZkSecurityEnabled(), com.linkedin.kmf.common.Utils.ZK_SESSION_TIMEOUT_MS,
      com.linkedin.kmf.common.Utils.ZK_CONNECTION_TIMEOUT_MS, Integer.MAX_VALUE, Time.SYSTEM, METRIC_GROUP_NAME, "SessionExpireListener", null)) {
    if (!zkClient.reassignPartitionsInProgress()) {
      List<TopicPartitionInfo> partitionInfoList = _adminClient
          .describeTopics(Collections.singleton(_topic)).all().get().get(_topic).partitions();
      LOGGER.info(
          "MultiClusterTopicManagementService will trigger requested preferred leader election for the"
              + " topic {} in cluster.", _topic);
      triggerPreferredLeaderElection(partitionInfoList, _topic);
      _preferredLeaderElectionRequested = false;
    }
  }
}
 
Example #10
Source File: Topicutil.java    From dk-fitting with Apache License 2.0
/**
 * Create a Kafka topic.
 * @param topicName topic name
 * @param partitions number of partitions
 * @param replicationFactor number of replicas of the topic's data, default is 3
 *
 */
public static void createTopic(String topicName, int partitions, int replicationFactor){

    ZkUtils zkUtils = ZkUtils.apply(zkUrl, sessionTimeout, connectionTimeout, JaasUtils.isZkSecurityEnabled());
    // Create the topic with the given partition count and replication factor
    AdminUtils.createTopic(zkUtils, topicName, partitions, replicationFactor, new Properties(), RackAwareMode.Enforced$.MODULE$);
    zkUtils.close();

}
 
Example #11
Source File: KafkaValidationUtil09IT.java    From datacollector with Apache License 2.0
@BeforeClass
public static void setUp() throws IOException {
  int zkConnectionTimeout = 6000;
  int zkSessionTimeout = 6000;

  zookeeper = new EmbeddedZookeeper();
  zkConnect = String.format("127.0.0.1:%d", zookeeper.port());
  zkUtils = ZkUtils.apply(
    zkConnect, zkSessionTimeout, zkConnectionTimeout,
    JaasUtils.isZkSecurityEnabled());

  port = NetworkUtils.getRandomPort();
  kafkaServer = TestUtil09.createKafkaServer(port, zkConnect, false);
  sdcKafkaValidationUtil = SdcKafkaValidationUtilFactory.getInstance().create();
}
 
Example #12
Source File: KafkaCommand.java    From message-queue-client-framework with Apache License 2.0
/**
 * <p>Title: deleteTopic</p>
 * <p>Description: Delete a topic</p>
 *
 * @param zookeeperStr ZooKeeper connection string
 * @param topic        topic name
 */
public static void deleteTopic(String zookeeperStr, String topic) {

    TopicCommand.deleteTopic(ZkUtils.apply(zookeeperStr,
            sessionTimeout, connectionTimeout,
            JaasUtils.isZkSecurityEnabled()), new TopicCommandOptions(
            new String[]{"--delete", "--topic", topic}));
}
 
Example #13
Source File: KafkaCommand.java    From message-queue-client-framework with Apache License 2.0
/**
 * <p>Title: alterTopic</p>
 * <p>Description: Alter a topic</p>
 *
 * @param zookeeperStr ZooKeeper connection string
 * @param topic        topic name
 * @param partitions   number of partitions
 */
public static void alterTopic(String zookeeperStr, String topic,
                              int partitions) {

    TopicCommand.alterTopic(ZkUtils.apply(zookeeperStr,
            sessionTimeout, connectionTimeout,
            JaasUtils.isZkSecurityEnabled()), new TopicCommandOptions(
            new String[]{"--alter", "--topic", topic,
                    "--partitions", String.valueOf(partitions)}));
}
 
Example #14
Source File: KafkaCommand.java    From message-queue-client-framework with Apache License 2.0
/**
 * <p>Title: describeTopic</p>
 * <p>Description: Describe a topic</p>
 *
 * @param zookeeperStr ZooKeeper connection string
 * @param topic        topic name
 */
public static void describeTopic(String zookeeperStr, String topic) {

    TopicCommand.describeTopic(ZkUtils.apply(zookeeperStr,
            sessionTimeout, connectionTimeout,
            JaasUtils.isZkSecurityEnabled()), new TopicCommandOptions(
            new String[]{"--describe", "--topic", topic}));
}
 
Example #15
Source File: KafkaCommand.java    From message-queue-client-framework with Apache License 2.0
/**
 * <p>Title: createTopic</p>
 * <p>Description: Create a topic</p>
 *
 * @param zookeeperStr ZooKeeper connection string
 * @param topic        topic name
 * @param replications replication factor
 * @param partitions   number of partitions
 */
public static void createTopic(String zookeeperStr, String topic,
                               int replications, int partitions) {

    TopicCommand.createTopic(ZkUtils.apply(zookeeperStr,
            sessionTimeout, connectionTimeout,
            JaasUtils.isZkSecurityEnabled()), new TopicCommandOptions(
            new String[]{"--create", "--topic", topic,
                    "--replication-factor", String.valueOf(replications),
                    "--partitions", String.valueOf(partitions)}));
}
 
Example #16
Source File: KafkaJunitRuleTest.java    From kafka-junit with Apache License 2.0
@Test
public void testKafkaServerIsUp() {
    // Setup Zookeeper client
    final String zkConnectionString = kafkaRule.helper().zookeeperConnectionString();
    final ZooKeeperClient zooKeeperClient = new ZooKeeperClient(zkConnectionString, 2000, 8000, Integer.MAX_VALUE, Time.SYSTEM, "kafka.server", "SessionExpireListener");
    final KafkaZkClient zkClient = new KafkaZkClient(zooKeeperClient, JaasUtils.isZkSaslEnabled(), Time.SYSTEM);
    final AdminZkClient adminZkClient = new AdminZkClient(zkClient);

    // Create topic
    adminZkClient.createTopic(TOPIC, 1, 1, new Properties(), null);

    // Produce/consume test
    try (KafkaProducer<String, String> producer = kafkaRule.helper().createStringProducer()) {
        producer.send(new ProducerRecord<>(TOPIC, "keyA", "valueA"));
    }

    try (KafkaConsumer<String, String> consumer = kafkaRule.helper().createStringConsumer()) {
        consumer.subscribe(Lists.newArrayList(TOPIC));
        ConsumerRecords<String, String> records = consumer.poll(TEN_SECONDS);
        assertThat(records).isNotNull();
        assertThat(records.isEmpty()).isFalse();

        ConsumerRecord<String, String> msg = records.iterator().next();
        assertThat(msg).isNotNull();
        assertThat(msg.key()).isEqualTo("keyA");
        assertThat(msg.value()).isEqualTo("valueA");
    }
}
 
Example #17
Source File: EmbeddedSingleNodeKafkaCluster.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
private static void createServerJaasConfig() {
  try {
    final String jaasConfigContent = createJaasConfigContent();
    final File jaasConfig = TestUtils.tempFile();
    Files.write(jaasConfigContent, jaasConfig, StandardCharsets.UTF_8);

    System.setProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM, jaasConfig.getAbsolutePath());
    System.setProperty(JaasUtils.ZK_SASL_CLIENT, "false");
  } catch (final Exception e) {
    throw new RuntimeException(e);
  }
}
 
Example #18
Source File: KafkaConsumer.java    From cubeai with Apache License 2.0
private void createKafkaTopics() {
    log.info("--------------------------------------------------------------------------");
    log.info("-----------------------Begin to create Kafka topics-----------------------");

    ZkUtils zkUtils = ZkUtils.apply(zkNodes + ":2181", 30000, 30000, JaasUtils.isZkSecurityEnabled());

    if (!AdminUtils.topicExists(zkUtils, "async-task-topic")) {
        AdminUtils.createTopic(zkUtils, "async-task-topic", 1, 1,  new Properties(), new RackAwareMode.Enforced$());
    }

    zkUtils.close();

    log.info("-----------------------Kafka topics created-------------------------------");
    log.info("--------------------------------------------------------------------------");
}
 
Example #19
Source File: Topicutil.java    From dk-fitting with Apache License 2.0
/**
 * Check whether a topic already exists, including topics that have been marked for deletion but not yet removed.
 * @param topicName topic name
 * @return true if the topic exists
 */
public static boolean topicIsExists(String topicName){
    ZkUtils zkUtils = ZkUtils.apply(zkUrl, sessionTimeout, connectionTimeout, JaasUtils.isZkSecurityEnabled());
    boolean topicExists = AdminUtils.topicExists(zkUtils, topicName);
    zkUtils.close();
    return topicExists;
}
 
Example #20
Source File: Topicutil.java    From dk-fitting with Apache License 2.0
/**
 * Delete a topic. This only marks the topic for deletion; it is not removed immediately.
 * @param topicName topic name
 */
public static void deleteTopic(String topicName) {
    ZkUtils zkUtils = ZkUtils.apply(zkUrl, sessionTimeout, connectionTimeout, JaasUtils.isZkSecurityEnabled());
    // Mark the topic for deletion
    AdminUtils.deleteTopic(zkUtils, topicName);
    zkUtils.close();
}
 
Example #21
Source File: KafkaCommand.java    From message-queue-client-framework with Apache License 2.0
/**
 * <p>Title: listTopics</p>
 * <p>Description: List all topics</p>
 *
 * @param zookeeperStr ZooKeeper connection string
 */
public static void listTopics(String zookeeperStr) {

    TopicCommand.listTopics(ZkUtils.apply(zookeeperStr,
            sessionTimeout, connectionTimeout,
            JaasUtils.isZkSecurityEnabled()), new TopicCommandOptions(
            new String[]{"--list"}));
}
 
Example #22
Source File: Utils.java    From kafka-monitor with Apache License 2.0
public static int getPartitionNumByTopic(String zk, String topic) {
    ZkUtils zkUtils = ZkUtils.apply(zk, ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS, JaasUtils.isZkSecurityEnabled());

    try {
        return zkUtils.getPartitionsForTopics(JavaConversions.asScalaBuffer(Arrays.asList(topic))).apply(topic).size();
    } finally {
        zkUtils.close();
    }
}
 
Example #23
Source File: KafkaTestUtil.java    From AthenaX with Apache License 2.0
public static boolean createKafkaTopicIfNecessary(String brokerUri, int replFactor, int numPartitions, String topic)
    throws IOException {
  URI zkUri = URI.create(brokerUri);
  Preconditions.checkArgument("zk".equals(zkUri.getScheme()));
  String zkServerList = zkUri.getAuthority() + zkUri.getPath();

  ZkUtils zkUtils = ZkUtils.apply(zkServerList, ZK_SESSION_TIMEOUT_MS,
      ZK_CONNECTION_TIMEOUT_MS, JaasUtils.isZkSecurityEnabled());
  try {
    if (AdminUtils.topicExists(zkUtils, topic)) {
      return false;
    }

    try {
      AdminUtils.createTopic(zkUtils, topic, numPartitions, replFactor, new Properties());
    } catch (TopicExistsException ignored) {
      return false;
    } catch (RuntimeException e) {
      throw new IOException(e);
    }
  } finally {
    if (zkUtils != null) {
      zkUtils.close();
    }
  }
  return true;
}
 
Example #24
Source File: KafkaBridge.java    From atlas with Apache License 2.0
public KafkaBridge(Configuration atlasConf, AtlasClientV2 atlasClientV2) throws Exception {
    String   zookeeperConnect    = getZKConnection(atlasConf);
    int      sessionTimeOutMs    = atlasConf.getInt(ZOOKEEPER_SESSION_TIMEOUT_MS, DEFAULT_ZOOKEEPER_SESSION_TIMEOUT_MS) ;
    int      connectionTimeOutMs = atlasConf.getInt(ZOOKEEPER_CONNECTION_TIMEOUT_MS, DEFAULT_ZOOKEEPER_CONNECTION_TIMEOUT_MS);
    ZkClient zkClient            = new ZkClient(zookeeperConnect, sessionTimeOutMs, connectionTimeOutMs, ZKStringSerializer$.MODULE$);

    this.atlasClientV2     = atlasClientV2;
    this.metadataNamespace = getMetadataNamespace(atlasConf);
    this.zkUtils           = new ZkUtils(zkClient, new ZkConnection(zookeeperConnect), JaasUtils.isZkSecurityEnabled());
    this.availableTopics   = scala.collection.JavaConversions.seqAsJavaList(zkUtils.getAllTopics());
}
 
Example #25
Source File: KafkaJunitExtensionTest.java    From kafka-junit with Apache License 2.0
@Test
void testKafkaServerIsUp(KafkaHelper kafkaHelper) {
    // Setup Zookeeper client
    final String zkConnectionString = kafkaHelper.zookeeperConnectionString();
    final ZooKeeperClient zooKeeperClient = new ZooKeeperClient(zkConnectionString, 2000, 8000, Integer.MAX_VALUE, Time.SYSTEM, "kafka.server", "SessionExpireListener");
    final KafkaZkClient zkClient = new KafkaZkClient(zooKeeperClient, JaasUtils.isZkSaslEnabled(), Time.SYSTEM);
    final AdminZkClient adminZkClient = new AdminZkClient(zkClient);

    // Create topic
    adminZkClient.createTopic(TOPIC, 1, 1, new Properties(), null);

    // Produce/consume test
    try (KafkaProducer<String, String> producer = kafkaHelper.createStringProducer()) {
        producer.send(new ProducerRecord<>(TOPIC, "keyA", "valueA"));
    }

    try (KafkaConsumer<String, String> consumer = kafkaHelper.createStringConsumer()) {
        consumer.subscribe(Lists.newArrayList(TOPIC));
        ConsumerRecords<String, String> records = consumer.poll(10000);
        Assertions.assertAll(() -> assertThat(records).isNotNull(),
                             () -> assertThat(records.isEmpty()).isFalse());

        ConsumerRecord<String, String> msg = records.iterator().next();
        Assertions.assertAll(() -> assertThat(msg).isNotNull(),
                             () -> assertThat(msg.key()).isEqualTo("keyA"),
                             () -> assertThat(msg.value()).isEqualTo("valueA"));
    }
}
 
Example #26
Source File: EmbeddedKafka.java    From mongo-kafka with Apache License 2.0
/** Creates and starts the cluster. */
public void start() throws Exception {
  LOGGER.debug("Initiating embedded Kafka cluster startup");
  LOGGER.debug("Starting a ZooKeeper instance...");
  zookeeper = new ZooKeeperEmbedded();
  LOGGER.debug("ZooKeeper instance is running at {}", zookeeper.connectString());

  zkClient =
      KafkaZkClient.apply(
          zookeeper.connectString(),
          JaasUtils.isZkSecurityEnabled(),
          30000,
          30000,
          1000,
          new MockTime(),
          "kafka.server",
          "SessionExpireListener");

  final Properties effectiveBrokerConfig = effectiveBrokerConfigFrom(brokerConfig, zookeeper);
  LOGGER.debug(
      "Starting a Kafka instance on port {} ...",
      effectiveBrokerConfig.getProperty(KafkaConfig$.MODULE$.PortProp()));
  broker = new KafkaEmbedded(effectiveBrokerConfig, new MockTime());
  LOGGER.debug(
      "Kafka instance is running at {}, connected to ZooKeeper at {}",
      broker.brokerList(),
      broker.zookeeperConnect());

  final Properties schemaRegistryProps = new Properties();
  schemaRegistryProps.put(
      SchemaRegistryConfig.KAFKASTORE_TIMEOUT_CONFIG, KAFKASTORE_OPERATION_TIMEOUT_MS);
  schemaRegistryProps.put(SchemaRegistryConfig.DEBUG_CONFIG, KAFKASTORE_DEBUG);
  schemaRegistryProps.put(
      SchemaRegistryConfig.KAFKASTORE_INIT_TIMEOUT_CONFIG, KAFKASTORE_INIT_TIMEOUT);

  schemaRegistry =
      new RestApp(
          0,
          zookeeperConnect(),
          KAFKA_SCHEMAS_TOPIC,
          AVRO_COMPATIBILITY_TYPE,
          schemaRegistryProps);
  schemaRegistry.start();

  LOGGER.debug("Starting a Connect standalone instance...");
  connect = new ConnectStandalone(connectWorkerConfig());
  connect.start();
  LOGGER.debug("Connect standalone instance is running at {}", connect.getConnectionString());
  running = true;
}
 
Example #27
Source File: NewReceiverWithSpringTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void before() {

    try {

        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());
        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated
        // (Kafka currently allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");

        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "1", "--partitions", "1"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);
    } catch (Exception e) {
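        // Note: exceptions during broker setup are swallowed; failures surface in the tests that rely on this broker.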
    }
}
 
Example #28
Source File: KafkaMessageReceiverImplTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void before() {

    try {

        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());

        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated
        // (Kafka currently allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");

        Properties kafkaProps2 = TestUtils.createBrokerConfig(brokerId + 1,
                zkConnect, false, false, (port - 1),
                noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true, false,
                TestUtils.RandomPort(), false, TestUtils.RandomPort(), false,
                TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());

        kafkaProps2.setProperty("auto.create.topics.enable", "true");
        kafkaProps2.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated
        // (Kafka currently allocates one port that it always uses for ZK).
        kafkaProps2.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps2.setProperty("host.name", "localhost");
        kafkaProps2.setProperty("port", (port - 1) + "");

        KafkaConfig config = new KafkaConfig(kafkaProps);
        KafkaConfig config2 = new KafkaConfig(kafkaProps2);

        Time mock = new SystemTime();
        Time mock2 = new SystemTime();

        kafkaServer = TestUtils.createServer(config, mock);
        KafkaServer kafkaServer2 = TestUtils.createServer(config2, mock2);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "2", "--partitions", "2"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        servers.add(kafkaServer2);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);
    } catch (Exception e) {
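        // Note: exceptions during broker setup are swallowed; failures surface in the tests that rely on these brokers.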
    }
}
 
Example #29
Source File: KafkaMessageSenderImplTest.java    From message-queue-client-framework with Apache License 2.0
@Before
public void before() {

    try {

        zkServer = new EmbeddedZookeeper();
        zkConnect = String.format("localhost:%d", zkServer.port());
        ZkUtils zkUtils = ZkUtils.apply(zkConnect, 30000, 30000,
                JaasUtils.isZkSecurityEnabled());
        zkClient = zkUtils.zkClient();

        Time mock = new SystemTime();
        final Option<File> noFile = scala.Option.apply(null);
        final Option<SecurityProtocol> noInterBrokerSecurityProtocol = scala.Option.apply(null);
        final Option<Properties> noPropertiesOption = scala.Option.apply(null);
        final Option<String> noStringOption = scala.Option.apply(null);

        kafkaProps = TestUtils.createBrokerConfig(brokerId, zkConnect, false,
                false, port, noInterBrokerSecurityProtocol, noFile, noPropertiesOption, true,
                false, TestUtils.RandomPort(), false, TestUtils.RandomPort(),
                false, TestUtils.RandomPort(), noStringOption, TestUtils.RandomPort());

        kafkaProps.setProperty("auto.create.topics.enable", "true");
        kafkaProps.setProperty("num.partitions", "1");
        // We *must* override this to use the port we allocated
        // (Kafka currently allocates one port that it always uses for ZK).
        kafkaProps.setProperty("zookeeper.connect", this.zkConnect);
        kafkaProps.setProperty("host.name", "localhost");
        kafkaProps.setProperty("port", port + "");

        KafkaConfig config = new KafkaConfig(kafkaProps);
        kafkaServer = TestUtils.createServer(config, mock);

        // create topic
        TopicCommand.TopicCommandOptions options = new TopicCommand.TopicCommandOptions(
                new String[]{"--create", "--topic", topic,
                        "--replication-factor", "1", "--partitions", "1"});

        TopicCommand.createTopic(zkUtils, options);

        List<KafkaServer> servers = new ArrayList<KafkaServer>();
        servers.add(kafkaServer);
        TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic,
                0, 5000);
    } catch (Exception e) {
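        // Note: exceptions during broker setup are swallowed; failures surface in the tests that rely on this broker.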
    }
}