Java Code Examples for org.apache.kafka.common.utils.Utils#propsToStringMap()

The following examples show how to use org.apache.kafka.common.utils.Utils#propsToStringMap(). You can go to the original project or source file by following the links above each example.
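At its core, Utils.propsToStringMap converts a java.util.Properties instance into a Map<String, String> by calling toString() on each key and value. A minimal, self-contained sketch of that conversion (the property names below are illustrative):

import java.util.Map;
import java.util.Properties;
import org.apache.kafka.common.utils.Utils;

public class PropsToStringMapSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("bootstrap.servers", "localhost:9092");
    props.setProperty("group.id", "example-group");

    // Every key and value is converted via toString(), so plain string
    // properties round-trip cleanly; non-string values may not (see the
    // Password handling in the examples below).
    Map<String, String> map = Utils.propsToStringMap(props);
    System.out.println(map);
  }
}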
Example 1
Source File: MirusOffsetTool.java    From mirus with BSD 3-Clause "New" or "Revised" License
private static MirusOffsetTool newOffsetTool(Args args) throws IOException {
  // This needs to be the admin topic properties.
  // By default these are in the worker properties file, as this has the admin producer and
  // consumer settings.  Separating these might be wise - also useful for storing state in
  // source cluster if it proves necessary.
  final Map<String, String> properties =
      !args.propertiesFile.isEmpty()
          ? Utils.propsToStringMap(Utils.loadProps(args.propertiesFile))
          : Collections.emptyMap();
  final DistributedConfig config = new DistributedConfig(properties);
  final KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore();
  offsetBackingStore.configure(config);

  // Avoid initializing the entire Kafka Connect plugin system by assuming the
  // internal.[key|value].converter is org.apache.kafka.connect.json.JsonConverter
  final Converter internalConverter = new JsonConverter();
  internalConverter.configure(config.originalsWithPrefix("internal.key.converter."), true);

  final OffsetSetter offsetSetter = new OffsetSetter(internalConverter, offsetBackingStore);
  final OffsetFetcher offsetFetcher = new OffsetFetcher(config, internalConverter);
  final OffsetSerDe offsetSerDe = OffsetSerDeFactory.create(args.format);

  return new MirusOffsetTool(args, offsetFetcher, offsetSetter, offsetSerDe);
}
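The load-or-default pattern at the top of this method (Utils.loadProps followed by Utils.propsToStringMap, falling back to an empty map) can be captured in a small helper. This is a sketch only; the class and method names are not part of Mirus:

import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import org.apache.kafka.common.utils.Utils;

final class WorkerPropsLoader {

  // Load a properties file into a String map, or return an empty map when no file is given.
  static Map<String, String> loadOrEmpty(String propertiesFile) throws IOException {
    return propertiesFile == null || propertiesFile.isEmpty()
        ? Collections.emptyMap()
        : Utils.propsToStringMap(Utils.loadProps(propertiesFile));
  }
}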
 
Example 2
Source File: ClusterStatusSASLTest.java    From common-docker with Apache License 2.0
@Test(timeout = 120000)
public void isKafkaReadyWithSASLAndSSL() throws Exception {
  Properties clientSecurityProps = kafka.getClientSecurityConfig();

  Map<String, String> config = Utils.propsToStringMap(clientSecurityProps);
  config.put(
      CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG,
      kafka.getBootstrapBroker(SecurityProtocol.SASL_SSL));

  // Set the password and enabled protocols explicitly, as Utils.propsToStringMap just returns
  // toString() representations and these properties don't have a valid string representation.
  Password trustStorePassword = (Password) clientSecurityProps.get("ssl.truststore.password");
  config.put("ssl.truststore.password", trustStorePassword.value());
  config.put("ssl.enabled.protocols", "TLSv1.2");

  assertThat(ClusterStatus.isKafkaReady(config, 3, 10000)).isTrue();
}
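The explicit password handling above is needed because Kafka's Password type masks its value in toString(), so the map produced by Utils.propsToStringMap ends up holding the masked form rather than the secret. A minimal sketch of the round trip, assuming a Password stored directly in the Properties as in the test above:

import java.util.Map;
import java.util.Properties;
import org.apache.kafka.common.config.types.Password;
import org.apache.kafka.common.utils.Utils;

public class PasswordPropsSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.put("ssl.truststore.password", new Password("changeit"));

    // propsToStringMap stores the toString() form, which masks the password...
    Map<String, String> config = Utils.propsToStringMap(props);
    System.out.println(config.get("ssl.truststore.password")); // masked, not "changeit"

    // ...so the real value has to be re-applied from Password#value(), as the test above does.
    Password password = (Password) props.get("ssl.truststore.password");
    config.put("ssl.truststore.password", password.value());
  }
}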
 
Example 3
Source File: ClusterStatusSASLTest.java    From common-docker with Apache License 2.0
@Test(timeout = 120000)
public void isKafkaReadyWithSASLAndSSLUsingZK() throws Exception {
  Properties clientSecurityProps = kafka.getClientSecurityConfig();

  boolean zkReady = ClusterStatus.isZookeeperReady(this.kafka.getZookeeperConnectString(), 30000);
  if (!zkReady) {
    throw new RuntimeException(
        "Could not reach zookeeper " + this.kafka.getZookeeperConnectString());
  }
  Map<String, String> endpoints = ClusterStatus.getKafkaEndpointFromZookeeper(
      this.kafka.getZookeeperConnectString(),
      30000
  );

  String bootstrapBroker = endpoints.get("SASL_SSL");
  Map<String, String> config = Utils.propsToStringMap(clientSecurityProps);
  config.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, bootstrapBroker);

  // Set the password and enabled protocols explicitly, as Utils.propsToStringMap just returns
  // toString() representations and these properties don't have a valid string representation.
  Password trustStorePassword = (Password) clientSecurityProps.get("ssl.truststore.password");
  config.put("ssl.truststore.password", trustStorePassword.value());
  config.put("ssl.enabled.protocols", "TLSv1.2");

  assertThat(ClusterStatus.isKafkaReady(config, 3, 10000)).isTrue();
}
 
Example 4
Source File: ConnectEmbedded.java    From hello-kafka-streams with Apache License 2.0
public ConnectEmbedded(Properties workerConfig, Properties... connectorConfigs) throws Exception {
    Time time = new SystemTime();
    DistributedConfig config = new DistributedConfig(Utils.propsToStringMap(workerConfig));

    KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore();
    offsetBackingStore.configure(config);

    // Not sure if this is going to work, but because we don't have an advertised URL we can at
    // least get a fairly random worker id
    String workerId = UUID.randomUUID().toString();
    worker = new Worker(workerId, time, config, offsetBackingStore);

    StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, worker.getInternalValueConverter());
    statusBackingStore.configure(config);

    ConfigBackingStore configBackingStore = new KafkaConfigBackingStore(worker.getInternalValueConverter());
    configBackingStore.configure(config);

    // advertisedUrl = "" as we don't have the REST server - hopefully this will not break anything
    herder = new DistributedHerder(config, time, worker, statusBackingStore, configBackingStore, "");
    this.connectorConfigs = connectorConfigs;

    shutdownHook = new ShutdownHook();
}
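The constructor above depends entirely on the worker Properties converting into the String map that DistributedConfig expects. The sketch below shows a minimal worker configuration fed through Utils.propsToStringMap; the exact set of required keys varies by Kafka Connect version, so treat the property list as an assumption rather than a definitive minimum:

import java.util.Properties;
import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.connect.runtime.distributed.DistributedConfig;

public class DistributedConfigSketch {
  public static void main(String[] args) {
    Properties workerConfig = new Properties();
    // Typical minimum for a distributed worker; adjust for your Connect version.
    workerConfig.setProperty("bootstrap.servers", "localhost:9092");
    workerConfig.setProperty("group.id", "connect-embedded");
    workerConfig.setProperty("key.converter", "org.apache.kafka.connect.json.JsonConverter");
    workerConfig.setProperty("value.converter", "org.apache.kafka.connect.json.JsonConverter");
    workerConfig.setProperty("offset.storage.topic", "connect-offsets");
    workerConfig.setProperty("config.storage.topic", "connect-configs");
    workerConfig.setProperty("status.storage.topic", "connect-status");

    DistributedConfig config = new DistributedConfig(Utils.propsToStringMap(workerConfig));
    System.out.println(config.getString("group.id"));
  }
}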
 
Example 5
Source File: KafkaConnectRunner.java    From camel-kafka-connector with Apache License 2.0
/**
 * There does not seem to be a public interface for embedding a Kafka Connect runtime;
 * therefore, this code is modeled on the behavior of
 * https://github.com/apache/kafka/blob/2.1/connect/runtime/src/main/java/org/apache/kafka/connect/cli/ConnectStandalone.java
 * and performs the initialization in a roughly similar manner.
 *
 */
private void init() {
    LOG.info("Started worked initialization");

    Time time = Time.SYSTEM;

    // Initializes the system runtime information and logs some of the information
    WorkerInfo initInfo = new WorkerInfo();
    initInfo.logAll();

    Properties props = kafkaConnectPropertyFactory.getProperties();

    Map<String, String> standAloneProperties = Utils.propsToStringMap(props);

    // Not used directly here, but Plugins is needed to initialize the worker
    Plugins plugins = new Plugins(standAloneProperties);

    StandaloneConfig config = new StandaloneConfig(standAloneProperties);
    String kafkaClusterId = ConnectUtils.lookupKafkaClusterId(config);
    AllConnectorClientConfigOverridePolicy allConnectorClientConfigOverridePolicy = new AllConnectorClientConfigOverridePolicy();

    RestServer rest = new RestServer(config);
    rest.initializeServer();

    /*
     According to the Kafka source code "... Worker runs a (dynamic) set of tasks
     in a set of threads, doing the work of actually moving data to/from Kafka ..."
     */
    Worker worker = new Worker(bootstrapServer, time, plugins, config, new FileOffsetBackingStore(), allConnectorClientConfigOverridePolicy);

    /*
    From Kafka source code: " ... The herder interface tracks and manages workers
    and connectors ..."
     */
    herder = new StandaloneHerder(worker, kafkaClusterId, allConnectorClientConfigOverridePolicy);
    connect = new Connect(herder, rest);
    LOG.info("Finished initializing the worker");
}
 
Example 6
Source File: Mirus.java    From mirus with BSD 3-Clause "New" or "Revised" License
public static void main(String[] argv) {
  Mirus.Args args = new Mirus.Args();
  JCommander jCommander =
      JCommander.newBuilder()
          .programName(OffsetStatus.class.getSimpleName())
          .addObject(args)
          .build();
  try {
    jCommander.parse(argv);
  } catch (Exception e) {
    jCommander.usage();
    throw e;
  }
  if (args.help) {
    jCommander.usage();
    System.exit(1);
  }

  try {
    Map<String, String> workerProps =
        !args.workerPropertiesFile.isEmpty()
            ? Utils.propsToStringMap(Utils.loadProps(args.workerPropertiesFile))
            : Collections.emptyMap();

    applyOverrides(args.overrides, workerProps);

    Mirus mirus = new Mirus();
    Connect connect = mirus.startConnect(workerProps);

    // Shutdown will be triggered by Ctrl-C or via HTTP shutdown request
    connect.awaitStop();
  } catch (Throwable t) {
    log.error("Stopping due to error", t);
    Exit.exit(2);
  }
}
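One detail worth noting around applyOverrides: Utils.propsToStringMap returns a fresh mutable map, but the empty-file branch yields an immutable Collections.emptyMap(), so override logic is safest when it copies into a new map. The helper below is purely illustrative (it is not the Mirus implementation) and assumes overrides arrive as "key=value" strings:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class OverrideSketch {

  // Layer "key=value" overrides on top of a base worker map without mutating it.
  static Map<String, String> withOverrides(Map<String, String> base, List<String> overrides) {
    Map<String, String> merged = new HashMap<>(base);
    for (String override : overrides) {
      int idx = override.indexOf('=');
      if (idx > 0) {
        merged.put(override.substring(0, idx), override.substring(idx + 1));
      }
    }
    return merged;
  }
}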
 
Example 7
Source File: ConnectorApplication.java    From apicurio-registry with Apache License 2.0
public ConnectorApplication(@RegistryProperties("") Properties properties) {
    workerProperties = Utils.propsToStringMap(properties);
}