org.apache.kafka.connect.runtime.distributed.DistributedConfig Java Examples

The following examples show how to use org.apache.kafka.connect.runtime.distributed.DistributedConfig, drawn from several open source projects. Each example notes its original project and source file.
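As a quick orientation before the examples: DistributedConfig extends WorkerConfig and validates the properties of a distributed Connect worker at construction time. A minimal sketch of building one directly (all property values are placeholders):

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.connect.runtime.distributed.DistributedConfig;

public class DistributedConfigSketch {
    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put(DistributedConfig.GROUP_ID_CONFIG, "connect-cluster");          // placeholder group id
        props.put(DistributedConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // placeholder brokers
        props.put(DistributedConfig.OFFSET_STORAGE_TOPIC_CONFIG, "connect-offsets");
        props.put(DistributedConfig.CONFIG_TOPIC_CONFIG, "connect-configs");
        props.put(DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG, "connect-status");
        props.put(DistributedConfig.KEY_CONVERTER_CLASS_CONFIG,
            "org.apache.kafka.connect.json.JsonConverter");
        props.put(DistributedConfig.VALUE_CONVERTER_CLASS_CONFIG,
            "org.apache.kafka.connect.json.JsonConverter");

        // Throws org.apache.kafka.common.config.ConfigException if a required key is missing.
        DistributedConfig config = new DistributedConfig(props);
        System.out.println(config.getString(DistributedConfig.GROUP_ID_CONFIG));
    }
}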
Example #1
Source File: Compatibility.java    From apicurio-registry with Apache License 2.0
static DistributedHerder createDistributedHerder(DistributedConfig config,
                                                 Time time,
                                                 Worker worker,
                                                 String kafkaClusterId,
                                                 StatusBackingStore statusBackingStore,
                                                 ConfigBackingStore configBackingStore,
                                                 String restUrl,
                                                 Object connectorClientConfigOverridePolicy) throws ConnectException {

    if (CTR_DISTRIBUTED_HERDER_22 == null) {
        return new DistributedHerder(config, time, worker, kafkaClusterId, statusBackingStore, configBackingStore, restUrl, (ConnectorClientConfigOverridePolicy)connectorClientConfigOverridePolicy);
    }
    try {
        return (DistributedHerder)CTR_DISTRIBUTED_HERDER_22.newInstance(config, time, worker, kafkaClusterId, statusBackingStore, configBackingStore, restUrl);
    } catch (Throwable t) {
        throw new ConnectException(t);
    }
}
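For context: CTR_DISTRIBUTED_HERDER_22 is evidently a cached java.lang.reflect.Constructor for the pre-2.3 DistributedHerder signature (no ConnectorClientConfigOverridePolicy parameter); when it is null, the code is running against a newer Kafka Connect and calls the current constructor directly. A hedged sketch of how such a handle might be resolved (the actual lookup in apicurio-registry may differ):

// Hypothetical one-time probe for the Kafka 2.2 constructor.
private static final Constructor<DistributedHerder> CTR_DISTRIBUTED_HERDER_22 = probe22Constructor();

private static Constructor<DistributedHerder> probe22Constructor() {
    try {
        return DistributedHerder.class.getConstructor(
            DistributedConfig.class, Time.class, Worker.class, String.class,
            StatusBackingStore.class, ConfigBackingStore.class, String.class);
    } catch (NoSuchMethodException e) {
        return null; // not found: assume the newer (2.3+) signature is available
    }
}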
 
Example #2
Source File: EmbeddedKafka.java    From mongo-kafka with Apache License 2.0
private Properties connectWorkerConfig() {
  Properties workerProps = new Properties();
  workerProps.put(DistributedConfig.GROUP_ID_CONFIG, "mongo-kafka-test");
  workerProps.put(DistributedConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers());
  workerProps.put(DistributedConfig.OFFSET_STORAGE_TOPIC_CONFIG, "connect-offsets");
  workerProps.put(DistributedConfig.CONFIG_TOPIC_CONFIG, "connect-configs");
  workerProps.put(DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG, "connect-status");
  workerProps.put(
      DistributedConfig.KEY_CONVERTER_CLASS_CONFIG,
      "org.apache.kafka.connect.storage.StringConverter");
  workerProps.put("key.converter.schemas.enable", "false");
  workerProps.put(
      DistributedConfig.VALUE_CONVERTER_CLASS_CONFIG,
      "org.apache.kafka.connect.storage.StringConverter");
  workerProps.put("value.converter.schemas.enable", "false");
  workerProps.put(DistributedConfig.OFFSET_COMMIT_INTERVAL_MS_CONFIG, "100");
  workerProps.put(
      StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG,
      createTempDirectory().getAbsolutePath() + "connect");

  return workerProps;
}
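Note that OFFSET_STORAGE_FILE_FILENAME_CONFIG is a standalone-mode key; an AbstractConfig carries unrecognized extras through in originals(), so it is harmless here. These properties would then be validated as a distributed worker configuration, presumably along these lines (a hedged sketch, not taken from EmbeddedKafka itself):

// Hypothetical wiring: Properties -> Map<String, String> -> validated config.
Properties workerProps = connectWorkerConfig();
DistributedConfig config = new DistributedConfig(Utils.propsToStringMap(workerProps));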
 
Example #3
Source File: MirusOffsetTool.java    From mirus with BSD 3-Clause "New" or "Revised" License
private static MirusOffsetTool newOffsetTool(Args args) throws IOException {
  // This needs to be the admin topic properties.
  // By default these are in the worker properties file, as this has the admin producer and
  // consumer settings.  Separating these might be wise - also useful for storing state in
  // source cluster if it proves necessary.
  final Map<String, String> properties =
      !args.propertiesFile.isEmpty()
          ? Utils.propsToStringMap(Utils.loadProps(args.propertiesFile))
          : Collections.emptyMap();
  final DistributedConfig config = new DistributedConfig(properties);
  final KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore();
  offsetBackingStore.configure(config);

  // Avoid initializing the entire Kafka Connect plugin system by assuming the
  // internal.[key|value].converter is org.apache.kafka.connect.json.JsonConverter
  final Converter internalConverter = new JsonConverter();
  internalConverter.configure(config.originalsWithPrefix("internal.key.converter."), true);

  final OffsetSetter offsetSetter = new OffsetSetter(internalConverter, offsetBackingStore);
  final OffsetFetcher offsetFetcher = new OffsetFetcher(config, internalConverter);
  final OffsetSerDe offsetSerDe = OffsetSerDeFactory.create(args.format);

  return new MirusOffsetTool(args, offsetFetcher, offsetSetter, offsetSerDe);
}
 
Example #4
Source File: ConnectEmbedded.java    From hello-kafka-streams with Apache License 2.0
public ConnectEmbedded(Properties workerConfig, Properties... connectorConfigs) throws Exception {
    Time time = new SystemTime();
    DistributedConfig config = new DistributedConfig(Utils.propsToStringMap(workerConfig));

    KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore();
    offsetBackingStore.configure(config);

    // Not sure if this is going to work, but since we don't have an advertised URL, fall back to a fairly random worker id
    String workerId = UUID.randomUUID().toString();
    worker = new Worker(workerId, time, config, offsetBackingStore);

    StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, worker.getInternalValueConverter());
    statusBackingStore.configure(config);

    ConfigBackingStore configBackingStore = new KafkaConfigBackingStore(worker.getInternalValueConverter());
    configBackingStore.configure(config);

    //advertisedUrl = "" as we don't have the rest server - hopefully this will not break anything
    herder = new DistributedHerder(config, time, worker, statusBackingStore, configBackingStore, "");
    this.connectorConfigs = connectorConfigs;

    shutdownHook = new ShutdownHook();
}
 
Example #5
Source File: OffsetFetcher.java    From mirus with BSD 3-Clause "New" or "Revised" License
OffsetFetcher(final WorkerConfig config, Converter internalConverter) {
  String topic = config.getString(DistributedConfig.OFFSET_STORAGE_TOPIC_CONFIG);
  if ("".equals(topic)) {
    throw new ConfigException("Offset storage topic must be specified");
  }

  Map<String, Object> producerProps = new HashMap<>(config.originals());
  producerProps.put(
      ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
  producerProps.put(
      ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
  producerProps.put(ProducerConfig.RETRIES_CONFIG, Integer.MAX_VALUE);

  Map<String, Object> consumerProps = new HashMap<>(config.originals());
  consumerProps.put(
      ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
  consumerProps.put(
      ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

  Callback<ConsumerRecord<byte[], byte[]>> consumedCallback =
      (error, record) -> {
        ByteBuffer key = record.key() != null ? ByteBuffer.wrap(record.key()) : null;
        ByteBuffer value = record.value() != null ? ByteBuffer.wrap(record.value()) : null;
        data.put(key, value);
      };
  this.offsetLog =
      new KafkaBasedLog<>(
          topic, producerProps, consumerProps, consumedCallback, Time.SYSTEM, null);
  this.internalConverter = internalConverter;
}
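The data map is only populated once the log has been started and drained, so elsewhere in OffsetFetcher something like the following presumably runs before offsets are served (KafkaBasedLog's start/readToEnd/stop methods are real Kafka Connect API; the wrapper method is a hypothetical sketch):

// Hypothetical read path for the offset log.
void readOffsetLog() throws Exception {
    offsetLog.start();            // begin consuming the offset storage topic
    offsetLog.readToEnd().get();  // block until caught up to the topic's current end
    offsetLog.stop();
}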
 
Example #6
Source File: Mirus.java    From mirus with BSD 3-Clause "New" or "Revised" License
/**
 * Create a new DistributedConfig object with a suffix applied to the client id. This allows us to
 * make the client id unique so JMX metrics work properly.
 */
private static DistributedConfig configWithClientIdSuffix(
    Map<String, String> workerProps, String suffix) {
  Map<String, String> localProps = new HashMap<>(workerProps);
  localProps.computeIfPresent(CommonClientConfigs.CLIENT_ID_CONFIG, (k, v) -> v + suffix);
  return new DistributedConfig(localProps);
}
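A quick illustration of the effect, assuming workerProps contains client.id=mirus (note that computeIfPresent leaves the map untouched when no client.id is set, and that the suffix is appended with no separator):

DistributedConfig herderConfig = configWithClientIdSuffix(workerProps, "herder"); // client.id=mirusherder
DistributedConfig restConfig   = configWithClientIdSuffix(workerProps, "rest");   // client.id=mirusrest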
 
Example #7
Source File: WikipediaStreamDemo.java    From hello-kafka-streams with Apache License 2.0
private static ConnectEmbedded createWikipediaFeedConnectInstance(String bootstrapServers) throws Exception {
    Properties workerProps = new Properties();
    workerProps.put(DistributedConfig.GROUP_ID_CONFIG, "wikipedia-connect");
    workerProps.put(DistributedConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    workerProps.put(DistributedConfig.OFFSET_STORAGE_TOPIC_CONFIG, "connect-offsets");
    workerProps.put(DistributedConfig.CONFIG_TOPIC_CONFIG, "connect-configs");
    workerProps.put(DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG, "connect-status");
    workerProps.put(DistributedConfig.KEY_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter");
    workerProps.put("key.converter.schemas.enable", "false");
    workerProps.put(DistributedConfig.VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter");
    workerProps.put("value.converter.schemas.enable", "false");
    workerProps.put(DistributedConfig.OFFSET_COMMIT_INTERVAL_MS_CONFIG, "30000");
    workerProps.put(DistributedConfig.INTERNAL_KEY_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter");
    workerProps.put("internal.key.converter.schemas.enable", "false");
    workerProps.put(DistributedConfig.INTERNAL_VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter");
    workerProps.put("internal.value.converter.schemas.enable", "false");

    Properties connectorProps = new Properties();
    connectorProps.put(ConnectorConfig.NAME_CONFIG, "wikipedia-irc-source");
    connectorProps.put(ConnectorConfig.CONNECTOR_CLASS_CONFIG, "io.amient.kafka.connect.irc.IRCFeedConnector");
    connectorProps.put(ConnectorConfig.TASKS_MAX_CONFIG, "10");
    connectorProps.put(IRCFeedConnector.IRC_HOST_CONFIG, "irc.wikimedia.org");
    connectorProps.put(IRCFeedConnector.IRC_PORT_CONFIG, "6667");
    connectorProps.put(IRCFeedConnector.IRC_CHANNELS_CONFIG, "#en.wikipedia,#en.wiktionary,#en.wikinews");
    connectorProps.put(IRCFeedConnector.TOPIC_CONFIG, "wikipedia-raw");

    return new ConnectEmbedded(workerProps, connectorProps);

}
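A minimal driver for this method might look as follows, assuming ConnectEmbedded exposes the start() lifecycle implied by Example #4 (whose constructor already registers a shutdown hook):

// Hypothetical usage; "localhost:9092" is a placeholder.
ConnectEmbedded connect = createWikipediaFeedConnectInstance("localhost:9092");
connect.start();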
 
Example #8
Source File: ConnectorApplication.java    From apicurio-registry with Apache License 2.0
private Connect startConnect(Map<String, String> workerProps) {
    log.info("Scanning for plugin classes. This might take a moment ...");
    Plugins plugins = new Plugins(workerProps);
    // Ignore this TCCL switch: plugins.compareAndSwapWithDelegatingLoader();
    DistributedConfig config = new DistributedConfig(workerProps);

    String kafkaClusterId = ConnectUtils.lookupKafkaClusterId(config);
    log.debug("Kafka cluster ID: {}", kafkaClusterId);

    RestServer rest = new RestServer(config);
    rest.initializeServer();

    URI advertisedUrl = rest.advertisedUrl();
    String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();

    KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore();
    offsetBackingStore.configure(config);
    Object connectorClientConfigOverridePolicy = Compatibility.createConnectorClientConfigOverridePolicy(plugins, config);

    Worker worker = Compatibility.createWorker(workerId, time, plugins, config, offsetBackingStore, connectorClientConfigOverridePolicy);
    WorkerConfigTransformer configTransformer = worker.configTransformer();

    Converter internalValueConverter = worker.getInternalValueConverter();
    StatusBackingStore statusBackingStore = new KafkaStatusBackingStore(time, internalValueConverter);
    statusBackingStore.configure(config);

    ConfigBackingStore configBackingStore = new KafkaConfigBackingStore(
        internalValueConverter,
        config,
        configTransformer);

    DistributedHerder herder = Compatibility.createDistributedHerder(config, time, worker,
                                                     kafkaClusterId, statusBackingStore, configBackingStore,
                                                     advertisedUrl.toString(), connectorClientConfigOverridePolicy);

    final Connect connect = new Connect(herder, rest);
    log.info("Kafka Connect distributed worker initialization took {}ms", time.hiResClockMs() - initStart);
    try {
        connect.start();
    } catch (Exception e) {
        log.error("Failed to start Connect", e);
        connect.stop();
        throw new IllegalStateException(e);
    }

    return connect;
}
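Driven from a main() this would mirror the stock ConnectDistributed entry point; a hedged sketch ("worker.properties" is a placeholder path):

// Hypothetical entry point inside ConnectorApplication.
Map<String, String> workerProps = Utils.propsToStringMap(Utils.loadProps("worker.properties"));
Connect connect = startConnect(workerProps);
connect.awaitStop();  // block until the worker shuts down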
 
Example #9
Source File: Mirus.java    From mirus with BSD 3-Clause "New" or "Revised" License
/**
 * This method is based on the standard Kafka Connect start logic in {@link
 * org.apache.kafka.connect.cli.ConnectDistributed#startConnect(Map)}, but with `client.id` suffix
 * support, to prevent JMX metric names from clashing. Also supports command-line property
 * overrides (useful for run-time port configuration), and starts the Mirus {@link
 * HerderStatusMonitor}.
 */
public Connect startConnect(Map<String, String> workerProps) {
  log.info("Scanning for plugin classes. This might take a moment ...");
  Plugins plugins = new Plugins(workerProps);
  plugins.compareAndSwapWithDelegatingLoader();
  DistributedConfig distributedConfig = configWithClientIdSuffix(workerProps, "herder");

  MirusConfig mirusConfig = new MirusConfig(workerProps);

  String kafkaClusterId = ConnectUtils.lookupKafkaClusterId(distributedConfig);
  log.debug("Kafka cluster ID: {}", kafkaClusterId);

  RestServer rest = new RestServer(configWithClientIdSuffix(workerProps, "rest"));
  rest.initializeServer();

  URI advertisedUrl = rest.advertisedUrl();
  String workerId = advertisedUrl.getHost() + ":" + advertisedUrl.getPort();

  KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore();
  offsetBackingStore.configure(configWithClientIdSuffix(workerProps, "offset"));

  WorkerConfig workerConfigs = configWithClientIdSuffix(workerProps, "worker");

  ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy =
      plugins.newPlugin(
          distributedConfig.getString(WorkerConfig.CONNECTOR_CLIENT_POLICY_CLASS_CONFIG),
          workerConfigs,
          ConnectorClientConfigOverridePolicy.class);

  Worker worker =
      new Worker(
          workerId,
          time,
          plugins,
          workerConfigs,
          offsetBackingStore,
          connectorClientConfigOverridePolicy);

  WorkerConfigTransformer configTransformer = worker.configTransformer();

  Converter internalValueConverter = worker.getInternalValueConverter();
  StatusBackingStore statusBackingStore =
      new KafkaStatusBackingStore(time, internalValueConverter);
  statusBackingStore.configure(configWithClientIdSuffix(workerProps, "status"));

  ConfigBackingStore configBackingStore =
      new KafkaConfigBackingStore(
          internalValueConverter,
          configWithClientIdSuffix(workerProps, "config"),
          configTransformer);

  DistributedHerder herder =
      new DistributedHerder(
          distributedConfig,
          time,
          worker,
          kafkaClusterId,
          statusBackingStore,
          configBackingStore,
          advertisedUrl.toString(),
          connectorClientConfigOverridePolicy);

  // Initialize HerderStatusMonitor
  boolean autoStartTasks = mirusConfig.getTaskAutoRestart();
  boolean autoStartConnectors = mirusConfig.getConnectorAutoRestart();
  long pollingCycle = mirusConfig.getTaskStatePollingInterval();
  HerderStatusMonitor herderStatusMonitor =
      new HerderStatusMonitor(
          herder, workerId, pollingCycle, autoStartTasks, autoStartConnectors);
  Thread herderStatusMonitorThread = new Thread(herderStatusMonitor);
  herderStatusMonitorThread.setName("herder-status-monitor");

  final Connect connect = new Connect(herder, rest);
  log.info("Mirus worker initialization took {}ms", time.hiResClockMs() - initStart);
  try {
    connect.start();
  } catch (Exception e) {
    log.error("Failed to start Mirus", e);
    connect.stop();
    Exit.exit(3);
  }

  herderStatusMonitorThread.start();

  return connect;
}