org.apache.kafka.connect.errors.ConnectException Java Examples

The following examples show how to use org.apache.kafka.connect.errors.ConnectException. They are drawn from open-source Kafka Connect connectors and illustrate common patterns: failing fast on invalid configuration, wrapping checked exceptions from client libraries, and distinguishing unrecoverable failures from retriable ones. The source file and project are noted above each example.
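Before the individual examples, here is a minimal sketch of the pattern most of them follow, assuming a hypothetical sink task (the class name, the example.endpoint config key, and the writeToBackend helper are illustrative placeholders, not taken from any of the projects below): a connector throws ConnectException when a failure is unrecoverable and the task should stop, and RetriableException when the Connect framework should redeliver the same records and try again.

import java.io.IOException;
import java.util.Collection;
import java.util.Map;

import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.errors.RetriableException;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

// Minimal, hypothetical sink task showing the two typical uses of ConnectException.
public class ExampleSinkTask extends SinkTask {

    private String endpoint;
    private int remainingRetries = 3;

    @Override
    public void start(Map<String, String> props) {
        endpoint = props.get("example.endpoint"); // hypothetical config key
        if (endpoint == null || endpoint.isEmpty()) {
            // Invalid configuration is unrecoverable: fail the task immediately.
            throw new ConnectException("Missing required config 'example.endpoint'");
        }
    }

    @Override
    public void put(Collection<SinkRecord> records) {
        try {
            writeToBackend(records);
            remainingRetries = 3;
        } catch (IOException e) {
            if (remainingRetries-- > 0) {
                // Transient failure: ask the framework to redeliver the same records.
                throw new RetriableException(e);
            }
            // Out of retries: wrapping in ConnectException stops the task.
            throw new ConnectException("Failed to write " + records.size() + " records", e);
        }
    }

    // Placeholder for a real backend call; declared to throw a checked exception.
    private void writeToBackend(Collection<SinkRecord> records) throws IOException {
        // no-op in this sketch
    }

    @Override
    public void stop() {
        // nothing to clean up in this sketch
    }

    @Override
    public String version() {
        return "1.0";
    }
}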
Example #1
Source File: HdfsFileWatcherPolicyTest.java    From kafka-connect-fs with Apache License 2.0
@ParameterizedTest
@MethodSource("fileSystemConfigProvider")
public void invalidPollTime(PolicyFsTestConfig fsConfig) {
    Map<String, String> originals = fsConfig.getSourceTaskConfig().originalsStrings();
    originals.put(HdfsFileWatcherPolicy.HDFS_FILE_WATCHER_POLICY_POLL_MS, "invalid");
    FsSourceTaskConfig cfg = new FsSourceTaskConfig(originals);
    assertThrows(ConnectException.class, () ->
            ReflectionUtils.makePolicy((Class<? extends Policy>) fsConfig.getSourceTaskConfig()
                    .getClass(FsSourceTaskConfig.POLICY_CLASS), cfg));
    assertThrows(ConfigException.class, () -> {
        try {
            ReflectionUtils.makePolicy((Class<? extends Policy>) fsConfig.getSourceTaskConfig()
                    .getClass(FsSourceTaskConfig.POLICY_CLASS), cfg);
        } catch (Exception e) {
            throw e.getCause();
        }
    });
}
 
Example #2
Source File: AvroFileReaderTest.java    From kafka-connect-fs with Apache License 2.0
@ParameterizedTest
@MethodSource("fileSystemConfigProvider")
public void readerWithInvalidSchema(ReaderFsTestConfig fsConfig) throws IOException {
    Map<String, Object> readerConfig = getReaderConfig();
    readerConfig.put(AvroFileReader.FILE_READER_AVRO_SCHEMA, Schema.create(Schema.Type.STRING).toString());
    FileSystem testFs = FileSystem.newInstance(fsConfig.getFsUri(), new Configuration());
    fsConfig.setReader(getReader(testFs, fsConfig.getDataFile(), readerConfig));
    assertThrows(ConnectException.class, () -> readAllData(fsConfig));
    assertThrows(AvroTypeException.class, () -> {
        try {
            readAllData(fsConfig);
        } catch (Exception e) {
            throw e.getCause();
        }
    });
}
 
Example #3
Source File: MongoSinkTask.java    From mongo-kafka with Apache License 2.0
/**
 * Start the Task. This should handle any configuration parsing and one-time setup of the task.
 *
 * @param props initial configuration
 */
@Override
public void start(final Map<String, String> props) {
  LOGGER.info("Starting MongoDB sink task");
  try {
    sinkConfig = new MongoSinkConfig(props);
    remainingRetriesTopicMap =
        new ConcurrentHashMap<>(
            sinkConfig.getTopics().orElse(emptyList()).stream()
                .collect(
                    Collectors.toMap(
                        (t) -> t,
                        (t) ->
                            new AtomicInteger(
                                sinkConfig
                                    .getMongoSinkTopicConfig(t)
                                    .getInt(MAX_NUM_RETRIES_CONFIG)))));
  } catch (Exception e) {
    throw new ConnectException("Failed to start new task", e);
  }
  LOGGER.debug("Started MongoDB sink task");
}
 
Example #4
Source File: IgniteSinkTask.java    From ignite with Apache License 2.0
/**
 * Buffers records.
 *
 * @param records Records to inject into grid.
 */
@SuppressWarnings("unchecked")
@Override public void put(Collection<SinkRecord> records) {
    try {
        for (SinkRecord record : records) {
            // Data is flushed asynchronously when CACHE_PER_NODE_DATA_SIZE is reached.
            if (extractor != null) {
                Map.Entry<Object, Object> entry = extractor.extract(record);
                StreamerContext.getStreamer().addData(entry.getKey(), entry.getValue());
            }
            else {
                if (record.key() != null) {
                    StreamerContext.getStreamer().addData(record.key(), record.value());
                }
                else {
                    log.error("Failed to stream a record with null key!");
                }
            }
        }
    }
    catch (ConnectException e) {
        log.error("Failed adding record", e);

        throw new ConnectException(e);
    }
}
 
Example #5
Source File: GcsSinkTask.java    From aiven-kafka-connect-gcs with GNU Affero General Public License v3.0
private void flushFile(final String filename, final List<SinkRecord> records) {
    final BlobInfo blob = BlobInfo
        .newBuilder(config.getBucketName(), config.getPrefix() + filename)
        .build();

    try (final ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
        // Don't group these two tries,
        // because the internal one must be closed before writing to GCS.
        try (final OutputStream compressedStream = getCompressedStream(baos)) {
            for (int i = 0; i < records.size() - 1; i++) {
                outputWriter.writeRecord(records.get(i), compressedStream);
            }
            outputWriter.writeLastRecord(records.get(records.size() - 1), compressedStream);
        }
        storage.create(blob, baos.toByteArray());
    } catch (final Exception e) {
        throw new ConnectException(e);
    }
}
 
Example #6
Source File: HdfsFileWatcherPolicyTest.java    From kafka-connect-fs with Apache License 2.0
@ParameterizedTest
@MethodSource("fileSystemConfigProvider")
public void invalidRetryTime(PolicyFsTestConfig fsConfig) {
    Map<String, String> originals = fsConfig.getSourceTaskConfig().originalsStrings();
    originals.put(HdfsFileWatcherPolicy.HDFS_FILE_WATCHER_POLICY_RETRY_MS, "invalid");
    FsSourceTaskConfig cfg = new FsSourceTaskConfig(originals);
    assertThrows(ConnectException.class, () ->
            ReflectionUtils.makePolicy((Class<? extends Policy>) fsConfig.getSourceTaskConfig()
                    .getClass(FsSourceTaskConfig.POLICY_CLASS), cfg));
    assertThrows(ConfigException.class, () -> {
        try {
            ReflectionUtils.makePolicy((Class<? extends Policy>) fsConfig.getSourceTaskConfig()
                    .getClass(FsSourceTaskConfig.POLICY_CLASS), cfg);
        } catch (Exception e) {
            throw e.getCause();
        }
    });
}
 
Example #7
Source File: ConverterMessageBuilder.java    From kafka-connect-mq-sink with Apache License 2.0
/**
 * Configure this class.
 * 
 * @param props initial configuration
 *
 * @throws ConnectException   Operation failed and connector should stop.
 */
public void configure(Map<String, String> props) {
    log.trace("[{}] Entry {}.configure, props={}", Thread.currentThread().getId(), this.getClass().getName(), props);

    super.configure(props);

    String converterClass = props.get(MQSinkConnector.CONFIG_NAME_MQ_MESSAGE_BUILDER_VALUE_CONVERTER);

    try {
        Class<? extends Converter> c = Class.forName(converterClass).asSubclass(Converter.class);
        converter = c.newInstance();

        // Make a copy of the configuration to filter out only those that begin "mq.message.builder.value.converter."
        // since those are used to configure the converter itself
        AbstractConfig ac = new AbstractConfig(new ConfigDef(), props, false);

        // Configure the Converter to convert the value, not the key (isKey == false)
        converter.configure(ac.originalsWithPrefix(MQSinkConnector.CONFIG_NAME_MQ_MESSAGE_BUILDER_VALUE_CONVERTER + "."), false);
    }
    catch (ClassNotFoundException | IllegalAccessException | InstantiationException | NullPointerException exc) {
        log.error("Could not instantiate converter for message builder {}", converterClass);
        throw new ConnectException("Could not instantiate converter for message builder", exc);
    }

    log.trace("[{}]  Exit {}.configure", Thread.currentThread().getId(), this.getClass().getName());
}
 
Example #8
Source File: JMSWriter.java    From kafka-connect-mq-sink with Apache License 2.0
/**
 * Sends a message to MQ. Adds the message to the current transaction. Reconnects to MQ if required.
 * 
 * @param r                  The message and schema to send
 *
 * @throws RetriableException Operation failed, but connector should continue to retry.
 * @throws ConnectException   Operation failed and connector should stop.
 */
public void send(SinkRecord r) throws ConnectException, RetriableException {
    log.trace("[{}] Entry {}.send", Thread.currentThread().getId(), this.getClass().getName());

    connectInternal();

    try {
        Message m = builder.fromSinkRecord(jmsCtxt, r);
        inflight = true;
        jmsProd.send(queue, m);
    }
    catch (JMSRuntimeException jmse) {
        log.error("JMS exception {}", jmse);
        throw handleException(jmse);
    }

    log.trace("[{}]  Exit {}.send", Thread.currentThread().getId(), this.getClass().getName());
}
 
Example #9
Source File: DataWriter.java    From streamx with Apache License 2.0
public void close(Collection<TopicPartition> partitions) {
  // Close any writers we have. We may get assigned the same partitions and end up duplicating
  // some effort since we'll have to reprocess those messages. It may be possible to hold on to
  // the TopicPartitionWriter and continue to use the temp file, but this can get significantly
  // more complex due to potential failures and network partitions. For example, we may get
  // this close, then miss a few generations of group membership, during which
  // data may have continued to be processed and we'd have to restart from the recovery stage,
  // make sure we apply the WAL, and only reuse the temp file if the starting offset is still
  // valid. For now, we prefer the simpler solution that may result in a bit of wasted effort.
  for (TopicPartition tp: assignment) {
    try {
      topicPartitionWriters.get(tp).close();
    } catch (ConnectException e) {
      log.error("Error closing writer for {}. Error: {}", tp, e.getMessage());
    } finally {
      topicPartitionWriters.remove(tp);
    }
  }
}
 
Example #10
Source File: AbstractPolicy.java    From kafka-connect-fs with Apache License 2.0
@Override
public FileReader offer(FileMetadata metadata, OffsetStorageReader offsetStorageReader) {
    FileSystem current = fileSystems.stream()
            .filter(fs -> metadata.getPath().startsWith(fs.getWorkingDirectory().toString()))
            .findFirst()
            .orElse(null);
    try {
        FileReader reader = ReflectionUtils.makeReader(
                (Class<? extends FileReader>) conf.getClass(FsSourceTaskConfig.FILE_READER_CLASS),
                current, new Path(metadata.getPath()), conf.originals());
        Map<String, Object> partition = Collections.singletonMap("path", metadata.getPath());
        Map<String, Object> offset = offsetStorageReader.offset(partition);
        if (offset != null && offset.get("offset") != null) {
            log.info("Seeking to offset [{}] for file [{}].", offset.get("offset"), metadata.getPath());
            reader.seek((Long) offset.get("offset"));
        }
        return reader;
    } catch (Exception e) {
        throw new ConnectException("An error has occurred when creating reader for file: " + metadata.getPath(), e);
    }
}
 
Example #11
Source File: BulkProcessor.java    From jkes with Apache License 2.0
/**
 * Request a flush and block up to {@code timeoutMs} until all pending records have been flushed.
 *
 * If any task has failed prior to or during the flush, {@link ConnectException} will be thrown with that error.
 */
public void flush(long timeoutMs) {
  log.trace("flush {}", timeoutMs);
  final long flushStartTimeMs = time.milliseconds();
  try {
    flushRequested = true;
    synchronized (this) {
      notifyAll();
      for (long elapsedMs = time.milliseconds() - flushStartTimeMs;
           !isTerminal() && elapsedMs < timeoutMs && bufferedRecords() > 0;
           elapsedMs = time.milliseconds() - flushStartTimeMs) {
        wait(timeoutMs - elapsedMs);
      }
      throwIfTerminal();
      if (bufferedRecords() > 0) {
        throw new ConnectException("Flush timeout expired with unflushed records: " + bufferedRecords());
      }
    }
  } catch (InterruptedException e) {
    throw new ConnectException(e);
  } finally {
    flushRequested = false;
  }
}
 
Example #12
Source File: CronPolicyTest.java    From kafka-connect-fs with Apache License 2.0
@ParameterizedTest
@MethodSource("fileSystemConfigProvider")
public void invalidCronExpression(PolicyFsTestConfig fsConfig) {
    Map<String, String> originals = fsConfig.getSourceTaskConfig().originalsStrings();
    originals.put(CronPolicy.CRON_POLICY_EXPRESSION, "invalid");
    FsSourceTaskConfig cfg = new FsSourceTaskConfig(originals);
    assertThrows(ConnectException.class, () ->
            ReflectionUtils.makePolicy((Class<? extends Policy>) fsConfig.getSourceTaskConfig()
                    .getClass(FsSourceTaskConfig.POLICY_CLASS), cfg));
    assertThrows(ConfigException.class, () -> {
        try {
            ReflectionUtils.makePolicy((Class<? extends Policy>) fsConfig.getSourceTaskConfig()
                    .getClass(FsSourceTaskConfig.POLICY_CLASS), cfg);
        } catch (Exception e) {
            throw e.getCause();
        }
    });
}
 
Example #13
Source File: ElasticSourceTask.java    From kafka-connect-elasticsearch-source with Apache License 2.0
public void setupTest(List<String> index) {

    final String esHost = "localhost";
    final int esPort = 9200;

    maxConnectionAttempts = 3;
    connectionRetryBackoff = 1000;
    es = new ElasticConnection(
            esHost,
            esPort,
            maxConnectionAttempts,
            connectionRetryBackoff
    );

    indices = index;
    if (indices.isEmpty()) {
        throw new ConnectException("Invalid configuration: each ElasticSourceTask must have at "
                + "least one index assigned to it");
    }

    topic = "connect_";
    incrementingField = "@timestamp";
    size = 10000;
    pollingMs = 1000;
}
 
Example #14
Source File: JenkinsSourceConnector.java    From kafka-connect-jenkins with Apache License 2.0
/**
 * Start this Connector. This method will only be called on a clean Connector, i.e. it has either just been
 * instantiated and initialized or stop() has been invoked.
 *
 * @param props
 */
@Override
public void start(Map<String, String> props) {
    logger.info("JenkinsSourceConnector starting");
    jenkinsCfg = new JenkinsSourceConfig(props);

    // Do a test connection to fail fast
    try {
        logger.trace("Doing a test connection to {}", jenkinsCfg.getJobsResource());
        if (jenkinsCfg.isProtected()) {
            client = new JenkinsClient(jenkinsCfg.getJobsResource(), jenkinsCfg.getUsername(),
                    jenkinsCfg.getPasswordOrApiToken(), jenkinsCfg.getJenkinsConnTimeout(),
                    jenkinsCfg.getJenkinsReadTimeout());
        } else {
            client = new JenkinsClient(jenkinsCfg.getJobsResource(), jenkinsCfg.getJenkinsConnTimeout(),
                    jenkinsCfg.getJenkinsReadTimeout());
        }
        HttpURLConnection connection = client.connect();
        connection.disconnect();
    } catch (JenkinsException e) {
        throw new ConnectException("Unable to open connection to " + jenkinsCfg.getJenkinsUrl(), e);
    }
}
 
Example #15
Source File: TextFileReaderTest.java    From kafka-connect-fs with Apache License 2.0
@ParameterizedTest
@MethodSource("fileSystemConfigProvider")
public void invalidFileEncoding(ReaderFsTestConfig fsConfig) {
    Map<String, Object> readerConfig = getReaderConfig();
    readerConfig.put(TextFileReader.FILE_READER_TEXT_FIELD_NAME_VALUE, FIELD_NAME_VALUE);
    readerConfig.put(TextFileReader.FILE_READER_TEXT_ENCODING, "invalid_charset");
    readerConfig.put(TextFileReader.FILE_READER_TEXT_COMPRESSION_TYPE, COMPRESSION_TYPE_DEFAULT);
    assertThrows(ConnectException.class, () -> getReader(fsConfig.getFs(), fsConfig.getDataFile(), readerConfig));
    assertThrows(UnsupportedCharsetException.class, () -> {
        try {
            getReader(fsConfig.getFs(), fsConfig.getDataFile(), readerConfig);
        } catch (Exception e) {
            throw e.getCause();
        }
    });
}
 
Example #16
Source File: InvocationClientConfig.java    From kafka-connect-lambda with Apache License 2.0
@SuppressWarnings("unchecked")
AWSCredentialsProvider loadAwsCredentialsProvider() {
    try {
        AWSCredentialsProvider credentialsProvider = ((Class<? extends AWSCredentialsProvider>)
            getClass(CREDENTIALS_PROVIDER_CLASS_KEY)).getDeclaredConstructor().newInstance();

        if (credentialsProvider instanceof Configurable) {
            Map<String, Object> configs = originalsWithPrefix(
                CREDENTIALS_PROVIDER_CONFIG_PREFIX);

            ((Configurable)credentialsProvider).configure(configs);
        }

        return credentialsProvider;

    } catch (IllegalAccessException | InstantiationException | InvocationTargetException | NoSuchMethodException e) {
        throw new ConnectException("Unable to create " + CREDENTIALS_PROVIDER_CLASS_KEY, e);
    }
}
 
Example #17
Source File: DynamoDbSinkTask.java    From kafka-connect-dynamodb with Apache License 2.0
private void insert(ValueSource valueSource, Schema schema, Object value, PutRequest put) {
    final AttributeValue attributeValue;
    try {
        attributeValue = schema == null
                ? AttributeValueConverter.toAttributeValueSchemaless(value)
                : AttributeValueConverter.toAttributeValue(schema, value);
    } catch (DataException e) {
        log.error("Failed to convert record with schema={} value={}", schema, value, e);
        throw e;
    }

    final String topAttributeName = valueSource.topAttributeName(config);
    if (!topAttributeName.isEmpty()) {
        put.addItemEntry(topAttributeName, attributeValue);
    } else if (attributeValue.getM() != null) {
        put.setItem(attributeValue.getM());
    } else {
        throw new ConnectException("No top attribute name configured for " + valueSource + ", and it could not be converted to Map: " + attributeValue);
    }
}
 
Example #18
Source File: UnivocityFileReaderTest.java    From kafka-connect-fs with Apache License 2.0
@ParameterizedTest
@MethodSource("fileSystemConfigProvider")
public void readAllDataWithMappingErrors(ReaderFsTestConfig fsConfig) throws IOException {
    Path file = createDataFile(fsConfig, true);
    Map<String, Object> readerConfig = getReaderConfig();
    readerConfig.put(T.FILE_READER_DELIMITED_SETTINGS_SCHEMA, "boolean,boolean,boolean,boolean,boolean,boolean,int,long,double");
    FileReader reader = getReader(fsConfig.getFs(), file, readerConfig);

    assertTrue(reader.hasNext());

    int recordCount = 0;
    while (reader.hasNext()) {
        try {
            reader.next();
        } catch (Exception e) {
            assertEquals(ConnectException.class, e.getClass());
            assertEquals(DataProcessingException.class, e.getCause().getClass());
        }
        recordCount++;
    }
    assertEquals(NUM_RECORDS, recordCount, "The number of records in the file does not match");
}
 
Example #19
Source File: MongoWrapper.java    From MongoDb-Sink-Connector with Apache License 2.0
private MongoClient createClient(AbstractConfig config, MongoClientOptions options) {
    String host = config.getString(MONGO_HOST);
    int port = config.getInt(MONGO_PORT);

    try {
        MongoClientOptions actualOptions;
        if (options != null) {
            actualOptions = options;
        } else {
            actualOptions = new MongoClientOptions.Builder().build();
        }
        ServerAddress server = new ServerAddress(host, port);
        if (credentials != null) {
            return new MongoClient(server, credentials, actualOptions);
        } else {
            return new MongoClient(server, actualOptions);
        }
    } catch (MongoException ex) {
        log.error("Failed to create MongoDB client to {}:{} with credentials {}", host, port,
                credentials, ex);
        throw new ConnectException("MongoDb client cannot be created.", ex);
    }
}
 
Example #20
Source File: ParquetFileReaderTest.java    From kafka-connect-fs with Apache License 2.0
@ParameterizedTest
@MethodSource("fileSystemConfigProvider")
public void readerWithInvalidProjection(ReaderFsTestConfig fsConfig) throws IOException {
    Schema testSchema = SchemaBuilder.record("test_projection").namespace("test.avro")
            .fields()
            .name("field1").type("string").noDefault()
            .endRecord();
    Map<String, Object> readerConfig = getReaderConfig();
    readerConfig.put(ParquetFileReader.FILE_READER_PARQUET_PROJECTION, testSchema.toString());
    readerConfig.put(AgnosticFileReader.FILE_READER_AGNOSTIC_EXTENSIONS_PARQUET, getFileExtension());
    FileSystem testFs = FileSystem.newInstance(fsConfig.getFsUri(), new Configuration());
    fsConfig.setReader(getReader(testFs, fsConfig.getDataFile(), readerConfig));
    try {
        readAllData(fsConfig);
    } catch (Exception e) {
        assertEquals(ConnectException.class, e.getClass());
        assertEquals(InvalidRecordException.class, e.getCause().getClass());
    }
}
 
Example #21
Source File: TigerGraphSinkConfig.java    From ecosys with Apache License 2.0
private String getParamStr(List<BasicNameValuePair> params) {
    StringBuilder result = new StringBuilder();
    boolean first = true;

    for (BasicNameValuePair pair : params)
    {
        if (first)
            first = false;
        else
            result.append("&");
        try {
            result.append(URLEncoder.encode(pair.getName(), "UTF-8"));
            result.append("=");
            result.append(URLEncoder.encode(pair.getValue(), "UTF-8"));
        } catch (IOException ex) {
            throw new ConnectException("Encode Exception: " + ex.getMessage());
        }
    }

    return result.toString();
}
 
Example #22
Source File: TopicPartitionWriter.java    From streamx with Apache License 2.0
private void resetOffsets() throws ConnectException {
  if (!recovered) {
    if(wal instanceof DBWAL)
      readOffsetFromWAL();
    else
      readOffset();
    // Note that we must *always* request that we seek to an offset here. Currently the framework will still commit
    // Kafka offsets even though we track our own (see KAFKA-3462), which can result in accidentally using that offset
    // if one was committed but no files were rolled to their final location in HDFS (i.e. some data was accepted,
    // written to a tempfile, but then that tempfile was discarded). To protect against this, even if we just want
    // to start at offset 0 or reset to the earliest offset, we specify that explicitly to forcibly override any
    // committed offsets.
    long seekOffset = offset > 0 ? offset : 0;
    log.debug("Resetting offset for {} to {}", tp, seekOffset);
    context.offset(tp, seekOffset);
    recovered = true;
  }
}
 
Example #23
Source File: Compatibility.java    From apicurio-registry with Apache License 2.0
static DistributedHerder createDistributedHerder(DistributedConfig config,
                                                 Time time,
                                                 Worker worker,
                                                 String kafkaClusterId,
                                                 StatusBackingStore statusBackingStore,
                                                 ConfigBackingStore configBackingStore,
                                                 String restUrl,
                                                 Object connectorClientConfigOverridePolicy) throws ConnectException {

    if (CTR_DISTRIBUTED_HERDER_22 == null) {
        return new DistributedHerder(config, time, worker, kafkaClusterId, statusBackingStore, configBackingStore, restUrl, (ConnectorClientConfigOverridePolicy)connectorClientConfigOverridePolicy);
    }
    try {
        return (DistributedHerder)CTR_DISTRIBUTED_HERDER_22.newInstance(config, time, worker, kafkaClusterId, statusBackingStore, configBackingStore, restUrl);
    } catch (Throwable t) {
        throw new ConnectException(t);
    }
}
 
Example #24
Source File: ReflectionUtils.java    From kafka-connect-fs with Apache License 2.0
private static <T> T make(Class<T> clazz, Object... args) {
    try {
        Class[] constClasses = Arrays.stream(args).map(Object::getClass).toArray(Class[]::new);

        Constructor<T> constructor = ConstructorUtils.getMatchingAccessibleConstructor(clazz, constClasses);
        return constructor.newInstance(args);
    } catch (IllegalAccessException |
            InstantiationException |
            InvocationTargetException e) {
        throw new ConnectException(e.getCause());
    }
}
 
Example #25
Source File: AgnosticFileReader.java    From kafka-connect-fs with Apache License 2.0
public AgnosticFileReader(FileSystem fs, Path filePath, Map<String, Object> config) throws Exception {
    super(fs, filePath, new AgnosticAdapter(), config);

    try {
        reader = readerByExtension(fs, filePath, config);
    } catch (ConnectException ce) {
        throw (Exception) ce.getCause();
    }
}
 
Example #26
Source File: FsSourceTask.java    From kafka-connect-fs with Apache License 2.0
private Stream<FileMetadata> filesToProcess() {
    try {
        return asStream(policy.execute())
                .filter(metadata -> metadata.getLen() > 0);
    } catch (IOException | ConnectException e) {
        //when an exception happens executing the policy, the connector continues
        log.error("Cannot retrieve files to process from the FS: {}. " +
                        "There was an error executing the policy but the task tolerates this and continues.",
                policy.getURIs(), e);
        return Stream.empty();
    }
}
 
Example #27
Source File: RestSourceTask.java    From kafka-connect-rest with Apache License 2.0
@Override
public void start(Map<String, String> properties) {
  log.info("Starting REST source task");
  try {
    connectorConfig = new RestSourceConnectorConfig(properties);
  } catch (ConfigException ex) {
    throw new ConnectException("Couldn't start RestSourceTask due to configuration error", ex);
  }

  taskName = properties.getOrDefault("name", "unknown");
  ctx = ExecutionContext.create(taskName);

  pollInterval = connectorConfig.getPollInterval();
  String url = connectorConfig.getUrl();
  requestFactory = new Request.RequestFactory(url, connectorConfig.getMethod());
  payloadGenerator = connectorConfig.getPayloadGenerator();
  responseHandler = connectorConfig.getResponseHandler();
  executor = connectorConfig.getRequestExecutor();
  topicSelector = connectorConfig.getTopicSelector();

  sourcePartition = Collections.singletonMap("URL", url);
  Map<String, Object> offsets = context.offsetStorageReader().offset(sourcePartition);
  if(offsets != null) {
    log.info("Loaded Offsets: " + Arrays.toString(offsets.entrySet().toArray()));
    payloadGenerator.setOffsets(offsets);
  }
}
 
Example #28
Source File: IgniteSinkTask.java    From ignite with Apache License 2.0
/**
 * Initializes grid client from configPath.
 *
 * @param props Task properties.
 */
@Override public void start(Map<String, String> props) {
    // Each task has the same parameters -- avoid setting more than once.
    if (cacheName != null)
        return;

    cacheName = props.get(IgniteSinkConstants.CACHE_NAME);
    igniteConfigFile = props.get(IgniteSinkConstants.CACHE_CFG_PATH);

    if (props.containsKey(IgniteSinkConstants.CACHE_ALLOW_OVERWRITE))
        StreamerContext.getStreamer().allowOverwrite(
            Boolean.parseBoolean(props.get(IgniteSinkConstants.CACHE_ALLOW_OVERWRITE)));

    if (props.containsKey(IgniteSinkConstants.CACHE_PER_NODE_DATA_SIZE))
        StreamerContext.getStreamer().perNodeBufferSize(
            Integer.parseInt(props.get(IgniteSinkConstants.CACHE_PER_NODE_DATA_SIZE)));

    if (props.containsKey(IgniteSinkConstants.CACHE_PER_NODE_PAR_OPS))
        StreamerContext.getStreamer().perNodeParallelOperations(
            Integer.parseInt(props.get(IgniteSinkConstants.CACHE_PER_NODE_PAR_OPS)));

    if (props.containsKey(IgniteSinkConstants.SINGLE_TUPLE_EXTRACTOR_CLASS)) {
        String transformerCls = props.get(IgniteSinkConstants.SINGLE_TUPLE_EXTRACTOR_CLASS);
        if (transformerCls != null && !transformerCls.isEmpty()) {
            try {
                Class<? extends StreamSingleTupleExtractor> clazz =
                    (Class<? extends StreamSingleTupleExtractor<SinkRecord, Object, Object>>)
                        Class.forName(transformerCls);

                extractor = clazz.newInstance();
            }
            catch (Exception e) {
                throw new ConnectException("Failed to instantiate the provided transformer!", e);
            }
        }
    }

    stopped = false;
}
 
Example #29
Source File: HttpSinkTask.java    From kafka-connect-http with Apache License 2.0
@Override
public void put(Collection<SinkRecord> records) {
  if (records.isEmpty()) {
    return;
  }
  final SinkRecord first = records.iterator().next();
  final int recordsCount = records.size();
  log.trace(
      "Received {} records. First record kafka coordinates:({}-{}-{}). Writing them to the "
      + "API...",
      recordsCount, first.topic(), first.kafkaPartition(), first.kafkaOffset()
  );
  try {
    writer.write(records);
  } catch (Exception e) {
    log.warn(
        "Write of {} records failed, remainingRetries={}",
        records.size(),
        remainingRetries,
        e
    );
    if (remainingRetries == 0) {
      throw new ConnectException(e);
    } else {
      initWriter();
      remainingRetries--;
      context.timeout(config.retryBackoffMs);
      throw new RetriableException(e);
    }
  }
  remainingRetries = config.maxRetries;
}
 
Example #30
Source File: FileStreamSourceTaskTest.java    From kafka-connector-skeleton with Apache License 2.0
@Test(expected = ConnectException.class)
public void testMissingTopic() throws InterruptedException {
    replay();

    config.remove(FileStreamSourceConnector.TOPIC_CONFIG);
    task.start(config);
}