org.apache.kafka.connect.errors.RetriableException Java Examples

The following examples show how to use org.apache.kafka.connect.errors.RetriableException. Each example is taken from an open-source Kafka Connect connector; the originating project and source file are noted above its code.
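Before the project examples, here is a minimal, self-contained sketch of the typical pattern: a SinkTask wraps a transient failure in RetriableException so that Kafka Connect backs off and redelivers the same batch instead of failing the task. The class name, the writeBatch placeholder, and the 5000 ms back-off are illustrative assumptions, not taken from any of the projects below.

import java.io.IOException;
import java.util.Collection;
import java.util.Map;

import org.apache.kafka.connect.errors.RetriableException;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

/**
 * Illustrative sketch only: wrap a transient failure in RetriableException so
 * the Connect framework backs off and redelivers the same batch of records
 * instead of failing the task.
 */
public class ExampleRetriableSinkTask extends SinkTask {

  @Override
  public void start(Map<String, String> props) {
    // Set up the downstream client here (omitted in this sketch).
  }

  @Override
  public void put(Collection<SinkRecord> records) {
    try {
      writeBatch(records);
    } catch (IOException e) {
      // Transient I/O problem: optionally request a back-off, then signal
      // Connect to retry the whole batch by throwing RetriableException.
      context.timeout(5000L);
      throw new RetriableException("Transient failure while writing batch", e);
    }
  }

  // Placeholder for the real downstream write; assumed to throw IOException
  // on transient failures such as a dropped connection.
  private void writeBatch(Collection<SinkRecord> records) throws IOException {
    // Write the records to the external system here.
  }

  @Override
  public void stop() {
    // Close the downstream client here (omitted).
  }

  @Override
  public String version() {
    return "0.0.1";
  }
}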
Example #1
Source File: SplunkHttpSinkTaskTest.java    From kafka-connect-splunk with Apache License 2.0
@Test
public void connectionRefused() throws IOException {
  Collection<SinkRecord> sinkRecords = new ArrayList<>();
  SinkRecordContentTest.addRecord(sinkRecords, ImmutableMap.of("host", "hostname.example.com"));
  SinkRecordContentTest.addRecord(sinkRecords, ImmutableMap.of("host", "hostname.example.com", "time", new Date(1472256858924L), "source", "testapp"));
  SinkRecordContentTest.addRecord(sinkRecords, ImmutableMap.of("host", "hostname.example.com", "time", new Date(1472256858924L), "source", "testapp", "sourcetype", "txt", "index", "main"));

  final LowLevelHttpRequest httpRequest = mock(LowLevelHttpRequest.class, CALLS_REAL_METHODS);
  when(httpRequest.execute()).thenThrow(ConnectException.class);
  this.task.transport = new MockHttpTransport() {
    @Override
    public LowLevelHttpRequest buildRequest(String method, String url) throws IOException {
      return httpRequest;
    }
  };

  this.task.httpRequestFactory = this.task.transport.createRequestFactory(this.task.httpRequestInitializer);
  assertThrows(RetriableException.class, () -> this.task.put(sinkRecords));
}
 
Example #2
Source File: RedisSinkTaskReconnectIT.java    From kafka-connect-redis with Apache License 2.0
@Test
public void serverReset(
    @DockerContainer(container = "redis") Container container,
    @Port(container = "redis", internalPort = 6379) InetSocketAddress address) throws ExecutionException, InterruptedException, IOException {
  log.info("address = {}", address);
  final String topic = "putWrite";
  SinkTaskContext context = mock(SinkTaskContext.class);
  when(context.assignment()).thenReturn(ImmutableSet.of());
  this.task.initialize(context);
  this.task.start(
      ImmutableMap.of(RedisSinkConnectorConfig.HOSTS_CONFIG, String.format("%s:%s", address.getHostString(), address.getPort()))
  );

  sendAndVerifyRecords(task, topic, 0);
  container.stop();

  assertThrows(RetriableException.class, () -> {
    sendAndVerifyRecords(task, topic, 100);
  });
  container.start();
  sendAndVerifyRecords(task, topic, 100);
}
 
Example #3
Source File: OffsetSink.java    From kafka-backup with Apache License 2.0
private void syncOffsetsForGroup(String consumerGroup) throws IOException {
    Map<TopicPartition, OffsetAndMetadata> topicOffsetsAndMetadata;
    try {
        topicOffsetsAndMetadata = adminClient.listConsumerGroupOffsets(consumerGroup).partitionsToOffsetAndMetadata().get();
    } catch (InterruptedException | ExecutionException e) {
        throw new RetriableException(e);
    }
    for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : topicOffsetsAndMetadata.entrySet()) {
        TopicPartition tp = entry.getKey();
        OffsetAndMetadata offsetAndMetadata = entry.getValue();

        if (validTopic(tp.topic())) {
            if (!this.topicOffsets.containsKey(tp)) {
                this.topicOffsets.put(tp, new OffsetStoreFile(targetDir, tp));
            }
            OffsetStoreFile offsets = this.topicOffsets.get(tp);
            offsets.put(consumerGroup, offsetAndMetadata.offset());
        }
    }
}
 
Example #4
Source File: JMSWriter.java    From kafka-connect-mq-sink with Apache License 2.0
/**
 * Commits the current transaction.
 *
 * @throws RetriableException Operation failed, but connector should continue to retry.
 * @throws ConnectException   Operation failed and connector should stop.
 */
public void commit() throws ConnectException, RetriableException {
    log.trace("[{}] Entry {}.commit", Thread.currentThread().getId(), this.getClass().getName());

    connectInternal();
    try {
        if (inflight) {
            inflight = false;
        }

        jmsCtxt.commit();
    }
    catch (JMSRuntimeException jmse) {
        log.error("JMS exception {}", jmse);
        throw handleException(jmse);
    }

    log.trace("[{}]  Exit {}.commit", Thread.currentThread().getId(), this.getClass().getName());
}
 
Example #5
Source File: JMSWriter.java    From kafka-connect-mq-sink with Apache License 2.0
/**
 * Sends a message to MQ. Adds the message to the current transaction. Reconnects to MQ if required.
 * 
 * @param r                  The message and schema to send
 *
 * @throws RetriableException Operation failed, but connector should continue to retry.
 * @throws ConnectException   Operation failed and connector should stop.
 */
public void send(SinkRecord r) throws ConnectException, RetriableException {
    log.trace("[{}] Entry {}.send", Thread.currentThread().getId(), this.getClass().getName());

    connectInternal();

    try {
        Message m = builder.fromSinkRecord(jmsCtxt, r);
        inflight = true;
        jmsProd.send(queue, m);
    }
    catch (JMSRuntimeException jmse) {
        log.error("JMS exception {}", jmse);
        throw handleException(jmse);
    }

    log.trace("[{}]  Exit {}.send", Thread.currentThread().getId(), this.getClass().getName());
}
 
Example #6
Source File: RestSinkTaskTest.java    From kafka-connect-rest with Apache License 2.0
@Test
public void shouldRetryOnErrorMaxRetriesTimes() throws Exception {
  when(executor.execute(any()))
    .thenThrow(new RetriableException("Test"))
    .thenThrow(new RetriableException("Test"))
    .thenThrow(new RetriableException("Test"))
    .thenThrow(new RetriableException("Test"))
    .thenThrow(new RetriableException("Test"))
    .thenThrow(new RetriableException("Test"))
    .thenThrow(new RetriableException("Test"))
    .thenReturn(mock(Response.class));

  subject.setMaxRetries(3);
  subject.put(Arrays.asList(sinkRecord));

  verify(executor, times(4)).execute(any());
}
 
Example #7
Source File: KafkaSinkTask.java    From common-kafka with Apache License 2.0
@Override
public void put(Collection<SinkRecord> collection) {
    // Any retriable exception thrown here will be attempted again and not cause the task to pause
    for(SinkRecord sinkRecord : collection) {
        if (sinkRecord.keySchema() != Schema.OPTIONAL_BYTES_SCHEMA || sinkRecord.valueSchema() != Schema.OPTIONAL_BYTES_SCHEMA)
            throw new IllegalStateException("Expected sink record key/value to be optional bytes, but saw instead key: "
                    + sinkRecord.keySchema() + " value: " + sinkRecord.valueSchema() + ". Must use converter: " +
                    "org.apache.kafka.connect.converters.ByteArrayConverter");

        LOGGER.debug("Sending record {}", sinkRecord);

        try {
            producer.send(new ProducerRecord<>(sinkRecord.topic(), sinkRecord.kafkaPartition(), (byte[]) sinkRecord.key(),
                    (byte[]) sinkRecord.value()));
        } catch (KafkaException e) {
            // If send throws an exception ensure we always retry the record/collection
            throw new RetriableException(e);
        }
    }
}
 
Example #8
Source File: MongoSinkTask.java    From mongo-kafka with Apache License 2.0
private void checkRetriableException(final MongoSinkTopicConfig config, final MongoException e) {
  if (getRemainingRetriesForTopic(config.getTopic()).decrementAndGet() <= 0) {
    throw new DataException("Failed to write mongodb documents despite retrying", e);
  }
  Integer deferRetryMs = config.getInt(RETRIES_DEFER_TIMEOUT_CONFIG);
  LOGGER.debug("Deferring retry operation for {}ms", deferRetryMs);
  context.timeout(deferRetryMs);
  throw new RetriableException(e.getMessage(), e);
}
 
Example #9
Source File: SinkOperation.java    From kafka-connect-redis with Apache License 2.0
protected void wait(RedisFuture<?> future) throws InterruptedException {
  log.debug("wait() - future = {}", future);
  if (!future.await(this.config.operationTimeoutMs, TimeUnit.MILLISECONDS)) {
    future.cancel(true);
    throw new RetriableException(
        String.format("Timeout after %s ms while waiting for operation to complete.", this.config.operationTimeoutMs)
    );
  }
}
 
Example #10
Source File: MongoDbSinkTask.java    From kafka-connect-mongodb with Apache License 2.0
private void processSinkRecords(MongoCollection<BsonDocument> collection, List<SinkRecord> batch) {
    String collectionName = collection.getNamespace().getCollectionName();
    List<? extends WriteModel<BsonDocument>> docsToWrite =
            sinkConfig.isUsingCdcHandler(collectionName)
                    ? buildWriteModelCDC(batch,collectionName)
                    : buildWriteModel(batch,collectionName);
    try {
        if (!docsToWrite.isEmpty()) {
            LOGGER.debug("bulk writing {} document(s) into collection [{}]",
                    docsToWrite.size(), collection.getNamespace().getFullName());
            BulkWriteResult result = collection.bulkWrite(
                    docsToWrite, BULK_WRITE_OPTIONS);
            LOGGER.debug("mongodb bulk write result: " + result.toString());
        }
    } catch (MongoException mexc) {
        if (mexc instanceof BulkWriteException) {
            BulkWriteException bwe = (BulkWriteException) mexc;
            LOGGER.error("mongodb bulk write (partially) failed", bwe);
            LOGGER.error(bwe.getWriteResult().toString());
            LOGGER.error(bwe.getWriteErrors().toString());
            LOGGER.error(bwe.getWriteConcernError().toString());
        } else {
            LOGGER.error("error on mongodb operation", mexc);
            LOGGER.error("writing {} document(s) into collection [{}] failed -> remaining retries ({})",
                    docsToWrite.size(), collection.getNamespace().getFullName() ,remainingRetries);
        }
        if (remainingRetries-- <= 0) {
            throw new ConnectException("failed to write mongodb documents"
                    + " despite retrying -> GIVING UP! :( :( :(", mexc);
        }
        LOGGER.debug("deferring retry operation for {}ms", deferRetryMs);
        context.timeout(deferRetryMs);
        throw new RetriableException(mexc.getMessage(), mexc);
    }
}
 
Example #11
Source File: HttpSinkTask.java    From kafka-connect-http with Apache License 2.0
@Override
public void put(Collection<SinkRecord> records) {
  if (records.isEmpty()) {
    return;
  }
  final SinkRecord first = records.iterator().next();
  final int recordsCount = records.size();
  log.trace(
      "Received {} records. First record kafka coordinates:({}-{}-{}). Writing them to the "
      + "API...",
      recordsCount, first.topic(), first.kafkaPartition(), first.kafkaOffset()
  );
  try {
    writer.write(records);
  } catch (Exception e) {
    log.warn(
        "Write of {} records failed, remainingRetries={}",
        records.size(),
        remainingRetries,
        e
    );
    if (remainingRetries == 0) {
      throw new ConnectException(e);
    } else {
      initWriter();
      remainingRetries--;
      context.timeout(config.retryBackoffMs);
      throw new RetriableException(e);
    }
  }
  remainingRetries = config.maxRetries;
}
 
Example #12
Source File: KafkaSinkTaskTest.java    From common-kafka with Apache License 2.0
@Test (expected = RetriableException.class)
public void flush_producerThrowsException() {
    doThrow(new KafkaException()).when(kafkaProducer).flush();

    task.put(Collections.singletonList(sinkRecord));

    // Doesn't matter what the offset map is just need to call flush
    task.flush(Collections.emptyMap());
}
 
Example #13
Source File: KafkaSinkTask.java    From common-kafka with Apache License 2.0
@Override
public void flush(Map<TopicPartition, OffsetAndMetadata> offsets) {
    LOGGER.debug("Flushing kafka sink");

    try {
        producer.flush();
    } catch (IOException e) {
        LOGGER.debug("IOException on flush, re-throwing as retriable", e);
        // Re-throw exception as connect retriable since we just want connect to keep retrying forever
        throw new RetriableException(e);
    }

    super.flush(offsets);
}
 
Example #14
Source File: RestSinkTaskTest.java    From kafka-connect-rest with Apache License 2.0
@Test
public void shouldRetryInfinitelyWhenMaxRetriesIsNegative() throws Exception {
  when(executor.execute(any()))
    .thenThrow(new RetriableException("Test"))
    .thenThrow(new RetriableException("Test"))
    .thenThrow(new RetriableException("Test"))
    .thenThrow(new RetriableException("Test"))
    .thenThrow(new RetriableException("Test"))
    .thenReturn(mock(Response.class));

  subject.setMaxRetries(-1);
  subject.put(Arrays.asList(sinkRecord));

  verify(executor, times(6)).execute(any());
}
 
Example #15
Source File: OkHttpRequestExecutor.java    From kafka-connect-rest with Apache License 2.0
@Override
public com.tm.kafka.connect.rest.http.Response execute(com.tm.kafka.connect.rest.http.Request request) throws IOException {
  okhttp3.Request.Builder builder = new okhttp3.Request.Builder()
    .url(createUrl(request.getUrl(), request.getParameters()))
    .headers(Headers.of(headersToArray(request.getHeaders())));

  if ("GET".equalsIgnoreCase(request.getMethod())) {
    builder.get();
  } else {
    builder.method(request.getMethod(), RequestBody.create(
      MediaType.parse(request.getHeaders().getOrDefault("Content-Type", "")),
      request.getBody())
    );
  }

  okhttp3.Request okRequest = builder.build();
  log.trace("Making request to: " + request);

  try (okhttp3.Response okResponse = client.newCall(okRequest).execute()) {

    return new com.tm.kafka.connect.rest.http.Response(
      okResponse.code(),
      okResponse.headers().toMultimap(),
      okResponse.body() != null ? okResponse.body().string() : null
    );
  } catch (IOException e) {
    throw new RetriableException(e.getMessage(), e);
  }
}
 
Example #16
Source File: SinkRecordToPayloadConverter.java    From kafka-connect-aws-lambda with Apache License 2.0
default String apply(final SinkRecord record) {
  try {
    return convert(record);
  } catch (final Exception e) {
    throw new RetriableException("Payload converter " + getClass().getName() + " failed to convert '" + record.toString(), e);
  }
}
 
Example #17
Source File: OffsetSink.java    From kafka-backup with Apache License 2.0
public void syncConsumerGroups() {
    try {
        consumerGroups = adminClient.listConsumerGroups().all().get().stream().map(ConsumerGroupListing::groupId).collect(Collectors.toList());
    } catch (InterruptedException | ExecutionException e) {
        throw new RetriableException(e);
    }
}
 
Example #18
Source File: KafkaSinkTaskTest.java    From common-kafka with Apache License 2.0
@Test (expected = RetriableException.class)
public void put_producerThrowsException() {
    when(kafkaProducer.send(anyObject())).thenThrow(new KafkaException());
    task.put(Collections.singletonList(sinkRecord));
}
 
Example #19
Source File: DefaultResponseHandler.java    From kafka-connect-rest with Apache License 2.0
private void checkCodeIsForbidden(String code) {
  Matcher forbidden = forbiddenCodes.matcher(code);
  if (forbidden.find()) {
    throw new RetriableException("HTTP Response code is in blacklist " + code);
  }
}
 
Example #20
Source File: JMSWriter.java    From kafka-connect-mq-sink with Apache License 2.0
/**
 * Internal method to connect to MQ.
 *
 * @throws RetriableException Operation failed, but connector should continue to retry.
 * @throws ConnectException   Operation failed and connector should stop.
 */
private void connectInternal() throws ConnectException, RetriableException {
    log.trace("[{}] Entry {}.connectInternal", Thread.currentThread().getId(), this.getClass().getName());

    if (connected) {
        return;
    }

    try {
        if (userName != null) {
            jmsCtxt = mqConnFactory.createContext(userName, password, JMSContext.SESSION_TRANSACTED);
        }
        else {
            jmsCtxt = mqConnFactory.createContext(JMSContext.SESSION_TRANSACTED);
        }

        jmsProd = jmsCtxt.createProducer();
        jmsProd.setDeliveryMode(deliveryMode);
        jmsProd.setTimeToLive(timeToLive);
        reconnectDelayMillis = RECONNECT_DELAY_MILLIS_MIN;
        connected = true;
    }
    catch (JMSRuntimeException jmse) {
        // Delay slightly so that repeated reconnect loops don't run too fast
        try {
            Thread.sleep(reconnectDelayMillis);
        }
        catch (InterruptedException ie) {
            ;
        }

        if (reconnectDelayMillis < RECONNECT_DELAY_MILLIS_MAX)
        {
            reconnectDelayMillis = reconnectDelayMillis * 2;
        }

        log.error("JMS exception {}", jmse);
        throw handleException(jmse);
    }

    log.trace("[{}]  Exit {}.connectInternal", Thread.currentThread().getId(), this.getClass().getName());
}
 
Example #21
Source File: JMSWriter.java    From kafka-connect-mq-sink with Apache License 2.0
/**
 * Handles exceptions from MQ. Some JMS exceptions are treated as retriable meaning that the
 * connector can keep running and just trying again is likely to fix things.
 */
private ConnectException handleException(Throwable exc) {
    boolean isRetriable = false;
    boolean mustClose = true;
    int reason = -1;

    // Try to extract the MQ reason code to see if it's a retriable exception
    Throwable t = exc.getCause();
    while (t != null) {
        if (t instanceof MQException) {
            MQException mqe = (MQException)t;
            log.error("MQ error: CompCode {}, Reason {}", mqe.getCompCode(), mqe.getReason());
            reason = mqe.getReason();
            break;
        }
        t = t.getCause();
    }

    switch (reason)
    {
        // These reason codes indicate that the connection needs to be closed, but just retrying later
        // will probably recover
        case MQConstants.MQRC_BACKED_OUT:
        case MQConstants.MQRC_CHANNEL_NOT_AVAILABLE:
        case MQConstants.MQRC_CONNECTION_BROKEN:
        case MQConstants.MQRC_HOST_NOT_AVAILABLE:
        case MQConstants.MQRC_NOT_AUTHORIZED:
        case MQConstants.MQRC_Q_MGR_NOT_AVAILABLE:
        case MQConstants.MQRC_Q_MGR_QUIESCING:
        case MQConstants.MQRC_Q_MGR_STOPPING:
        case MQConstants.MQRC_UNEXPECTED_ERROR:
            isRetriable = true;
            break;

        // These reason codes indicate that the connection is still OK, but just retrying later
        // will probably recover - possibly with administrative action on the queue manager
        case MQConstants.MQRC_Q_FULL:
        case MQConstants.MQRC_PUT_INHIBITED:
            isRetriable = true;
            mustClose = false;
            break;
    }

    if (mustClose) {
        close();
    }

    if (isRetriable) {
        return new RetriableException(exc);
    }

    return new ConnectException(exc);
}
 
Example #22
Source File: JMSReader.java    From kafka-connect-mq-source with Apache License 2.0
/**
 * Handles exceptions from MQ. Some JMS exceptions are treated as retriable meaning that the
 * connector can keep running and just trying again is likely to fix things.
 */
private ConnectException handleException(Throwable exc) {
    boolean isRetriable = false;
    boolean mustClose = true;
    int reason = -1;

    // Try to extract the MQ reason code to see if it's a retriable exception
    Throwable t = exc.getCause();
    while (t != null) {
        if (t instanceof MQException) {
            MQException mqe = (MQException)t;
            log.error("MQ error: CompCode {}, Reason {} {}", mqe.getCompCode(), mqe.getReason(),
                      MQConstants.lookupReasonCode(mqe.getReason()));
            reason = mqe.getReason();
            break;
        }
        else if (t instanceof JMSException) {
            JMSException jmse = (JMSException)t;
            log.error("JMS exception: error code {}", jmse.getErrorCode());
        }

        t = t.getCause();
    }

    switch (reason)
    {
        // These reason codes indicate that the connection needs to be closed, but just retrying later
        // will probably recover
        case MQConstants.MQRC_BACKED_OUT:
        case MQConstants.MQRC_CHANNEL_NOT_AVAILABLE:
        case MQConstants.MQRC_CONNECTION_BROKEN:
        case MQConstants.MQRC_HOST_NOT_AVAILABLE:
        case MQConstants.MQRC_NOT_AUTHORIZED:
        case MQConstants.MQRC_Q_MGR_NOT_AVAILABLE:
        case MQConstants.MQRC_Q_MGR_QUIESCING:
        case MQConstants.MQRC_Q_MGR_STOPPING:
        case MQConstants.MQRC_UNEXPECTED_ERROR:
            isRetriable = true;
            break;

        // These reason codes indicate that the connection is still OK, but just retrying later
        // will probably recover - possibly with administrative action on the queue manager
        case MQConstants.MQRC_GET_INHIBITED:
            isRetriable = true;
            mustClose = false;
            break;
    }

    if (mustClose) {
        // Delay so that repeated reconnect loops don't run too fast
        try {
            Thread.sleep(RECONNECT_DELAY_MILLIS_MAX);
        }
        catch (InterruptedException ie) {
            ;
        }
        closeInternal();
    }

    if (isRetriable) {
        return new RetriableException(exc);
    }

    return new ConnectException(exc);
}
 
Example #23
Source File: DefaultResponseHandler.java    From kafka-connect-rest with Apache License 2.0
private void checkCodeIsAllowed(String code) {
  Matcher allowed = allowedCodes.matcher(code);
  if (!allowed.find()) {
    throw new RetriableException("HTTP Response code is not whitelisted " + code);
  }
}
 
Example #24
Source File: SplunkHttpSinkTask.java    From kafka-connect-splunk with Apache License 2.0
@Override
public void put(Collection<SinkRecord> collection) {
  if (collection.isEmpty()) {
    log.trace("No records in collection.");
    return;
  }

  try {
    log.trace("Posting {} message(s) to {}", collection.size(), this.eventCollectorUrl);

    SinkRecordContent sinkRecordContent = new SinkRecordContent(collection);

    if (log.isTraceEnabled()) {
      try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) {
        sinkRecordContent.writeTo(outputStream);
        outputStream.flush();
        byte[] buffer = outputStream.toByteArray();
        log.trace("Posting\n{}", new String(buffer, "UTF-8"));
      } catch (IOException ex) {
        if (log.isTraceEnabled()) {
          log.trace("exception thrown while previewing post", ex);
        }
      }
    }

    HttpRequest httpRequest = this.httpRequestFactory.buildPostRequest(this.eventCollectorUrl, sinkRecordContent);
    HttpResponse httpResponse = httpRequest.execute();

    if (httpResponse.getStatusCode() == 403) {
      throw new ConnectException("Authentication was not successful. Please check the token with Splunk.");
    }

    if (httpResponse.getStatusCode() == 417) {
      log.warn("This exception happens when too much content is pushed to splunk per call. Look at this blog post " +
          "http://blogs.splunk.com/2016/08/12/handling-http-event-collector-hec-content-length-too-large-errors-without-pulling-your-hair-out/" +
          " Setting consumer.max.poll.records to a lower value will decrease the number of message posted to Splunk " +
          "at once.");
      throw new ConnectException("Status 417: Content-Length of XXXXX too large (maximum is 1000000). Verify Splunk config or " +
          " lower the value in consumer.max.poll.records.");
    }

    if (JSON_MEDIA_TYPE.equalsIgnoreParameters(httpResponse.getMediaType())) {
      SplunkStatusMessage statusMessage = httpResponse.parseAs(SplunkStatusMessage.class);

      if (!statusMessage.isSuccessful()) {
        throw new RetriableException(statusMessage.toString());
      }
    } else {
      throw new RetriableException("Media type of " + Json.MEDIA_TYPE + " was not returned.");
    }
  } catch (IOException e) {
    throw new RetriableException(
        String.format("Exception while posting data to %s.", this.eventCollectorUrl),
        e
    );
  }
}
 
Example #25
Source File: SnowflakeSinkTask.java    From snowflake-kafka-connector with Apache License 2.0
/**
 * Sync committed offsets
 *
 * @param offsets - the current map of offsets as of the last call to put
 * @return a map of offsets by topic-partition that are safe to commit
 * @throws RetriableException if any issue occurs during processing
 */
@Override
public Map<TopicPartition, OffsetAndMetadata> preCommit(
  Map<TopicPartition, OffsetAndMetadata> offsets)
  throws RetriableException
{
  long startTime = System.currentTimeMillis();
  LOGGER.info(Logging.logMessage("SnowflakeSinkTask[ID:{}]:preCommit", this.id));

  if (sink == null || sink.isClosed())
  {
    LOGGER.warn(Logging.logMessage("SnowflakeSinkTask[ID:{}]: sink " +
      "not initialized or closed before preCommit", this.id));
    return offsets;
  }

  Map<TopicPartition, OffsetAndMetadata> committedOffsets = new HashMap<>();
  // it's ok to just log the error since commit can retry
  try
  {
    offsets.forEach(
      (topicPartition, offsetAndMetadata) ->
      {
        long offSet = sink.getOffset(topicPartition);
        if (offSet == 0) {
          committedOffsets.put(topicPartition, offsetAndMetadata);
          //todo: update offset?
        } else {
          committedOffsets.put(topicPartition,
            new OffsetAndMetadata(sink.getOffset(topicPartition)));
        }
      }
    );
  } catch (Exception e)
  {
    LOGGER.error(Logging.logMessage("SnowflakeSinkTask[ID:{}]: Error " +
      "while preCommit: {} ", this.id, e.getMessage()));
    return offsets;
  }
  LOGGER.info(Logging.logMessage("SnowflakeSinkTask[ID:{}]:preCommit. Time: {} seconds", this.id,
    (System.currentTimeMillis() - startTime) / 1000));
  return committedOffsets;
}