Java Code Examples for com.google.api.client.util.BackOff#STOP

The following examples show how to use com.google.api.client.util.BackOff#STOP. They are drawn from open-source projects; the project and source file for each example are noted above it.
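
BackOff.STOP is the sentinel value (-1L) that BackOff#nextBackOffMillis() returns once the back-off policy is exhausted and no further retry should be attempted. Before the project-specific examples, here is a minimal sketch of the common pattern; the Operation interface and runWithRetries helper are placeholders for illustration, not part of the library:

import com.google.api.client.util.BackOff;
import com.google.api.client.util.ExponentialBackOff;
import com.google.api.client.util.Sleeper;

import java.io.IOException;

public class BackOffStopSketch {

  /** Hypothetical operation that may fail transiently. */
  interface Operation<T> {
    T run() throws IOException;
  }

  static <T> T runWithRetries(Operation<T> operation) throws IOException, InterruptedException {
    BackOff backOff = new ExponentialBackOff();
    while (true) {
      try {
        return operation.run();
      } catch (IOException e) {
        long backOffMillis = backOff.nextBackOffMillis();
        // BackOff.STOP signals that the policy is exhausted; give up and rethrow.
        if (backOffMillis == BackOff.STOP) {
          throw e;
        }
        Sleeper.DEFAULT.sleep(backOffMillis);
      }
    }
  }
}

Example 5 below instead delegates the sleep to BackOffUtils.next(Sleeper.DEFAULT, backOff), which calls nextBackOffMillis() and sleeps in one step, returning false once BackOff.STOP is reached.
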
Example 1
Source File: BatchRequestService.java    From connector-sdk with Apache License 2.0
private boolean backOff(List<AsyncRequest<?>> pendingTasks) {
  long backOffMillis = getBackOffTime();
  if (backOffMillis == BackOff.STOP) {
    pendingTasks.forEach(
        t ->
            onFailure(
                t, new ExecutionException(new Exception("Maximum back-off cycle reached."))));
    return false;
  }
  try {
    batchRequestHelper.sleep(backOffMillis);
  } catch (InterruptedException e) {
    logger.log(Level.WARNING, "Task interrupted while processing batch requests, stopping.", e);
    pendingTasks.forEach(t -> onFailure(t, e));
    Thread.currentThread().interrupt();
    return false;
  }
  return true;
}
 
Example 2
Source File: PistachiosClient.java    From Pistachio with Apache License 2.0
void waitBeforeRetry(Exception me) throws Exception {
    if (me instanceof MasterNotFoundException && !noMasterAutoRetry) {
        failureMeter.mark();
        throw me;
    }

    if (me instanceof ConnectionBrokenException && !connectionBrokenAutoRetry) {
        failureMeter.mark();
        throw me;
    }

    backOffMillis = backoff.nextBackOffMillis();
    if (backOffMillis == BackOff.STOP) {
        failureMeter.mark();
        throw me;
    }
    logger.debug("no master found, auto retry after sleeping {} ms", backOffMillis);
    try {
        Thread.sleep(backOffMillis);
    } catch (InterruptedException e) {
        // Restore the interrupt flag; the caller decides whether to keep retrying.
        Thread.currentThread().interrupt();
    }
}
 
Example 3
Source File: DeleteVariants.java    From dataflow-java with Apache License 2.0
@ProcessElement
public void processElement(DoFn<String, Integer>.ProcessContext context) throws Exception {
  String variantId = context.element();
  // Call the deletion operation via exponential backoff so that "Rate Limit Exceeded"
  // quota issues do not cause the pipeline to fail.
  ExponentialBackOff backoff = new ExponentialBackOff.Builder().build();
  while (true) {
    try {
      genomics.variants().delete(variantId).execute();
      Metrics.counter(DeleteVariantFn.class, "Number of variants deleted").inc();
      context.output(1);
      return;
    } catch (Exception e) {
      if (e.getMessage().startsWith("429 Too Many Requests")) {
        LOG.warn("Backing-off per: ", e);
        long backOffMillis = backoff.nextBackOffMillis();
        if (backOffMillis == BackOff.STOP) {
          throw e;
        }
        Thread.sleep(backOffMillis);
      } else {
        throw e;
      }
    }
  }
}
 
Example 4
Source File: ResilientOperation.java    From hadoop-connectors with Apache License 2.0
/**
 * Determines the amount to sleep for and sleeps if needed.
 *
 * @param backoff BackOff to determine how long to sleep for
 * @param sleeper Used to sleep
 * @param currentException exception that caused the retry and sleep. For logging.
 * @return true if a back-off sleep was performed, false if {@link BackOff#STOP} was reached
 * @throws InterruptedException if sleep is interrupted
 */
private static boolean nextSleep(BackOff backoff, Sleeper sleeper, Exception currentException)
    throws InterruptedException {
  long backOffTime;
  try {
    backOffTime = backoff.nextBackOffMillis();
  } catch (IOException e) {
    throw new RuntimeException("Failed to to get next back off time", e);
  }
  if (backOffTime == BackOff.STOP) {
    return false;
  }
  logger.atInfo().withCause(currentException).log(
      "Transient exception caught. Sleeping for %d, then retrying.", backOffTime);
  sleeper.sleep(backOffTime);
  return true;
}
 
Example 5
Source File: BaseWorkflowSample.java    From googleads-shopping-samples with Apache License 2.0
protected <T extends GenericJson> T retryFailures(
    AbstractGoogleClientRequest<T> request, BackOff backOff) throws IOException {
  while (true) {
    try {
      return request.execute();
    } catch (GoogleJsonResponseException e) {
      try {
        long nextPause = backOff.nextBackOffMillis();
        if (nextPause == BackOff.STOP) {
          throw e;
        }
        System.out.printf("Operation failed, retrying in %f seconds.%n", nextPause / 1000.0);
        // Note: BackOffUtils.next() calls nextBackOffMillis() again, so the actual
        // sleep interval advances past the value that was just printed.
        BackOffUtils.next(Sleeper.DEFAULT, backOff);
      } catch (InterruptedException ie) {
        // Just go straight into retry if interrupted.
      }
    }
  }
}
 
Example 6
Source File: BatchRequestService.java    From connector-sdk with Apache License 2.0
private long getBackOffTime() {
  long backOffMillis = BackOff.STOP;
  try {
    backOffMillis = backOff.nextBackOffMillis();
  } catch (IOException e) {
    // Keep the default (BackOff.STOP) if the back-off policy throws.
  }
  return backOffMillis;
}
 
Example 7
Source File: RetryHttpRequestInitializer.java    From beam with Apache License 2.0
/** Returns true iff performing the backoff was successful. */
private boolean backOffWasSuccessful(BackOff backOff) {
  try {
    long backOffTime = backOff.nextBackOffMillis();
    if (backOffTime == BackOff.STOP) {
      return false;
    }
    throttlingSeconds.inc(backOffTime / 1000);
    sleeper.sleep(backOffTime);
    return true;
  } catch (InterruptedException | IOException e) {
    return false;
  }
}
 
Example 8
Source File: RetryBoundedBackOff.java    From hadoop-connectors with Apache License 2.0
@Override
public long nextBackOffMillis() throws IOException {
  if (retriesAttempted >= maxRetries) {
    return BackOff.STOP;
  }
  long next = backoff.nextBackOffMillis();
  if (next == BackOff.STOP) {
    return BackOff.STOP;
  }
  retriesAttempted++;
  return next;
}
 
Example 9
Source File: PackageUtil.java    From beam with Apache License 2.0
private StagingResult tryStagePackageWithRetry(
    PackageAttributes attributes, Sleeper retrySleeper, CreateOptions createOptions)
    throws IOException, InterruptedException {
  String sourceDescription = attributes.getSourceDescription();
  String target = attributes.getDestination().getLocation();
  BackOff backoff = BackOffAdapter.toGcpBackOff(BACKOFF_FACTORY.backoff());

  while (true) {
    try {
      return tryStagePackage(attributes, createOptions);
    } catch (IOException ioException) {

      if (ERROR_EXTRACTOR.accessDenied(ioException)) {
        String errorMessage =
            String.format(
                "Uploaded failed due to permissions error, will NOT retry staging "
                    + "of %s. Please verify credentials are valid and that you have "
                    + "write access to %s. Stale credentials can be resolved by executing "
                    + "'gcloud auth application-default login'.",
                sourceDescription, target);
        LOG.error(errorMessage);
        throw new IOException(errorMessage, ioException);
      }

      long sleep = backoff.nextBackOffMillis();
      if (sleep == BackOff.STOP) {
        LOG.error(
            "Upload failed, will NOT retry staging of package: {}",
            sourceDescription,
            ioException);
        throw new RuntimeException(
            String.format("Could not stage %s to %s", sourceDescription, target), ioException);
      } else {
        LOG.warn(
            "Upload attempt failed, sleeping before retrying staging of package: {}",
            sourceDescription,
            ioException);
        retrySleeper.sleep(sleep);
      }
    }
  }
}
 
Example 10
Source File: BigQueryUtils.java    From hadoop-connectors with Apache License 2.0
/**
 * Polls job until it is completed.
 *
 * @param bigquery the Bigquery instance to poll.
 * @param projectId the ID of the project that owns the job.
 * @param jobReference the job to poll.
 * @param progressable used to report progress so the task does not time out.
 * @throws IOException on IO Error.
 * @throws InterruptedException on sleep interrupt.
 */
public static void waitForJobCompletion(
    Bigquery bigquery, String projectId, JobReference jobReference, Progressable progressable)
    throws IOException, InterruptedException {

  Sleeper sleeper = Sleeper.DEFAULT;
  BackOff pollBackOff =
      new ExponentialBackOff.Builder()
          .setMaxIntervalMillis(POLL_WAIT_INTERVAL_MAX_MILLIS)
          .setInitialIntervalMillis(POLL_WAIT_INITIAL_MILLIS)
          .setMaxElapsedTimeMillis(POLL_WAIT_MAX_ELAPSED_MILLIS)
          .build();

  // Get starting time.
  long startTime = System.currentTimeMillis();
  long elapsedTime = 0;
  boolean notDone = true;

  // While job is incomplete continue to poll.
  while (notDone) {
    BackOff operationBackOff = new ExponentialBackOff();
    Get get =
        bigquery
            .jobs()
            .get(projectId, jobReference.getJobId())
            .setLocation(jobReference.getLocation());

    Job pollJob =
        ResilientOperation.retry(
            ResilientOperation.getGoogleRequestCallable(get),
            operationBackOff,
            RetryDeterminer.RATE_LIMIT_ERRORS,
            IOException.class,
            sleeper);

    elapsedTime = System.currentTimeMillis() - startTime;
    logger.atFine().log(
        "Job status (%s ms) %s: %s",
        elapsedTime, jobReference.getJobId(), pollJob.getStatus().getState());
    if (pollJob.getStatus().getState().equals("DONE")) {
      notDone = false;
      if (pollJob.getStatus().getErrorResult() != null) {
        throw new IOException(
            String.format(
                "Job %s failed: %s. Errors: %s",
                jobReference.getJobId(),
                pollJob.getStatus().getErrorResult(),
                pollJob.getStatus().getErrors()));
      }
    } else {
      long millisToWait = pollBackOff.nextBackOffMillis();
      if (millisToWait == BackOff.STOP) {
        throw new IOException(
            String.format(
                "Job %s failed to complete after %s millis.",
                jobReference.getJobId(), elapsedTime));
      }
      // Pause execution for the configured duration before polling job status again.
      Thread.sleep(millisToWait);
      // Call progress to ensure task doesn't time out.
      progressable.progress();
    }
  }
}
 
Example 11
Source File: GoogleCloudStorageImpl.java    From hadoop-connectors with Apache License 2.0
/**
 * Helper to check whether an empty object already exists with the expected metadata specified
 * in {@code options}, to be used to determine whether it's safe to ignore an exception that
 * was thrown when trying to create the object, {@code exceptionOnCreate}.
 */
private boolean canIgnoreExceptionForEmptyObject(
    IOException exceptionOnCreate, StorageResourceId resourceId, CreateObjectOptions options)
    throws IOException {
  // TODO(user): Maybe also add 409 and even 412 errors if they pop up in this use case.
  // 500 ISE and 503 Service Unavailable tend to be raised when spamming GCS with create requests:
  if (errorExtractor.rateLimited(exceptionOnCreate)
      || errorExtractor.internalServerError(exceptionOnCreate)) {
    // We know that this is an error that is most often associated with trying to create an empty
    // object from multiple workers at the same time. We perform the following assuming that we
    // will eventually succeed and find an existing object. This will add up to a user-defined
    // maximum delay that caller will wait to receive an exception in the case of an incorrect
    // assumption and this being a scenario other than the multiple workers racing situation.
    GoogleCloudStorageItemInfo existingInfo;
    BackOff backOff;
    int maxWaitMillis = storageOptions.getMaxWaitMillisForEmptyObjectCreation();
    if (maxWaitMillis > 0) {
      backOff = new ExponentialBackOff.Builder()
          .setMaxElapsedTimeMillis(maxWaitMillis)
          .setMaxIntervalMillis(500)
          .setInitialIntervalMillis(100)
          .setMultiplier(1.5)
          .setRandomizationFactor(0.15)
          .build();
    } else {
      backOff = BackOff.STOP_BACKOFF;
    }
    long nextSleep = 0L;
    do {
      if (nextSleep > 0) {
        try {
          sleeper.sleep(nextSleep);
        } catch (InterruptedException e) {
          // We caught an InterruptedException, we should set the interrupted bit on this thread.
          Thread.currentThread().interrupt();
          nextSleep = BackOff.STOP;
        }
      }
      existingInfo = getItemInfo(resourceId);
      nextSleep = nextSleep == BackOff.STOP ? BackOff.STOP : backOff.nextBackOffMillis();
    } while (!existingInfo.exists() && nextSleep != BackOff.STOP);

    // Compare existence, size, and metadata; for 429 errors creating an empty object,
    // we don't care about metaGeneration/contentGeneration as long as the metadata
    // matches, since we don't know for sure whether our low-level request succeeded
    // first or some other client succeeded first.
    if (existingInfo.exists() && existingInfo.getSize() == 0) {
      if (!options.getRequireMetadataMatchForEmptyObjects()) {
        return true;
      } else if (existingInfo.metadataEquals(options.getMetadata())) {
        return true;
      }
    }
  }
  return false;
}