Java Code Examples for com.google.common.util.concurrent.Futures#catchingAsync()

The following examples show how to use com.google.common.util.concurrent.Futures#catchingAsync(). Each example is drawn from an open-source project; the source file, project, and license are noted above each snippet.
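
Before the project examples, here is a minimal, self-contained sketch of the method's basic contract: catchingAsync() returns a future that completes with the input future's result, unless the input fails with the given exception type, in which case the fallback function supplies a replacement future. The class name CatchingAsyncDemo below is illustrative and not taken from any of the projects.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;

public class CatchingAsyncDemo {
  public static void main(String[] args) throws Exception {
    // A future that has already failed with an IllegalStateException.
    ListenableFuture<String> failing =
        Futures.immediateFailedFuture(new IllegalStateException("boom"));

    // catchingAsync maps a failure of the given exception type to a fallback future.
    // Successes, and failures of other types, pass through unchanged.
    ListenableFuture<String> recovered =
        Futures.catchingAsync(
            failing,
            IllegalStateException.class,
            e -> Futures.immediateFuture("recovered from: " + e.getMessage()),
            MoreExecutors.directExecutor());

    System.out.println(recovered.get()); // prints "recovered from: boom"
  }
}
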
Example 1
Source File: ByteStreamUploader.java    From bazel-buildfarm with Apache License 2.0
private ListenableFuture<Void> guardQueryWithSuppression(
    Exception e, AtomicLong committedOffset, ProgressiveBackoff progressiveBackoff) {
  // we are destined to return this, avoid recreating it
  ListenableFuture<Void> exceptionFuture = Futures.immediateFailedFuture(e);

  // FIXME we should also return immediately without the query if
  // we were out of retry attempts for the underlying backoff. This
  // is meant to be an only in-between-retries query request.
  if (!retrier.isRetriable(Status.fromThrowable(e))) {
    return exceptionFuture;
  }

  ListenableFuture<Void> suppressedQueryFuture =
      Futures.catchingAsync(
          query(committedOffset, progressiveBackoff),
          Throwable.class,
          (t) -> {
            // if the query threw an exception, add it to the suppressions
            // for the destined exception
            e.addSuppressed(t);
            return exceptionFuture;
          },
          MoreExecutors.directExecutor());
  return Futures.transformAsync(
      suppressedQueryFuture, (result) -> exceptionFuture, MoreExecutors.directExecutor());
}
 
Example 2
Source File: AbstractSchemaRepository.java    From yangtools with Eclipse Public License 1.0
@SuppressWarnings("unchecked")
private static <T extends SchemaSourceRepresentation> ListenableFuture<T> fetchSource(
        final SourceIdentifier id, final Iterator<AbstractSchemaSourceRegistration<?>> it) {
    final AbstractSchemaSourceRegistration<?> reg = it.next();

    return Futures.catchingAsync(((SchemaSourceProvider<T>)reg.getProvider()).getSource(id), Throwable.class,
        input -> {
            LOG.debug("Failed to acquire source from {}", reg, input);

            if (it.hasNext()) {
                return fetchSource(id, it);
            }

            throw new MissingSchemaSourceException("All available providers exhausted", id, input);
        }, MoreExecutors.directExecutor());
}
 
Example 3
Source File: GrpcCacheClient.java    From bazel with Apache License 2.0
private ListenableFuture<Void> downloadBlob(
    Digest digest,
    OutputStream out,
    @Nullable Supplier<HashCode> hashSupplier) {
  Context ctx = Context.current();
  AtomicLong offset = new AtomicLong(0);
  ProgressiveBackoff progressiveBackoff = new ProgressiveBackoff(retrier::newBackoff);
  return Futures.catchingAsync(
      retrier.executeAsync(
          () ->
              ctx.call(() -> requestRead(offset, progressiveBackoff, digest, out, hashSupplier)),
          progressiveBackoff),
      StatusRuntimeException.class,
      (e) -> Futures.immediateFailedFuture(new IOException(e)),
      MoreExecutors.directExecutor());
}
 
Example 4
Source File: ByteStreamUploader.java    From bazel with Apache License 2.0
private ListenableFuture<Void> guardQueryWithSuppression(
    Exception e, AtomicLong committedOffset, ProgressiveBackoff progressiveBackoff) {
  // we are destined to return this, avoid recreating it
  ListenableFuture<Void> exceptionFuture = Futures.immediateFailedFuture(e);

  // TODO(buchgr): we should also return immediately without the query if
  // we were out of retry attempts for the underlying backoff. This
  // is meant to be an only in-between-retries query request.
  if (!retrier.isRetriable(e)) {
    return exceptionFuture;
  }

  ListenableFuture<Void> suppressedQueryFuture =
      Futures.catchingAsync(
          query(committedOffset, progressiveBackoff),
          Exception.class,
          (queryException) -> {
            // if the query threw an exception, add it to the suppressions
            // for the destined exception
            e.addSuppressed(queryException);
            return exceptionFuture;
          },
          MoreExecutors.directExecutor());
  return Futures.transformAsync(
      suppressedQueryFuture, (result) -> exceptionFuture, MoreExecutors.directExecutor());
}
 
Example 5
Source File: BuckQueryEnvironment.java    From buck with Apache License 2.0
private static ListenableFuture<Unit> attachParentNodeToErrorMessage(
    BuildTarget buildTarget, BuildTarget parseDep, ListenableFuture<Unit> depWork) {
  return Futures.catchingAsync(
      depWork,
      Exception.class,
      exceptionInput -> {
        if (exceptionInput instanceof BuildFileParseException) {
          if (exceptionInput instanceof BuildTargetException) {
            throw ParserMessages.createReadableExceptionWithWhenSuffix(
                buildTarget, parseDep, (BuildTargetException) exceptionInput);
          } else {
            throw ParserMessages.createReadableExceptionWithWhenSuffix(
                buildTarget, parseDep, (BuildFileParseException) exceptionInput);
          }
        }
        throw exceptionInput;
      });
}
 
Example 6
Source File: EtcdClient.java    From etcd-java with Apache License 2.0
private ListenableFuture<AuthenticateResponse> authenticate() {
    AuthenticateRequest request = AuthenticateRequest.newBuilder()
            .setNameBytes(name).setPasswordBytes(password).build();
    // no call creds for auth call
    CallOptions callOpts = CallOptions.DEFAULT;
    return Futures.catchingAsync(
            grpc.fuCall(METHOD_AUTHENTICATE, request, callOpts, 0L),
                Exception.class, ex -> !retryAuthRequest(ex)
                    ? Futures.immediateFailedFuture(ex)
                    : grpc.fuCall(METHOD_AUTHENTICATE, request, callOpts, 0L),
            directExecutor());
}
 
Example 7
Source File: ByteStreamUploader.java    From bazel-buildfarm with Apache License 2.0
private ListenableFuture<Void> callAndQueryOnFailure(
    AtomicLong committedOffset, ProgressiveBackoff progressiveBackoff) {
  return Futures.catchingAsync(
      call(committedOffset),
      Exception.class,
      (e) -> guardQueryWithSuppression(e, committedOffset, progressiveBackoff),
      MoreExecutors.directExecutor());
}
 
Example 8
Source File: Retrier.java    From bazel-buildfarm with Apache License 2.0
/**
 * Executes an {@link AsyncCallable}, retrying execution in case of failure with the given
 * backoff.
 */
public <T> ListenableFuture<T> executeAsync(AsyncCallable<T> call, Backoff backoff) {
  try {
    return Futures.catchingAsync(
        call.call(),
        Exception.class,
        t -> onExecuteAsyncFailure(t, call, backoff),
        MoreExecutors.directExecutor());
  } catch (Exception e) {
    return onExecuteAsyncFailure(e, call, backoff);
  }
}
 
Example 9
Source File: GrpcCacheClient.java    From bazel with Apache License 2.0
private ListenableFuture<ActionResult> handleStatus(ListenableFuture<ActionResult> download) {
  return Futures.catchingAsync(
      download,
      StatusRuntimeException.class,
      (sre) ->
          sre.getStatus().getCode() == Code.NOT_FOUND
              // Return null to indicate that it was a cache miss.
              ? Futures.immediateFuture(null)
              : Futures.immediateFailedFuture(new IOException(sre)),
      MoreExecutors.directExecutor());
}
 
Example 10
Source File: DiskAndRemoteCacheClient.java    From bazel with Apache License 2.0
private static ListenableFuture<Void> closeStreamOnError(
    ListenableFuture<Void> f, OutputStream out) {
  return Futures.catchingAsync(
      f,
      Exception.class,
      (rootCause) -> {
        try {
          out.close();
        } catch (IOException e) {
          rootCause.addSuppressed(e);
        }
        return Futures.immediateFailedFuture(rootCause);
      },
      MoreExecutors.directExecutor());
}
 
Example 11
Source File: Retrier.java    From bazel with Apache License 2.0
/**
 * Executes an {@link AsyncCallable}, retrying execution in case of failure with the given
 * backoff.
 */
public <T> ListenableFuture<T> executeAsync(AsyncCallable<T> call, Backoff backoff) {
  try {
    return Futures.catchingAsync(
        call.call(),
        Exception.class,
        t -> onExecuteAsyncFailure(t, call, backoff),
        MoreExecutors.directExecutor());
  } catch (Exception e) {
    return onExecuteAsyncFailure(e, call, backoff);
  }
}
 
Example 12
Source File: ByteStreamUploader.java    From bazel with Apache License 2.0
private ListenableFuture<Void> callAndQueryOnFailure(
    AtomicLong committedOffset, ProgressiveBackoff progressiveBackoff) {
  return Futures.catchingAsync(
      call(committedOffset),
      Exception.class,
      (e) -> guardQueryWithSuppression(e, committedOffset, progressiveBackoff),
      Context.current().fixedContextExecutor(MoreExecutors.directExecutor()));
}
 
Example 13
Source File: ByteStreamUploader.java    From bazel with Apache License 2.0
private ListenableFuture<Void> query(
    AtomicLong committedOffset, ProgressiveBackoff progressiveBackoff) {
  ListenableFuture<Long> committedSizeFuture =
      Futures.transform(
          bsFutureStub()
              .queryWriteStatus(
                  QueryWriteStatusRequest.newBuilder().setResourceName(resourceName).build()),
          (response) -> response.getCommittedSize(),
          MoreExecutors.directExecutor());
  ListenableFuture<Long> guardedCommittedSizeFuture =
      Futures.catchingAsync(
          committedSizeFuture,
          Exception.class,
          (e) -> {
            Status status = Status.fromThrowable(e);
            if (status.getCode() == Code.UNIMPLEMENTED) {
              // if the bytestream server does not implement the query, insist
              // that we should reset the upload
              return Futures.immediateFuture(0L);
            }
            return Futures.immediateFailedFuture(e);
          },
          MoreExecutors.directExecutor());
  return Futures.transformAsync(
      guardedCommittedSizeFuture,
      (committedSize) -> {
        if (committedSize > committedOffset.get()) {
          // we have made progress on this upload in the last request,
          // reset the backoff so that this request has a full deck of retries
          progressiveBackoff.reset();
        }
        committedOffset.set(committedSize);
        return Futures.immediateFuture(null);
      },
      MoreExecutors.directExecutor());
}
 
Example 14
Source File: GrpcClient.java    From etcd-java with Apache License 2.0
private <ReqT,R> ListenableFuture<R> call(MethodDescriptor<ReqT,R> method,
        Condition precondition, ReqT request, Executor executor, RetryDecision<ReqT> retry,
        int attempt, boolean afterReauth, boolean backoff, Deadline deadline, long timeoutMs) {

    if (precondition != null && !precondition.satisfied()) {
        return failInExecutor(new CancellationException("precondition false"), executor);
    }
    //TODO(maybe) in delay case (attempt > 1), if "session" is inactive,
    //     skip attempt (and don't increment attempt #)
    final CallOptions baseCallOpts = getCallOptions();
    CallOptions callOpts = deadline != null ? baseCallOpts.withDeadline(deadline) : baseCallOpts;
    if (executor != null) {
        callOpts = callOpts.withExecutor(executor);
    }
    return Futures.catchingAsync(fuCall(method, request, callOpts, timeoutMs), Exception.class, t -> {
        // first cases: determine if we fail immediately
        if ((!backoff && attempt > 0) || (deadline != null && deadline.isExpired())) {
            // multiple retries disabled or deadline expired
            return Futures.immediateFailedFuture(t);
        }
        boolean reauth = false;
        if (authProvider.requiresReauth(t)) {
            if (afterReauth) {
                // if we have an auth failure immediately following a reauth, give up
                // (important to avoid infinite loop of auth failures)
                return Futures.immediateFailedFuture(t);
            }
            reauthenticate(baseCallOpts, t);
            reauth = true;
        } else if (!retry.retry(t, request)) {
            // retry predicate says no (non retryable request and/or error)
            return Futures.immediateFailedFuture(t);
        }

        // second case: immediate retry (first failure or after auth failure + reauth)
        if (reauth || attempt == 0 && immediateRetryLimiter.tryAcquire()) {
            return call(method, precondition, request, executor, retry,
                    reauth ? attempt : 1, reauth, backoff, deadline, timeoutMs);
        }
        int nextAttempt = attempt <= 1 ? 2 : attempt + 1; // skip attempt if we were rate-limited

        // final case: retry after back-off delay
        long delayMs = delayAfterFailureMs(nextAttempt);
        if (deadline != null && deadline.timeRemaining(MILLISECONDS) < delayMs) {
            return Futures.immediateFailedFuture(t);
        }
        return Futures.scheduleAsync(() -> call(method, precondition, request, executor, retry,
                nextAttempt, false, backoff, deadline, timeoutMs), delayMs, MILLISECONDS, ses);
    }, executor != null ? executor : directExecutor());
}
 
Example 15
Source File: GrpcCacheClient.java    From bazel with Apache License 2.0
@Override
public ListenableFuture<ImmutableSet<Digest>> findMissingDigests(Iterable<Digest> digests) {
  if (Iterables.isEmpty(digests)) {
    return Futures.immediateFuture(ImmutableSet.of());
  }
  // Need to potentially split the digests into multiple requests.
  FindMissingBlobsRequest.Builder requestBuilder =
      FindMissingBlobsRequest.newBuilder().setInstanceName(options.remoteInstanceName);
  List<ListenableFuture<FindMissingBlobsResponse>> getMissingDigestCalls = new ArrayList<>();
  for (Digest digest : digests) {
    requestBuilder.addBlobDigests(digest);
    if (requestBuilder.getBlobDigestsCount() == maxMissingBlobsDigestsPerMessage) {
      getMissingDigestCalls.add(getMissingDigests(requestBuilder.build()));
      requestBuilder.clearBlobDigests();
    }
  }

  if (requestBuilder.getBlobDigestsCount() > 0) {
    getMissingDigestCalls.add(getMissingDigests(requestBuilder.build()));
  }

  ListenableFuture<ImmutableSet<Digest>> success =
      Futures.whenAllSucceed(getMissingDigestCalls)
          .call(
              () -> {
                ImmutableSet.Builder<Digest> result = ImmutableSet.builder();
                for (ListenableFuture<FindMissingBlobsResponse> callFuture :
                    getMissingDigestCalls) {
                  result.addAll(callFuture.get().getMissingBlobDigestsList());
                }
                return result.build();
              },
              MoreExecutors.directExecutor());
  return Futures.catchingAsync(
      success,
      RuntimeException.class,
      (e) ->
          Futures.immediateFailedFuture(
              new IOException(
                  String.format(
                      "findMissingBlobs(%d) for %s: %s",
                      requestBuilder.getBlobDigestsCount(),
                      TracingMetadataUtils.fromCurrentContext().getActionId(),
                      e.getMessage()),
                  e)),
      MoreExecutors.directExecutor());
}
 
Example 16
Source File: ByteStreamUploader.java    From bazel with Apache License 2.0 4 votes vote down vote up
/**
 * Uploads a BLOB asynchronously to the remote {@code ByteStream} service. The call returns
 * immediately and one can listen to the returned future for the success/failure of the upload.
 *
 * <p>Uploads are retried according to the specified {@link RemoteRetrier}. Retrying is
 * transparent to the user of this API.
 *
 * <p>Trying to upload the same BLOB multiple times concurrently results in only one upload being
 * performed. This is transparent to the user of this API.
 *
 * @param hash the hash of the data to upload.
 * @param chunker the data to upload.
 * @param forceUpload if {@code false} the blob is not uploaded if it has previously been
 *     uploaded, if {@code true} the blob is uploaded.
 * @throws IOException when reading of the {@link Chunker}s input source fails
 */
public ListenableFuture<Void> uploadBlobAsync(
    HashCode hash, Chunker chunker, boolean forceUpload) {
  synchronized (lock) {
    checkState(!isShutdown, "Must not call uploadBlobs after shutdown.");

    if (!forceUpload && uploadedBlobs.contains(hash)) {
      return Futures.immediateFuture(null);
    }

    ListenableFuture<Void> inProgress = uploadsInProgress.get(hash);
    if (inProgress != null) {
      return inProgress;
    }

    ListenableFuture<Void> uploadResult =
        Futures.transform(
            startAsyncUpload(hash, chunker),
            (v) -> {
              synchronized (lock) {
                uploadedBlobs.add(hash);
              }
              return null;
            },
            MoreExecutors.directExecutor());

    uploadResult =
        Futures.catchingAsync(
            uploadResult,
            StatusRuntimeException.class,
            (sre) -> Futures.immediateFailedFuture(new IOException(sre)),
            MoreExecutors.directExecutor());

    uploadsInProgress.put(hash, uploadResult);
    uploadResult.addListener(
        () -> {
          synchronized (lock) {
            uploadsInProgress.remove(hash);
          }
        },
        MoreExecutors.directExecutor());

    return uploadResult;
  }
}