Java Code Examples for com.twitter.util.Future#exception()

The following examples show how to use com.twitter.util.Future#exception(). The examples are drawn from open source projects; the project and source file each one comes from are noted above it.
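Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: returning a failed Future via Future.exception(Throwable) instead of throwing, so the caller can compose error handling with rescue()/handle(). The class and method names (FutureExceptionExample, lookup) are illustrative only and do not come from any of the projects below.

import com.twitter.util.Await;
import com.twitter.util.Function;
import com.twitter.util.Future;

public class FutureExceptionExample {

    // Return a failed Future instead of throwing, so every exit path of the
    // method yields a Future and callers can compose error handling.
    static Future<String> lookup(String key) {
        if (key == null || key.isEmpty()) {
            return Future.exception(new IllegalArgumentException("key must not be empty"));
        }
        return Future.value("value-for-" + key);
    }

    public static void main(String[] args) throws Exception {
        // rescue() converts the failure into a fallback Future, mirroring the
        // rescue(...) calls used in several of the examples below.
        Future<String> result = lookup("").rescue(new Function<Throwable, Future<String>>() {
            @Override
            public Future<String> apply(Throwable t) {
                return Future.value("fallback: " + t.getMessage());
            }
        });
        System.out.println(Await.result(result)); // prints "fallback: key must not be empty"
    }
}

Returning Future.exception keeps the method's contract uniform: every exit path yields a Future, which is why the examples below use it for early validation failures and for wrapping error codes reported by callbacks.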
Example 1
Source File: LedgerHandleCache.java    From distributedlog with Apache License 2.0
/**
 * Async try read last confirmed.
 *
 * @param ledgerDesc
 *          ledger descriptor
 * @return future presenting read last confirmed result.
 */
public Future<Long> asyncTryReadLastConfirmed(LedgerDescriptor ledgerDesc) {
    RefCountedLedgerHandle refHandle = handlesMap.get(ledgerDesc);
    if (null == refHandle) {
        LOG.error("Accessing ledger {} without opening.", ledgerDesc);
        return Future.exception(BKException.create(BKException.Code.UnexpectedConditionException));
    }
    final Promise<Long> promise = new Promise<Long>();
    refHandle.handle.asyncTryReadLastConfirmed(new AsyncCallback.ReadLastConfirmedCallback() {
        @Override
        public void readLastConfirmedComplete(int rc, long lastAddConfirmed, Object ctx) {
            if (BKException.Code.OK == rc) {
                promise.setValue(lastAddConfirmed);
            } else {
                promise.setException(BKException.create(rc));
            }
        }
    }, null);
    return promise;
}
 
Example 2
Source File: SimpleLedgerAllocator.java    From distributedlog with Apache License 2.0
@Override
public synchronized Future<LedgerHandle> tryObtain(final Transaction<Object> txn,
                                                   final OpListener<LedgerHandle> listener) {
    if (Phase.ERROR == phase) {
        return Future.exception(new AllocationException(Phase.ERROR,
                "Error on allocating ledger under " + allocatePath));
    }
    if (Phase.HANDING_OVER == phase || Phase.HANDED_OVER == phase || null != tryObtainTxn) {
        return Future.exception(new ConcurrentObtainException(phase,
                "Ledger handle is handling over to another thread : " + phase));
    }
    tryObtainTxn = txn;
    tryObtainListener = listener;
    if (null != allocatedLh) {
        completeAllocation(allocatedLh);
    }
    return allocatePromise;
}
 
Example 3
Source File: StreamManagerImpl.java    From distributedlog with Apache License 2.0
/**
 * Must be enqueued to an executor to avoid deadlocks (close and execute-op both
 * try to acquire the same read-write lock).
 */
@Override
public Future<Void> deleteAndRemoveAsync(final String stream) {
    final Promise<Void> result = new Promise<Void>();
    java.util.concurrent.Future<?> scheduleFuture = schedule(new Runnable() {
        @Override
        public void run() {
            result.become(doDeleteAndRemoveAsync(stream));
        }
    }, 0);
    if (null == scheduleFuture) {
        return Future.exception(
            new ServiceUnavailableException("Couldn't schedule a delete task."));
    }
    return result;
}
 
Example 4
Source File: StreamManagerImpl.java    From distributedlog with Apache License 2.0
/**
 * Must be enqueued to an executor to avoid deadlocks (close and execute-op both
 * try to acquire the same read-write lock).
 */
@Override
public Future<Void> closeAndRemoveAsync(final String streamName) {
    final Promise<Void> releasePromise = new Promise<Void>();
    java.util.concurrent.Future<?> scheduleFuture = schedule(new Runnable() {
        @Override
        public void run() {
            releasePromise.become(doCloseAndRemoveAsync(streamName));
        }
    }, 0);
    if (null == scheduleFuture) {
        return Future.exception(
            new ServiceUnavailableException("Couldn't schedule a release task."));
    }
    return releasePromise;
}
 
Example 5
Source File: BKAsyncLogWriter.java    From distributedlog with Apache License 2.0
private Future<BKLogSegmentWriter> doGetLogSegmentWriter(final long firstTxid,
                                                         final boolean bestEffort,
                                                         final boolean rollLog,
                                                         final boolean allowMaxTxID) {
    if (encounteredError) {
        return Future.exception(new WriteException(bkDistributedLogManager.getStreamName(),
                "writer has been closed due to error."));
    }
    Future<BKLogSegmentWriter> writerFuture = asyncGetLedgerWriter(!disableRollOnSegmentError);
    if (null == writerFuture) {
        return rollLogSegmentIfNecessary(null, firstTxid, bestEffort, allowMaxTxID);
    } else if (rollLog) {
        return writerFuture.flatMap(new AbstractFunction1<BKLogSegmentWriter, Future<BKLogSegmentWriter>>() {
            @Override
            public Future<BKLogSegmentWriter> apply(BKLogSegmentWriter writer) {
                return rollLogSegmentIfNecessary(writer, firstTxid, bestEffort, allowMaxTxID);
            }
        });
    } else {
        return writerFuture;
    }
}
 
Example 6
Source File: LedgerHandleCache.java    From distributedlog with Apache License 2.0
/**
 * Async read last confirmed and entry.
 *
 * @param ledgerDesc
 *          ledger descriptor
 * @param entryId
 *          entry id to read
 * @param timeOutInMillis
 *          time out if no newer entry available
 * @param parallel
 *          whether to read from replicas in parallel
 * @return future presenting the last add confirmed and the entry read.
 */
public Future<Pair<Long, LedgerEntry>> asyncReadLastConfirmedAndEntry(
        LedgerDescriptor ledgerDesc,
        long entryId,
        long timeOutInMillis,
        boolean parallel) {
    RefCountedLedgerHandle refHandle = handlesMap.get(ledgerDesc);
    if (null == refHandle) {
        LOG.error("Accessing ledger {} without opening.", ledgerDesc);
        return Future.exception(BKException.create(BKException.Code.UnexpectedConditionException));
    }
    final Promise<Pair<Long, LedgerEntry>> promise = new Promise<Pair<Long, LedgerEntry>>();
    refHandle.handle.asyncReadLastConfirmedAndEntry(entryId, timeOutInMillis, parallel,
            new AsyncCallback.ReadLastConfirmedAndEntryCallback() {
                @Override
                public void readLastConfirmedAndEntryComplete(int rc, long lac, LedgerEntry ledgerEntry, Object ctx) {
                    if (BKException.Code.OK == rc) {
                        promise.setValue(Pair.of(lac, ledgerEntry));
                    } else {
                        promise.setException(BKException.create(rc));
                    }
                }
            }, null);
    return promise;
}
 
Example 7
Source File: PinLaterServiceImpl.java    From pinlater with Apache License 2.0
@Override
public Future<PinLaterDequeueResponse> dequeueJobs(
    RequestContext context, final PinLaterDequeueRequest request) {
  if (!queueConfig.allowDequeue(request.getQueueName(), request.getLimit())) {
    Stats.incr(request.getQueueName() + "_dequeue_requests_rate_limited");
    return Future.exception(new PinLaterException(ErrorCode.DEQUEUE_RATE_LIMITED,
        "Dequeue rate limit exceeded for queue: " + request.getQueueName()));
  }

  return Stats.timeFutureMillis(
      "PinLaterService.dequeueJobs",
      backend.dequeueJobs(context.getSource(), request).onSuccess(
          new Function<PinLaterDequeueResponse, BoxedUnit>() {
            @Override
            public BoxedUnit apply(PinLaterDequeueResponse response) {
              Stats.incr(request.getQueueName() + "_dequeue", response.getJobsSize());
              return null;
            }
          }).rescue(new LogAndWrapException<PinLaterDequeueResponse>(
          context, "dequeueJobs", request.toString())));
}
 
Example 8
Source File: BKLogSegmentWriter.java    From distributedlog with Apache License 2.0
@Override
public Future<Long> apply(Integer transmitRc) {
    if (BKException.Code.OK == transmitRc) {
        return Future.value(getLastTxIdAcknowledged());
    } else {
        return Future.exception(new BKTransmitException("Failed to transmit entry", transmitRc));
    }
}
 
Example 9
Source File: BKLogWriteHandler.java    From distributedlog with Apache License 2.0
Future<List<LogSegmentMetadata>> purgeLogSegmentsOlderThanTimestamp(final long minTimestampToKeep) {
    if (minTimestampToKeep >= Utils.nowInMillis()) {
        return Future.exception(new IllegalArgumentException(
                "Invalid timestamp " + minTimestampToKeep + " to purge logs for " + getFullyQualifiedName()));
    }
    return asyncGetFullLedgerList(false, false).flatMap(
            new Function<List<LogSegmentMetadata>, Future<List<LogSegmentMetadata>>>() {
        @Override
        public Future<List<LogSegmentMetadata>> apply(List<LogSegmentMetadata> logSegments) {
            List<LogSegmentMetadata> purgeList = new ArrayList<LogSegmentMetadata>(logSegments.size());

            int numCandidates = getNumCandidateLogSegmentsToTruncate(logSegments);

            for (int iterator = 0; iterator < numCandidates; iterator++) {
                LogSegmentMetadata l = logSegments.get(iterator);
                // When the application explicitly truncates segments, timestamp-based purge is
                // only used to clean up log segments that have been marked for truncation
                if ((l.isTruncated() || !conf.getExplicitTruncationByApplication()) &&
                    !l.isInProgress() && (l.getCompletionTime() < minTimestampToKeep)) {
                    purgeList.add(l);
                } else {
                    // stop truncating log segments if we find either an in-progress or a partially
                    // truncated log segment
                    break;
                }
            }
            LOG.info("Deleting log segments older than {} for {} : {}",
                    new Object[] { minTimestampToKeep, getFullyQualifiedName(), purgeList });
            return deleteLogSegments(purgeList);
        }
    });
}
 
Example 10
Source File: BKLogWriteHandler.java    From distributedlog with Apache License 2.0
public Future<Long> recoverIncompleteLogSegments() {
    try {
        FailpointUtils.checkFailPoint(FailpointUtils.FailPointName.FP_RecoverIncompleteLogSegments);
    } catch (IOException ioe) {
        return Future.exception(ioe);
    }
    return asyncGetFilteredLedgerList(false, false).flatMap(recoverLogSegmentsFunction);
}
 
Example 11
Source File: DistributedLogClientImpl.java    From distributedlog with Apache License 2.0
private Future<SocketAddress> getOwner(final StreamOp op) {
    if (clusterClient.isPresent()) {
        final Promise<SocketAddress> getOwnerPromise = new Promise<SocketAddress>();
        getOwnerFromResourcePlacementServer(op, getOwnerPromise);
        return getOwnerPromise;
    }
    // pickup host by hashing
    try {
        return Future.value(routingService.getHost(op.stream, op.routingContext));
    } catch (NoBrokersAvailableException nbae) {
        return Future.exception(nbae);
    }
}
 
Example 12
Source File: PinLaterServiceImpl.java    From pinlater with Apache License 2.0
@Override
public Future<Response> apply(Throwable throwable) {
  LOG.error("Context: {} Method: {} Request: {} Exception:",
      context, methodName, requestDesc, throwable);
  PinLaterException exception;
  if (throwable instanceof PinLaterException) {
    exception = (PinLaterException) throwable;
  } else {
    exception = new PinLaterException(ErrorCode.UNKNOWN, throwable.toString());
  }
  String errorStats = "PinLater." + methodName + ".errors."
      + errorCodeToStr(exception.getErrorCode());
  Stats.incr(errorStats);
  return Future.exception(exception);
}
 
Example 13
Source File: BKAbstractLogWriter.java    From distributedlog with Apache License 2.0
synchronized protected Future<BKLogSegmentWriter> rollLogSegmentIfNecessary(
        final BKLogSegmentWriter segmentWriter,
        long startTxId,
        boolean bestEffort,
        boolean allowMaxTxID) {
    final BKLogWriteHandler writeHandler;
    try {
        writeHandler = getWriteHandler();
    } catch (IOException e) {
        return Future.exception(e);
    }
    Future<BKLogSegmentWriter> rollPromise;
    if (null != segmentWriter && (writeHandler.shouldStartNewSegment(segmentWriter) || forceRolling)) {
        rollPromise = closeOldLogSegmentAndStartNewOneWithPermit(
                segmentWriter, writeHandler, startTxId, bestEffort, allowMaxTxID);
    } else if (null == segmentWriter) {
        rollPromise = asyncStartNewLogSegment(writeHandler, startTxId, allowMaxTxID);
    } else {
        rollPromise = Future.value(segmentWriter);
    }
    return rollPromise.map(new AbstractFunction1<BKLogSegmentWriter, BKLogSegmentWriter>() {
        @Override
        public BKLogSegmentWriter apply(BKLogSegmentWriter newSegmentWriter) {
            if (segmentWriter == newSegmentWriter) {
                return newSegmentWriter;
            }
            truncateLogSegmentsIfNecessary(writeHandler);
            return newSegmentWriter;
        }
    });
}
 
Example 14
Source File: TruncateOp.java    From distributedlog with Apache License 2.0
@Override
protected Future<WriteResponse> executeOp(AsyncLogWriter writer,
                                          Sequencer sequencer,
                                          Object txnLock) {
    if (!stream.equals(writer.getStreamName())) {
        logger.error("Truncate: Stream Name Mismatch in the Stream Map {}, {}", stream, writer.getStreamName());
        return Future.exception(new IllegalStateException("The stream mapping is incorrect, fail the request"));
    }
    return writer.truncate(dlsn).map(new AbstractFunction1<Boolean, WriteResponse>() {
        @Override
        public WriteResponse apply(Boolean v1) {
            return ResponseUtils.writeSuccess();
        }
    });
}
 
Example 15
Source File: WriteOp.java    From distributedlog with Apache License 2.0
@Override
protected Future<WriteResponse> executeOp(AsyncLogWriter writer,
                                          Sequencer sequencer,
                                          Object txnLock) {
    if (!stream.equals(writer.getStreamName())) {
        logger.error("Write: Stream Name Mismatch in the Stream Map {}, {}", stream, writer.getStreamName());
        return Future.exception(new IllegalStateException("The stream mapping is incorrect, fail the request"));
    }

    long txnId;
    Future<DLSN> writeResult;
    synchronized (txnLock) {
        txnId = sequencer.nextId();
        LogRecord record = new LogRecord(txnId, payload);
        if (isRecordSet) {
            record.setRecordSet();
        }
        writeResult = newTFuture(writer.write(record));
    }
    return writeResult.map(new AbstractFunction1<DLSN, WriteResponse>() {
        @Override
        public WriteResponse apply(DLSN value) {
            successRecordCounter.inc();
            return ResponseUtils.writeSuccess().setDlsn(value.serialize(dlsnVersion));
        }
    });
}
 
Example 16
Source File: ScribeSender.java    From zipkin-finagle with Apache License 2.0
@Override public Future<Void> apply(byte[] responseBytes) {
  TBinaryProtocol iprot = new TBinaryProtocol(new TMemoryInputTransport(responseBytes));
  try {
    if (InternalScribeCodec.readLogResponse(0, iprot)) {
      return Future.Void();
    } else {
      return Future.exception(new IllegalStateException("try later"));
    }
  } catch (Exception e) {
    return Future.exception(e);
  }
}
 
Example 17
Source File: TerrapinServiceImpl.java    From terrapin with Apache License 2.0
@Override
public Future<TerrapinSingleResponse> get(final TerrapinGetRequest request) {
  final long startTimeMillis = System.currentTimeMillis();
  if (request.getClusterList().isEmpty()) {
    return Future.exception(new TerrapinGetException("Cluster list is empty", TerrapinGetErrorCode.INVALID_REQUEST));
  }
  ReplicatedTerrapinClient terrapinClient = getReplicatedTerrapinClient(request.getClusterList());
  if (terrapinClient == null) {
    return Future.exception(new TerrapinGetException(
        "Clusters [" + Joiner.on(", ").join(request.getClusterList()) + "] not found.",
        TerrapinGetErrorCode.CLUSTER_NOT_FOUND));
  }
  RequestOptions options;
  if (request.isSetOptions()) {
    options = request.getOptions();
  } else {
    options = new RequestOptions();
  }
  try {
    return terrapinClient.getMany(request.getFileSet(),
        Sets.newHashSet(ByteBuffer.wrap(request.getKey())), options).map(
            new ExceptionalFunction<TerrapinResponse, TerrapinSingleResponse>() {
              @Override
              public TerrapinSingleResponse applyE(TerrapinResponse response)
                  throws TerrapinGetException {
                ByteBuffer keyBuf = ByteBuffer.wrap(request.getKey());
                if (response.getResponseMap().containsKey(keyBuf)) {
                  TerrapinSingleResponse returnResponse = response.getResponseMap().get(keyBuf);
                  if (returnResponse.isSetErrorCode()) {
                    throw new TerrapinGetException("Read failed.", returnResponse.getErrorCode());
                  } else {
                    Stats.addMetric(request.getFileSet() + "-value-size", returnResponse.getValue().length);
                    Stats.addMetric("value-size", returnResponse.getValue().length);
                    return returnResponse;
                  }
                } else {
                  return new TerrapinSingleResponse();
                }
              }
            }).rescue(new Function<Throwable, Future<TerrapinSingleResponse>>() {
              @Override
              public Future<TerrapinSingleResponse> apply(Throwable t) {
                return getExceptionFuture(t);
              }
            }).ensure(new Function0<BoxedUnit>() {
              @Override
              public BoxedUnit apply() {
                int timeMillis = (int)(System.currentTimeMillis() - startTimeMillis);
                Stats.addMetric(request.getFileSet() + "-lookup-latency-ms", timeMillis);
                Stats.addMetric("lookup-latency-ms", timeMillis);
                return BoxedUnit.UNIT;
              }
          });
  } catch (Exception e) {
    return getExceptionFuture(e);
  }
}
 
Example 18
Source File: FederatedZKLogMetadataStore.java    From distributedlog with Apache License 2.0
private <A> Future<A> duplicatedLogException(String logName) {
    return Future.exception(new UnexpectedException("Duplicated log " + logName
            + " found in namespace " + namespace));
}
 
Example 19
Source File: TerrapinServiceImpl.java    From terrapin with Apache License 2.0
@Override
public Future<TerrapinResponse> multiGet(final TerrapinMultiGetRequest request) {
  final long startTimeMillis = System.currentTimeMillis();
  if (request.getClusterList().isEmpty()) {
    return Future.exception(new TerrapinGetException("Cluster list is empty", TerrapinGetErrorCode.INVALID_REQUEST));
  }
  ReplicatedTerrapinClient terrapinClient = getReplicatedTerrapinClient(request.getClusterList());
  if (terrapinClient == null) {
    return Future.exception(new TerrapinGetException(
        "Clusters [" + Joiner.on(", ").join(request.getClusterList()) + "] not found.",
        TerrapinGetErrorCode.CLUSTER_NOT_FOUND));
  }
  RequestOptions options;
  if (request.isSetOptions()) {
    options = request.getOptions();
  } else {
    options = new RequestOptions();
  }
  try {
    return terrapinClient.getMany(request.getFileSet(), Sets.newHashSet(request.getKeyList()), options)
        .onSuccess(new Function<TerrapinResponse, BoxedUnit>() {
          @Override
          public BoxedUnit apply(TerrapinResponse terrapinResponse) {
            int responseSize = 0;
            for (Map.Entry<ByteBuffer, TerrapinSingleResponse> response : terrapinResponse.getResponseMap().entrySet()) {
              if (!response.getValue().isSetErrorCode()) {
                responseSize += response.getValue().getValue().length;
              }
            }
            Stats.addMetric(request.getFileSet() + "-multi-value-size", responseSize);
            Stats.addMetric("multi-value-size", responseSize);
            return BoxedUnit.UNIT;
          }
        })
        .rescue(new Function<Throwable, Future<TerrapinResponse>>() {
          @Override
          public Future<TerrapinResponse> apply(Throwable t) {
            return getExceptionFuture(t);
          }
        })
        .ensure(new Function0<BoxedUnit>() {
          @Override
          public BoxedUnit apply() {
            int timeMillis = (int) (System.currentTimeMillis() - startTimeMillis);
            Stats.addMetric(request.getFileSet() + "-multi-lookup-latency-ms", timeMillis);
            Stats.addMetric("multi-lookup-latency-ms", timeMillis);
            return BoxedUnit.UNIT;
          }
        });
  } catch (Exception e) {
    return getExceptionFuture(e);
  }
}
 
Example 20
Source File: BKDistributedLogManager.java    From distributedlog with Apache License 2.0
@Override
public Future<AsyncLogWriter> openAsyncLogWriter() {
    try {
        checkClosedOrInError("startLogSegmentNonPartitioned");
    } catch (AlreadyClosedException e) {
        return Future.exception(e);
    }

    Future<BKLogWriteHandler> createWriteHandleFuture;
    synchronized (this) {
        // 1. create the locked write handler
        createWriteHandleFuture = asyncCreateWriteHandler(true);
    }
    return createWriteHandleFuture.flatMap(new AbstractFunction1<BKLogWriteHandler, Future<AsyncLogWriter>>() {
        @Override
        public Future<AsyncLogWriter> apply(final BKLogWriteHandler writeHandler) {
            final BKAsyncLogWriter writer;
            synchronized (BKDistributedLogManager.this) {
                // 2. create the writer with the handler
                writer = new BKAsyncLogWriter(
                        conf,
                        dynConf,
                        BKDistributedLogManager.this,
                        writeHandler,
                        featureProvider,
                        statsLogger);
            }
            // 3. recover the incomplete log segments
            return writeHandler.recoverIncompleteLogSegments()
                    .map(new AbstractFunction1<Long, AsyncLogWriter>() {
                        @Override
                        public AsyncLogWriter apply(Long lastTxId) {
                            // 4. update last tx id if successfully recovered
                            writer.setLastTxId(lastTxId);
                            return writer;
                        }
                    }).onFailure(new AbstractFunction1<Throwable, BoxedUnit>() {
                        @Override
                        public BoxedUnit apply(Throwable cause) {
                            // 5. close the writer if recovery failed
                            writer.asyncAbort();
                            return BoxedUnit.UNIT;
                        }
                    });
        }
    });
}