com.twitter.util.ExceptionalFunction0 Java Examples

The following examples show how to use com.twitter.util.ExceptionalFunction0. Each example notes the source file it was taken from, the originating open-source project, and its license.
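Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: submitting a throwing computation to a com.twitter.util.FuturePool via ExceptionalFunction0, whose applyE() may throw a checked exception that then surfaces as a failed Future. The readFileAsync method and its path parameter are illustrative names only, not part of the examples below; the sketch assumes twitter util-core is on the classpath.

import com.twitter.util.ExceptionalFunction0;
import com.twitter.util.Future;
import com.twitter.util.FuturePool;

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

public class ExceptionalFunction0Sketch {

  /**
   * Offloads a blocking, exception-throwing file read onto the supplied FuturePool.
   * Any Throwable raised inside applyE() is captured and returned as a failed Future,
   * so callers never handle the checked exception directly.
   */
  public static Future<String> readFileAsync(FuturePool futurePool, final String path) {
    return futurePool.apply(new ExceptionalFunction0<String>() {
      @Override
      public String applyE() throws Throwable {
        // Blocking I/O that may throw IOException; the exception becomes
        // the failure of the returned Future rather than propagating here.
        byte[] bytes = Files.readAllBytes(Paths.get(path));
        return new String(bytes, StandardCharsets.UTF_8);
      }
    });
  }
}

The same applyE()-override pattern appears in every example that follows.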
Example #1
Source File: PinLaterBackendBase.java    From pinlater with Apache License 2.0
public Future<Void> deleteQueue(final String name, final String password) {
  String passwordHash;
  try {
    passwordHash = PinLaterBackendUtils.getSaltedHash(password);
  } catch (NoSuchAlgorithmException e) {
    return Future.exception(
        new PinLaterException(ErrorCode.UNKNOWN, "Error finding hashing algorithm."));
  }
  if (!passwordHash.equals(configuration.getString("ADMIN_PASSWORD_HASH"))) {
    return Future.exception(
        new PinLaterException(ErrorCode.PASSWORD_INVALID, "Invalid admin password."));
  }

  return futurePool.apply(new ExceptionalFunction0<Void>() {
    @Override
    public Void applyE() throws Throwable {
      deleteQueueImpl(name);
      return null;
    }
  });
}
 
Example #2
Source File: PinLaterBackendBase.java    From pinlater with Apache License 2.0
public Future<Integer> retryFailedJobs(final PinLaterRetryFailedJobsRequest request) {
  // Execute retryFailedJobs query on each shard until we have updated 'limit' number of jobs.
  return futurePool.apply(new ExceptionalFunction0<Integer>() {
    @Override
    public Integer applyE() throws Throwable {
      long currentTimeMillis = System.currentTimeMillis();
      int remainingLimit = request.getLimit();
      List<String> shardNames = getRandomShardShuffle();
      for (final String shardName : shardNames) {
        int numRetried = retryFailedJobsFromShard(
            request.getQueueName(),
            shardName,
            request.getPriority(),
            request.getAttemptsToAllow(),
            request.isSetRunAfterTimestampMillis()
            ? request.getRunAfterTimestampMillis() : currentTimeMillis,
            remainingLimit);
        remainingLimit -= numRetried;
        if (remainingLimit <= 0) {
          break;
        }
      }
      return request.getLimit() - remainingLimit;
    }
  });
}
 
Example #3
Source File: PinLaterBackendBase.java    From pinlater with Apache License 2.0
public Future<Integer> deleteJobs(final PinLaterDeleteJobsRequest request) {
  // Execute deleteJobs query on each shard until we have updated 'limit' number of jobs.
  return futurePool.apply(new ExceptionalFunction0<Integer>() {
    @Override
    public Integer applyE() throws Throwable {
      int remainingLimit = request.getLimit();
      List<String> shardNames = getRandomShardShuffle();
      for (final String shardName : shardNames) {
        int numDeleted = deleteJobsFromShard(
            request.getQueueName(),
            shardName,
            request.getJobState(),
            request.getPriority(),
            request.getBodyRegexToMatch(),
            remainingLimit);
        remainingLimit -= numDeleted;

        if (remainingLimit <= 0) {
          break;
        }
      }
      return request.getLimit() - remainingLimit;
    }
  });
}
 
Example #4
Source File: PinLaterRedisBackend.java    From pinlater with Apache License 2.0
/**
 * Clean up all the keys in each shard. This method is only for test use.
 */
@VisibleForTesting
public Future<Void> cleanUpAllShards() {
  return futurePool.apply(new ExceptionalFunction0<Void>() {
    @Override
    public Void applyE() throws Throwable {
      for (final ImmutableMap.Entry<String, RedisPools> shard : shardMap.entrySet()) {
        RedisUtils.executeWithConnection(
            shard.getValue().getGeneralRedisPool(),
            new Function<Jedis, Void>() {
              @Override
              public Void apply(Jedis conn) {
                conn.flushAll();
                return null;
              }
            });
      }
      return null;
    }
  });
}
 
Example #5
Source File: PinLaterRedisBackend.java    From pinlater with Apache License 2.0
/**
 * Remove the job hash from redis. This function is used in test to simulate the case where the
 * job id is still in the queue, while the job hash is evicted by redis LRU.
 */
@VisibleForTesting
public Future<Void> removeJobHash(String jobDescriptor) {
  final PinLaterJobDescriptor jobDesc = new PinLaterJobDescriptor(jobDescriptor);
  return futurePool.apply(new ExceptionalFunction0<Void>() {
    @Override
    public Void applyE() throws Throwable {
      RedisUtils.executeWithConnection(
          shardMap.get(jobDesc.getShardName()).getGeneralRedisPool(),
          new Function<Jedis, Void>() {
            @Override
            public Void apply(Jedis conn) {
              String hashRedisKey = RedisBackendUtils.constructHashRedisKey(
                  jobDesc.getQueueName(), jobDesc.getShardName(), jobDesc.getLocalId());
              conn.del(hashRedisKey);
              return null;
            }
          });
      return null;
    }
  });
}
 
Example #6
Source File: BKLogReadHandler.java    From distributedlog with Apache License 2.0
/**
 * Elective stream lock--readers are not required to acquire the lock before using the stream.
 */
synchronized Future<Void> lockStream() {
    if (null == lockAcquireFuture) {
        final Function0<DistributedLock> lockFunction = new ExceptionalFunction0<DistributedLock>() {
            @Override
            public DistributedLock applyE() throws IOException {
                // Unfortunately this has a blocking call which we should not execute on the
                // ZK completion thread
                BKLogReadHandler.this.readLock = new ZKDistributedLock(
                        lockStateExecutor,
                        lockFactory,
                        readLockPath,
                        conf.getLockTimeoutMilliSeconds(),
                        statsLogger.scope("read_lock"));

                LOG.info("acquiring readlock {} at {}", getLockClientId(), readLockPath);
                return BKLogReadHandler.this.readLock;
            }
        };
        lockAcquireFuture = ensureReadLockPathExist().flatMap(new ExceptionalFunction<Void, Future<Void>>() {
            @Override
            public Future<Void> applyE(Void in) throws Throwable {
                return scheduler.apply(lockFunction).flatMap(new ExceptionalFunction<DistributedLock, Future<Void>>() {
                    @Override
                    public Future<Void> applyE(DistributedLock lock) throws IOException {
                        return acquireLockOnExecutorThread(lock);
                    }
                });
            }
        });
    }
    return lockAcquireFuture;
}
 
Example #7
Source File: BKDistributedLogManager.java    From distributedlog with Apache License 2.0
<T> Future<T> processReaderOperation(final Function<BKLogReadHandler, Future<T>> func) {
    initializeFuturePool(false);
    return readerFuturePool.apply(new ExceptionalFunction0<BKLogReadHandler>() {
        @Override
        public BKLogReadHandler applyE() throws Throwable {
            return getReadHandlerForListener(true);
        }
    }).flatMap(new ExceptionalFunction<BKLogReadHandler, Future<T>>() {
        @Override
        public Future<T> applyE(final BKLogReadHandler readHandler) throws Throwable {
            return func.apply(readHandler);
        }
    });
}
 
Example #8
Source File: PinLaterBackendBase.java    From pinlater with Apache License 2.0
public Future<Void> createQueue(final String name) {
  return futurePool.apply(new ExceptionalFunction0<Void>() {
    @Override
    public Void applyE() throws Throwable {
      createQueueImpl(name);
      return null;
    }
  });
}
 
Example #9
Source File: PinLaterBackendBase.java    From pinlater with Apache License 2.0
/**
 * Identical to deleteQueue method above, but with no password and intended for testing use only.
 */
@VisibleForTesting
public Future<Void> deleteQueue(final String name) {
  return futurePool.apply(new ExceptionalFunction0<Void>() {
    @Override
    public Void applyE() throws Throwable {
      deleteQueueImpl(name);
      return null;
    }
  });
}
 
Example #10
Source File: PinLaterBackendBase.java    From pinlater with Apache License 2.0
public Future<Integer> getJobCount(final PinLaterGetJobCountRequest request) {
  // If no priority is specified, search for jobs of all priorities.
  Range<Integer> priorityRange = request.isSetPriority()
      ? Range.closed((int) request.getPriority(), (int) request.getPriority())
      : Range.closed(1, numPriorityLevels);
  final ContiguousSet<Integer> priorities =
  final ContiguousSet<Integer> priorities =
      ContiguousSet.create(priorityRange, DiscreteDomain.integers());

  // Execute count query on each shard in parallel.
  List<Future<Integer>> futures = Lists.newArrayListWithCapacity(getShards().size());
  for (final String shardName : getShards()) {
    futures.add(futurePool.apply(new ExceptionalFunction0<Integer>() {
      @Override
      public Integer applyE() throws Throwable {
        return getJobCountFromShard(
            request.getQueueName(),
            shardName,
            priorities,
            request.getJobState(),
            request.isCountFutureJobs(),
            request.getBodyRegexToMatch());
      }
    }));
  }

  return Future.collect(futures).map(
      new Function<List<Integer>, Integer>() {
        @Override
        public Integer apply(List<Integer> shardCounts) {
          int totalCount = 0;
          for (Integer shardCount : shardCounts) {
            totalCount += shardCount;
          }
          return totalCount;
        }
      });
}
 
Example #11
Source File: PinLaterBackendBase.java    From pinlater with Apache License 2.0
public Future<PinLaterDequeueResponse> dequeueJobs(final String source,
                                                   final PinLaterDequeueRequest request) {
  Future<PinLaterDequeueResponse> dequeueFuture;
  try {
    dequeueFuture = dequeueSemaphoreMap.get(request.getQueueName()).acquire().flatMap(
        new Function<Permit, Future<PinLaterDequeueResponse>>() {
          @Override
          public Future<PinLaterDequeueResponse> apply(final Permit permit) {
            return futurePool.apply(new ExceptionalFunction0<PinLaterDequeueResponse>() {
              @Override
              public PinLaterDequeueResponse applyE() throws Throwable {
                return dequeueJobsImpl(source, request, numAutoRetries);
              }
            }).respond(new Function<Try<PinLaterDequeueResponse>, BoxedUnit>() {
              @Override
              public BoxedUnit apply(Try<PinLaterDequeueResponse> responseTry) {
                permit.release();
                return BoxedUnit.UNIT;
              }
            });
          }
        });
  } catch (ExecutionException e) {
    // The dequeueSemaphoreMap's get() can in theory throw an ExecutionException, but we
    // never expect it in practice since our load method is simply new'ing up an AsyncSemaphore.
    dequeueFuture = Future.exception(e);
  }

  // Dequeue requests can contain ack requests as payloads. If so, we execute both in parallel.
  Future<Void> ackFuture = request.isSetJobAckRequest()
                           ? ackDequeuedJobsImpl(request.getJobAckRequest()) : Future.Void();

  return dequeueFuture.join(ackFuture).map(
      new Function<Tuple2<PinLaterDequeueResponse, Void>, PinLaterDequeueResponse>() {
        @Override
        public PinLaterDequeueResponse apply(Tuple2<PinLaterDequeueResponse, Void> tuple) {
          return tuple._1();
        }
      });
}
 
Example #12
Source File: PinLaterBackendBase.java    From pinlater with Apache License 2.0
public Future<Map<String, PinLaterJobInfo>> lookupJobs(final PinLaterLookupJobRequest request) {
  List<Future<Pair<String, PinLaterJobInfo>>> lookupJobFutures =
      Lists.newArrayListWithCapacity(request.getJobDescriptorsSize());
  for (final String jobDescriptor : request.getJobDescriptors()) {
    Future<Pair<String, PinLaterJobInfo>> lookupJobFuture = futurePool.apply(
        new ExceptionalFunction0<Pair<String, PinLaterJobInfo>>() {
          @Override
          public Pair<String, PinLaterJobInfo> applyE() throws Throwable {
            PinLaterJobDescriptor jobDesc = new PinLaterJobDescriptor(jobDescriptor);
            PinLaterJobInfo jobInfo = lookupJobFromShard(
                jobDesc.getQueueName(),
                jobDesc.getShardName(),
                jobDesc.getPriority(),
                jobDesc.getLocalId(),
                request.isIncludeBody());
            return new Pair<String, PinLaterJobInfo>(jobDescriptor, jobInfo);
          }
        });
    lookupJobFutures.add(lookupJobFuture);
  }

  return Future.collect(lookupJobFutures).map(
      new Function<List<Pair<String, PinLaterJobInfo>>, Map<String, PinLaterJobInfo>>() {
        @Override
        public Map<String, PinLaterJobInfo> apply(List<Pair<String, PinLaterJobInfo>> jobPairs) {
          Map<String, PinLaterJobInfo> lookupJobMap = Maps.newHashMap();
          for (Pair<String, PinLaterJobInfo> jobPair : jobPairs) {
            if (jobPair.getSecond() != null) {
              lookupJobMap.put(jobPair.getFirst(), jobPair.getSecond());
            }
          }
          return lookupJobMap;
        }
      });
}
 
Example #13
Source File: PinLaterBackendBase.java    From pinlater with Apache License 2.0
public Future<Void> checkpointJobs(final String source,
                                   final PinLaterCheckpointJobsRequest request) {
  // Partition the requests such that there are roughly <queryParallelism> partitions. Then
  // execute those in parallel. Within each partition, each checkpoint is executed serially.
  List<Future<Void>> futures = Lists.newArrayList();
  if (request.getRequestsSize() > 0) {
    futures.addAll(PinLaterBackendUtils.executePartitioned(
        request.getRequests(),
        queryParallelism,
        new Function<List<PinLaterCheckpointJobRequest>, Future<Void>>() {
          @Override
          public Future<Void> apply(final List<PinLaterCheckpointJobRequest> checkpointRequests) {
            return futurePool.apply(new ExceptionalFunction0<Void>() {
              @Override
              public Void applyE() throws Throwable {
                for (PinLaterCheckpointJobRequest checkpointRequest : checkpointRequests) {
                  checkpointSingleJob(source, request.getQueueName(), checkpointRequest,
                      numAutoRetries);
                }
                return null;
              }
            });
          }
        }));
  }
  return Future.collect(futures).voided();
}
 
Example #14
Source File: PinLaterBackendBase.java    From pinlater with Apache License 2.0
public Future<Set<String>> getQueueNames() {
  return futurePool.apply(new ExceptionalFunction0<Set<String>>() {
    @Override
    public Set<String> applyE() throws Throwable {
      return getQueueNamesImpl();
    }
  });
}
 
Example #15
Source File: PinLaterBackendBase.java    From pinlater with Apache License 2.0
public Future<PinLaterScanJobsResponse> scanJobs(final PinLaterScanJobsRequest request) {
  // Validate continuation token. CONTINUATION_START is the only supported token right now.
  if (!request.getContinuation().equals(Constants.CONTINUATION_START)) {
    return Future.exception(new PinLaterException(ErrorCode.CONTINUATION_INVALID,
        "CONTINUATION_START is the only continuation token supported right now."));
  }

  // If no priority is specified, search for jobs of all priorities.
  Range<Integer> priorityRange = request.isSetPriority()
      ? Range.closed((int) request.getPriority(), (int) request.getPriority())
      : Range.closed(1, numPriorityLevels);
  final ContiguousSet<Integer> priorities =
      ContiguousSet.create(priorityRange, DiscreteDomain.integers());

  // Execute scanJobs query on each shard in parallel.
  List<Future<List<PinLaterJobInfo>>> futures =
      Lists.newArrayListWithCapacity(getShards().size());
  for (final String shardName : getShards()) {
    futures.add(futurePool.apply(new ExceptionalFunction0<List<PinLaterJobInfo>>() {
      @Override
      public List<PinLaterJobInfo> applyE() throws Throwable {
        return scanJobsFromShard(
            request.getQueueName(),
            shardName,
            priorities,
            request.getJobState(),
            request.isScanFutureJobs(),
            request.getContinuation(),
            request.getLimit(),
            request.getBodyRegexToMatch());
      }
    }));
  }

  // Perform a merge, and then truncate at the requested limit.
  return Future.collect(futures).map(
      new Function<List<List<PinLaterJobInfo>>, PinLaterScanJobsResponse>() {
        @Override
        public PinLaterScanJobsResponse apply(List<List<PinLaterJobInfo>> shardLists) {
          // First grab all of the lists of job info and perform a merge on them.
          List<PinLaterJobInfo> mergedList = PinLaterBackendUtils.mergeIntoList(
              shardLists,
              PinLaterBackendUtils.JobInfoComparator.getInstance(),
              request.getLimit());

          // If we were to support continuation we would need to create and set the token here.
          // Right now, we just leave it as the default: CONTINUATION_END.
          return new PinLaterScanJobsResponse(mergedList);
        }
      });
}