com.twitter.util.ExecutorServiceFuturePool Java Examples

The following examples show how to use com.twitter.util.ExecutorServiceFuturePool. Each is taken from an open-source project; the source file, project, and license are noted above each example.
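Before the project examples, a minimal self-contained sketch of the basic contract may help: an ExecutorServiceFuturePool wraps a plain JDK ExecutorService, and apply() runs a task on that executor while immediately returning a com.twitter.util.Future. This sketch assumes twitter-util's Java-friendly Function0 and Await helpers; the class name FuturePoolUsage is illustrative.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import com.twitter.util.Await;
import com.twitter.util.ExecutorServiceFuturePool;
import com.twitter.util.Function0;
import com.twitter.util.Future;

public class FuturePoolUsage {
  public static void main(String[] args) throws Exception {
    // Wrap a plain JDK executor so blocking work can be exposed as a Twitter Future.
    ExecutorService executor = Executors.newFixedThreadPool(4);
    ExecutorServiceFuturePool futurePool = new ExecutorServiceFuturePool(executor);

    // apply() schedules the Function0 on the wrapped executor and returns a
    // Future that is satisfied with its result (or with the exception it throws).
    Future<String> result = futurePool.apply(new Function0<String>() {
      @Override
      public String apply() {
        return "computed on " + Thread.currentThread().getName();
      }
    });

    System.out.println(Await.result(result));
    executor.shutdown();
  }
}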
Example #1
Source File: TerrapinControllerServiceImpl.java    From terrapin with Apache License 2.0
public TerrapinControllerServiceImpl(PropertiesConfiguration configuration,
                                     ZooKeeperManager zkManager,
                                     DFSClient hdfsClient,
                                     HelixAdmin helixAdmin,
                                     String clusterName) {
  this.configuration = configuration;
  this.zkManager = zkManager;
  this.hdfsClient = hdfsClient;
  this.helixAdmin = helixAdmin;
  this.clusterName = clusterName;

  ExecutorService threadPool = new ThreadPoolExecutor(100,
      100,
      0,
      TimeUnit.SECONDS,
      new LinkedBlockingDeque<Runnable>(1000),
      new ThreadFactoryBuilder().setDaemon(false)
                    .setNameFormat("controller-pool-%d")
                    .build());
  this.futurePool = new ExecutorServiceFuturePool(threadPool);
}
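Elsewhere the controller hands blocking HDFS and Helix calls to this pool. A hypothetical sketch of that pattern follows; the loadFileSetInfo helper, the FileSetInfo type, and the fileSet variable are illustrative, while ExceptionalFunction0 is twitter-util's checked-exception-friendly variant of Function0:

// Run a blocking call on a controller-pool thread and expose it as a Future.
Future<FileSetInfo> infoFuture = futurePool.apply(
    new ExceptionalFunction0<FileSetInfo>() {
      @Override
      public FileSetInfo applyE() throws Throwable {
        return loadFileSetInfo(hdfsClient, fileSet);  // blocking I/O
      }
    });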
 
Example #2
Source File: OrderedScheduler.java    From distributedlog with Apache License 2.0
private OrderedScheduler(String name,
                         int corePoolSize,
                         ThreadFactory threadFactory,
                         boolean traceTaskExecution,
                         long traceTaskExecutionWarnTimeUs,
                         StatsLogger statsLogger,
                         StatsLogger perExecutorStatsLogger) {
    this.name = name;
    this.corePoolSize = corePoolSize;
    this.executors = new MonitoredScheduledThreadPoolExecutor[corePoolSize];
    this.futurePools = new MonitoredFuturePool[corePoolSize];
    for (int i = 0; i < corePoolSize; i++) {
        ThreadFactory tf = new ThreadFactoryBuilder()
                .setNameFormat(name + "-executor-" + i + "-%d")
                .setThreadFactory(threadFactory)
                .build();
        StatsLogger broadcastStatsLogger =
                BroadCastStatsLogger.masterslave(perExecutorStatsLogger.scope("executor-" + i), statsLogger);
        executors[i] = new MonitoredScheduledThreadPoolExecutor(
                1, tf, broadcastStatsLogger, traceTaskExecution);
        futurePools[i] = new MonitoredFuturePool(
                new ExecutorServiceFuturePool(executors[i]),
                broadcastStatsLogger.scope("futurepool"),
                traceTaskExecution,
                traceTaskExecutionWarnTimeUs);
    }
    this.random = new Random(System.currentTimeMillis());
}
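Each of the corePoolSize executors is single-threaded, so sending every task for a given key to the same slot preserves per-key ordering, while the Random picks a slot for keyless submissions. A plausible routing helper along those lines (not the project's exact code):

// Route a key to a fixed future pool so tasks for the same key run in order;
// fall back to a random slot when no ordering key is supplied.
MonitoredFuturePool futurePool(Object key) {
    int slot = (key == null)
        ? random.nextInt(corePoolSize)
        : (key.hashCode() & Integer.MAX_VALUE) % corePoolSize;
    return futurePools[slot];
}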
 
Example #3
Source File: BKDistributedLogManager.java    From distributedlog with Apache License 2.0
private FuturePool buildFuturePool(ExecutorService executorService,
                                   StatsLogger statsLogger) {
    FuturePool futurePool = new ExecutorServiceFuturePool(executorService);
    return new MonitoredFuturePool(
            futurePool,
            statsLogger,
            conf.getEnableTaskExecutionStats(),
            conf.getTaskExecutionWarnTimeMicros());
}
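Judging by its constructor arguments, MonitoredFuturePool is a decorator: it delegates to the wrapped ExecutorServiceFuturePool while reporting task-execution stats to the given StatsLogger and, when task-execution tracing is enabled, flagging tasks that exceed the configured warn time.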
 
Example #4
Source File: TerrapinClient.java    From terrapin with Apache License 2.0
private void init(FileSetViewManager fileSetViewManager,
                  String clusterName,
                  int targetPort,
                  int connectTimeoutMs,
                  int timeoutMs) throws Exception {
  this.statsPrefix = "terrapin-client-" + clusterName + "-";
  this.fileSetViewManager = fileSetViewManager;
  this.thriftClientCache = CacheBuilder.newBuilder()
      .maximumSize(5000)
      .expireAfterAccess(60, TimeUnit.MINUTES)
      .removalListener(new RemovalListener<String,
          Pair<Service<ThriftClientRequest, byte[]>,
               TerrapinServerInternal.ServiceIface>>() {
        @Override
        public void onRemoval(RemovalNotification<String,
            Pair<Service<ThriftClientRequest, byte[]>, TerrapinServerInternal.ServiceIface>>
                removalNotification) {
          removalNotification.getValue().getLeft().release();
          LOG.info("Closing client connections to " + removalNotification.getKey());
        }
      }).build();
  this.targetPort = targetPort;
  this.connectTimeoutMs = connectTimeoutMs;
  this.timeoutMs = timeoutMs;
  this.connectionfuturePool = new ExecutorServiceFuturePool(
      Executors.newFixedThreadPool(10));
}
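Blocking connection establishment is isolated on its own ten-thread future pool here, presumably so that Finagle's event threads never stall waiting for a connection; the cache's removal listener releases a Thrift client's connections once the entry has been idle for an hour.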
 
Example #5
Source File: ReaderFactory.java    From terrapin with Apache License 2.0
public ReaderFactory(PropertiesConfiguration configuration, FileSystem hadoopFs) {
  this.configuration = configuration;
  this.hadoopFs = hadoopFs;
  int numReaderThreads = this.configuration.getInt(Constants.READER_THREAD_POOL_SIZE, 200);
  ExecutorService threadPool = new ThreadPoolExecutor(numReaderThreads,
                             numReaderThreads,
                             0,
                             TimeUnit.SECONDS,
                             new LinkedBlockingDeque<Runnable>(10000),
                             new ThreadFactoryBuilder().setDaemon(false)
                                 .setNameFormat("reader-pool-%d")
                                 .build());
  this.readerFuturePool = new ExecutorServiceFuturePool(threadPool);
}
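This mirrors Example #1, except the thread count is read from configuration (READER_THREAD_POOL_SIZE, defaulting to 200) and the bounded work queue holds up to 10,000 pending reads.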
 
Example #6
Source File: HFileReaderTest.java    From terrapin with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
  int randomNum = (int) (Math.random() * Integer.MAX_VALUE);
  hfilePath = "/tmp/hfile-" + randomNum;
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  keyValueMap = Maps.newHashMapWithExpectedSize(10000);
  errorKeys = Sets.newHashSetWithExpectedSize(2000);
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, new CacheConfig(conf),
      fs, 4096).
      withFilePath(new Path(hfilePath)).
      withCompression(Compression.Algorithm.NONE).
      build();
  // Add 10K values.
  for (int i = 0; i < 10000; i++) {
    byte[] key = String.format("%04d", i).getBytes();
    byte[] value = null;
    // Add a couple of empty values to verify they are returned correctly.
    if (i <= 1) {
      value = "".getBytes();
    } else {
      value = ("v" + (i + 1)).getBytes();
    }
    KeyValue kv = new KeyValue(key,
        Bytes.toBytes("cf"),
        Bytes.toBytes(""),
        value);
    writer.append(kv);
    keyValueMap.put(ByteBuffer.wrap(key), ByteBuffer.wrap(value));
    if (i >= 4000 && i < 6000) {
      errorKeys.add(ByteBuffer.wrap(key));
    }
  }
  writer.close();
  hfileReader = new TestHFileReader(fs,
      hfilePath,
      new CacheConfig(conf),
      new ExecutorServiceFuturePool(Executors.newFixedThreadPool(1)),
      errorKeys);
}
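Because the reader is backed by a single-threaded future pool here, the test's asynchronous reads execute one at a time, which keeps the setup simple and the behavior deterministic.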
 
Example #7
Source File: PinLaterBackendBase.java    From pinlater with Apache License 2.0
protected void initialize() throws Exception {
  if (this.shardConfigFilePath != null) {
    String fullFilePath = getClass().getResource("/" + shardConfigFilePath).getPath();
    ConfigFileWatcher.defaultInstance().addWatch(
        fullFilePath, new ExceptionalFunction<byte[], Void>() {
      @Override
      public synchronized Void applyE(byte[] bytes) throws Exception {
        processConfigUpdate(bytes);
        return null;
      }
    });
  }

  // Initialize the future pool we will use to make blocking calls to Redis.
  // We size the future pool such that there is one thread for every available connection.
  int futurePoolSize = configuration.getInt("BACKEND_CONNECTIONS_PER_SHARD") * getShards().size();
  this.futurePool = new ExecutorServiceFuturePool(Executors.newFixedThreadPool(
      futurePoolSize,
      new ThreadFactoryBuilder().setDaemon(true).setNameFormat(
          backendName + "FuturePool-%d").build()));

  // Create a map of queueName -> async semaphore to control dequeue concurrency.
  // We configure the map to create entries on demand since queues can be created at any time.
  final int dequeueConcurrencyPerQueue =
      configuration.getInt("BACKEND_DEQUEUE_CONCURRENCY_PER_QUEUE_PER_SHARD") * getShards()
          .size();
  // We set maxWaiters on the async semaphore to the max concurrency on the server as an
  // additional safety measure.
  final int maxWaiters = configuration.getInt("MAX_CONCURRENT_REQUESTS");
  this.dequeueSemaphoreMap = CacheBuilder.newBuilder().build(
      new CacheLoader<String, AsyncSemaphore>() {
        @Override
        public AsyncSemaphore load(String queueName) throws Exception {
          AsyncSemaphore asyncSemaphore =
              new AsyncSemaphore(dequeueConcurrencyPerQueue, maxWaiters);
          Stats.setGauge("dequeue-semaphore-waiters-" + queueName, asyncSemaphore.numWaiters());
          return asyncSemaphore;
        }
      });
}
 
Example #8
Source File: TestSafeQueueingFuturePool.java    From distributedlog with Apache License 2.0
TestFuturePool() {
    executor = Executors.newScheduledThreadPool(1);
    pool = new ExecutorServiceFuturePool(executor);
    wrapper = new SafeQueueingFuturePool<T>(pool);
}