Java Code Examples for com.google.common.util.concurrent.ThreadFactoryBuilder

The following examples show how to use com.google.common.util.concurrent.ThreadFactoryBuilder. They are extracted from open source projects.
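Before the project examples, here is a minimal, self-contained sketch of the builder API itself; the name format, daemon flag, and exception handler below are illustrative choices, not taken from any project on this page. The resulting ThreadFactory can be passed to any java.util.concurrent executor:

import com.google.common.util.concurrent.ThreadFactoryBuilder;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;

public class ThreadFactoryBuilderSketch {
  public static void main(String[] args) {
    // "%d" in the name format is replaced with a sequential index (0, 1, ...).
    ThreadFactory factory = new ThreadFactoryBuilder()
        .setNameFormat("worker-%d")   // threads named worker-0, worker-1, ...
        .setDaemon(true)              // daemon threads do not keep the JVM alive
        .setUncaughtExceptionHandler((thread, throwable) ->
            System.err.println(thread.getName() + " failed: " + throwable))
        .build();

    ExecutorService pool = Executors.newFixedThreadPool(2, factory);
    pool.execute(() -> System.out.println(Thread.currentThread().getName()));
    pool.shutdown();
  }
}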
Example 1
Project: hadoop   File: ResourceLocalizationService.java
public ResourceLocalizationService(Dispatcher dispatcher,
    ContainerExecutor exec, DeletionService delService,
    LocalDirsHandlerService dirsHandler, Context context) {

  super(ResourceLocalizationService.class.getName());
  this.exec = exec;
  this.dispatcher = dispatcher;
  this.delService = delService;
  this.dirsHandler = dirsHandler;

  this.cacheCleanup = new ScheduledThreadPoolExecutor(1,
      new ThreadFactoryBuilder()
        .setNameFormat("ResourceLocalizationService Cache Cleanup")
        .build());
  this.stateStore = context.getNMStateStore();
  this.nmContext = context;
}
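Note that the name format above contains no "%d" placeholder. ThreadFactoryBuilder still passes it through String.format together with a sequential thread index, and String.format simply ignores the extra argument, so every thread gets the identical name. That is harmless for a single-thread pool like this one, but a "%d" is needed to tell threads apart in larger pools.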
 
Example 2
Project: hadoop   File: JobHistory.java
@Override
protected void serviceStart() throws Exception {
  hsManager.start();
  if (storage instanceof Service) {
    ((Service) storage).start();
  }

  scheduledExecutor = new ScheduledThreadPoolExecutor(2,
      new ThreadFactoryBuilder().setNameFormat("Log Scanner/Cleaner #%d")
          .build());

  scheduledExecutor.scheduleAtFixedRate(new MoveIntermediateToDoneRunnable(),
      moveThreadInterval, moveThreadInterval, TimeUnit.MILLISECONDS);

  // Start historyCleaner
  scheduleHistoryCleaner();
  super.serviceStart();
}
 
Example 3
Project: hadoop   File: InMemorySCMStore.java
/**
 * The in-memory store bootstraps itself from the shared cache entries that
 * exist in HDFS.
 */
@Override
protected void serviceInit(Configuration conf) throws Exception {

  this.startTime = System.currentTimeMillis();
  this.initialDelayMin = getInitialDelay(conf);
  this.checkPeriodMin = getCheckPeriod(conf);
  this.stalenessMinutes = getStalenessPeriod(conf);

  bootstrap(conf);

  ThreadFactory tf =
      new ThreadFactoryBuilder().setNameFormat("InMemorySCMStore")
          .build();
  scheduler = Executors.newSingleThreadScheduledExecutor(tf);

  super.serviceInit(conf);
}
 
Example 4
Project: AppCoins-ethereumj   File: PeerConnectionTester.java
@Autowired
public PeerConnectionTester(final SystemProperties config) {
    this.config = config;
    ConnectThreads = config.peerDiscoveryWorkers();
    ReconnectPeriod = config.peerDiscoveryTouchPeriod() * 1000;
    ReconnectMaxPeers = config.peerDiscoveryTouchMaxNodes();
    peerConnectionPool = new ThreadPoolExecutor(ConnectThreads,
            ConnectThreads, 0L, TimeUnit.SECONDS,
            new MutablePriorityQueue<Runnable, ConnectTask>(new Comparator<ConnectTask>() {
                @Override
                public int compare(ConnectTask h1, ConnectTask h2) {
                    return h2.nodeHandler.getNodeStatistics().getReputation() -
                            h1.nodeHandler.getNodeStatistics().getReputation();
                }
            }), new ThreadFactoryBuilder().setDaemon(true).setNameFormat("discovery-tester-%d").build());
}
 
Example 5
Project: Re-Collector   File: MemoryReporterService.java
@Override
protected void doStart() {
    if (config.isEnable()) {
        this.scheduler = Executors.newSingleThreadScheduledExecutor(
                new ThreadFactoryBuilder()
                        .setDaemon(true)
                        .setNameFormat("memory-reporter-thread")
                        .setUncaughtExceptionHandler((t, e) ->
                                log.error("Problem in memory reporter", e))
                        .build());

        this.scheduledJob = this.scheduler.scheduleAtFixedRate(() -> {
            reportGC();
            reportMemoryPool();
            reportMemoryUsage();
        }, config.getInterval(), config.getInterval(), TimeUnit.MILLISECONDS);
    }

    notifyStarted();
}
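One caveat with this pattern: an uncaught-exception handler on the factory fires only when a pool thread dies with an unhandled throwable. A task run via scheduleAtFixedRate is wrapped in a Future, so an exception thrown inside the lambda is captured by the framework, silently cancels further executions, and never reaches the handler; catching and logging inside the task body is the usual defense.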
 
Example 6
Project: Re-Collector   File: ChunkReaderScheduler.java
public ChunkReaderScheduler(final Input input,
                            final ArrayBlockingQueue<FileChunk> chunkQueue,
                            final int readerBufferSize,
                            final long readerInterval,
                            final FileInput.InitialReadPosition initialReadPosition) {
    this.input = input;
    this.chunkQueue = chunkQueue;
    this.readerBufferSize = readerBufferSize;
    this.readerInterval = readerInterval;
    this.initialReadPosition = initialReadPosition;

    // TODO Make the thread size configurable.
    this.scheduler = Executors.newSingleThreadScheduledExecutor(
            new ThreadFactoryBuilder()
                    .setDaemon(false)
                    .setNameFormat("chunkreader-scheduler-thread-" + input.getId() + "-%d")
                    .build());
}
 
Example 7
Project: hadoop   File: KMSAudit.java
/**
 * Create a new KMSAudit.
 *
 * @param windowMs Duplicate events within the aggregation window are quashed
 *                 to reduce log traffic. A single message for aggregated
 *                 events is printed at the end of the window, along with a
 *                 count of the number of aggregated events.
 */
KMSAudit(long windowMs) {
  cache = CacheBuilder.newBuilder()
      .expireAfterWrite(windowMs, TimeUnit.MILLISECONDS)
      .removalListener(
          new RemovalListener<String, AuditEvent>() {
            @Override
            public void onRemoval(
                RemovalNotification<String, AuditEvent> entry) {
              AuditEvent event = entry.getValue();
              if (event.getAccessCount().get() > 0) {
                KMSAudit.this.logEvent(event);
                event.getAccessCount().set(0);
                KMSAudit.this.cache.put(entry.getKey(), event);
              }
            }
          }).build();
  executor = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
      .setDaemon(true).setNameFormat(KMS_LOGGER_NAME + "_thread").build());
  executor.scheduleAtFixedRate(new Runnable() {
    @Override
    public void run() {
      cache.cleanUp();
    }
  }, windowMs / 10, windowMs / 10, TimeUnit.MILLISECONDS);
}
 
Example 8
Project: hadoop-oss   File: RENAudit.java
/**
 * Create a new RENAudit.
 *
 * @param windowMs Duplicate events within the aggregation window are quashed
 *                 to reduce log traffic. A single message for aggregated
 *                 events is printed at the end of the window, along with a
 *                 count of the number of aggregated events.
 */
RENAudit(long windowMs) {
  cache = CacheBuilder.newBuilder()
      .expireAfterWrite(windowMs, TimeUnit.MILLISECONDS)
      .removalListener(
          new RemovalListener<String, AuditEvent>() {
            @Override
            public void onRemoval(
                RemovalNotification<String, AuditEvent> entry) {
              AuditEvent event = entry.getValue();
              if (event.getAccessCount().get() > 0) {
                RENAudit.this.logEvent(event);
                event.getAccessCount().set(0);
                RENAudit.this.cache.put(entry.getKey(), event);
              }
            }
          }).build();
  executor = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
      .setDaemon(true).setNameFormat(REN_LOGGER_NAME + "_thread").build());
  executor.scheduleAtFixedRate(new Runnable() {
    @Override
    public void run() {
      cache.cleanUp();
    }
  }, windowMs / 10, windowMs / 10, TimeUnit.MILLISECONDS);
}
 
Example 9
Project: hadoop   File: LocatedFileStatusFetcher.java
/**
 * @param conf configuration for the job
 * @param dirs the initial list of paths
 * @param recursive whether to traverse the paths recursively
 * @param inputFilter inputFilter to apply to the resulting paths
 * @param newApi whether using the mapred or mapreduce API
 * @throws InterruptedException
 * @throws IOException
 */
public LocatedFileStatusFetcher(Configuration conf, Path[] dirs,
    boolean recursive, PathFilter inputFilter, boolean newApi) throws InterruptedException,
    IOException {
  int numThreads = conf.getInt(FileInputFormat.LIST_STATUS_NUM_THREADS,
      FileInputFormat.DEFAULT_LIST_STATUS_NUM_THREADS);
  rawExec = Executors.newFixedThreadPool(
      numThreads,
      new ThreadFactoryBuilder().setDaemon(true)
          .setNameFormat("GetFileInfo #%d").build());
  exec = MoreExecutors.listeningDecorator(rawExec);
  resultQueue = new LinkedBlockingQueue<List<FileStatus>>();
  this.conf = conf;
  this.inputDirs = dirs;
  this.recursive = recursive;
  this.inputFilter = inputFilter;
  this.newApi = newApi;
}
 
Example 10
Project: message-broker   File: MessagingEngine.java
MessagingEngine(DataSource dataSource) throws BrokerException {
    DaoFactory daoFactory = new DaoFactory(dataSource);
    QueueDao queueDao = daoFactory.createQueueDao();
    ExchangeDao exchangeDao = daoFactory.createExchangeDao();
    BindingDao bindingDao = daoFactory.createBindingDao();
    MessageDao messageDao = daoFactory.createMessageDao();
    exchangeRegistry = new ExchangeRegistry(exchangeDao, bindingDao);
    // TODO: get the buffer sizes from configs
    sharedMessageStore = new SharedMessageStore(messageDao, 32768, 1024);
    queueRegistry = new QueueRegistry(queueDao, sharedMessageStore);
    exchangeRegistry.retrieveFromStore(queueRegistry);

    ThreadFactory threadFactory = new ThreadFactoryBuilder()
            .setNameFormat("MessageDeliveryTaskThreadPool-%d").build();
    deliveryTaskService = new TaskExecutorService<>(WORKER_COUNT, IDLE_TASK_DELAY_MILLIS, threadFactory);
    messageIdGenerator = new MessageIdGenerator();

    initDefaultDeadLetterQueue();
}
 
Example 11
Project: hadoop   File: SharedCacheUploadService.java
@Override
protected void serviceInit(Configuration conf) throws Exception {
  enabled = conf.getBoolean(YarnConfiguration.SHARED_CACHE_ENABLED,
      YarnConfiguration.DEFAULT_SHARED_CACHE_ENABLED);
  if (enabled) {
    int threadCount =
        conf.getInt(YarnConfiguration.SHARED_CACHE_NM_UPLOADER_THREAD_COUNT,
            YarnConfiguration.DEFAULT_SHARED_CACHE_NM_UPLOADER_THREAD_COUNT);
    uploaderPool = Executors.newFixedThreadPool(threadCount,
        new ThreadFactoryBuilder().
          setNameFormat("Shared cache uploader #%d").
          build());
    scmClient = createSCMClient(conf);
    try {
      fs = FileSystem.get(conf);
      localFs = FileSystem.getLocal(conf);
    } catch (IOException e) {
      LOG.error("Unexpected exception in getting the filesystem", e);
      throw new RuntimeException(e);
    }
  }
  super.serviceInit(conf);
}
 
Example 12
Project: ditb   File: TBoundedThreadPoolServer.java
public TBoundedThreadPoolServer(Args options, ThriftMetrics metrics) {
  super(options);

  if (options.maxQueuedRequests > 0) {
    this.callQueue = new CallQueue(
        new LinkedBlockingQueue<Call>(options.maxQueuedRequests), metrics);
  } else {
    this.callQueue = new CallQueue(new SynchronousQueue<Call>(), metrics);
  }

  ThreadFactoryBuilder tfb = new ThreadFactoryBuilder();
  tfb.setDaemon(true);
  tfb.setNameFormat("thrift-worker-%d");
  executorService =
      new ThreadPoolExecutor(options.minWorkerThreads,
          options.maxWorkerThreads, options.threadKeepAliveTimeSec,
          TimeUnit.SECONDS, this.callQueue, tfb.build());
  serverOptions = options;
}
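Note the two queue choices: with maxQueuedRequests > 0, requests buffer in a bounded LinkedBlockingQueue, while a SynchronousQueue buffers nothing at all, so each call is handed directly to an idle worker and is rejected once all maxWorkerThreads threads are busy.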
 
Example 13
Project: dble   File: DbleServer.java
private DbleServer() {
    this.config = new ServerConfig();
    scheduler = Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder().setNameFormat("TimerScheduler-%d").build());

    /*
     * | offline | Change Server status to OFF |
     * | online | Change Server status to ON |
     */
    this.isOnline = new AtomicBoolean(true);


    // load data node active index from properties
    dnIndexProperties = DnPropertyUtil.loadDnIndexProps();

    this.startupTime = TimeUtil.currentTimeMillis();
    if (isUseZkSwitch()) {
        dnIndexLock = new InterProcessMutex(ZKUtils.getConnection(), KVPathUtil.getDnIndexLockPath());
    }
    xaSessionCheck = new XASessionCheck();
}
 
Example 14
Project: hadoop   File: DeletionService.java
@Override
protected void serviceInit(Configuration conf) throws Exception {
  ThreadFactory tf = new ThreadFactoryBuilder()
    .setNameFormat("DeletionService #%d")
    .build();
  if (conf != null) {
    sched = new DelServiceSchedThreadPoolExecutor(
        conf.getInt(YarnConfiguration.NM_DELETE_THREAD_COUNT,
        YarnConfiguration.DEFAULT_NM_DELETE_THREAD_COUNT), tf);
    debugDelay = conf.getInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 0);
  } else {
    sched = new DelServiceSchedThreadPoolExecutor(
        YarnConfiguration.DEFAULT_NM_DELETE_THREAD_COUNT, tf);
  }
  sched.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
  sched.setKeepAliveTime(60L, SECONDS);
  if (stateStore.canRecover()) {
    recover(stateStore.loadDeletionServiceState());
  }
  super.serviceInit(conf);
}
 
Example 15
Project: ditb   File: HTableMultiplexer.java
/**
 * @param conn The HBase connection.
 * @param conf The HBase configuration
 * @param perRegionServerBufferQueueSize determines the max number of the buffered Put ops for
 *          each region server before dropping the request.
 */
public HTableMultiplexer(Connection conn, Configuration conf,
    int perRegionServerBufferQueueSize) {
  this.conn = (ClusterConnection) conn;
  this.pool = HTable.getDefaultExecutor(conf);
  this.retryNum = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
  this.perRegionServerBufferQueueSize = perRegionServerBufferQueueSize;
  this.maxKeyValueSize = HTable.getMaxKeyValueSize(conf);
  this.flushPeriod = conf.getLong(TABLE_MULTIPLEXER_FLUSH_PERIOD_MS, 100);
  int initThreads = conf.getInt(TABLE_MULTIPLEXER_INIT_THREADS, 10);
  this.executor =
      Executors.newScheduledThreadPool(initThreads,
        new ThreadFactoryBuilder().setDaemon(true).setNameFormat("HTableFlushWorker-%d").build());

  this.workerConf = HBaseConfiguration.create(conf);
  // We do not retry because we need to reassign puts to different queues if regions are
  // moved.
  this.workerConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 0);
}
 
Example 16
Project: abhot   File: TelnetServer.java
@Override
public void start() throws KairosDBException
{
	// Configure the server.
	serverBootstrap = new ServerBootstrap(
			new NioServerSocketChannelFactory(
					Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("telnet-boss-%d").build()),
					Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("telnet-worker-%d").build())));

	// Configure the pipeline factory.
	serverBootstrap.setPipelineFactory(this);
	serverBootstrap.setOption("child.tcpNoDelay", true);
	serverBootstrap.setOption("child.keepAlive", true);
	serverBootstrap.setOption("reuseAddress", true);

	// Bind and start to accept incoming connections.
	serverBootstrap.bind(new InetSocketAddress(address, port));
}
 
Example 17
Project: flume-release-1.7.0   File: PollingPropertiesFileConfigurationProvider.java
@Override
public void start() {
  LOGGER.info("Configuration provider starting");

  Preconditions.checkState(file != null,
      "The parameter file must not be null");

  executorService = Executors.newSingleThreadScheduledExecutor(
          new ThreadFactoryBuilder().setNameFormat("conf-file-poller-%d")
              .build());

  FileWatcherRunnable fileWatcherRunnable =
      new FileWatcherRunnable(file, counterGroup);

  executorService.scheduleWithFixedDelay(fileWatcherRunnable, 0, interval,
      TimeUnit.SECONDS);

  lifecycleState = LifecycleState.START;

  LOGGER.debug("Configuration provider started");
}
 
Example 18
Project: hashsdn-controller   File: SingletonHolder.java
/**
 * @deprecated This method is only used from configuration modules and thus callers of it
 *             should use service injection to make the executor configurable.
 */
@Deprecated
public static synchronized ListeningExecutorService getDefaultCommitExecutor() {
    if (COMMIT_EXECUTOR == null) {
        final ThreadFactory factory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat("md-sal-binding-commit-%d").build();
        /*
         * FIXME: this used to be newCacheThreadPool(), but MD-SAL does not have transaction
         *        ordering guarantees, which means that using a concurrent threadpool results
         *        in application data being committed in random order, potentially resulting
         *        in inconsistent data being present. Once proper primitives are introduced,
         *        concurrency can be reintroduced.
         */
        final ExecutorService executor = Executors.newSingleThreadExecutor(factory);
        COMMIT_EXECUTOR = MoreExecutors.listeningDecorator(executor);
    }

    return COMMIT_EXECUTOR;
}
 
Example 19
Project: hadoop   File: LocalJobRunner.java
/**
 * Creates the executor service used to run map tasks.
 *
 * @return an ExecutorService instance that handles map tasks
 */
protected synchronized ExecutorService createMapExecutor() {

  // Determine the size of the thread pool to use
  int maxMapThreads = job.getInt(LOCAL_MAX_MAPS, 1);
  if (maxMapThreads < 1) {
    throw new IllegalArgumentException(
        "Configured " + LOCAL_MAX_MAPS + " must be >= 1");
  }
  maxMapThreads = Math.min(maxMapThreads, this.numMapTasks);
  maxMapThreads = Math.max(maxMapThreads, 1); // In case of no tasks.

  LOG.debug("Starting mapper thread pool executor.");
  LOG.debug("Max local threads: " + maxMapThreads);
  LOG.debug("Map tasks to process: " + this.numMapTasks);

  // Create a new executor service to drain the work queue.
  ThreadFactory tf = new ThreadFactoryBuilder()
    .setNameFormat("LocalJobRunner Map Task Executor #%d")
    .build();
  ExecutorService executor = Executors.newFixedThreadPool(maxMapThreads, tf);

  return executor;
}
 
Example 20
Project: tikv-client-lib-java   File: PDClient.java
private void initCluster() {
  GetMembersResponse resp = null;
  List<HostAndPort> pdAddrs = getSession().getConf().getPdAddrs();
  for(HostAndPort u: pdAddrs) {
    resp = getMembers(u);
    if(resp != null) {
      break;
    }
  }
  checkNotNull(resp, "Failed to init client for PD cluster.");
  long clusterId = resp.getHeader().getClusterId();
  header = RequestHeader.newBuilder().setClusterId(clusterId).build();
  tsoReq = TsoRequest.newBuilder().setHeader(header).build();
  this.pdAddrs = pdAddrs;
  createLeaderWrapper(resp.getLeader().getClientUrls(0));
  service = Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder().setDaemon(true).build());
  service.scheduleAtFixedRate(this::updateLeader, 1, 1, TimeUnit.MINUTES);
}
 
Example 21
Project: happylifeplat-transaction   File: FixedThreadPoolHelper.java
public ExecutorService getExecutorService() {
    final ThreadFactory threadFactory = new ThreadFactoryBuilder().setNameFormat("search-thread-%d")
            .setDaemon(true).build();
    return new ThreadPoolExecutor(DEFAULT_THREAD_MAX, DEFAULT_THREAD_MAX,
            0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<>(1024), threadFactory, new ThreadPoolExecutor.AbortPolicy());

}
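Passing ThreadPoolExecutor.AbortPolicy explicitly, here and in Example 40, only spells out the default: it is already the rejection handler a ThreadPoolExecutor uses when none is given.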
 
Example 22
Project: hadoop   File: DecommissionManager.java
DecommissionManager(final Namesystem namesystem,
    final BlockManager blockManager, final HeartbeatManager hbManager) {
  this.namesystem = namesystem;
  this.blockManager = blockManager;
  this.hbManager = hbManager;

  executor = Executors.newScheduledThreadPool(1,
      new ThreadFactoryBuilder().setNameFormat("DecommissionMonitor-%d")
          .setDaemon(true).build());
  decomNodeBlocks = new TreeMap<>();
  pendingNodes = new LinkedList<>();
}
 
Example 23
Project: fastmq   File: SimpleThreadFactory.java
public SimpleThreadFactory(String name) {
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder
        .setNameFormat(name)
        .setUncaughtExceptionHandler((t, e) -> logger.error("Uncaught exception of thread_" + t.toString(), e));
    this.threadFactory = builder.build();
}
 
Example 24
Project: tikv-client-lib-java   File: TiSession.java
public ExecutorService getThreadPoolForTableScan() {
  ExecutorService res = tableScanThreadPool;
  if (res == null) {
    synchronized (this) {
      if (tableScanThreadPool == null) {
        tableScanThreadPool = Executors.newFixedThreadPool(
            conf.getTableScanConcurrency(),
            new ThreadFactoryBuilder().setDaemon(true).build());
      }
      res = tableScanThreadPool;
    }
  }
  return res;
}
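A caveat on this lazy-initialization idiom (also used for the index-scan pool in Example 31): double-checked locking is only safe under the Java memory model if the tableScanThreadPool field is declared volatile, which this excerpt does not show. Without volatile, a racing reader may observe a reference to a partially constructed executor.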
 
Example 25
Project: ditb   File: RegionLocationFinder.java
RegionLocationFinder() {
  this.cache = createCache();
  executor = MoreExecutors.listeningDecorator(
      Executors.newScheduledThreadPool(
          5,
          new ThreadFactoryBuilder()
              .setDaemon(true)
              .setNameFormat("region-location-%d")
              .build()));
}
 
Example 26
Project: hadoop   File: ReadaheadPool.java
private ReadaheadPool() {
  pool = new ThreadPoolExecutor(POOL_SIZE, MAX_POOL_SIZE, 3L, TimeUnit.SECONDS,
      new ArrayBlockingQueue<Runnable>(CAPACITY));
  pool.setRejectedExecutionHandler(new ThreadPoolExecutor.DiscardOldestPolicy());
  pool.setThreadFactory(new ThreadFactoryBuilder()
    .setDaemon(true)
    .setNameFormat("Readahead Thread #%d")
    .build());
}
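The DiscardOldestPolicy set here (and in the identical hadoop-oss copy in Example 30) drops the oldest queued request and retries the new one when the bounded ArrayBlockingQueue fills up, a reasonable trade-off for advisory readahead work where stale requests lose their value.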
 
Example 27
Project: hadoop   File: LocalContainerLauncher.java
public void serviceStart() throws Exception {
  // create a single thread for serial execution of tasks
  // make it a daemon thread so that the process can exit even if the task is
  // not interruptible
  taskRunner =
      Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().
          setDaemon(true).setNameFormat("uber-SubtaskRunner").build());
  // create and start an event handling thread
  eventHandler = new Thread(new EventHandler(), "uber-EventHandler");
  eventHandler.start();
  super.serviceStart();
}
 
Example 28
Project: hadoop-oss   File: ValueQueue.java
/**
 * Constructor takes the following tunable configuration parameters
 * @param numValues The number of values cached in the Queue for a
 *    particular key.
 * @param lowWatermark The ratio of (number of current entries/numValues)
 *    below which the <code>fillQueueForKey()</code> function will be
 *    invoked to fill the Queue.
 * @param expiry Expiry time after which the Key and associated Queue are
 *    evicted from the cache.
 * @param numFillerThreads Number of threads to use for the filler thread
 * @param policy The SyncGenerationPolicy to use when client
 *    calls "getAtMost"
 * @param refiller implementation of the QueueRefiller
 */
public ValueQueue(final int numValues, final float lowWatermark,
    long expiry, int numFillerThreads, SyncGenerationPolicy policy,
    final QueueRefiller<E> refiller) {
  Preconditions.checkArgument(numValues > 0, "\"numValues\" must be > 0");
  Preconditions.checkArgument(((lowWatermark > 0)&&(lowWatermark <= 1)),
      "\"lowWatermark\" must be > 0 and <= 1");
  Preconditions.checkArgument(expiry > 0, "\"expiry\" must be > 0");
  Preconditions.checkArgument(numFillerThreads > 0,
      "\"numFillerThreads\" must be > 0");
  Preconditions.checkNotNull(policy, "\"policy\" must not be null");
  this.refiller = refiller;
  this.policy = policy;
  this.numValues = numValues;
  this.lowWatermark = lowWatermark;
  keyQueues = CacheBuilder.newBuilder()
          .expireAfterAccess(expiry, TimeUnit.MILLISECONDS)
          .build(new CacheLoader<String, LinkedBlockingQueue<E>>() {
                @Override
                public LinkedBlockingQueue<E> load(String keyName)
                    throws Exception {
                  LinkedBlockingQueue<E> keyQueue =
                      new LinkedBlockingQueue<E>();
                  refiller.fillQueueForKey(keyName, keyQueue,
                      (int)(lowWatermark * numValues));
                  return keyQueue;
                }
              });

  executor =
      new ThreadPoolExecutor(numFillerThreads, numFillerThreads, 0L,
          TimeUnit.MILLISECONDS, queue, new ThreadFactoryBuilder()
              .setDaemon(true)
              .setNameFormat(REFILL_THREAD).build());
}
 
Example 29
Project: hadoop-oss   File: Client.java
/**
 * Get Executor on which IPC calls' parameters are sent.
 * If the internal reference counter is zero, this method
 * creates the instance of Executor. If not, this method
 * just returns the reference of clientExecutor.
 * 
 * @return An ExecutorService instance
 */
synchronized ExecutorService refAndGetInstance() {
  if (executorRefCount == 0) {
    clientExecutor = Executors.newCachedThreadPool(
        new ThreadFactoryBuilder()
        .setDaemon(true)
        .setNameFormat("IPC Parameter Sending Thread #%d")
        .build());
  }
  executorRefCount++;
  
  return clientExecutor;
}
 
Example 30
Project: hadoop-oss   File: ReadaheadPool.java
private ReadaheadPool() {
  pool = new ThreadPoolExecutor(POOL_SIZE, MAX_POOL_SIZE, 3L, TimeUnit.SECONDS,
      new ArrayBlockingQueue<Runnable>(CAPACITY));
  pool.setRejectedExecutionHandler(new ThreadPoolExecutor.DiscardOldestPolicy());
  pool.setThreadFactory(new ThreadFactoryBuilder()
    .setDaemon(true)
    .setNameFormat("Readahead Thread #%d")
    .build());
}
 
Example 31
Project: tikv-client-lib-java   File: TiSession.java
public ExecutorService getThreadPoolForIndexScan() {
  ExecutorService res = indexScanThreadPool;
  if (res == null) {
    synchronized (this) {
      if (indexScanThreadPool == null) {
        indexScanThreadPool = Executors.newFixedThreadPool(
            conf.getIndexScanConcurrency(),
            new ThreadFactoryBuilder().setDaemon(true).build());
      }
      res = indexScanThreadPool;
    }
  }
  return res;
}
 
Example 32
Project: open-kilda   File: Consumer.java
@Override
public void run() {
	Properties kprops = new Properties();

	kprops.put("bootstrap.servers", kafka.url());
	kprops.put("group.id", groupId);
	// NB: only "true" is valid; no code to handle "false" yet.
	kprops.put("enable.auto.commit", "true");
	kprops.put("auto.commit.interval.ms", commitInterval);
	kprops.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
	kprops.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
	KafkaConsumer<String, String> consumer = new KafkaConsumer<>(kprops);

	consumer.subscribe(Arrays.asList(topicName));

	Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setNameFormat("heartbeat.consumer").build())
			.execute(new Runnable() {
				@Override
				public void run() {
					try {
						while (true) {
							logger.trace("==> Poll for records");
							ConsumerRecords<String, String> records = consumer.poll(1000);
							logger.trace("==> Number of records: " + records.count());
							// NB: if you want the beginning ..
							// consumer.seekToBeginning(records.partitions());
							for (ConsumerRecord<String, String> record : records)
								logger.debug("==> ==> offset = {}, key = {}, value = {}", record.offset(),
										record.key(), record.value());
							Thread.sleep(sleepTime);
						}
					} catch (InterruptedException e) {
						logger.info("Heartbeat Consumer Interrupted");
					} finally {
						consumer.close();
					}
				}
			});
}
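KafkaConsumer is not thread-safe. This works because the hand-off into the single-thread executor establishes a happens-before edge, and every subsequent poll() happens on that one thread; the consumer must not be touched from the constructing thread afterwards.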
 
Example 33
Project: drift   File: ConnectionPool.java
public ConnectionPool(ConnectionManager connectionFactory, EventLoopGroup group, DriftNettyClientConfig config)
{
    this.connectionFactory = connectionFactory;
    this.group = requireNonNull(group, "group is null");
    requireNonNull(config, "config is null");

    // todo from config
    cachedConnections = CacheBuilder.newBuilder()
            .maximumSize(100)
            .expireAfterAccess(10, TimeUnit.MINUTES)
            .<HostAndPort, Future<Channel>>removalListener(notification -> closeConnection(notification.getValue()))
            .build(new CacheLoader<HostAndPort, Future<Channel>>()
            {
                @Override
                public Future<Channel> load(HostAndPort address)
                        throws Exception
                {
                    return createConnection(address);
                }
            });

    maintenanceThread = newSingleThreadScheduledExecutor(new ThreadFactoryBuilder()
            .setNameFormat("drift-connection-maintenance-%s")
            .setDaemon(true)
            .build());

    maintenanceThread.scheduleWithFixedDelay(cachedConnections::cleanUp, 1, 1, TimeUnit.SECONDS);
}
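Note the "%s" in the name format: ThreadFactoryBuilder always supplies a sequential integer as the format argument, and String.format renders it through "%s" just as it would through "%d", so the threads are still numbered.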
 
Example 34
Project: abhot   File: WriteBuffer.java
public WriteBuffer(Keyspace keyspace, String cfName,
		int writeDelay, int maxWriteSize, Serializer<RowKeyType> keySerializer,
		Serializer<ColumnKeyType> columnKeySerializer,
		Serializer<ValueType> valueSerializer,
		WriteBufferStats stats,
		ReentrantLock mutatorLock,
		int threadCount,
		int jobQueueSize)
{
	m_executorService = new ThreadPoolExecutor(threadCount, threadCount,
			0L, TimeUnit.MILLISECONDS,
			new LinkedBlockingQueue<Runnable>(jobQueueSize),
			new ThreadFactoryBuilder().setNameFormat("WriteBuffer-"+cfName+"-%d").build());

	m_keyspace = keyspace;
	m_cfName = cfName;
	m_writeDelay = writeDelay;
	m_initialMaxBufferSize = m_maxBufferSize = maxWriteSize;
	m_rowKeySerializer = keySerializer;
	m_columnKeySerializer = columnKeySerializer;
	m_valueSerializer = valueSerializer;
	m_writeStats = stats;
	m_mutatorLock = mutatorLock;

	m_buffer = new ArrayList<>();
	m_mutator = new MutatorImpl<>(keyspace, keySerializer);
	m_writeThread = new Thread(this, "WriteBuffer Scheduler for "+cfName);
	m_writeThread.start();
}
 
Example 35
Project: ditb   File: MultiThreadedClientExample.java
public MultiThreadedClientExample() throws IOException {
  // Base number of threads.
  // This represents the number of threads your application has
  // that can interact with an HBase client.
  this.threads = Runtime.getRuntime().availableProcessors() * 4;

  // Daemon threads are great for things that get shut down.
  ThreadFactory threadFactory = new ThreadFactoryBuilder()
      .setDaemon(true).setNameFormat("internal-pol-%d").build();


  this.internalPool = Executors.newFixedThreadPool(threads, threadFactory);
}
 
Example 36
Project: nges-sample-tic-tac-toe   File: EventStreams.java
@PostConstruct
public void init() {
    gameListPool = Executors.newCachedThreadPool(
            new ThreadFactoryBuilder().setNameFormat("GameList-%d").setDaemon(true).build());
    singleGamePool = Executors.newCachedThreadPool(
            new ThreadFactoryBuilder().setNameFormat("SingleGame-%d").setDaemon(true).build());
}
 
Example 37
Project: datax   File: DBUtil.java
@Override
protected ExecutorService initialValue() {
    return Executors.newFixedThreadPool(1, new ThreadFactoryBuilder()
            .setNameFormat("rsExecutors-%d")
            .setDaemon(true)
            .build());
}
 
Example 38
Project: message-broker   File: RdbmsCoordinationStrategy.java
/**
 * Default constructor.
 *
 * @param rdbmsCoordinationDaoImpl       the RdbmsCoordinationDaoImpl to use for communication with the database
 * @param rdbmsCoordinationOptions       the configuration for RDBMS coordination
 */
public RdbmsCoordinationStrategy(RdbmsCoordinationDaoImpl rdbmsCoordinationDaoImpl,
                                 Map<String, String> rdbmsCoordinationOptions) {
    ThreadFactory namedThreadFactory = new ThreadFactoryBuilder().setNameFormat("RdbmsCoordinationStrategy-%d")
            .build();
    threadExecutor = Executors.newSingleThreadExecutor(namedThreadFactory);
    scheduledExecutorService = Executors.newSingleThreadScheduledExecutor();

    if (rdbmsCoordinationOptions.get(RdbmsCoordinationConstants.HEARTBEAT_INTERVAL) != null) {
        heartBeatInterval = Integer.parseInt(
                rdbmsCoordinationOptions.get(RdbmsCoordinationConstants.HEARTBEAT_INTERVAL));
    } else {
        heartBeatInterval = 5000;
    }
    if (rdbmsCoordinationOptions.get(RdbmsCoordinationConstants.COORDINATOR_ENTRY_CREATION_WAIT_TIME) != null) {
        coordinatorEntryCreationWaitTime = Integer.parseInt(
                rdbmsCoordinationOptions.get(RdbmsCoordinationConstants.COORDINATOR_ENTRY_CREATION_WAIT_TIME));
    } else {
        coordinatorEntryCreationWaitTime = 3000;
    }
    localNodeId = rdbmsCoordinationOptions.get(RdbmsCoordinationConstants.NODE_IDENTIFIER);
    if (localNodeId == null) {
        localNodeId = UUID.randomUUID().toString();
    }

    // Maximum age of a heartbeat. After this much time, the heartbeat is considered invalid and the
    // node is considered to have left the cluster.
    heartbeatMaxAge = heartBeatInterval * 2;

    if (heartBeatInterval <= coordinatorEntryCreationWaitTime) {
        throw new RuntimeException("Configuration error. " + heartBeatInterval + " * 2 should be greater than " +
                coordinatorEntryCreationWaitTime);
    }

    coordinationDao = rdbmsCoordinationDaoImpl;
}
 
Example 39
Project: ditb   File: RpcServer.java
public Listener(final String name) throws IOException {
  super(name);
  backlogLength = conf.getInt("hbase.ipc.server.listen.queue.size", 128);
  // Create a new server socket and set it to non-blocking mode
  acceptChannel = ServerSocketChannel.open();
  acceptChannel.configureBlocking(false);

  // Bind the server socket to the bind address (which can be different from the default interface)
  bind(acceptChannel.socket(), bindAddress, backlogLength);
  port = acceptChannel.socket().getLocalPort(); // Could be an ephemeral port
  // Create a selector
  selector = Selector.open();

  readers = new Reader[readThreads];
  readPool = Executors.newFixedThreadPool(readThreads,
    new ThreadFactoryBuilder().setNameFormat(
      "RpcServer.reader=%d,bindAddress=" + bindAddress.getHostName() +
      ",port=" + port).setDaemon(true).build());
  for (int i = 0; i < readThreads; ++i) {
    Reader reader = new Reader();
    readers[i] = reader;
    readPool.execute(reader);
  }
  LOG.info(getName() + ": started " + readThreads + " reader(s) listening on port=" + port);

  // Register accepts on the server socket with the selector.
  acceptChannel.register(selector, SelectionKey.OP_ACCEPT);
  this.setName("RpcServer.listener,port=" + port);
  this.setDaemon(true);
}
 
Example 40
Project: bulbasaur   File: BulbasaurExecutorHelper.java
@PostConstruct
public void init() {
    ThreadFactory namedThreadFactory = new ThreadFactoryBuilder()
        .setNameFormat("demo-pool-%d").build();

    // General-purpose thread pool
    executorService = new ThreadPoolExecutor(corePoolSize, maximumPoolSize,
        0L, TimeUnit.MILLISECONDS,
        new LinkedBlockingQueue<Runnable>(1024), namedThreadFactory, new ThreadPoolExecutor.AbortPolicy());
}