Java Code Examples for java.util.concurrent.Executors#newScheduledThreadPool()

The following examples show how to use java.util.concurrent.Executors#newScheduledThreadPool(). These examples are extracted from open source projects; where available, the source project, file, and license are noted above each example.
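Before the project-specific examples, here is a minimal, self-contained sketch of the basic pattern (illustrative only; the class name and timings are made up for the demo, not taken from any project below). It creates a scheduled pool, runs a periodic task, and shuts the pool down, which by default also cancels pending periodic tasks.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ScheduledPoolDemo {
    public static void main(String[] args) throws InterruptedException {
        // Pool with a single worker thread for delayed and periodic tasks.
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);

        // Run a task every second, starting after an initial 1-second delay.
        scheduler.scheduleAtFixedRate(
                () -> System.out.println("tick " + System.currentTimeMillis()),
                1, 1, TimeUnit.SECONDS);

        // Let it run briefly, then stop: shutdown() rejects new tasks and,
        // with the default policy, cancels the periodic task as well.
        Thread.sleep(5_000);
        scheduler.shutdown();
        scheduler.awaitTermination(5, TimeUnit.SECONDS);
    }
}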
Example 1
@Override
public void initialize(final EventStore eventStore) {
    this.eventStore = eventStore;
    directoryManager.initialize();

    maintenanceExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Provenance Repository Maintenance"));
    maintenanceExecutor.scheduleWithFixedDelay(() -> performMaintenance(), 1, 1, TimeUnit.MINUTES);
    maintenanceExecutor.scheduleWithFixedDelay(new RemoveExpiredQueryResults(), 30, 30, TimeUnit.SECONDS);

    cachedQueries.add(new LatestEventsQuery());
    cachedQueries.add(new LatestEventsPerProcessorQuery());

    final Optional<Integer> warmCacheMinutesOption = config.getWarmCacheFrequencyMinutes();
    if (warmCacheMinutesOption.isPresent() && warmCacheMinutesOption.get() > 0) {
        for (final File storageDir : config.getStorageDirectories().values()) {
            final int minutes = warmCacheMinutesOption.get();
            cacheWarmerExecutor.scheduleWithFixedDelay(new LuceneCacheWarmer(storageDir, indexManager), 1, minutes, TimeUnit.MINUTES);
        }
    }
}
 
Example 2
Source Project: pulsar   File: MongoSink.java    License: Apache License 2.0
@Override
public void open(Map<String, Object> config, SinkContext sinkContext) throws Exception {
    log.info("Open MongoDB Sink");

    mongoConfig = MongoConfig.load(config);
    mongoConfig.validate(true, true);

    if (clientProvider != null) {
        mongoClient = clientProvider.get();
    } else {
        mongoClient = MongoClients.create(mongoConfig.getMongoUri());
    }

    final MongoDatabase db = mongoClient.getDatabase(mongoConfig.getDatabase());
    collection = db.getCollection(mongoConfig.getCollection());

    incomingList = Lists.newArrayList();
    flushExecutor = Executors.newScheduledThreadPool(1);
    flushExecutor.scheduleAtFixedRate(() -> flush(),
            mongoConfig.getBatchTimeMs(), mongoConfig.getBatchTimeMs(), TimeUnit.MILLISECONDS);
}
 
Example 3
/**
 * Create a new instance
 */
public DefaultAudioPlayerManager() {
  sourceManagers = new ArrayList<>();

  // Executors
  trackPlaybackExecutorService = new ThreadPoolExecutor(1, Integer.MAX_VALUE, 10, TimeUnit.SECONDS,
      new SynchronousQueue<>(), new DaemonThreadFactory("playback"));
  trackInfoExecutorService = ExecutorTools.createEagerlyScalingExecutor(1, DEFAULT_LOADER_POOL_SIZE,
      TimeUnit.SECONDS.toMillis(30), LOADER_QUEUE_CAPACITY, new DaemonThreadFactory("info-loader"));
  scheduledExecutorService = Executors.newScheduledThreadPool(1, new DaemonThreadFactory("manager"));
  orderedInfoExecutor = new OrderedExecutor(trackInfoExecutorService);

  // Configuration
  trackStuckThreshold = TimeUnit.MILLISECONDS.toNanos(10000);
  configuration = new AudioConfiguration();
  cleanupThreshold = new AtomicLong(DEFAULT_CLEANUP_THRESHOLD);
  frameBufferDuration = DEFAULT_FRAME_BUFFER_DURATION;
  useSeekGhosting = true;

  // Additional services
  remoteNodeManager = new RemoteNodeManager(this);
  garbageCollectionMonitor = new GarbageCollectionMonitor(scheduledExecutorService);
  lifecycleManager = new AudioPlayerLifecycleManager(scheduledExecutorService, cleanupThreshold);
  lifecycleManager.initialise();
}
 
Example 4
private void startTimer() {
    if (mScheduledExecutorService == null) {
        // Scheduled task
        mScheduledExecutorService = Executors.newScheduledThreadPool(2);
        // Periodic task: the start time of the next run is computed from the start time of the previous run
        mScheduledExecutorService.scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                handler.post(new Runnable() {
                    @Override
                    public void run() {
                        updateTextSwitcher(flag);
                    }
                });
            }
        }, 0, timePeriod, TimeUnit.MILLISECONDS);
    }
}
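The comment in Example 4 points out that scheduleAtFixedRate measures the next start time from the start of the previous run. A small illustrative sketch (not from the project above; names and timings are arbitrary) contrasting that with scheduleWithFixedDelay, which measures from the end of the previous run:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class FixedRateVsFixedDelay {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(2);

        // Fixed rate: runs are scheduled at start-of-previous-run + period.
        // If a run overruns the period, later runs start late but never overlap.
        scheduler.scheduleAtFixedRate(
                () -> System.out.println("fixed-rate  " + System.nanoTime()),
                0, 500, TimeUnit.MILLISECONDS);

        // Fixed delay: the next run starts the given delay AFTER the previous
        // run finishes, so the gap between runs is always at least the delay.
        scheduler.scheduleWithFixedDelay(
                () -> System.out.println("fixed-delay " + System.nanoTime()),
                0, 500, TimeUnit.MILLISECONDS);

        Thread.sleep(3_000);
        scheduler.shutdownNow();
    }
}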
 
Example 5
Source Project: emodb   File: S3ScanWriterTest.java    License: Apache License 2.0
@Test
public void testWriteWithCancel()
        throws Exception {
    URI baseUri = URI.create("s3://test-bucket/scan");
    ScheduledExecutorService uploadService = Executors.newScheduledThreadPool(2);

    try {
        PutObjectResult putObjectResult = new PutObjectResult();
        putObjectResult.setETag("dummy-etag");

        AmazonS3 amazonS3 = mock(AmazonS3.class);
        when(amazonS3.putObject(argThat(putsIntoBucket("test-bucket"))))
                .thenReturn(putObjectResult);

        AmazonS3Provider amazonS3Provider = mock(AmazonS3Provider.class);
        when(amazonS3Provider.getS3ClientForBucket("test-bucket")).thenReturn(amazonS3);

        S3ScanWriter scanWriter = new S3ScanWriter(1, baseUri, Optional.of(2), new MetricRegistry(), amazonS3Provider, uploadService, new ObjectMapper());

        ScanDestinationWriter[] scanDestinationWriters = new ScanDestinationWriter[2];

        for (int i = 0; i < 2; i++) {
            scanDestinationWriters[i] = scanWriter.writeShardRows("table" + i, "p0", 0, i);
            scanDestinationWriters[i].writeDocument(ImmutableMap.of("type", "review", "rating", i));
        }

        // Simulate canceling shardWriter[0] in response to a failure.
        scanDestinationWriters[0].closeAndCancel();
        // Close shardWriter[1] normally
        scanDestinationWriters[1].closeAndTransferAsync(Optional.of(1));

        verifyAllTransfersComplete(scanWriter, uploadService);
    } finally {
        uploadService.shutdownNow();
    }
}
 
Example 6
@Test
public void findExecutorByName() {
    ScheduledExecutorService expected = Executors.newScheduledThreadPool(4);
    Throwable exc = new NoUniqueBeanDefinitionException(ScheduledExecutorService.class);
    when(context.getBean(ScheduledExecutorService.class)).thenThrow(exc);
    when(context.getBean("taskScheduler", ScheduledExecutorService.class))
            .thenReturn(expected);
    assertEquals(expected, beanLocator.resolveScheduledExecutorService());
}
 
Example 7
Source Project: helix   File: TestNoDoubleAssign.java    License: Apache License 2.0
/**
 * Fetch the JobContext for all jobs in ZK and check that no two tasks are running on the same
 * Participant.
 */
private void pollForDoubleAssign() {
  _executorServicePoll = Executors.newScheduledThreadPool(THREAD_COUNT);
  _executorServicePoll.scheduleAtFixedRate(() -> {
    if (!_existsDoubleAssign.get()) {
      // Get JobContexts and test that they are assigned to disparate Participants
      for (String job : _jobNames) {
        JobContext jobContext = _driver.getJobContext(job);
        if (jobContext == null) {
          continue;
        }
        Set<String> instanceCache = new HashSet<>();
        for (int partition : jobContext.getPartitionSet()) {
          if (jobContext.getPartitionState(partition) == TaskPartitionState.RUNNING) {
            String assignedParticipant = jobContext.getAssignedParticipant(partition);
            if (assignedParticipant != null) {
              if (instanceCache.contains(assignedParticipant)) {
                // Two tasks running on the same instance at the same time
                _existsDoubleAssign.set(true);
                return;
              }
              instanceCache.add(assignedParticipant);
            }
          }
        }
      }
    }
  }, 0L, POLL_DELAY, TimeUnit.MILLISECONDS);
}
 
Example 8
@Activate
public void activate(ComponentContext componentContext) {

    try {
        DataHolder.getInstance().setModifiedTime(LoggingUpdaterUtil.readModifiedTime());
        LogConfigUpdater logConfigUpdater =
                new LogConfigUpdater(DataHolder.getInstance().getConfigurationAdmin());
        ScheduledExecutorService scheduledExecutorService = Executors.newScheduledThreadPool(1);
        DataHolder.getInstance().setScheduledExecutorService(scheduledExecutorService);
        scheduledExecutorService.scheduleAtFixedRate(logConfigUpdater, 5000L, 5000L, TimeUnit.MILLISECONDS);
    } catch (LoggingUpdaterException e) {
        log.error("Error while Activating LoggingUpdater component", e);
    }
}
 
Example 9
Source Project: nifi   File: LuceneEventIndex.java    License: Apache License 2.0
private void triggerReindexOfDefunctIndices() {
    final ExecutorService rebuildIndexExecutor = Executors.newScheduledThreadPool(2, new NamedThreadFactory("Rebuild Defunct Provenance Indices", true));
    final List<File> allIndexDirectories = directoryManager.getAllIndexDirectories(true, true);
    allIndexDirectories.sort(DirectoryUtils.OLDEST_INDEX_FIRST);
    final List<File> defunctIndices = detectDefunctIndices(allIndexDirectories);

    final AtomicInteger rebuildCount = new AtomicInteger(0);
    final int totalCount = defunctIndices.size();

    for (final File defunctIndex : defunctIndices) {
        try {
            if (isLucene4IndexPresent(defunctIndex)) {
                logger.info("Encountered Lucene 8 index {} and also the corresponding Lucene 4 index; will only trigger rebuilding of one directory.", defunctIndex);
                rebuildCount.incrementAndGet();
                continue;
            }

            logger.info("Determined that Lucene Index Directory {} is defunct. Will destroy and rebuild index", defunctIndex);

            final Tuple<Long, Long> timeRange = getTimeRange(defunctIndex, allIndexDirectories);
            rebuildIndexExecutor.submit(new MigrateDefunctIndex(defunctIndex, indexManager, directoryManager, timeRange.getKey(), timeRange.getValue(),
                eventStore, eventReporter, eventConverter, rebuildCount, totalCount));
        } catch (final Exception e) {
            logger.error("Detected defunct index {} but failed to rebuild index", defunctIndex, e);
        }
    }

    rebuildIndexExecutor.shutdown();

    if (!allIndexDirectories.isEmpty()) {
        final File newestIndexDirectory = allIndexDirectories.get(allIndexDirectories.size() - 1);
        if (defunctIndices.contains(newestIndexDirectory)) {
            newestIndexDefunct = true;
        }
    }
}
 
Example 10
public void connect(long timeout, boolean blocking) throws AWSIotException, AWSIotTimeoutException {
    synchronized (this) {
        if (executionService == null) {
            executionService = Executors.newScheduledThreadPool(numOfClientThreads);
        }
    }

    AwsIotCompletion completion = new AwsIotCompletion(timeout, !blocking);
    connection.connect(completion);
    completion.get(this);
}
 
Example 11
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
    cacheReplacementEnabled = Boolean.parseBoolean(configDao.getValue(Config.StorageCacheReplacementEnabled.key()));
    cacheReplaceMentInterval = NumbersUtil.parseInt(configDao.getValue(Config.StorageCacheReplacementInterval.key()), 86400);
    workers = NumbersUtil.parseInt(configDao.getValue(Config.ExpungeWorkers.key()), 10);
    executors = Executors.newScheduledThreadPool(workers, new NamedThreadFactory("StorageCacheManager-cache-replacement"));
    return true;
}
 
Example 12
Source Project: DataLink   File: FileStreamKeeper.java    License: Apache License 2.0
public static void start() {
    executorService = Executors.newScheduledThreadPool(1, new NamedThreadFactory("File-Stream-Holder"));
    executorService.scheduleAtFixedRate(
            FileStreamKeeper::check,
            CHECK_RATE,
            CHECK_RATE,
            TimeUnit.MILLISECONDS
    );
    LOGGER.info("File Stream Keeper is started.");
}
 
Example 13
private static void scheduleAtFixedRate() {
    ScheduledExecutorService executorService = Executors.newScheduledThreadPool(5);
    for (int i = 0; i < 100; i++) {
        executorService.scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                System.out.println(Thread.currentThread().getName() + " running");
            }
        }, 1, 1, TimeUnit.SECONDS);
    }
    executorService.shutdown();
}
 
Example 14
@Override
public void start() {
    if (!running) {
        producerThreadPool = Executors.newScheduledThreadPool(numThreads);
        running = true;
    }
}
 
Example 15
Source Project: reactor-core   File: ElasticScheduler.java    License: Apache License 2.0
ElasticScheduler(ThreadFactory factory, int ttlSeconds) {
	if (ttlSeconds < 0) {
		throw new IllegalArgumentException("ttlSeconds must be positive, was: " + ttlSeconds);
	}
	this.ttlSeconds = ttlSeconds;
	this.factory = factory;
	this.cache = new ConcurrentLinkedDeque<>();
	this.all = new ConcurrentLinkedQueue<>();
	this.evictor = Executors.newScheduledThreadPool(1, EVICTOR_FACTORY);
	this.evictor.scheduleAtFixedRate(this::eviction,
			ttlSeconds,
			ttlSeconds,
			TimeUnit.SECONDS);
}
 
Example 16
Source Project: nifi   File: LuceneEventIndex.java    License: Apache License 2.0
@Override
public void initialize(final EventStore eventStore) {
    this.eventStore = eventStore;
    directoryManager.initialize();

    maintenanceExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Provenance Repository Maintenance"));
    maintenanceExecutor.scheduleWithFixedDelay(this::performMaintenance, 1, 1, TimeUnit.MINUTES);
    maintenanceExecutor.scheduleWithFixedDelay(this::purgeObsoleteQueries, 30, 30, TimeUnit.SECONDS);

    cachedQueries.add(new LatestEventsQuery());
    cachedQueries.add(new LatestEventsPerProcessorQuery());

    triggerReindexOfDefunctIndices();
    triggerCacheWarming();
}
 
Example 17
Source Project: litchi   File: Schedule.java    License: Apache License 2.0
public Schedule(int threadSize, String name) {
	executorService = Executors.newScheduledThreadPool(threadSize, new NamedThreadFactory(name));
}
 
Example 18
@BeforeClass
public static void beforeClass() {
    AsyncContext.autoEnable();
    executor = Executors.newScheduledThreadPool(4);
}
 
Example 19
Source Project: localization_nifi   File: NiFi.java    License: Apache License 2.0
/**
 * Determine if the machine we're running on has timing issues.
 */
private void detectTimingIssues() {
    final int minRequiredOccurrences = 25;
    final int maxOccurrencesOutOfRange = 15;
    final AtomicLong lastTriggerMillis = new AtomicLong(System.currentTimeMillis());

    final ScheduledExecutorService service = Executors.newScheduledThreadPool(1, new ThreadFactory() {
        private final ThreadFactory defaultFactory = Executors.defaultThreadFactory();

        @Override
        public Thread newThread(final Runnable r) {
            final Thread t = defaultFactory.newThread(r);
            t.setDaemon(true);
            t.setName("Detect Timing Issues");
            return t;
        }
    });

    final AtomicInteger occurrencesOutOfRange = new AtomicInteger(0);
    final AtomicInteger occurrences = new AtomicInteger(0);
    final Runnable command = new Runnable() {
        @Override
        public void run() {
            final long curMillis = System.currentTimeMillis();
            final long difference = curMillis - lastTriggerMillis.get();
            final long millisOff = Math.abs(difference - 2000L);
            occurrences.incrementAndGet();
            if (millisOff > 500L) {
                occurrencesOutOfRange.incrementAndGet();
            }
            lastTriggerMillis.set(curMillis);
        }
    };

    final ScheduledFuture<?> future = service.scheduleWithFixedDelay(command, 2000L, 2000L, TimeUnit.MILLISECONDS);

    final TimerTask timerTask = new TimerTask() {
        @Override
        public void run() {
            future.cancel(true);
            service.shutdownNow();

            if (occurrences.get() < minRequiredOccurrences || occurrencesOutOfRange.get() > maxOccurrencesOutOfRange) {
                LOGGER.warn("NiFi has detected that this box is not responding within the expected timing interval, which may cause "
                        + "Processors to be scheduled erratically. Please see the NiFi documentation for more information.");
            }
        }
    };
    final Timer timer = new Timer(true);
    timer.schedule(timerTask, 60000L);
}
 
Example 20
Source Project: reactor-core   File: SchedulersTest.java    License: Apache License 2.0
@Test
public void scanSupportBuffered() throws InterruptedException {
	Executor plain = Runnable::run;
	ExecutorService plainService = Executors.newSingleThreadExecutor();

	ExecutorService threadPool = Executors.newFixedThreadPool(3);
	ScheduledExecutorService scheduledThreadPool = Executors.newScheduledThreadPool(4);

	DelegateServiceScheduler.UnsupportedScheduledExecutorService unsupportedScheduledExecutorService =
			new DelegateServiceScheduler.UnsupportedScheduledExecutorService(threadPool);

	try {
		assertThat(Schedulers.scanExecutor(plain, Scannable.Attr.BUFFERED))
				.as("plain").isEqualTo(null);
		assertThat(Schedulers.scanExecutor(plainService, Scannable.Attr.BUFFERED))
				.as("plainService").isEqualTo(null);

		scheduledThreadPool.schedule(() -> {}, 500, TimeUnit.MILLISECONDS);
		scheduledThreadPool.schedule(() -> {}, 500, TimeUnit.MILLISECONDS);
		Thread.sleep(50); //give some leeway for the pool to have consistent accounting

		assertThat(Schedulers.scanExecutor(scheduledThreadPool, Scannable.Attr.BUFFERED))
				.as("scheduledThreadPool").isEqualTo(2);

		threadPool.submit(() -> {
			try { Thread.sleep(200); } catch (InterruptedException e) { e.printStackTrace(); }
		});

		assertThat(Schedulers.scanExecutor(threadPool, Scannable.Attr.BUFFERED))
				.as("threadPool").isEqualTo(1);
		assertThat(Schedulers.scanExecutor(unsupportedScheduledExecutorService, Scannable.Attr.BUFFERED))
				.as("unwrapped").isEqualTo(1);

		Thread.sleep(400);

		assertThat(Schedulers.scanExecutor(unsupportedScheduledExecutorService, Scannable.Attr.BUFFERED))
				.as("unwrapped after task").isEqualTo(0);
	}
	finally {
		plainService.shutdownNow();
		unsupportedScheduledExecutorService.shutdownNow();
		threadPool.shutdownNow();
		scheduledThreadPool.shutdownNow();
	}
}