Java Code Examples for org.elasticsearch.threadpool.ThreadPool#schedule()

The following examples show how to use org.elasticsearch.threadpool.ThreadPool#schedule(). Each example is taken from the project and source file named above it.
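In these versions, schedule() runs a task once, after the given delay, on the named executor; it does not repeat on its own. Note that the argument order differs across the projects below: the older Elasticsearch snippets (Examples 1-6 and 8) call schedule(delay, executorName, task) and keep the returned future, while the crate snippets (Examples 7 and 9) call schedule(task, delay, executorName) and receive a Scheduler.Cancellable. The following is a minimal sketch against the newer signature; the ScheduledCleanerSketch class and its methods are illustrative only and not part of either project.

import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.threadpool.Scheduler;
import org.elasticsearch.threadpool.ThreadPool;

// Hypothetical component; assumes the newer schedule(Runnable, TimeValue, String) overload.
class ScheduledCleanerSketch {
    private final ThreadPool threadPool;
    private volatile Scheduler.Cancellable scheduled;

    ScheduledCleanerSketch(ThreadPool threadPool) {
        this.threadPool = threadPool;
    }

    void start() {
        // Run the task once, after one minute, on the generic executor.
        scheduled = threadPool.schedule(
            this::cleanOnce,
            TimeValue.timeValueMinutes(1),
            ThreadPool.Names.GENERIC);
    }

    void stop() {
        Scheduler.Cancellable current = scheduled;
        if (current != null) {
            current.cancel(); // safe to call even if the task has already run
        }
    }

    private void cleanOnce() {
        // ... work elided ...
    }
}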
Example 1
Source File: TranslogService.java    From Elasticsearch with Apache License 2.0
@Inject
public TranslogService(ShardId shardId, IndexSettingsService indexSettingsService, ThreadPool threadPool, IndexShard indexShard) {
    super(shardId, indexSettingsService.getSettings());
    this.threadPool = threadPool;
    this.indexSettingsService = indexSettingsService;
    this.indexShard = indexShard;
    this.flushThresholdOperations = indexSettings.getAsInt(INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS, indexSettings.getAsInt("index.translog.flush_threshold", 50000));
    this.flushThresholdSize = indexSettings.getAsBytesSize(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(100, ByteSizeUnit.MB));
    this.flushThresholdPeriod = indexSettings.getAsTime(INDEX_TRANSLOG_FLUSH_THRESHOLD_PERIOD, TimeValue.timeValueMinutes(10));
    this.interval = indexSettings.getAsTime(INDEX_TRANSLOG_FLUSH_INTERVAL, timeValueMillis(5000));
    this.disableFlush = indexSettings.getAsBoolean(INDEX_TRANSLOG_DISABLE_FLUSH, false);
    logger.debug("interval [{}], flush_threshold_ops [{}], flush_threshold_size [{}], flush_threshold_period [{}]", interval, flushThresholdOperations, flushThresholdSize, flushThresholdPeriod);

    this.future = threadPool.schedule(interval, ThreadPool.Names.SAME, new TranslogBasedFlush());

    indexSettingsService.addListener(applySettings);
}
 
Example 2
Source File: Translog.java    From Elasticsearch with Apache License 2.0
@Override
public void run() {
    // don't re-schedule if it's closed..., we are done
    if (closed.get()) {
        return;
    }
    final ThreadPool threadPool = config.getThreadPool();
    if (syncNeeded()) {
        threadPool.executor(ThreadPool.Names.FLUSH).execute(new Runnable() {
            @Override
            public void run() {
                try {
                    sync();
                } catch (Exception e) {
                    logger.warn("failed to sync translog", e);
                }
                if (closed.get() == false) {
                    syncScheduler = threadPool.schedule(config.getSyncInterval(), ThreadPool.Names.SAME, Sync.this);
                }
            }
        });
    } else {
        syncScheduler = threadPool.schedule(config.getSyncInterval(), ThreadPool.Names.SAME, Sync.this);
    }
}
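Because schedule() fires only once, the example above implements a periodic job by re-scheduling itself at the end of each run, guarded by a closed flag. Below is a condensed, hypothetical sketch of the same idiom, assuming the older schedule(TimeValue, String, Runnable) overload used here; the PeriodicWorker class and doWorkOnce() are illustrative only.

import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicBoolean;

import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.threadpool.ThreadPool;

// Hypothetical periodic task mirroring the re-scheduling idiom above;
// assumes the older schedule(TimeValue, String, Runnable) overload.
class PeriodicWorker implements Runnable {
    private final ThreadPool threadPool;
    private final TimeValue interval;
    private final AtomicBoolean closed;
    private volatile ScheduledFuture<?> future;

    PeriodicWorker(ThreadPool threadPool, TimeValue interval, AtomicBoolean closed) {
        this.threadPool = threadPool;
        this.interval = interval;
        this.closed = closed;
    }

    @Override
    public void run() {
        if (closed.get()) {
            return; // owner was closed, stop the chain
        }
        doWorkOnce();
        if (closed.get() == false) {
            // schedule() fires only once, so re-schedule ourselves for the next round
            future = threadPool.schedule(interval, ThreadPool.Names.SAME, this);
        }
    }

    private void doWorkOnce() {
        // ... the actual periodic work elided ...
    }
}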
 
Example 3
Source File: TransportClientNodesService.java    From Elasticsearch with Apache License 2.0
@Inject
public TransportClientNodesService(Settings settings, ClusterName clusterName, TransportService transportService,
                                   ThreadPool threadPool, Headers headers, Version version) {
    super(settings);
    this.clusterName = clusterName;
    this.transportService = transportService;
    this.threadPool = threadPool;
    this.minCompatibilityVersion = version.minimumCompatibilityVersion();
    this.headers = headers;

    this.nodesSamplerInterval = this.settings.getAsTime("client.transport.nodes_sampler_interval", timeValueSeconds(5));
    this.pingTimeout = this.settings.getAsTime("client.transport.ping_timeout", timeValueSeconds(5)).millis();
    this.ignoreClusterName = this.settings.getAsBoolean("client.transport.ignore_cluster_name", false);

    if (logger.isDebugEnabled()) {
        logger.debug("node_sampler_interval[" + nodesSamplerInterval + "]");
    }

    if (this.settings.getAsBoolean("client.transport.sniff", false)) {
        this.nodesSampler = new SniffNodesSampler();
    } else {
        this.nodesSampler = new SimpleNodeSampler();
    }
    this.nodesSamplerFuture = threadPool.schedule(nodesSamplerInterval, ThreadPool.Names.GENERIC, new ScheduledNodeSampler());
}
 
Example 4
Source File: InternalClusterService.java    From Elasticsearch with Apache License 2.0
AckCountDownListener(AckedClusterStateTaskListener ackedTaskListener, long clusterStateVersion, DiscoveryNodes nodes, ThreadPool threadPool) {
    this.ackedTaskListener = ackedTaskListener;
    this.clusterStateVersion = clusterStateVersion;
    this.nodes = nodes;
    int countDown = 0;
    for (DiscoveryNode node : nodes) {
        if (ackedTaskListener.mustAck(node)) {
            countDown++;
        }
    }
    //we always wait for at least 1 node (the master)
    countDown = Math.max(1, countDown);
    logger.trace("expecting {} acknowledgements for cluster_state update (version: {})", countDown, clusterStateVersion);
    this.countDown = new CountDown(countDown);
    this.ackTimeoutCallback = threadPool.schedule(ackedTaskListener.ackTimeout(), ThreadPool.Names.GENERIC, new Runnable() {
        @Override
        public void run() {
            onTimeout();
        }
    });
}
 
Example 5
Source File: IndicesFieldDataCache.java    From Elasticsearch with Apache License 2.0
@Inject
public IndicesFieldDataCache(Settings settings, IndicesFieldDataCacheListener indicesFieldDataCacheListener, ThreadPool threadPool) {
    super(settings);
    this.threadPool = threadPool;
    this.indicesFieldDataCacheListener = indicesFieldDataCacheListener;
    final String size = settings.get(INDICES_FIELDDATA_CACHE_SIZE_KEY, "-1");
    final long sizeInBytes = settings.getAsMemory(INDICES_FIELDDATA_CACHE_SIZE_KEY, "-1").bytes();
    CacheBuilder<Key, Accountable> cacheBuilder = CacheBuilder.newBuilder()
            .removalListener(this);
    if (sizeInBytes > 0) {
        cacheBuilder.maximumWeight(sizeInBytes).weigher(new FieldDataWeigher());
    }
    // defaults to 4, but this is a busy map for all indices, increase it a bit by default
    final int concurrencyLevel =  settings.getAsInt(FIELDDATA_CACHE_CONCURRENCY_LEVEL, 16);
    if (concurrencyLevel <= 0) {
        throw new IllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel);
    }
    cacheBuilder.concurrencyLevel(concurrencyLevel);

    logger.debug("using size [{}] [{}]", size, new ByteSizeValue(sizeInBytes));
    cache = cacheBuilder.build();

    this.cleanInterval = settings.getAsTime(FIELDDATA_CLEAN_INTERVAL_SETTING, TimeValue.timeValueMinutes(1));
    // Start thread that will manage cleaning the field data cache periodically
    threadPool.schedule(this.cleanInterval, ThreadPool.Names.SAME,
            new FieldDataCacheCleaner(this.cache, this.logger, this.threadPool, this.cleanInterval));
}
 
Example 6
Source File: IndicesRequestCache.java    From Elasticsearch with Apache License 2.0
@Inject
public IndicesRequestCache(Settings settings, ClusterService clusterService, ThreadPool threadPool) {
    super(settings);
    this.clusterService = clusterService;
    this.threadPool = threadPool;
    this.cleanInterval = settings.getAsTime(INDICES_CACHE_REQUEST_CLEAN_INTERVAL, TimeValue.timeValueSeconds(60));

    String size = settings.get(INDICES_CACHE_QUERY_SIZE);
    if (size == null) {
        size = settings.get(DEPRECATED_INDICES_CACHE_QUERY_SIZE);
        if (size != null) {
            deprecationLogger.deprecated("The [" + DEPRECATED_INDICES_CACHE_QUERY_SIZE
                    + "] settings is now deprecated, use [" + INDICES_CACHE_QUERY_SIZE + "] instead");
        }
    }
    if (size == null) {
        // this cache can be very small yet still be very effective
        size = "1%";
    }
    this.size = size;

    this.expire = settings.getAsTime(INDICES_CACHE_QUERY_EXPIRE, null);
    // defaults to 4, but this is a busy map for all indices, increase it a bit by default
    this.concurrencyLevel =  settings.getAsInt(INDICES_CACHE_QUERY_CONCURRENCY_LEVEL, 16);
    if (concurrencyLevel <= 0) {
        throw new IllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel);
    }
    buildCache();

    this.reaper = new Reaper();
    threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, reaper);
}
 
Example 7
Source File: ConnectionManager.java    From crate with Apache License 2.0
public ConnectionManager(Settings settings, Transport transport, ThreadPool threadPool, ConnectionProfile defaultProfile) {
    this.transport = transport;
    this.threadPool = threadPool;
    this.pingSchedule = TransportSettings.PING_SCHEDULE.get(settings);
    this.defaultProfile = defaultProfile;
    this.lifecycle.moveToStarted();

    if (pingSchedule.millis() > 0) {
        threadPool.schedule(new ScheduledPing(), pingSchedule, ThreadPool.Names.GENERIC);
    }
}
 
Example 8
Source File: NettyTransport.java    From Elasticsearch with Apache License 2.0
@Inject
public NettyTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, Version version, NamedWriteableRegistry namedWriteableRegistry) {
    super(settings);
    this.threadPool = threadPool;
    this.networkService = networkService;
    this.bigArrays = bigArrays;
    this.version = version;

    if (settings.getAsBoolean("netty.epollBugWorkaround", false)) {
        System.setProperty("org.jboss.netty.epollBugWorkaround", "true");
    }

    this.workerCount = settings.getAsInt(WORKER_COUNT, EsExecutors.boundedNumberOfProcessors(settings) * 2);
    this.blockingClient = settings.getAsBoolean("transport.netty.transport.tcp.blocking_client", settings.getAsBoolean(TCP_BLOCKING_CLIENT, settings.getAsBoolean(TCP_BLOCKING, false)));
    this.connectTimeout = this.settings.getAsTime("transport.netty.connect_timeout", settings.getAsTime("transport.tcp.connect_timeout", settings.getAsTime(TCP_CONNECT_TIMEOUT, TCP_DEFAULT_CONNECT_TIMEOUT)));
    this.maxCumulationBufferCapacity = this.settings.getAsBytesSize("transport.netty.max_cumulation_buffer_capacity", null);
    this.maxCompositeBufferComponents = this.settings.getAsInt("transport.netty.max_composite_buffer_components", -1);
    this.compress = settings.getAsBoolean(TransportSettings.TRANSPORT_TCP_COMPRESS, false);

    this.connectionsPerNodeRecovery = this.settings.getAsInt("transport.netty.connections_per_node.recovery", settings.getAsInt(CONNECTIONS_PER_NODE_RECOVERY, 2));
    this.connectionsPerNodeBulk = this.settings.getAsInt("transport.netty.connections_per_node.bulk", settings.getAsInt(CONNECTIONS_PER_NODE_BULK, 3));
    this.connectionsPerNodeReg = this.settings.getAsInt("transport.netty.connections_per_node.reg", settings.getAsInt(CONNECTIONS_PER_NODE_REG, 6));
    this.connectionsPerNodeState = this.settings.getAsInt("transport.netty.connections_per_node.high", settings.getAsInt(CONNECTIONS_PER_NODE_STATE, 1));
    this.connectionsPerNodePing = this.settings.getAsInt("transport.netty.connections_per_node.ping", settings.getAsInt(CONNECTIONS_PER_NODE_PING, 1));

    // we want to have at least 1 for reg/state/ping
    if (this.connectionsPerNodeReg == 0) {
        throw new IllegalArgumentException("can't set [connection_per_node.reg] to 0");
    }
    if (this.connectionsPerNodePing == 0) {
        throw new IllegalArgumentException("can't set [connection_per_node.ping] to 0");
    }
    if (this.connectionsPerNodeState == 0) {
        throw new IllegalArgumentException("can't set [connection_per_node.state] to 0");
    }

    long defaultReceiverPredictor = 512 * 1024;
    if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) {
        // we can guess a better default...
        long l = (long) ((0.3 * JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes()) / workerCount);
        defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024));
    }

    // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one
    this.receivePredictorMin = this.settings.getAsBytesSize("transport.netty.receive_predictor_min", this.settings.getAsBytesSize("transport.netty.receive_predictor_size", new ByteSizeValue(defaultReceiverPredictor)));
    this.receivePredictorMax = this.settings.getAsBytesSize("transport.netty.receive_predictor_max", this.settings.getAsBytesSize("transport.netty.receive_predictor_size", new ByteSizeValue(defaultReceiverPredictor)));
    if (receivePredictorMax.bytes() == receivePredictorMin.bytes()) {
        receiveBufferSizePredictorFactory = new FixedReceiveBufferSizePredictorFactory((int) receivePredictorMax.bytes());
    } else {
        receiveBufferSizePredictorFactory = new AdaptiveReceiveBufferSizePredictorFactory((int) receivePredictorMin.bytes(), (int) receivePredictorMin.bytes(), (int) receivePredictorMax.bytes());
    }

    this.scheduledPing = new ScheduledPing();
    this.pingSchedule = settings.getAsTime(PING_SCHEDULE, DEFAULT_PING_SCHEDULE);
    if (pingSchedule.millis() > 0) {
        threadPool.schedule(pingSchedule, ThreadPool.Names.GENERIC, scheduledPing);
    }
    this.namedWriteableRegistry = namedWriteableRegistry;
}
 
Example 9
Source File: DeterministicTaskQueueTests.java    From crate with Apache License 2.0
public void testThreadPoolSchedulesFutureTasks() {
    final DeterministicTaskQueue taskQueue = newTaskQueue();
    advanceToRandomTime(taskQueue);
    final long startTime = taskQueue.getCurrentTimeMillis();

    final List<String> strings = new ArrayList<>(5);

    final ThreadPool threadPool = taskQueue.getThreadPool();
    final long delayMillis = randomLongBetween(1, 100);

    threadPool.schedule(() -> strings.add("deferred"), TimeValue.timeValueMillis(delayMillis), GENERIC);
    assertFalse(taskQueue.hasRunnableTasks());
    assertTrue(taskQueue.hasDeferredTasks());

    threadPool.schedule(() -> strings.add("runnable"), TimeValue.ZERO, GENERIC);
    assertTrue(taskQueue.hasRunnableTasks());

    threadPool.schedule(() -> strings.add("also runnable"), TimeValue.MINUS_ONE, GENERIC);

    taskQueue.runAllTasks();

    assertThat(taskQueue.getCurrentTimeMillis(), is(startTime + delayMillis));
    assertThat(strings, containsInAnyOrder("runnable", "also runnable", "deferred"));

    final long delayMillis1 = randomLongBetween(2, 100);
    final long delayMillis2 = randomLongBetween(1, delayMillis1 - 1);

    threadPool.schedule(() -> strings.add("further deferred"), TimeValue.timeValueMillis(delayMillis1), GENERIC);
    threadPool.schedule(() -> strings.add("not quite so deferred"), TimeValue.timeValueMillis(delayMillis2), GENERIC);

    assertFalse(taskQueue.hasRunnableTasks());
    assertTrue(taskQueue.hasDeferredTasks());

    taskQueue.runAllTasks();
    assertThat(taskQueue.getCurrentTimeMillis(), is(startTime + delayMillis + delayMillis1));

    final TimeValue cancelledDelay = TimeValue.timeValueMillis(randomLongBetween(1, 100));
    final Scheduler.Cancellable cancelledBeforeExecution =
        threadPool.schedule(() -> strings.add("cancelled before execution"), cancelledDelay, "");

    cancelledBeforeExecution.cancel();
    taskQueue.runAllTasks();

    assertThat(strings, containsInAnyOrder("runnable", "also runnable", "deferred", "not quite so deferred", "further deferred"));
}