Java Code Examples for org.elasticsearch.threadpool.ThreadPool#executor()
The following examples show how to use org.elasticsearch.threadpool.ThreadPool#executor().
You can go to the original project or source file by following the links above each example.
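Before the examples, here is a minimal sketch of the pattern they all share: ask the node's ThreadPool for a named executor and submit work to it. Only ThreadPool, ThreadPool.Names, and executor() below are the real Elasticsearch API; the MyService class and runAsync method are hypothetical names used for illustration.

import java.util.concurrent.Executor;
import org.elasticsearch.threadpool.ThreadPool;

// Hypothetical service illustrating the usage pattern.
public class MyService {

    private final Executor searchExecutor;

    public MyService(ThreadPool threadPool) {
        // executor(String) returns the Executor backing the named built-in pool.
        // Many callers cast the result to ExecutorService or ThreadPoolExecutor
        // when they need the richer interface (see Examples 1, 5, 8, 9, and 12).
        this.searchExecutor = threadPool.executor(ThreadPool.Names.SEARCH);
    }

    public void runAsync(Runnable task) {
        // Work submitted here runs on the shared "search" thread pool.
        searchExecutor.execute(task);
    }
}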
Example 1
Source File: ReferenceInfos.java From Elasticsearch with Apache License 2.0
@Inject
public ReferenceInfos(Map<String, SchemaInfo> builtInSchemas,
                      ClusterService clusterService,
                      IndexNameExpressionResolver indexNameExpressionResolver,
                      ThreadPool threadPool,
                      Provider<TransportPutIndexTemplateAction> transportPutIndexTemplateAction,
                      Functions functions) {
    this.clusterService = clusterService;
    this.indexNameExpressionResolver = indexNameExpressionResolver;
    this.transportPutIndexTemplateAction = transportPutIndexTemplateAction;
    this.functions = functions;
    this.executorService = (ExecutorService) threadPool.executor(ThreadPool.Names.SUGGEST);
    schemas.putAll(builtInSchemas);
    schemas.remove(BlobSchemaInfo.NAME); // remove blob schema name
    this.builtInSchemas = builtInSchemas;
    clusterService.add(this);
}
Example 2
Source File: PhasesTaskFactory.java From crate with Apache License 2.0
@Inject
public PhasesTaskFactory(ClusterService clusterService,
                         ThreadPool threadPool,
                         JobSetup jobSetup,
                         TasksService tasksService,
                         IndicesService indicesService,
                         TransportJobAction jobAction,
                         TransportKillJobsNodeAction killJobsNodeAction) {
    this.clusterService = clusterService;
    this.jobSetup = jobSetup;
    this.tasksService = tasksService;
    this.indicesService = indicesService;
    this.jobAction = jobAction;
    this.killJobsNodeAction = killJobsNodeAction;
    this.searchExecutor = threadPool.executor(ThreadPool.Names.SEARCH);
}
Example 3
Source File: ShardCollectService.java From Elasticsearch with Apache License 2.0
private CrateCollector getLuceneIndexCollector(ThreadPool threadPool,
                                               final RoutedCollectPhase collectPhase,
                                               final ShardProjectorChain projectorChain,
                                               final JobCollectContext jobCollectContext) throws Exception {
    SharedShardContext sharedShardContext = jobCollectContext.sharedShardContexts().getOrCreateContext(shardId);
    Engine.Searcher searcher = sharedShardContext.searcher();
    IndexShard indexShard = sharedShardContext.indexShard();
    CrateSearchContext searchContext = null;
    try {
        searchContext = searchContextFactory.createContext(
            sharedShardContext.readerId(),
            indexShard,
            searcher,
            collectPhase.whereClause()
        );
        jobCollectContext.addSearchContext(sharedShardContext.readerId(), searchContext);
        CollectInputSymbolVisitor.Context docCtx = docInputSymbolVisitor.extractImplementations(collectPhase);
        Executor executor = threadPool.executor(ThreadPool.Names.SEARCH);
        return new CrateDocCollector(
            searchContext,
            executor,
            Symbols.containsColumn(collectPhase.toCollect(), DocSysColumns.SCORE),
            jobCollectContext.queryPhaseRamAccountingContext(),
            projectorChain.newShardDownstreamProjector(projectorVisitor),
            docCtx.topLevelInputs(),
            docCtx.docLevelExpressions()
        );
    } catch (Throwable t) {
        if (searchContext == null) {
            searcher.close();
        } else {
            searchContext.close(); // will close searcher too
        }
        throw t;
    }
}
Example 4
Source File: DocSchemaInfo.java From Elasticsearch with Apache License 2.0
/**
 * DocSchemaInfo constructor for the default (doc) schema.
 */
@Inject
public DocSchemaInfo(ClusterService clusterService,
                     ThreadPool threadPool,
                     Provider<TransportPutIndexTemplateAction> transportPutIndexTemplateAction,
                     IndexNameExpressionResolver indexNameExpressionResolver,
                     Functions functions) {
    this(Schemas.DEFAULT_SCHEMA_NAME,
        clusterService,
        indexNameExpressionResolver,
        (ExecutorService) threadPool.executor(ThreadPool.Names.SUGGEST),
        transportPutIndexTemplateAction,
        functions,
        Predicates.and(Predicates.notNull(), DOC_SCHEMA_TABLES_FILTER),
        AS_IS_FUNCTION);
}
Example 5
Source File: InternalCountOperation.java From crate with Apache License 2.0
@Inject
public InternalCountOperation(Settings settings,
                              Schemas schemas,
                              LuceneQueryBuilder queryBuilder,
                              ClusterService clusterService,
                              ThreadPool threadPool,
                              IndicesService indicesService) {
    this.schemas = schemas;
    this.queryBuilder = queryBuilder;
    this.clusterService = clusterService;
    executor = (ThreadPoolExecutor) threadPool.executor(ThreadPool.Names.SEARCH);
    this.indicesService = indicesService;
    this.numProcessors = EsExecutors.numberOfProcessors(settings);
}
Example 6
Source File: RemoteCollectorFactory.java From crate with Apache License 2.0
@Inject
public RemoteCollectorFactory(ClusterService clusterService,
                              TasksService tasksService,
                              TransportActionProvider transportActionProvider,
                              IndicesService indicesService,
                              ThreadPool threadPool) {
    this.clusterService = clusterService;
    this.tasksService = tasksService;
    this.transportActionProvider = transportActionProvider;
    this.indicesService = indicesService;
    searchTp = threadPool.executor(ThreadPool.Names.SEARCH);
}
Example 7
Source File: DistributingConsumerFactory.java From crate with Apache License 2.0
@Inject
public DistributingConsumerFactory(ClusterService clusterService,
                                   ThreadPool threadPool,
                                   TransportDistributedResultAction transportDistributedResultAction) {
    this.clusterService = clusterService;
    this.responseExecutor = threadPool.executor(RESPONSE_EXECUTOR_NAME);
    this.transportDistributedResultAction = transportDistributedResultAction;
    distributingDownstreamLogger = LogManager.getLogger(DistributingConsumer.class);
}
Example 8
Source File: TransportFetchNodeAction.java From crate with Apache License 2.0
@Inject
public TransportFetchNodeAction(Settings settings,
                                TransportService transportService,
                                Transports transports,
                                ThreadPool threadPool,
                                JobsLogs jobsLogs,
                                TasksService tasksService,
                                CircuitBreakerService circuitBreakerService) {
    this.transports = transports;
    this.nodeFetchOperation = new NodeFetchOperation(
        (ThreadPoolExecutor) threadPool.executor(ThreadPool.Names.SEARCH),
        EsExecutors.numberOfProcessors(settings),
        jobsLogs,
        tasksService,
        circuitBreakerService.getBreaker(HierarchyCircuitBreakerService.QUERY)
    );
    transportService.registerRequestHandler(
        TRANSPORT_ACTION,
        NodeFetchRequest::new,
        EXECUTOR_NAME,
        // force execution because this handler might receive empty close requests which
        // need to be processed to not leak the FetchTask.
        // This shouldn't cause too much of an issue because fetch requests always happen after a query phase.
        // If the threadPool is overloaded the query phase would fail first.
        true,
        false,
        new NodeActionRequestHandler<>(this)
    );
}
Example 9
Source File: NodeThreadPoolExpression.java From Elasticsearch with Apache License 2.0
public NodeThreadPoolExpression(ThreadPool threadPool, String name) {
    this.threadPoolExecutor = (ThreadPoolExecutor) threadPool.executor(name);
    this.name = new BytesRef(name);
    addChildImplementations();
}
Example 10
Source File: SearchService.java From Elasticsearch with Apache License 2.0
public TerminationHandle internalWarm(final IndexShard indexShard,
                                      final IndexMetaData indexMetaData,
                                      final IndicesWarmer.WarmerContext warmerContext,
                                      ThreadPool threadPool,
                                      final boolean top) {
    IndexWarmersMetaData custom = indexMetaData.custom(IndexWarmersMetaData.TYPE);
    if (custom == null) {
        return TerminationHandle.NO_WAIT;
    }
    final Executor executor = threadPool.executor(executor());
    final CountDownLatch latch = new CountDownLatch(custom.entries().size());
    for (final IndexWarmersMetaData.Entry entry : custom.entries()) {
        executor.execute(new Runnable() {
            @Override
            public void run() {
                SearchContext context = null;
                try {
                    long now = System.nanoTime();
                    ShardSearchRequest request = new ShardSearchLocalRequest(indexShard.shardId(),
                        indexMetaData.getNumberOfShards(),
                        SearchType.QUERY_THEN_FETCH,
                        entry.source(), entry.types(), entry.requestCache());
                    context = createContext(request, warmerContext.searcher());
                    // if we use sort, we need to do query to sort on it and load relevant field data
                    // if not, we might as well set size=0 (and cache if needed)
                    if (context.sort() == null) {
                        context.size(0);
                    }
                    boolean canCache = indicesQueryCache.canCache(request, context);
                    // early terminate when we can cache, since we can only do proper caching on top level searcher
                    // also, if we can't cache, and its top, we don't need to execute it, since we already did when its not top
                    if (canCache != top) {
                        return;
                    }
                    loadOrExecuteQueryPhase(request, context, queryPhase);
                    long took = System.nanoTime() - now;
                    if (indexShard.warmerService().logger().isTraceEnabled()) {
                        indexShard.warmerService().logger().trace("warmed [{}], took [{}]",
                            entry.name(), TimeValue.timeValueNanos(took));
                    }
                } catch (Throwable t) {
                    indexShard.warmerService().logger().warn("warmer [{}] failed", t, entry.name());
                } finally {
                    try {
                        if (context != null) {
                            freeContext(context.id());
                            cleanContext(context);
                        }
                    } finally {
                        latch.countDown();
                    }
                }
            }
        });
    }
    return new TerminationHandle() {
        @Override
        public void awaitTermination() throws InterruptedException {
            latch.await();
        }
    };
}
Example 11
Source File: BitsetFilterCache.java From Elasticsearch with Apache License 2.0
@Override
public IndicesWarmer.TerminationHandle warmNewReaders(final IndexShard indexShard,
                                                      IndexMetaData indexMetaData,
                                                      IndicesWarmer.WarmerContext context,
                                                      ThreadPool threadPool) {
    if (index.getName().equals(context.shardId().getIndex()) == false) {
        // this is from a different index
        return TerminationHandle.NO_WAIT;
    }
    if (!loadRandomAccessFiltersEagerly) {
        return TerminationHandle.NO_WAIT;
    }
    boolean hasNested = false;
    final Set<Query> warmUp = new HashSet<>();
    final MapperService mapperService = indexShard.mapperService();
    for (DocumentMapper docMapper : mapperService.docMappers(false)) {
        if (docMapper.hasNestedObjects()) {
            hasNested = true;
            for (ObjectMapper objectMapper : docMapper.objectMappers().values()) {
                if (objectMapper.nested().isNested()) {
                    ObjectMapper parentObjectMapper = docMapper.findParentObjectMapper(objectMapper);
                    if (parentObjectMapper != null && parentObjectMapper.nested().isNested()) {
                        warmUp.add(parentObjectMapper.nestedTypeFilter());
                    }
                }
            }
        }
    }
    if (hasNested) {
        warmUp.add(Queries.newNonNestedFilter());
    }
    final Executor executor = threadPool.executor(executor());
    final CountDownLatch latch = new CountDownLatch(context.searcher().reader().leaves().size() * warmUp.size());
    for (final LeafReaderContext ctx : context.searcher().reader().leaves()) {
        for (final Query filterToWarm : warmUp) {
            executor.execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        final long start = System.nanoTime();
                        getAndLoadIfNotPresent(filterToWarm, ctx);
                        if (indexShard.warmerService().logger().isTraceEnabled()) {
                            indexShard.warmerService().logger().trace("warmed bitset for [{}], took [{}]",
                                filterToWarm, TimeValue.timeValueNanos(System.nanoTime() - start));
                        }
                    } catch (Throwable t) {
                        indexShard.warmerService().logger().warn("failed to load bitset for [{}]", t, filterToWarm);
                    } finally {
                        latch.countDown();
                    }
                }
            });
        }
    }
    return new TerminationHandle() {
        @Override
        public void awaitTermination() throws InterruptedException {
            latch.await();
        }
    };
}
Example 12
Source File: ShardCollectSource.java From crate with Apache License 2.0
@Inject
public ShardCollectSource(Settings settings,
                          Schemas schemas,
                          IndicesService indicesService,
                          Functions functions,
                          ClusterService clusterService,
                          NodeJobsCounter nodeJobsCounter,
                          LuceneQueryBuilder luceneQueryBuilder,
                          ThreadPool threadPool,
                          TransportActionProvider transportActionProvider,
                          RemoteCollectorFactory remoteCollectorFactory,
                          SystemCollectSource systemCollectSource,
                          IndexEventListenerProxy indexEventListenerProxy,
                          BlobIndicesService blobIndicesService,
                          PageCacheRecycler pageCacheRecycler,
                          CircuitBreakerService circuitBreakerService) {
    this.unassignedShardReferenceResolver = new StaticTableReferenceResolver<>(
        SysShardsTableInfo.unassignedShardsExpressions());
    this.shardReferenceResolver = new StaticTableReferenceResolver<>(SysShardsTableInfo.create().expressions());
    this.indicesService = indicesService;
    this.clusterService = clusterService;
    this.remoteCollectorFactory = remoteCollectorFactory;
    ThreadPoolExecutor executor = (ThreadPoolExecutor) threadPool.executor(ThreadPool.Names.SEARCH);
    this.availableThreads = numIdleThreads(executor, EsExecutors.numberOfProcessors(settings));
    this.executor = executor;
    this.inputFactory = new InputFactory(functions);
    BigArrays bigArrays = new BigArrays(pageCacheRecycler, circuitBreakerService, HierarchyCircuitBreakerService.QUERY, true);
    this.shardCollectorProviderFactory = new ShardCollectorProviderFactory(
        clusterService,
        settings,
        schemas,
        threadPool,
        transportActionProvider,
        blobIndicesService,
        functions,
        luceneQueryBuilder,
        nodeJobsCounter,
        bigArrays);
    EvaluatingNormalizer nodeNormalizer = new EvaluatingNormalizer(
        functions,
        RowGranularity.DOC,
        new MapBackedRefResolver(Collections.emptyMap()),
        null);
    sharedProjectorFactory = new ProjectionToProjectorVisitor(
        clusterService,
        nodeJobsCounter,
        functions,
        threadPool,
        settings,
        transportActionProvider,
        inputFactory,
        nodeNormalizer,
        systemCollectSource::getRowUpdater,
        systemCollectSource::tableDefinition
    );
    indexEventListenerProxy.addLast(new LifecycleListener());
}
Example 13
Source File: JobSetup.java From crate with Apache License 2.0
@Inject
public JobSetup(Settings settings,
                Schemas schemas,
                MapSideDataCollectOperation collectOperation,
                ClusterService clusterService,
                NodeJobsCounter nodeJobsCounter,
                CircuitBreakerService circuitBreakerService,
                CountOperation countOperation,
                ThreadPool threadPool,
                DistributingConsumerFactory distributingConsumerFactory,
                TransportActionProvider transportActionProvider,
                IndicesService indicesService,
                Functions functions,
                SystemCollectSource systemCollectSource,
                ShardCollectSource shardCollectSource,
                MemoryManagerFactory memoryManagerFactory) {
    this.nodeName = Node.NODE_NAME_SETTING.get(settings);
    this.schemas = schemas;
    this.collectOperation = collectOperation;
    this.clusterService = clusterService;
    this.circuitBreakerService = circuitBreakerService;
    this.countOperation = countOperation;
    this.memoryManagerFactory = memoryManagerFactory;
    this.pkLookupOperation = new PKLookupOperation(indicesService, shardCollectSource);
    this.distributingConsumerFactory = distributingConsumerFactory;
    innerPreparer = new InnerPreparer();
    inputFactory = new InputFactory(functions);
    searchTp = threadPool.executor(ThreadPool.Names.SEARCH);
    EvaluatingNormalizer normalizer = EvaluatingNormalizer.functionOnlyNormalizer(functions);
    this.projectorFactory = new ProjectionToProjectorVisitor(
        clusterService,
        nodeJobsCounter,
        functions,
        threadPool,
        settings,
        transportActionProvider,
        inputFactory,
        normalizer,
        systemCollectSource::getRowUpdater,
        systemCollectSource::tableDefinition
    );
}
Example 14
Source File: IndexWriterProjectorTest.java From crate with Apache License 2.0
@Test
public void testIndexWriter() throws Throwable {
    execute("create table bulk_import (id int primary key, name string) with (number_of_replicas=0)");
    ensureGreen();
    InputCollectExpression sourceInput = new InputCollectExpression(1);
    List<CollectExpression<Row, ?>> collectExpressions = Collections.<CollectExpression<Row, ?>>singletonList(sourceInput);
    RelationName bulkImportIdent = new RelationName(sqlExecutor.getCurrentSchema(), "bulk_import");
    ClusterState state = clusterService().state();
    Settings tableSettings = TableSettingsResolver.get(state.getMetaData(), bulkImportIdent, false);
    ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class);
    IndexWriterProjector writerProjector = new IndexWriterProjector(
        clusterService(),
        new NodeJobsCounter(),
        threadPool.scheduler(),
        threadPool.executor(ThreadPool.Names.SEARCH),
        CoordinatorTxnCtx.systemTransactionContext(),
        internalCluster().getInstance(Functions.class),
        Settings.EMPTY,
        IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(tableSettings),
        NumberOfReplicas.fromSettings(tableSettings, state.getNodes().getSize()),
        internalCluster().getInstance(TransportCreatePartitionsAction.class),
        internalCluster().getInstance(TransportShardUpsertAction.class)::execute,
        IndexNameResolver.forTable(bulkImportIdent),
        new Reference(new ReferenceIdent(bulkImportIdent, DocSysColumns.RAW), RowGranularity.DOC, DataTypes.STRING, null, null),
        Collections.singletonList(ID_IDENT),
        Collections.<Symbol>singletonList(new InputColumn(0)),
        null,
        null,
        sourceInput,
        collectExpressions,
        20,
        null,
        null,
        false,
        false,
        UUID.randomUUID(),
        UpsertResultContext.forRowCount()
    );
    BatchIterator rowsIterator = InMemoryBatchIterator.of(
        IntStream.range(0, 100)
            .mapToObj(i -> new RowN(new Object[]{i, "{\"id\": " + i + ", \"name\": \"Arthur\"}"}))
            .collect(Collectors.toList()),
        SENTINEL,
        true);
    TestingRowConsumer consumer = new TestingRowConsumer();
    consumer.accept(writerProjector.apply(rowsIterator), null);
    Bucket objects = consumer.getBucket();
    assertThat(objects, contains(isRow(100L)));
    execute("refresh table bulk_import");
    execute("select count(*) from bulk_import");
    assertThat(response.rowCount(), is(1L));
    assertThat(response.rows()[0][0], is(100L));
}