Java Code Examples for org.elasticsearch.common.logging.ESLogger

The following examples show how to use org.elasticsearch.common.logging.ESLogger. These examples are extracted from open-source projects. You can vote up the examples you find useful or vote down those you don't, and follow the links above each example to visit the original project or source file. You may also check out the related API usage on the sidebar.
Example 1
Source Project: Elasticsearch   Source File: PageDownstreamContext.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates a context that tracks {@code numBuckets} upstream buckets per page
 * and feeds them to the given page downstream.
 *
 * @param logger               logger forwarded to the superclass
 * @param nodeName             name of the local node
 * @param id                   execution context id, forwarded to the superclass
 * @param name                 human-readable name of this context
 * @param pageDownstream       receiver of the collected pages
 * @param streamer             streamers for (de)serializing bucket rows
 * @param ramAccountingContext memory accounting for this context
 * @param numBuckets           number of buckets expected per page; sizes all
 *                             the per-bucket bookkeeping below
 * @param projectorChain       optional projector chain; may be null
 */
public PageDownstreamContext(ESLogger logger,
                             String nodeName,
                             int id,
                             String name,
                             PageDownstream pageDownstream,
                             Streamer<?>[] streamer,
                             RamAccountingContext ramAccountingContext,
                             int numBuckets,
                             @Nullable FlatProjectorChain projectorChain) {
    super(id, logger);
    this.nodeName = nodeName;
    this.name = name;
    this.pageDownstream = pageDownstream;
    this.streamer = streamer;
    this.ramAccountingContext = ramAccountingContext;
    this.numBuckets = numBuckets;
    this.projectorChain = projectorChain;
    // one future / flag bit per expected bucket
    bucketFutures = new ArrayList<>(numBuckets);
    allFuturesSet = new BitSet(numBuckets);
    exhausted = new BitSet(numBuckets);
    initBucketFutures();
}
 
Example 2
Source Project: Elasticsearch   Source File: ScriptParameterParser.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates a parser for script request parameters. Without explicit parameter
 * names the built-in script/script_file/script_id fields are used; otherwise
 * an inline, file and indexed variant is derived from every supplied name.
 *
 * @param parameterNames custom parameter names, or null/empty for the defaults
 * @throws IllegalArgumentException if "lang" is supplied as a parameter name
 */
public ScriptParameterParser(Set<String> parameterNames) {
    deprecationLogger = new DeprecationLogger(Loggers.getLogger(getClass()));
    if (parameterNames == null || parameterNames.isEmpty()) {
        // No custom names: fall back to the standard script parameter fields.
        inlineParameters = Collections.singleton(ScriptService.SCRIPT_INLINE);
        fileParameters = Collections.singleton(ScriptService.SCRIPT_FILE);
        indexedParameters = Collections.singleton(ScriptService.SCRIPT_ID);
        return;
    }
    inlineParameters = new HashSet<>();
    fileParameters = new HashSet<>();
    indexedParameters = new HashSet<>();
    for (String parameterName : parameterNames) {
        // "lang" selects the script language and must not be shadowed.
        if (ParseFieldMatcher.EMPTY.match(parameterName, ScriptService.SCRIPT_LANG)) {
            throw new IllegalArgumentException("lang is reserved and cannot be used as a parameter name");
        }
        inlineParameters.add(new ParseField(parameterName));
        fileParameters.add(new ParseField(parameterName + FILE_SUFFIX));
        indexedParameters.add(new ParseField(parameterName + INDEXED_SUFFIX));
    }
}
 
Example 3
Source Project: Elasticsearch   Source File: InternalClusterInfoService.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Fills the given maps with per-shard size and data-path information taken
 * from the supplied shard stats. Shards of indices using shadow replicas
 * (shared filesystem) are recorded with size 0.
 *
 * @param logger                     trace logging of per-shard sizes
 * @param stats                      shard stats to read from
 * @param newShardSizes              out: shard identifier -> size in bytes
 * @param newShardRoutingToDataPath  out: shard routing -> data path
 * @param state                      cluster state used to look up index metadata
 */
static void buildShardLevelInfo(ESLogger logger, ShardStats[] stats, HashMap<String, Long> newShardSizes, HashMap<ShardRouting, String> newShardRoutingToDataPath, ClusterState state) {
    final MetaData metaData = state.getMetaData();
    for (ShardStats shardStats : stats) {
        final ShardRouting routing = shardStats.getShardRouting();
        newShardRoutingToDataPath.put(routing, shardStats.getDataPath());

        final String shardIdentifier = ClusterInfo.shardIdentifierFromRouting(routing);
        long sizeInBytes = shardStats.getStats().getStore().sizeInBytes();
        if (logger.isTraceEnabled()) {
            logger.trace("shard: {} size: {}", shardIdentifier, sizeInBytes);
        }

        final IndexMetaData indexMeta = metaData.index(routing.index());
        final Settings indexSettings = indexMeta == null ? null : indexMeta.getSettings();
        if (indexSettings != null && IndexMetaData.isIndexUsingShadowReplicas(indexSettings)) {
            // Shards on a shared filesystem should be considered of size 0
            if (logger.isTraceEnabled()) {
                logger.trace("shard: {} is using shadow replicas and will be treated as size 0", shardIdentifier);
            }
            sizeInBytes = 0;
        }
        newShardSizes.put(shardIdentifier, sizeInBytes);
    }
}
 
Example 4
/**
 * Extracts the source principal name from an established GSS context,
 * optionally stripping the realm suffix via {@code stripRealmName}.
 *
 * @param gssContext the GSS security context
 * @param strip      whether to strip the realm from the principal name
 * @param logger     receives errors when the source name cannot be read
 * @return the (possibly stripped) principal name, or {@code null} when the
 *         context is not established or the name cannot be obtained
 */
private static String getUsernameFromGSSContext(final GSSContext gssContext, final boolean strip, final ESLogger logger) {
    if (!gssContext.isEstablished()) {
        return null;
    }

    GSSName sourceName = null;
    try {
        sourceName = gssContext.getSrcName();
    } catch (final GSSException e) {
        // Logged (not rethrown): callers treat a missing name as "no user".
        logger.error("Unable to get src name from gss context", e);
    }
    if (sourceName == null) {
        return null;
    }

    return stripRealmName(sourceName.toString(), strip);
}
 
Example 5
Source Project: Elasticsearch   Source File: MetaData.java    License: Apache License 2.0 6 votes vote down vote up
/** As of 2.0 we require units for time and byte-sized settings.
 * This method adds default units to any settings that are part of timeSettings
 * or byteSettings and don't specify a unit: "ms" for time settings and "b" for
 * byte-sized settings (see convertedValue).
 *
 * @param timeSettings names of settings whose values are durations
 * @param byteSettings names of settings whose values are byte sizes
 * @param logger       used to warn about values that are missing units
 * @param settings     the settings to inspect
 * @return the given settings instance unchanged when no value needed a unit,
 *         otherwise a new Settings object with units appended
 **/
// NOTE(review): neither return path below yields null, so @Nullable looks
// unnecessary — confirm before removing, callers may rely on the annotation.
@Nullable
public static Settings addDefaultUnitsIfNeeded(Set<String> timeSettings, Set<String> byteSettings, ESLogger logger, Settings settings) {
    // Builder is created lazily: stays null while no setting needs rewriting.
    Settings.Builder newSettingsBuilder = null;
    for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
        String settingName = entry.getKey();
        String settingValue = entry.getValue();

        String newSettingValue = convertedValue(timeSettings, settingName, settingValue, logger, "ms", "time");
        if (settingValue.equals(newSettingValue) == false) {
            newSettingsBuilder = initSettingsBuilder(settings, newSettingsBuilder);
            newSettingsBuilder.put(settingName, newSettingValue);
        }

        newSettingValue = convertedValue(byteSettings, settingName, settingValue, logger, "b", "byte-sized");
        if (settingValue.equals(newSettingValue) == false) {
            newSettingsBuilder = initSettingsBuilder(settings, newSettingsBuilder);
            newSettingsBuilder.put(settingName, newSettingValue);
        }
    }

    if (newSettingsBuilder == null) {
        return settings;
    }
    return newSettingsBuilder.build();
}
 
Example 6
Source Project: Elasticsearch   Source File: MetaData.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Appends the default unit to a setting value when the setting requires units
 * but the value is a bare number; any other value is returned unchanged.
 *
 * @param settingsThatRequireUnits names of settings that must carry a unit
 * @param settingName              the setting being inspected
 * @param settingValue             its current value
 * @param logger                   warns when a default unit is assumed
 * @param unit                     unit suffix to append (e.g. "ms", "b")
 * @param unitName                 human-readable unit family for the warning
 * @return the value with the unit appended, or the original value
 */
private static String convertedValue(Set<String> settingsThatRequireUnits,
                                     String settingName,
                                     String settingValue,
                                     ESLogger logger,
                                     String unit,
                                     String unitName) {
    if (!settingsThatRequireUnits.contains(settingName)) {
        return settingValue;
    }
    try {
        Long.parseLong(settingValue);
    } catch (NumberFormatException e) {
        // Not a bare number: the value already has a unit (or is otherwise
        // non-numeric) - leave it alone.
        return settingValue;
    }
    // It's a naked number that previously would be interpreted as default unit; now we add it:
    logger.warn("{} setting [{}] with value [{}] is missing units; assuming default units ({}) but in future versions this will be a hard error",
            unitName, settingName, settingValue, unit);
    return settingValue + unit;
}
 
Example 7
Source Project: Elasticsearch   Source File: ChildMemoryCircuitBreaker.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Create a circuit breaker that will break if the number of estimated
 * bytes grows above the limit. All estimations will be multiplied by
 * the given overheadConstant. Uses the given oldBreaker to initialize
 * the starting offset.
 * @param settings settings to configure this breaker
 * @param oldBreaker the previous circuit breaker to inherit the used value
 *                   and tripped count from (starting offset); may be null
 * @param logger logger for trace output when the breaker is created
 * @param parent parent circuit breaker service to delegate tripped breakers to
 * @param name the name of the breaker
 */
public ChildMemoryCircuitBreaker(BreakerSettings settings, ChildMemoryCircuitBreaker oldBreaker,
                                 ESLogger logger, HierarchyCircuitBreakerService parent, String name) {
    this.name = name;
    this.settings = settings;
    this.memoryBytesLimit = settings.getLimit();
    this.overheadConstant = settings.getOverhead();
    if (oldBreaker == null) {
        this.used = new AtomicLong(0);
        this.trippedCount = new AtomicLong(0);
    } else {
        // Share the counters so accounted bytes survive breaker replacement.
        this.used = oldBreaker.used;
        this.trippedCount = oldBreaker.trippedCount;
    }
    this.logger = logger;
    if (logger.isTraceEnabled()) {
        logger.trace("creating ChildCircuitBreaker with settings {}", this.settings);
    }
    this.parent = parent;
}
 
Example 8
Source Project: Elasticsearch   Source File: MemoryCircuitBreaker.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Create a circuit breaker that will break if the number of estimated
 * bytes grows above the limit. All estimations will be multiplied by
 * the given overheadConstant. Uses the given oldBreaker to initialize
 * the starting offset.
 * @param limit circuit breaker limit
 * @param overheadConstant constant multiplier for byte estimations
 * @param oldBreaker the previous circuit breaker to inherit the used value
 *                   and tripped count from (starting offset); may be null
 * @param logger logger for trace output when the breaker is created
 */
public MemoryCircuitBreaker(ByteSizeValue limit, double overheadConstant, MemoryCircuitBreaker oldBreaker, ESLogger logger) {
    this.memoryBytesLimit = limit.bytes();
    this.overheadConstant = overheadConstant;
    if (oldBreaker == null) {
        this.used = new AtomicLong(0);
        this.trippedCount = new AtomicLong(0);
    } else {
        // Share the counters so accounted bytes survive breaker replacement.
        this.used = oldBreaker.used;
        this.trippedCount = oldBreaker.trippedCount;
    }
    this.logger = logger;
    if (logger.isTraceEnabled()) {
        logger.trace("Creating MemoryCircuitBreaker with a limit of {} bytes ({}) and a overhead constant of {}",
                this.memoryBytesLimit, limit, this.overheadConstant);
    }
}
 
Example 9
Source Project: Elasticsearch   Source File: MultiDataPathUpgrader.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Runs an upgrade on all shards located under the given node environment if
 * there is more than 1 data.path configured, otherwise this method will
 * return immediately. Each shard is upgraded while holding its shard lock.
 *
 * @param nodeEnv node environment to scan for indices and shards
 * @param logger  reports shards that are already upgraded
 * @throws IOException if shard state or index files cannot be read or moved
 */
public static void upgradeMultiDataPath(NodeEnvironment nodeEnv, ESLogger logger) throws IOException {
    if (nodeEnv.nodeDataPaths().length > 1) {
        final MultiDataPathUpgrader upgrader = new MultiDataPathUpgrader(nodeEnv);
        final Set<String> allIndices = nodeEnv.findAllIndices();

        for (String index : allIndices) {
            for (ShardId shardId : findAllShardIds(nodeEnv.indexPaths(new Index(index)))) {
                // Lock the shard so no other process touches it during the move.
                try (ShardLock lock = nodeEnv.shardLock(shardId, 0)) {
                    if (upgrader.needsUpgrading(shardId)) {
                        final ShardPath shardPath = upgrader.pickShardPath(shardId);
                        upgrader.upgrade(shardId, shardPath);
                        // we have to check if the index path exists since we might
                        // have only upgraded the shard state that is written under /indexname/shardid/_state
                        // in the case we upgraded a dedicated index directory index
                        if (Files.exists(shardPath.resolveIndex())) {
                            upgrader.checkIndex(shardPath);
                        }
                    } else {
                        logger.debug("{} no upgrade needed - already upgraded", shardId);
                    }
                }
            }
        }
    }
}
 
Example 10
/**
 * Source-side handler for DL-based index recovery. Reads the snapshot/translog
 * recovery thresholds from the shard's index settings.
 *
 * @param shard            the shard being recovered from
 * @param request          the recovery request (supplies shard id and index name)
 * @param recoverySettings global recovery settings
 * @param transportService transport used to talk to the recovery target
 * @param logger           logger forwarded to the superclass
 */
public DLBasedIndexRecoverySourceHandler(IndexShard shard,
        StartRecoveryRequest request, RecoverySettings recoverySettings,
        TransportService transportService, ESLogger logger) {
    super(shard, request, recoverySettings, transportService, logger);
    this.shard = shard;
    this.request = request;
    // Defaults: 50000 rows before preferring snapshot recovery, 300 rows translog gap.
    this.snapshotRecoveryThreadhold = shard.indexSettings().getAsLong(IndexMetaData.SETTING_DL_SNAPSHOT_RECOVERY_ROWS, 50000L);
    this.translogRecoveryGap = shard.indexSettings().getAsLong(IndexMetaData.SETTING_DL_TRANSLOG_RECOVERY_GAP_ROWS, 300L);
    this.recoverySettings = recoverySettings;
    this.transportService = transportService;
    this.indexName = this.request.shardId().index().name();
    this.shardId = this.request.shardId().id();
}
 
Example 11
Source Project: Elasticsearch   Source File: JarHell.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Checks the current classpath for duplicate classes.
 *
 * @throws IllegalStateException if jar hell was found
 * @throws Exception if the classpath cannot be inspected
 */
public static void checkJarHell() throws Exception {
    final ESLogger logger = Loggers.getLogger(JarHell.class);
    if (logger.isDebugEnabled()) {
        // Dump the effective classpath to help diagnose any jar-hell report.
        logger.debug("java.class.path: {}", System.getProperty("java.class.path"));
        logger.debug("sun.boot.class.path: {}", System.getProperty("sun.boot.class.path"));
        final ClassLoader loader = JarHell.class.getClassLoader();
        if (loader instanceof URLClassLoader) {
            logger.debug("classloader urls: {}", Arrays.toString(((URLClassLoader) loader).getURLs()));
        }
    }
    checkJarHell(parseClassPath());
}
 
Example 12
Source Project: Elasticsearch   Source File: BootstrapProxy.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Exits the process when the legacy config-file setting is still in use:
 * crate.yml must live in the config directory and cannot be renamed.
 *
 * @param confFileSetting value of the legacy setting; null/empty means unset
 * @param settingName     name of the setting, used in the log message
 */
private static void checkUnsetAndMaybeExit(String confFileSetting, String settingName) {
    if (confFileSetting == null || confFileSetting.isEmpty()) {
        return;
    }
    ESLogger logger = Loggers.getLogger(Bootstrap.class);
    logger.info("{} is no longer supported. crate.yml must be placed in the config directory and cannot be renamed.", settingName);
    System.exit(1);
}
 
Example 13
Source Project: Elasticsearch   Source File: Bootstrap.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Exits the process when the legacy config-file setting is still in use:
 * elasticsearch.yml must live in the config directory and cannot be renamed.
 *
 * @param confFileSetting value of the legacy setting; null/empty means unset
 * @param settingName     name of the setting, used in the log message
 */
private static void checkUnsetAndMaybeExit(String confFileSetting, String settingName) {
    if (confFileSetting == null || confFileSetting.isEmpty()) {
        return;
    }
    ESLogger logger = Loggers.getLogger(Bootstrap.class);
    logger.info("{} is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed.", settingName);
    exit(1);
}
 
Example 14
Source Project: Elasticsearch   Source File: BlobService.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Starts the blob service: registers the blob recovery handlers, validates the
 * optional global blob path and — unlike regular services — starts the HTTP
 * server early so its bound address is available for redirect logic.
 *
 * @throws ElasticsearchException if a dependent service fails to start
 */
@Override
protected void doStart() throws ElasticsearchException {
    logger.info("BlobService.doStart() {}", this);

    // suppress warning about replaced recovery handler: temporarily raise the
    // TransportService log level, and restore it in a finally block so the
    // logger is not left silenced if registration throws (the original code
    // skipped the restore on exception).
    ESLogger transportServiceLogger = Loggers.getLogger(TransportService.class);
    String previousLevel = transportServiceLogger.getLevel();
    transportServiceLogger.setLevel("ERROR");
    try {
        injector.getInstance(BlobRecoverySource.class).registerHandler();
    } finally {
        transportServiceLogger.setLevel(previousLevel);
    }

    // validate the optional blob path setting
    String globalBlobPathPrefix = settings.get(BlobEnvironment.SETTING_BLOBS_PATH);
    if (globalBlobPathPrefix != null) {
        blobEnvironment.blobsPath(new File(globalBlobPathPrefix));
    }

    blobHeadRequestHandler.registerHandler();

    // by default the http server is started after the discovery service.
    // For the BlobService this is too late.

    // The HttpServer has to be started before so that the boundAddress
    // can be added to DiscoveryNodes - this is required for the redirect logic.
    if (settings.getAsBoolean("http.enabled", true)) {
        injector.getInstance(HttpServer.class).start();
    } else {
        logger.warn("Http server should be enabled for blob support");
    }
}
 
Example 15
Source Project: Elasticsearch   Source File: NestedLoopContext.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Execution context for a nested-loop join phase. For each side of the join a
 * RemoveContextCallback is registered: on the side's page-downstream context
 * future when one exists, otherwise on that side's row receiver finish future,
 * so the context is removed once both inputs complete.
 *
 * @param logger                     logger forwarded to the superclass
 * @param phase                      the nested-loop phase (supplies the execution phase id)
 * @param flatProjectorChain         projector chain applied to the join output
 * @param nestedLoopOperation        supplies the left/right row receivers
 * @param leftPageDownstreamContext  left input context; null for a direct (local) input
 * @param rightPageDownstreamContext right input context; null for a direct (local) input
 */
public NestedLoopContext(ESLogger logger,
                         NestedLoopPhase phase,
                         FlatProjectorChain flatProjectorChain,
                         NestedLoopOperation nestedLoopOperation,
                         @Nullable PageDownstreamContext leftPageDownstreamContext,
                         @Nullable PageDownstreamContext rightPageDownstreamContext) {
    super(phase.executionPhaseId(), logger);

    nestedLoopPhase = phase;
    this.flatProjectorChain = flatProjectorChain;
    this.leftPageDownstreamContext = leftPageDownstreamContext;
    this.rightPageDownstreamContext = rightPageDownstreamContext;

    leftRowReceiver = nestedLoopOperation.leftRowReceiver();
    rightRowReceiver = nestedLoopOperation.rightRowReceiver();

    // Left side: cleanup hook goes on the context future if present,
    // otherwise on the receiver's finish future.
    if (leftPageDownstreamContext == null) {
        Futures.addCallback(leftRowReceiver.finishFuture(), new RemoveContextCallback());
    } else {
        leftPageDownstreamContext.future.addCallback(new RemoveContextCallback());
    }

    // Right side: same wiring as the left side.
    if (rightPageDownstreamContext == null) {
        Futures.addCallback(rightRowReceiver.finishFuture(), new RemoveContextCallback());
    } else {
        rightPageDownstreamContext.future.addCallback(new RemoveContextCallback());
    }
}
 
Example 16
Source Project: Elasticsearch   Source File: DistributingDownstream.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Creates a downstream that distributes result buckets to a set of target
 * nodes; one Downstream handle is created per downstream node id.
 *
 * @param logger                           logger for this downstream
 * @param jobId                            id of the job the results belong to
 * @param multiBucketBuilder               builds the per-target buckets
 * @param targetExecutionPhaseId           execution phase receiving the results
 * @param inputId                          input slot on the receiving phase
 * @param bucketIdx                        index of this sender's bucket
 * @param downstreamNodeIds                target node ids, one Downstream each
 * @param transportDistributedResultAction transport action used to send results
 * @param streamers                        row streamers for serialization
 * @param pageSize                         page size for result batching
 */
public DistributingDownstream(ESLogger logger,
                              UUID jobId,
                              MultiBucketBuilder multiBucketBuilder,
                              int targetExecutionPhaseId,
                              byte inputId,
                              int bucketIdx,
                              Collection<String> downstreamNodeIds,
                              TransportDistributedResultAction transportDistributedResultAction,
                              Streamer<?>[] streamers,
                              int pageSize) {
    this.logger = logger;
    this.jobId = jobId;
    this.multiBucketBuilder = multiBucketBuilder;
    this.targetExecutionPhaseId = targetExecutionPhaseId;
    this.inputId = inputId;
    this.bucketIdx = bucketIdx;
    this.transportDistributedResultAction = transportDistributedResultAction;
    this.streamers = streamers;
    this.pageSize = pageSize;

    final int targetCount = downstreamNodeIds.size();
    buckets = new Bucket[targetCount];
    downstreams = new Downstream[targetCount];
    int slot = 0;
    for (String downstreamNodeId : downstreamNodeIds) {
        downstreams[slot++] = new Downstream(downstreamNodeId);
    }
}
 
Example 17
Source Project: Elasticsearch   Source File: CrateF.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Entry point: boots the node in the foreground (prepending a "start" command
 * to the arguments) and then starts the embedded MySQL-protocol server using
 * the "mysql.max_connections" and "mysql.port" settings.
 */
public static void main(String[] args) {
    System.setProperty("es.foreground", "yes");
    String[] startArgs = new String[args.length + 1];
    startArgs[0] = "start";
    System.arraycopy(args, 0, startArgs, 1, args.length);
    try {
        BootstrapProxy.init(startArgs);
    } catch (Throwable t) {
        // format exceptions to the console in a special way
        // to avoid 2MB stacktraces from guice, etc.
        throw new StartupErrorProxy(t);
    }

    // start mysql server
    Settings settings = BootstrapProxy.getSettings();
    int maxConnections = settings.getAsInt("mysql.max_connections", 1024);
    int mysqlPort = settings.getAsInt("mysql.port", 8306);
    ConnectScheduler scheduler = new ConnectScheduler(maxConnections);
    MysqlServer mysqlServer = new MysqlServer(mysqlPort, scheduler);
    ESLogger logger = Loggers.getLogger(CrateF.class);
    if (!mysqlServer.start()) {
        logger.error("mysql server start failed");
        System.exit(-1);
    } else {
        // parameterized message instead of string concatenation (consistent
        // with the logger usage elsewhere, and avoids eager formatting)
        logger.info("mysql server start successful, max_connections: {} port: {}",
                maxConnections, mysqlPort);
    }
}
 
Example 18
Source Project: Elasticsearch   Source File: IndicesFieldDataCache.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Creates a field-data cache view bound to one index and field, storing its
 * entries in the given shared cache and notifying the supplied listeners.
 *
 * @param logger        logger for this cache
 * @param cache         shared backing cache keyed by {@code Key}
 * @param index         index this view belongs to
 * @param fieldNames    names of the field being cached
 * @param fieldDataType field data type of the cached field
 * @param listeners     listeners notified about cache events
 */
IndexFieldCache(ESLogger logger, final Cache<Key, Accountable> cache, Index index, MappedFieldType.Names fieldNames, FieldDataType fieldDataType, Listener... listeners) {
    this.logger = logger;
    this.cache = cache;
    this.index = index;
    this.fieldNames = fieldNames;
    this.fieldDataType = fieldDataType;
    this.listeners = listeners;
}
 
Example 19
Source Project: Elasticsearch   Source File: LoggerInfoStream.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Picks the logger for a Lucene infostream component: the "IFD" component
 * gets its dedicated logger, everything else uses the default one.
 *
 * @param component Lucene infostream component name
 * @return the logger responsible for that component
 */
private ESLogger getLogger(String component) {
    return component.equals("IFD") ? ifdLogger : logger;
}
 
Example 20
SearchScrollQueryAndFetchAsyncAction(ESLogger logger, ClusterService clusterService,
                                     SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController,
                                     SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
    this.logger = logger;
    this.searchPhaseController = searchPhaseController;
    this.searchService = searchService;
    this.request = request;
    this.listener = listener;
    this.scrollId = scrollId;
    this.nodes = clusterService.state().nodes();
    this.successfulOps = new AtomicInteger(scrollId.getContext().length);
    this.counter = new AtomicInteger(scrollId.getContext().length);

    this.queryFetchResults = new AtomicArray<>(scrollId.getContext().length);
}
 
Example 21
Source Project: Elasticsearch   Source File: SearchCountAsyncAction.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Async action for count-only searches. This constructor only delegates to
 * the abstract base search action; all behavior lives in the superclass.
 */
SearchCountAsyncAction(ESLogger logger, SearchServiceTransportAction searchService, ClusterService clusterService,
                                    IndexNameExpressionResolver indexNameExpressionResolver,
                                    SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchRequest request,
                                    ActionListener<SearchResponse> listener) {
    super(logger, searchService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool,
            request, listener);
}
 
Example 22
Source Project: elasticsearch-rest-command   Source File: Search.java    License: The Unlicense 5 votes vote down vote up
/**
 * Writes the "data_over_time" date-histogram aggregation from the search
 * response into the builder as a timeline table: a "fields" header
 * (_bucket_timevalue, _doc_count) followed by one [key, docCount] row
 * per histogram bucket.
 *
 * @param builder  receives the JSON output
 * @param response search response that must contain the "data_over_time" aggregation
 * @param logger   used to report the query duration
 * @throws IOException if writing to the builder fails
 */
public static void buildTimeline(XContentBuilder builder,
		SearchResponse response, ESLogger logger) throws IOException {
	// fixed typo ("millseconds") and switched to a parameterized message,
	// matching the placeholder style used with this logger elsewhere
	logger.info("Report took in milliseconds: {}", response.getTookInMillis());
	DateHistogram timeline = response.getAggregations().get(
			"data_over_time");

	// Format the aggregation result and emit it as rows.

	builder.startObject();
	builder.field("took", response.getTookInMillis());
	builder.field("total", timeline.getBuckets().size());

	builder.startArray("fields");
	builder.value("_bucket_timevalue");
	builder.value("_doc_count");
	builder.endArray();

	builder.startArray("rows");
	for (Bucket bucket : timeline.getBuckets()) {
		builder.startArray();
		builder.value(bucket.getKey());
		builder.value(bucket.getDocCount());
		builder.endArray();
	}
	builder.endArray().endObject();

}
 
Example 23
Source Project: Elasticsearch   Source File: QueryCollector.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Percolation collector variant that keeps matches together with their scores
 * (presumably — behavior is defined by the superclass; confirm there).
 * Limit and result size are taken from the percolate context.
 *
 * @param logger         logger forwarded to the superclass
 * @param context        percolate context supplying limit and size
 * @param highlightPhase used to build highlights for matching documents
 * @param isNestedDoc    whether the percolated document contains nested docs
 * @throws IOException if the superclass fails to initialize
 */
MatchAndScore(ESLogger logger, PercolateContext context, HighlightPhase highlightPhase, boolean isNestedDoc) throws IOException {
    super(logger, context, isNestedDoc);
    this.limit = context.limit;
    this.size = context.size();
    this.context = context;
    this.highlightPhase = highlightPhase;
}
 
Example 24
Source Project: Elasticsearch   Source File: IndexingSlowLog.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Build with the specified loggers. Only used for testing.
 *
 * @param indexSettings settings supplying thresholds, level, reformat flag
 *                      and max source chars to log
 * @param indexLogger   receives slow indexing operations
 * @param deleteLogger  receives slow delete operations
 */
IndexingSlowLog(Settings indexSettings, ESLogger indexLogger, ESLogger deleteLogger) {
    this.indexLogger = indexLogger;
    this.deleteLogger = deleteLogger;
    this.reformat = indexSettings.getAsBoolean(INDEX_INDEXING_SLOWLOG_REFORMAT, true);
    // thresholds default to -1 nanos, which appears to disable the
    // corresponding level — confirm against the slowlog check sites
    this.indexWarnThreshold = indexSettings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN, TimeValue.timeValueNanos(-1)).nanos();
    this.indexInfoThreshold = indexSettings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO, TimeValue.timeValueNanos(-1)).nanos();
    this.indexDebugThreshold = indexSettings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG, TimeValue.timeValueNanos(-1)).nanos();
    this.indexTraceThreshold = indexSettings.getAsTime(INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE, TimeValue.timeValueNanos(-1)).nanos();
    this.level = indexSettings.get(INDEX_INDEXING_SLOWLOG_LEVEL, "TRACE").toUpperCase(Locale.ROOT);
    this.maxSourceCharsToLog = readSourceToLog(indexSettings);

    // both loggers run at the configured slowlog level
    indexLogger.setLevel(level);
    deleteLogger.setLevel(level);
}
 
Example 25
Source Project: Elasticsearch   Source File: Node.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Stops all node services. Returns immediately if the lifecycle cannot move
 * to the stopped state (i.e. the node is not currently started). The stop
 * order below is deliberate — see the inline comments — so do not reorder
 * the getInstance(...).stop() calls casually.
 *
 * @return this node, for chaining
 */
private Node stop() {
    if (!lifecycle.moveToStopped()) {
        return this;
    }
    ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
    logger.info("stopping ...");

    injector.getInstance(TribeService.class).stop();
    injector.getInstance(ResourceWatcherService.class).stop();
    if (settings.getAsBoolean("http.enabled", true)) {
        injector.getInstance(HttpServer.class).stop();
    }

    injector.getInstance(SnapshotsService.class).stop();
    injector.getInstance(SnapshotShardsService.class).stop();
    // stop any changes happening as a result of cluster state changes
    injector.getInstance(IndicesClusterStateService.class).stop();
    // we close indices first, so operations won't be allowed on it
    injector.getInstance(IndexingMemoryController.class).stop();
    injector.getInstance(IndicesTTLService.class).stop();
    injector.getInstance(RoutingService.class).stop();
    injector.getInstance(ClusterService.class).stop();
    injector.getInstance(DiscoveryService.class).stop();
    injector.getInstance(MonitorService.class).stop();
    injector.getInstance(GatewayService.class).stop();
    injector.getInstance(SearchService.class).stop();
    injector.getInstance(RestController.class).stop();
    injector.getInstance(TransportService.class).stop();

    // plugin-provided services stop after the core services
    for (Class<? extends LifecycleComponent> plugin : pluginsService.nodeServices()) {
        injector.getInstance(plugin).stop();
    }
    // we should stop this last since it waits for resources to get released
    // if we had scroll searchers etc or recovery going on we wait for to finish.
    injector.getInstance(IndicesService.class).stop();
    logger.info("stopped");

    return this;
}
 
Example 26
Source Project: elasticsearch-taste   Source File: GenTermValuesHandler.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Listener that processes multi-term-vector responses with a bounded number
 * of worker threads.
 *
 * @param numOfThread     requested worker count; clamped to at least 1
 * @param requestHandlers handlers invoked for each processed request
 * @param eventParams     parameters attached to generated events
 * @param idMap           document id to DocInfo mapping
 * @param executor        executor running the workers
 * @param logger          logger for this listener
 */
public MultiTermVectorsListener(final int numOfThread,
        final RequestHandler[] requestHandlers,
        final Params eventParams, final Map<String, DocInfo> idMap,
        final ExecutorService executor, final ESLogger logger) {
    // Clamp to a minimum of one worker thread.
    this.numOfThread = Math.max(numOfThread, 1);
    this.requestHandlers = requestHandlers;
    this.eventParams = eventParams;
    this.idMap = idMap;
    this.executor = executor;
    this.logger = logger;
}
 
Example 27
Source Project: Elasticsearch   Source File: TranslogRecoveryPerformer.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Holds the per-shard services needed to replay translog operations during
 * recovery. This constructor only stores its collaborators.
 *
 * @param shardId             shard whose translog is being replayed
 * @param mapperService       resolves document mappings
 * @param queryParserService  parses queries (e.g. for delete-by-query replay)
 * @param indexAliasesService resolves index aliases
 * @param indexCache          cache invalidated/used during replay
 * @param logger              logger for recovery operations
 */
protected TranslogRecoveryPerformer(ShardId shardId, MapperService mapperService, IndexQueryParserService queryParserService,
                                    IndexAliasesService indexAliasesService, IndexCache indexCache, ESLogger logger) {
    this.shardId = shardId;
    this.mapperService = mapperService;
    this.queryParserService = queryParserService;
    this.indexAliasesService = indexAliasesService;
    this.indexCache = indexCache;
    this.logger = logger;
}
 
Example 28
Source Project: Elasticsearch   Source File: IndexShard.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Reads the translog durability from the index settings, falling back to the
 * given default (with a warning) when the configured value is not a valid
 * enum constant. Matching is case-insensitive via Locale.ROOT.
 *
 * @param logger       warns about illegal values
 * @param settings     index settings to read from
 * @param defaultValue durability used when the setting is absent or invalid
 * @return the configured durability, or {@code defaultValue}
 */
private static Translog.Durabilty getFromSettings(ESLogger logger, Settings settings, Translog.Durabilty defaultValue) {
    final String configured = settings.get(TranslogConfig.INDEX_TRANSLOG_DURABILITY, defaultValue.name());
    try {
        return Translog.Durabilty.valueOf(configured.toUpperCase(Locale.ROOT));
    } catch (IllegalArgumentException ex) {
        logger.warn("Can't apply {} illegal value: {} using {} instead, use one of: {}", TranslogConfig.INDEX_TRANSLOG_DURABILITY,
                configured, defaultValue, Arrays.toString(Translog.Durabilty.values()));
        return defaultValue;
    }
}
 
Example 29
Source Project: Elasticsearch   Source File: Analysis.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Resolves the Lucene analysis version for an analyzer component.
 * Precedence: the component's own "version" setting, then the index-wide
 * "index.analysis.version" setting, then the Lucene version implied by the
 * version the index was created with.
 *
 * @param indexSettings settings of the index
 * @param settings      settings of the specific analysis component
 * @param logger        passed through to the version parser
 * @return the resolved Lucene version
 */
public static Version parseAnalysisVersion(Settings indexSettings, Settings settings, ESLogger logger) {
    // explicit version on the component wins over the index-wide default
    String explicitVersion = settings.get("version");
    if (explicitVersion == null) {
        explicitVersion = indexSettings.get("index.analysis.version");
    }
    if (explicitVersion != null) {
        return Lucene.parseVersion(explicitVersion, Lucene.ANALYZER_VERSION, logger);
    }
    // no explicit version anywhere: derive it from the index creation version
    return org.elasticsearch.Version.indexCreated(indexSettings).luceneVersion;
}
 
Example 30
Source Project: Elasticsearch   Source File: Store.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Returns <code>true</code> iff the given location contains an index and the
 * index can be successfully opened. This includes reading the segment infos
 * and possible corruption markers.
 *
 * @param logger        traces the failure reason when the index can't be opened
 * @param indexLocation directory expected to contain the index
 * @return whether the index opened successfully
 * @throws IOException declared for callers; failures to open are caught below
 */
public static boolean canOpenIndex(ESLogger logger, Path indexLocation) throws IOException {
    try {
        tryOpenIndex(indexLocation);
        return true;
    } catch (Exception ex) {
        logger.trace("Can't open index for path [{}]", ex, indexLocation);
        return false;
    }
}