Java Code Examples for org.elasticsearch.common.logging.ESLogger#debug()

The following examples show how to use org.elasticsearch.common.logging.ESLogger#debug(). Each example is taken from an open-source project; the source file and license are noted above each snippet.
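
For orientation: ESLogger is the logging abstraction of Elasticsearch 2.x (it was replaced by standard Log4j 2 loggers in the 5.x line). A logger is usually obtained from Loggers.getLogger, and debug() accepts a message with {} placeholders that are filled, in order, from the trailing arguments. The following minimal sketch illustrates the basic call; the class DebugLoggingExample is hypothetical and not part of Elasticsearch.

import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;

public class DebugLoggingExample {

    // obtain an ESLogger the same way Example 6 below does
    private static final ESLogger logger = Loggers.getLogger(DebugLoggingExample.class);

    public static void main(String[] args) {
        // {} placeholders are substituted in order from the varargs;
        // the message is only formatted if debug logging is enabled
        logger.debug("started with [{}] command line arguments", args.length);
    }
}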
Example 1
Source File: ZenDiscovery.java    From Elasticsearch with Apache License 2.0
/**
 * In the case we follow an elected master the new cluster state needs to have the same elected master and
 * the new cluster state version needs to be equal or higher than our cluster state version.
 * If the first condition fails we reject the cluster state and throw an error.
 * If the second condition fails we ignore the cluster state.
 */
static boolean shouldIgnoreOrRejectNewClusterState(ESLogger logger, ClusterState currentState, ClusterState newClusterState) {
    if (currentState.nodes().masterNodeId() == null) {
        return false;
    }
    if (!currentState.nodes().masterNodeId().equals(newClusterState.nodes().masterNodeId())) {
        logger.warn("received a cluster state from a different master then the current one, rejecting (received {}, current {})", newClusterState.nodes().masterNode(), currentState.nodes().masterNode());
        throw new IllegalStateException("cluster state from a different master than the current one, rejecting (received " + newClusterState.nodes().masterNode() + ", current " + currentState.nodes().masterNode() + ")");
    } else if (newClusterState.version() < currentState.version()) {
        // if the new state has a smaller version, and it has the same master node, then no need to process it
        logger.debug("received a cluster state that has a lower version than the current one, ignoring (received {}, current {})", newClusterState.version(), currentState.version());
        return true;
    } else {
        return false;
    }
}
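
Note that the debug() call above passes the two versions as separate arguments instead of concatenating them into the message; ESLogger checks the level before formatting, so the parameterized form avoids building the string when debug logging is disabled. A small sketch of the two forms, using hypothetical newVersion and currentVersion values:

long newVersion = 5, currentVersion = 7; // hypothetical values

// discouraged: the message string is built even when debug is disabled
logger.debug("ignoring cluster state (received " + newVersion + ", current " + currentVersion + ")");

// preferred: formatting is deferred until after the level check
logger.debug("ignoring cluster state (received {}, current {})", newVersion, currentVersion);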
 
Example 2
Source File: Store.java    From Elasticsearch with Apache License 2.0
private static void checksumFromLuceneFile(Directory directory, String file, ImmutableMap.Builder<String, StoreFileMetaData> builder, ESLogger logger, Version version, boolean readFileAsHash) throws IOException {
    final String checksum;
    final BytesRefBuilder fileHash = new BytesRefBuilder();
    try (final IndexInput in = directory.openInput(file, IOContext.READONCE)) {
        final long length;
        try {
            length = in.length();
            if (length < CodecUtil.footerLength()) {
                // truncated files trigger an IAE if we seek to a negative position... these files are really corrupted though
                throw new CorruptIndexException("Can't retrieve checksum from file: " + file + " file length must be >= " + CodecUtil.footerLength() + " but was: " + in.length(), in);
            }
            if (readFileAsHash) {
                final VerifyingIndexInput verifyingIndexInput = new VerifyingIndexInput(in); // additional safety: we checksum the entire file we read the hash for...
                hashFile(fileHash, new InputStreamIndexInput(verifyingIndexInput, length), length);
                checksum = digestToString(verifyingIndexInput.verify());
            } else {
                checksum = digestToString(CodecUtil.retrieveChecksum(in));
            }

        } catch (Throwable ex) {
            logger.debug("Can retrieve checksum from file [{}]", ex, file);
            throw ex;
        }
        builder.put(file, new StoreFileMetaData(file, length, checksum, version, fileHash.get()));
    }
}
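
The debug() call in the catch block above uses the ESLogger overload that takes a Throwable as the second parameter, before the format arguments; this ordering differs from SLF4J, where the throwable conventionally comes last. A condensed sketch of the same pattern, reusing the names from the example:

try {
    checksum = digestToString(CodecUtil.retrieveChecksum(in));
} catch (Throwable ex) {
    // the Throwable comes second, followed by the {} format parameters;
    // the stack trace is logged together with the formatted message
    logger.debug("Can not retrieve checksum from file [{}]", ex, file);
    throw ex;
}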
 
Example 3
Source File: MultiDataPathUpgrader.java    From Elasticsearch with Apache License 2.0
/**
 * Runs an upgrade on all shards located under the given node environment if more than one data.path is configured;
 * otherwise this method returns immediately.
 */
public static void upgradeMultiDataPath(NodeEnvironment nodeEnv, ESLogger logger) throws IOException {
    if (nodeEnv.nodeDataPaths().length > 1) {
        final MultiDataPathUpgrader upgrader = new MultiDataPathUpgrader(nodeEnv);
        final Set<String> allIndices = nodeEnv.findAllIndices();

        for (String index : allIndices) {
            for (ShardId shardId : findAllShardIds(nodeEnv.indexPaths(new Index(index)))) {
                try (ShardLock lock = nodeEnv.shardLock(shardId, 0)) {
                    if (upgrader.needsUpgrading(shardId)) {
                        final ShardPath shardPath = upgrader.pickShardPath(shardId);
                        upgrader.upgrade(shardId, shardPath);
                        // we have to check if the index path exists since we might
                        // have only upgraded the shard state that is written under /indexname/shardid/_state
                        // in the case we upgraded an index with a dedicated index directory
                        if (Files.exists(shardPath.resolveIndex())) {
                            upgrader.checkIndex(shardPath);
                        }
                    } else {
                        logger.debug("{} no upgrade needed - already upgraded", shardId);
                    }
                }
            }
        }
    }
}
 
Example 4
Source File: ShardPath.java    From Elasticsearch with Apache License 2.0
/**
 * This method walks through the node's shard paths to find the data and state path for the given shard. If multiple
 * directories with a valid shard state exist, the one with the highest version will be used.
 * <b>Note:</b> this method resolves custom data locations for the shard.
 */
public static ShardPath loadShardPath(ESLogger logger, NodeEnvironment env, ShardId shardId, Settings indexSettings) throws IOException {
    final String indexUUID = indexSettings.get(IndexMetaData.SETTING_INDEX_UUID, IndexMetaData.INDEX_UUID_NA_VALUE);
    final Path[] paths = env.availableShardPaths(shardId);
    Path loadedPath = null;
    for (Path path : paths) {
        ShardStateMetaData load = ShardStateMetaData.FORMAT.loadLatestState(logger, path);
        if (load != null) {
            if (load.indexUUID.equals(indexUUID) == false && IndexMetaData.INDEX_UUID_NA_VALUE.equals(load.indexUUID) == false) {
                logger.warn("{} found shard on path: [{}] with a different index UUID - this shard seems to be leftover from a different index with the same name. Remove the leftover shard in order to reuse the path with the current index", shardId, path);
                throw new IllegalStateException(shardId + " index UUID in shard state was: " + load.indexUUID + " expected: " + indexUUID + " on shard path: " + path);
            }
            if (loadedPath == null) {
                loadedPath = path;
            } else {
                throw new IllegalStateException(shardId + " more than one shard state found");
            }
        }
    }
    if (loadedPath == null) {
        return null;
    } else {
        final Path dataPath;
        final Path statePath = loadedPath;
        if (NodeEnvironment.hasCustomDataPath(indexSettings)) {
            dataPath = env.resolveCustomLocation(indexSettings, shardId);
        } else {
            dataPath = statePath;
        }
        logger.debug("{} loaded data path [{}], state path [{}]", shardId, dataPath, statePath);
        return new ShardPath(NodeEnvironment.hasCustomDataPath(indexSettings), dataPath, statePath, indexUUID, shardId);
    }
}
 
Example 5
Source File: MergePolicyConfig.java    From Elasticsearch with Apache License 2.0
public MergePolicyConfig(ESLogger logger, Settings indexSettings) {
    this.logger = logger;
    this.noCFSRatio = parseNoCFSRatio(indexSettings.get(INDEX_COMPOUND_FORMAT, Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO)));
    double forceMergeDeletesPctAllowed = indexSettings.getAsDouble("index.merge.policy.expunge_deletes_allowed", DEFAULT_EXPUNGE_DELETES_ALLOWED); // percentage
    ByteSizeValue floorSegment = indexSettings.getAsBytesSize("index.merge.policy.floor_segment", DEFAULT_FLOOR_SEGMENT);
    int maxMergeAtOnce = indexSettings.getAsInt("index.merge.policy.max_merge_at_once", DEFAULT_MAX_MERGE_AT_ONCE);
    int maxMergeAtOnceExplicit = indexSettings.getAsInt("index.merge.policy.max_merge_at_once_explicit", DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT);
    // TODO is this really a good default number for max_merge_segment, what happens for large indices, won't they end up with many segments?
    ByteSizeValue maxMergedSegment = indexSettings.getAsBytesSize("index.merge.policy.max_merged_segment", DEFAULT_MAX_MERGED_SEGMENT);
    double segmentsPerTier = indexSettings.getAsDouble("index.merge.policy.segments_per_tier", DEFAULT_SEGMENTS_PER_TIER);
    double reclaimDeletesWeight = indexSettings.getAsDouble("index.merge.policy.reclaim_deletes_weight", DEFAULT_RECLAIM_DELETES_WEIGHT);
    this.mergesEnabled = indexSettings.getAsBoolean(INDEX_MERGE_ENABLED, true);
    if (mergesEnabled == false) {
        logger.warn("[{}] is set to false, this should only be used in tests and can cause serious problems in production environments", INDEX_MERGE_ENABLED);
    }
    maxMergeAtOnce = adjustMaxMergeAtOnceIfNeeded(maxMergeAtOnce, segmentsPerTier);
    mergePolicy.setNoCFSRatio(noCFSRatio);
    mergePolicy.setForceMergeDeletesPctAllowed(forceMergeDeletesPctAllowed);
    mergePolicy.setFloorSegmentMB(floorSegment.mbFrac());
    mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce);
    mergePolicy.setMaxMergeAtOnceExplicit(maxMergeAtOnceExplicit);
    mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.mbFrac());
    mergePolicy.setSegmentsPerTier(segmentsPerTier);
    mergePolicy.setReclaimDeletesWeight(reclaimDeletesWeight);
    logger.debug("using [tiered] merge mergePolicy with expunge_deletes_allowed[{}], floor_segment[{}], max_merge_at_once[{}], max_merge_at_once_explicit[{}], max_merged_segment[{}], segments_per_tier[{}], reclaim_deletes_weight[{}]",
            forceMergeDeletesPctAllowed, floorSegment, maxMergeAtOnce, maxMergeAtOnceExplicit, maxMergedSegment, segmentsPerTier, reclaimDeletesWeight);
}
 
Example 6
Source File: JarHell.java    From Elasticsearch with Apache License 2.0
/**
 * Checks the current classpath for duplicate classes
 * @throws IllegalStateException if jar hell was found
 */
public static void checkJarHell() throws Exception {
    ClassLoader loader = JarHell.class.getClassLoader();
    ESLogger logger = Loggers.getLogger(JarHell.class);
    if (logger.isDebugEnabled()) {
        logger.debug("java.class.path: {}", System.getProperty("java.class.path"));
        logger.debug("sun.boot.class.path: {}", System.getProperty("sun.boot.class.path"));
        if (loader instanceof URLClassLoader) {
            logger.debug("classloader urls: {}", Arrays.toString(((URLClassLoader) loader).getURLs()));
        }
    }
    checkJarHell(parseClassPath());
}
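
The isDebugEnabled() guard above is worth copying: although ESLogger defers message formatting, the arguments themselves are evaluated at the call site, so Arrays.toString over every classpath URL would run even with debug logging off. The sketch below shows the same idiom with a hypothetical expensiveSummary() computation:

if (logger.isDebugEnabled()) {
    // expensiveSummary() is a hypothetical costly call that should only
    // run when its result will actually be logged
    logger.debug("current state: {}", expensiveSummary());
}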
 
Example 7
Source File: AST_Start.java    From elasticsearch-rest-command with The Unlicense
public static void dumpWithLogger(ESLogger logger, SimpleNode node, String prefix) {
    logger.debug(prefix + node.toString());
    if (node.children != null) {
        for (int i = 0; i < node.children.length; ++i) {
            dumpWithLogger(logger, (SimpleNode) node.children[i], prefix + " ");
        }
    }
}
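
Here the whole line, indentation prefix included, is concatenated into the message, so no {} parameters are needed. A hypothetical invocation that logs a parse tree rooted at root, one debug line per node and indented by depth:

ESLogger logger = Loggers.getLogger(AST_Start.class);
dumpWithLogger(logger, root, ""); // root is a hypothetical SimpleNode parse-tree root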