Java Code Examples for org.elasticsearch.cluster.metadata.IndexMetaData#getIndex()

The following examples show how to use org.elasticsearch.cluster.metadata.IndexMetaData#getIndex(). They are taken from open source projects; the source file and originating project are noted above each example.
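A note on the return type before the examples: in the Crate examples below (which track Elasticsearch 5.x and later), getIndex() returns an org.elasticsearch.index.Index carrying both the index name and its UUID, while in the older Elasticsearch 2.x examples it returns the index name as a plain String. The following minimal sketch targets the newer API; the index name and settings values are illustrative only.

import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;

public class GetIndexSketch {
    public static void main(String[] args) {
        // Build a minimal IndexMetaData; the created-version, shard count and
        // replica count settings are the usual minimum for build() to succeed.
        IndexMetaData indexMetaData = IndexMetaData.builder("my_index")
            .settings(Settings.builder()
                .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
                .build())
            .build();

        // getIndex() returns an Index object (5.x+); no UUID was assigned above,
        // so getUUID() reports the placeholder value.
        Index index = indexMetaData.getIndex();
        System.out.println(index.getName() + " / " + index.getUUID());
    }
}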
Example 1
Source File: IndicesClusterStateService.java    From Elasticsearch with Apache License 2.0
private void applySettings(ClusterChangedEvent event) {
    if (!event.metaDataChanged()) {
        return;
    }
    for (IndexMetaData indexMetaData : event.state().metaData()) {
        if (!indicesService.hasIndex(indexMetaData.getIndex())) {
            // we only create / update here
            continue;
        }
        // if the index meta data didn't change, no need to check for refreshed settings
        if (!event.indexMetaDataChanged(indexMetaData)) {
            continue;
        }
        String index = indexMetaData.getIndex();
        IndexService indexService = indicesService.indexService(index);
        if (indexService == null) {
            // already deleted on us, ignore it
            continue;
        }
        IndexSettingsService indexSettingsService = indexService.injector().getInstance(IndexSettingsService.class);
        indexSettingsService.refreshSettings(indexMetaData.getSettings());
    }
}
 
Example 2
Source File: IndicesClusterStateService.java    From Elasticsearch with Apache License 2.0
private void applyAliases(ClusterChangedEvent event) {
    // check if aliases changed
    if (aliasesChanged(event)) {
        // go over and update aliases
        for (IndexMetaData indexMetaData : event.state().metaData()) {
            String index = indexMetaData.getIndex();
            IndexService indexService = indicesService.indexService(index);
            if (indexService == null) {
                // we only create / update here
                continue;
            }
            IndexAliasesService indexAliasesService = indexService.aliasesService();
            indexAliasesService.setAliases(indexMetaData.getAliases());
        }
    }
}
 
Example 3
Source File: JoinTaskExecutor.java    From crate with Apache License 2.0
/**
 * Ensures that all indices are compatible with the given node version. This ensures that no index in the given
 * metadata was created with a newer version of Elasticsearch, and that every index is at or above the minimum index
 * compatibility version.
 * @see Version#minimumIndexCompatibilityVersion()
 * @throws IllegalStateException if any index is incompatible with the given version
 */
public static void ensureIndexCompatibility(final Version nodeVersion, MetaData metaData) {
    Version supportedIndexVersion = nodeVersion.minimumIndexCompatibilityVersion();
    // we ensure that all indices in the cluster we join are compatible with us no matter if they are
    // closed or not we can't read mappings of these indices so we need to reject the join...
    for (IndexMetaData idxMetaData : metaData) {
        if (idxMetaData.getCreationVersion().after(nodeVersion)) {
            throw new IllegalStateException("index " + idxMetaData.getIndex() + " version not supported: "
                + idxMetaData.getCreationVersion() + " the node version is: " + nodeVersion);
        }
        if (idxMetaData.getCreationVersion().before(supportedIndexVersion)) {
            throw new IllegalStateException("index " + idxMetaData.getIndex() + " version not supported: "
                + idxMetaData.getCreationVersion() + " minimum compatible index version is: " + supportedIndexVersion);
        }
    }
}
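A hedged sketch of invoking this check directly. The org.elasticsearch.cluster.coordination package path and the MetaData.EMPTY_META_DATA constant are assumed from upstream Elasticsearch and may differ slightly in the Crate fork.

import org.elasticsearch.Version;
import org.elasticsearch.cluster.coordination.JoinTaskExecutor;
import org.elasticsearch.cluster.metadata.MetaData;

public class JoinCompatibilityCheck {
    public static void main(String[] args) {
        // An empty MetaData contains no indices, so the check trivially passes.
        MetaData metaData = MetaData.EMPTY_META_DATA;
        try {
            JoinTaskExecutor.ensureIndexCompatibility(Version.CURRENT, metaData);
            System.out.println("all indices are compatible with " + Version.CURRENT);
        } catch (IllegalStateException e) {
            // Thrown when an index was created after the node version, or before
            // the node's minimum index compatibility version.
            System.err.println(e.getMessage());
        }
    }
}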
 
Example 4
Source File: IndexRoutingTable.java    From Elasticsearch with Apache License 2.0
/**
 * Initializes an index, to be restored from snapshot
 */
private Builder initializeAsRestore(IndexMetaData indexMetaData, RestoreSource restoreSource, IntSet ignoreShards, boolean asNew, UnassignedInfo unassignedInfo) {
    if (!shards.isEmpty()) {
        throw new IllegalStateException("trying to initialize an index with fresh shards, but already has shards created");
    }
    for (int shardId = 0; shardId < indexMetaData.getNumberOfShards(); shardId++) {
        IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(new ShardId(indexMetaData.getIndex(), shardId));
        for (int i = 0; i <= indexMetaData.getNumberOfReplicas(); i++) {
            if (asNew && ignoreShards.contains(shardId)) {
                // This shard wasn't completely snapshotted - restore it as a new shard
                indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(index, shardId, null, i == 0, unassignedInfo));
            } else {
                indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(index, shardId, i == 0 ? restoreSource : null, i == 0, unassignedInfo));
            }
        }
        shards.put(shardId, indexShardRoutingBuilder.build());
    }
    return this;
}
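The new ShardId(indexMetaData.getIndex(), shardId) pattern above recurs in Examples 6, 8, 12 and 16. Below is a small stand-alone helper sketching the same idea; it is hypothetical (not part of Elasticsearch) and assumes the 5.x+/Crate API where getIndex() returns an Index.

import java.util.ArrayList;
import java.util.List;

import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.index.shard.ShardId;

public final class ShardIds {

    // Enumerate the ShardIds of an index from its metadata, mirroring the
    // shard loops used by the routing-table builders above.
    public static List<ShardId> shardIdsOf(IndexMetaData indexMetaData) {
        List<ShardId> shardIds = new ArrayList<>(indexMetaData.getNumberOfShards());
        for (int shard = 0; shard < indexMetaData.getNumberOfShards(); shard++) {
            shardIds.add(new ShardId(indexMetaData.getIndex(), shard));
        }
        return shardIds;
    }
}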
 
Example 5
Source File: IndicesService.java    From crate with Apache License 2.0
/**
 * Deletes the index store, trying to acquire all shard locks for this index.
 * This method will delete the metadata for the index even if the actual shards can't be locked.
 *
 * Package private for testing
 */
void deleteIndexStore(String reason, IndexMetaData metaData, ClusterState clusterState) throws IOException {
    if (nodeEnv.hasNodeFile()) {
        synchronized (this) {
            Index index = metaData.getIndex();
            if (hasIndex(index)) {
                String localUUID = indexService(index).indexUUID();
                throw new IllegalStateException("Can't delete index store for [" + index.getName() + "] - it's still part of the indices service [" + localUUID + "] [" + metaData.getIndexUUID() + "]");
            }

            if (clusterState.metaData().hasIndex(index.getName()) && (clusterState.nodes().getLocalNode().isMasterNode() == true)) {
                // we do not delete the store if it is a master eligible node and the index is still in the cluster state
                // because we want to keep the meta data for indices around even if no shards are left here
                final IndexMetaData idxMeta = clusterState.metaData().index(index.getName());
                throw new IllegalStateException("Can't delete index store for [" + index.getName() + "] - it's still part of the " +
                                                "cluster state [" + idxMeta.getIndexUUID() + "] [" + metaData.getIndexUUID() + "], " +
                                                "we are master eligible, so will keep the index metadata even if no shards are left.");
            }
        }
        final IndexSettings indexSettings = buildIndexSettings(metaData);
        deleteIndexStore(reason, indexSettings.getIndex(), indexSettings);
    }
}
 
Example 6
Source File: RoutingProvider.java    From crate with Apache License 2.0
public ShardRouting forId(ClusterState state, String index, String id, @Nullable String routing) {
    IndexMetaData indexMetaData = indexMetaData(state, index);
    ShardId shardId = new ShardId(indexMetaData.getIndex(), generateShardId(indexMetaData, id, routing));
    IndexShardRoutingTable routingTable = state.getRoutingTable().shardRoutingTable(shardId);
    ShardRouting shardRouting;
    if (awarenessAttributes.isEmpty()) {
        shardRouting = routingTable.activeInitializingShardsIt(seed).nextOrNull();
    } else {
        shardRouting = routingTable
            .preferAttributesActiveInitializingShardsIt(awarenessAttributes, state.nodes(), seed)
            .nextOrNull();
    }
    return shardRouting == null ? routingTable.primaryShard() : shardRouting;
}
 
Example 7
Source File: BlobIndices.java    From Elasticsearch with Apache License 2.0
private void removeIndexLocationsForDeletedIndices(ClusterChangedEvent event, MetaData currentMetaData) {
    MetaData newMetaData = event.state().metaData();
    for (IndexMetaData current : currentMetaData) {
        String index = current.getIndex();
        if (!newMetaData.hasIndex(index) && isBlobIndex(index)) {
            deleteBlobIndexLocation(current, index);
        }
    }
}
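The iterate-and-compare pattern here (walk one MetaData, check presence in another via hasIndex()) can be sketched as a stand-alone helper. The helper below is hypothetical (not part of Elasticsearch) and assumes the 2.x-era API used in this example, where getIndex() returns the index name as a String.

import java.util.ArrayList;
import java.util.List;

import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;

public final class DeletedIndices {

    // Collect the names of indices that exist in previousMetaData but are no
    // longer present in currentMetaData, e.g. after a cluster state change.
    public static List<String> deletedIndexNames(MetaData previousMetaData, MetaData currentMetaData) {
        List<String> deleted = new ArrayList<>();
        for (IndexMetaData indexMetaData : previousMetaData) {
            String index = indexMetaData.getIndex();
            if (!currentMetaData.hasIndex(index)) {
                deleted.add(index);
            }
        }
        return deleted;
    }
}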
 
Example 8
Source File: SnapshotsService.java    From crate with Apache License 2.0
/**
 * Returns the status of shards of currently finished snapshots
 * <p>
 * This method is executed on the master node and is complementary to the
 * {@link SnapshotShardsService#currentSnapshotShards(Snapshot)} because it
 * returns similar information but for already finished snapshots.
 * </p>
 *
 * @param repositoryName  repository name
 * @param repositoryData  repository data
 * @param snapshotInfo    snapshot info
 * @return map of shard id to snapshot status
 */
public Map<ShardId, IndexShardSnapshotStatus> snapshotShards(final String repositoryName,
                                                             final RepositoryData repositoryData,
                                                             final SnapshotInfo snapshotInfo) throws IOException {
    final Repository repository = repositoriesService.repository(repositoryName);
    final Map<ShardId, IndexShardSnapshotStatus> shardStatus = new HashMap<>();
    for (String index : snapshotInfo.indices()) {
        IndexId indexId = repositoryData.resolveIndexId(index);
        IndexMetaData indexMetaData = repository.getSnapshotIndexMetaData(snapshotInfo.snapshotId(), indexId);
        if (indexMetaData != null) {
            int numberOfShards = indexMetaData.getNumberOfShards();
            for (int i = 0; i < numberOfShards; i++) {
                ShardId shardId = new ShardId(indexMetaData.getIndex(), i);
                SnapshotShardFailure shardFailure = findShardFailure(snapshotInfo.shardFailures(), shardId);
                if (shardFailure != null) {
                    shardStatus.put(shardId, IndexShardSnapshotStatus.newFailed(shardFailure.reason()));
                } else {
                    final IndexShardSnapshotStatus shardSnapshotStatus;
                    if (snapshotInfo.state() == SnapshotState.FAILED) {
                        // If the snapshot failed, but the shard's snapshot does
                        // not have an exception, it means that partial snapshots
                        // were disabled and in this case, the shard snapshot will
                        // *not* have any metadata, so attempting to read the shard
                        // snapshot status will throw an exception.  Instead, we create
                        // a status for the shard to indicate that the shard snapshot
                        // could not be taken due to partial being set to false.
                        shardSnapshotStatus = IndexShardSnapshotStatus.newFailed("skipped");
                    } else {
                        shardSnapshotStatus = repository.getShardSnapshotStatus(
                            snapshotInfo.snapshotId(),
                            indexId,
                            shardId);
                    }
                    shardStatus.put(shardId, shardSnapshotStatus);
                }
            }
        }
    }
    return unmodifiableMap(shardStatus);
}
 
Example 9
Source File: MetaStateService.java    From crate with Apache License 2.0
/**
 * Writes the index state.
 * <p>
 * This method is public for testing purposes.
 *
 * @throws WriteStateException if an exception occurs while writing the state. {@link WriteStateException#isDirty()} will always return
 *                             false, because the new index state file is not yet referenced by the manifest file.
 */
public long writeIndex(String reason, IndexMetaData indexMetaData) throws WriteStateException {
    final Index index = indexMetaData.getIndex();
    LOGGER.trace("[{}] writing state, reason [{}]", index, reason);
    try {
        long generation = INDEX_META_DATA_FORMAT.write(indexMetaData,
                nodeEnv.indexPaths(indexMetaData.getIndex()));
        LOGGER.trace("[{}] state written", index);
        return generation;
    } catch (WriteStateException ex) {
        throw new WriteStateException(false, "[" + index + "]: failed to write index state", ex);
    }
}
 
Example 10
Source File: IndicesService.java    From crate with Apache License 2.0
/**
 * Creates a new {@link IndexService} for the given metadata.
 *
 * @param indexMetaData          the index metadata to create the index for
 * @param builtInListeners       a list of built-in lifecycle {@link IndexEventListener}s that should be used alongside the
 *                               per-index listeners
 * @throws ResourceAlreadyExistsException if the index already exists.
 */
@Override
public synchronized IndexService createIndex(
        final IndexMetaData indexMetaData, final List<IndexEventListener> builtInListeners) throws IOException {
    ensureChangesAllowed();
    if (indexMetaData.getIndexUUID().equals(IndexMetaData.INDEX_UUID_NA_VALUE)) {
        throw new IllegalArgumentException("index must have a real UUID found value: [" + indexMetaData.getIndexUUID() + "]");
    }
    final Index index = indexMetaData.getIndex();
    if (hasIndex(index)) {
        throw new ResourceAlreadyExistsException(index);
    }
    List<IndexEventListener> finalListeners = new ArrayList<>(builtInListeners);
    final IndexEventListener onStoreClose = new IndexEventListener() {
        @Override
        public void onStoreClosed(ShardId shardId) {
            indicesQueryCache.onClose(shardId);
        }
    };
    finalListeners.add(onStoreClose);
    final IndexService indexService = createIndexService(
        "create index",
        indexMetaData,
        indicesQueryCache,
        finalListeners,
        indexingMemoryController
    );
    boolean success = false;
    try {
        indexService.getIndexEventListener().afterIndexCreated(indexService);
        indices = newMapBuilder(indices).put(index.getUUID(), indexService).immutableMap();
        success = true;
        return indexService;
    } finally {
        if (success == false) {
            indexService.close("plugins_failed", true);
        }
    }
}
 
Example 11
Source File: ClusterIndexHealth.java    From Elasticsearch with Apache License 2.0
public ClusterIndexHealth(IndexMetaData indexMetaData, IndexRoutingTable indexRoutingTable) {
    this.index = indexMetaData.getIndex();
    this.numberOfShards = indexMetaData.getNumberOfShards();
    this.numberOfReplicas = indexMetaData.getNumberOfReplicas();
    this.validationFailures = indexRoutingTable.validate(indexMetaData);

    for (IndexShardRoutingTable shardRoutingTable : indexRoutingTable) {
        int shardId = shardRoutingTable.shardId().id();
        shards.put(shardId, new ClusterShardHealth(shardId, shardRoutingTable));
    }

    // update the index status
    status = ClusterHealthStatus.GREEN;

    for (ClusterShardHealth shardHealth : shards.values()) {
        if (shardHealth.isPrimaryActive()) {
            activePrimaryShards++;
        }
        activeShards += shardHealth.getActiveShards();
        relocatingShards += shardHealth.getRelocatingShards();
        initializingShards += shardHealth.getInitializingShards();
        unassignedShards += shardHealth.getUnassignedShards();

        if (shardHealth.getStatus() == ClusterHealthStatus.RED) {
            status = ClusterHealthStatus.RED;
        } else if (shardHealth.getStatus() == ClusterHealthStatus.YELLOW && status != ClusterHealthStatus.RED) {
            // do not override an existing red
            status = ClusterHealthStatus.YELLOW;
        }
    }
    if (!validationFailures.isEmpty()) {
        status = ClusterHealthStatus.RED;
    } else if (shards.isEmpty()) { // might be since none has been created yet (two phase index creation)
        status = ClusterHealthStatus.RED;
    }
}
 
Example 12
Source File: IndexRoutingTable.java    From Elasticsearch with Apache License 2.0
/**
 * Initializes a new empty index, with an option to control if it's created from an API or not.
 */
private Builder initializeEmpty(IndexMetaData indexMetaData, UnassignedInfo unassignedInfo) {
    if (!shards.isEmpty()) {
        throw new IllegalStateException("trying to initialize an index with fresh shards, but already has shards created");
    }
    for (int shardId = 0; shardId < indexMetaData.getNumberOfShards(); shardId++) {
        IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(new ShardId(indexMetaData.getIndex(), shardId));
        for (int i = 0; i <= indexMetaData.getNumberOfReplicas(); i++) {
            indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(index, shardId, null, i == 0, unassignedInfo));
        }
        shards.put(shardId, indexShardRoutingBuilder.build());
    }
    return this;
}
 
Example 13
Source File: MetaStateService.java    From Elasticsearch with Apache License 2.0
/**
 * Writes the index state.
 */
void writeIndex(String reason, IndexMetaData indexMetaData, @Nullable IndexMetaData previousIndexMetaData) throws Exception {
    logger.trace("[{}] writing state, reason [{}]", indexMetaData.getIndex(), reason);
    try {
        indexStateFormat.write(indexMetaData, indexMetaData.getVersion(),
                nodeEnv.indexPaths(new Index(indexMetaData.getIndex())));
    } catch (Throwable ex) {
        logger.warn("[{}]: failed to write index state", ex, indexMetaData.getIndex());
        throw new IOException("failed to write state for [" + indexMetaData.getIndex() + "]", ex);
    }
}
 
Example 14
Source File: IndicesService.java    From Elasticsearch with Apache License 2.0
public void deleteClosedIndex(String reason, IndexMetaData metaData, ClusterState clusterState) {
    if (nodeEnv.hasNodeFile()) {
        String indexName = metaData.getIndex();
        try {
            if (clusterState.metaData().hasIndex(indexName)) {
                final IndexMetaData index = clusterState.metaData().index(indexName);
                throw new IllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]");
            }
            deleteIndexStore(reason, metaData, clusterState, true);
        } catch (IOException e) {
            logger.warn("[{}] failed to delete closed index", e, metaData.getIndex());
        }
    }
}
 
Example 15
Source File: IndexSettings.java    From crate with Apache License 2.0
/**
 * Creates a new {@link IndexSettings} instance. The given node settings will be merged with the settings in the metadata
 * while index level settings will overwrite node settings.
 *
 * @param indexMetaData the index metadata this settings object is associated with
 * @param nodeSettings the node-level settings of the node this index is allocated on.
 */
public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSettings, IndexScopedSettings indexScopedSettings) {
    scopedSettings = indexScopedSettings.copy(nodeSettings, indexMetaData);
    this.nodeSettings = nodeSettings;
    this.settings = Settings.builder().put(nodeSettings).put(indexMetaData.getSettings()).build();
    this.index = indexMetaData.getIndex();
    version = IndexMetaData.SETTING_INDEX_VERSION_CREATED.get(settings);
    logger = Loggers.getLogger(getClass(), index);
    nodeName = Node.NODE_NAME_SETTING.get(settings);
    this.indexMetaData = indexMetaData;
    numberOfShards = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null);

    this.defaultAllowUnmappedFields = scopedSettings.get(ALLOW_UNMAPPED);
    this.durability = scopedSettings.get(INDEX_TRANSLOG_DURABILITY_SETTING);
    defaultFields = scopedSettings.get(DEFAULT_FIELD_SETTING);
    syncInterval = INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.get(settings);
    refreshInterval = scopedSettings.get(INDEX_REFRESH_INTERVAL_SETTING);
    flushThresholdSize = scopedSettings.get(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING);
    translogRetentionAge = scopedSettings.get(INDEX_TRANSLOG_RETENTION_AGE_SETTING);
    translogRetentionSize = scopedSettings.get(INDEX_TRANSLOG_RETENTION_SIZE_SETTING);
    generationThresholdSize = scopedSettings.get(INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING);
    flushAfterMergeThresholdSize = scopedSettings.get(INDEX_FLUSH_AFTER_MERGE_THRESHOLD_SIZE_SETTING);
    mergeSchedulerConfig = new MergeSchedulerConfig(this);
    gcDeletesInMillis = scopedSettings.get(INDEX_GC_DELETES_SETTING).getMillis();
    softDeleteEnabled = version.onOrAfter(Version.ES_V_6_5_1) && scopedSettings.get(INDEX_SOFT_DELETES_SETTING);
    softDeleteRetentionOperations = scopedSettings.get(INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING);
    warmerEnabled = scopedSettings.get(INDEX_WARMER_ENABLED_SETTING);
    maxNgramDiff = scopedSettings.get(MAX_NGRAM_DIFF_SETTING);
    maxShingleDiff = scopedSettings.get(MAX_SHINGLE_DIFF_SETTING);
    maxRefreshListeners = scopedSettings.get(MAX_REFRESH_LISTENERS_PER_SHARD);
    this.mergePolicyConfig = new MergePolicyConfig(logger, this);
    singleType = INDEX_MAPPING_SINGLE_TYPE_SETTING.get(indexMetaData.getSettings()); // get this from metadata - it's not registered
    if (singleType == false) {
        throw new AssertionError(
            index.toString() + "multiple types are only allowed on pre 6.x indices but version is: [" + version + "]");
    }

    scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, mergePolicyConfig::setNoCFSRatio);
    scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, mergePolicyConfig::setDeletesPctAllowed);
    scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, mergePolicyConfig::setExpungeDeletesAllowed);
    scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, mergePolicyConfig::setFloorSegmentSetting);
    scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, mergePolicyConfig::setMaxMergesAtOnce);
    scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING, mergePolicyConfig::setMaxMergesAtOnceExplicit);
    scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, mergePolicyConfig::setMaxMergedSegment);
    scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, mergePolicyConfig::setSegmentsPerTier);

    scopedSettings.addSettingsUpdateConsumer(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING, MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING,
        mergeSchedulerConfig::setMaxThreadAndMergeCount);
    scopedSettings.addSettingsUpdateConsumer(MergeSchedulerConfig.AUTO_THROTTLE_SETTING, mergeSchedulerConfig::setAutoThrottle);
    scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_DURABILITY_SETTING, this::setTranslogDurability);
    scopedSettings.addSettingsUpdateConsumer(MAX_NGRAM_DIFF_SETTING, this::setMaxNgramDiff);
    scopedSettings.addSettingsUpdateConsumer(MAX_SHINGLE_DIFF_SETTING, this::setMaxShingleDiff);
    scopedSettings.addSettingsUpdateConsumer(INDEX_WARMER_ENABLED_SETTING, this::setEnableWarmer);
    scopedSettings.addSettingsUpdateConsumer(INDEX_GC_DELETES_SETTING, this::setGCDeletes);
    scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING, this::setTranslogFlushThresholdSize);
    scopedSettings.addSettingsUpdateConsumer(
        INDEX_FLUSH_AFTER_MERGE_THRESHOLD_SIZE_SETTING,
        this::setFlushAfterMergeThresholdSize);
    scopedSettings.addSettingsUpdateConsumer(
            INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING,
            this::setGenerationThresholdSize);
    scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_RETENTION_AGE_SETTING, this::setTranslogRetentionAge);
    scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_RETENTION_SIZE_SETTING, this::setTranslogRetentionSize);
    scopedSettings.addSettingsUpdateConsumer(INDEX_REFRESH_INTERVAL_SETTING, this::setRefreshInterval);
    scopedSettings.addSettingsUpdateConsumer(MAX_REFRESH_LISTENERS_PER_SHARD, this::setMaxRefreshListeners);
    scopedSettings.addSettingsUpdateConsumer(DEFAULT_FIELD_SETTING, this::setDefaultFields);
    scopedSettings.addSettingsUpdateConsumer(INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING, this::setSoftDeleteRetentionOperations);
}
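A minimal sketch of building an IndexSettings instance and reading the index back from it. It assumes the two-argument IndexSettings(IndexMetaData, Settings) convenience constructor from upstream Elasticsearch, which falls back to the default scoped settings; names and values are illustrative.

import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;

public class IndexSettingsSketch {
    public static void main(String[] args) {
        IndexMetaData indexMetaData = IndexMetaData.builder("my_index")
            .settings(Settings.builder()
                .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
                .build())
            .build();

        // Empty node settings: index-level settings from the metadata win anyway.
        IndexSettings indexSettings = new IndexSettings(indexMetaData, Settings.EMPTY);

        // getIndex() on IndexSettings returns the same Index the constructor
        // obtained from indexMetaData.getIndex().
        System.out.println(indexSettings.getIndex().getName());
        System.out.println("shards: " + indexSettings.getNumberOfShards());
    }
}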
 
Example 16
Source File: CancelAllocationCommand.java    From crate with Apache License 2.0
@Override
public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) {
    DiscoveryNode discoNode = allocation.nodes().resolveNode(node);
    ShardRouting shardRouting = null;
    RoutingNodes routingNodes = allocation.routingNodes();
    RoutingNode routingNode = routingNodes.node(discoNode.getId());
    IndexMetaData indexMetaData = null;
    if (routingNode != null) {
        indexMetaData = allocation.metaData().index(index());
        if (indexMetaData == null) {
            throw new IndexNotFoundException(index());
        }
        ShardId shardId = new ShardId(indexMetaData.getIndex(), shardId());
        shardRouting = routingNode.getByShardId(shardId);
    }
    if (shardRouting == null) {
        if (explain) {
            return new RerouteExplanation(this, allocation.decision(Decision.NO, "cancel_allocation_command",
                "can't cancel " + shardId + ", failed to find it on node " + discoNode));
        }
        throw new IllegalArgumentException("[cancel_allocation] can't cancel " + shardId + ", failed to find it on node " + discoNode);
    }
    if (shardRouting.primary() && allowPrimary == false) {
        if ((shardRouting.initializing() && shardRouting.relocatingNodeId() != null) == false) {
            // only allow cancelling initializing shard of primary relocation without allowPrimary flag
            if (explain) {
                return new RerouteExplanation(this, allocation.decision(Decision.NO, "cancel_allocation_command",
                    "can't cancel " + shardId + " on node " + discoNode + ", shard is primary and " +
                        shardRouting.state().name().toLowerCase(Locale.ROOT)));
            }
            throw new IllegalArgumentException("[cancel_allocation] can't cancel " + shardId + " on node " +
                discoNode + ", shard is primary and " + shardRouting.state().name().toLowerCase(Locale.ROOT));
        }
    }
    routingNodes.failShard(LogManager.getLogger(CancelAllocationCommand.class), shardRouting,
        new UnassignedInfo(UnassignedInfo.Reason.REROUTE_CANCELLED, null), indexMetaData, allocation.changes());
    // TODO: We don't have to remove a cancelled shard from in-sync set once we have a strict resync implementation.
    allocation.removeAllocationId(shardRouting);
    return new RerouteExplanation(this, allocation.decision(Decision.YES, "cancel_allocation_command",
            "shard " + shardId + " on node " + discoNode + " can be cancelled"));
}
 
Example 17
Source File: OpenTableClusterStateTaskExecutor.java    From crate with Apache License 2.0
@Override
protected ClusterState execute(ClusterState currentState, OpenCloseTableOrPartitionRequest request) throws Exception {
    Context context = prepare(currentState, request);
    Set<IndexMetaData> indicesToOpen = context.indicesMetaData();
    IndexTemplateMetaData templateMetaData = context.templateMetaData();

    if (indicesToOpen.isEmpty() && templateMetaData == null) {
        return currentState;
    }

    MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
    ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder()
        .blocks(currentState.blocks());
    final Version minIndexCompatibilityVersion = currentState.getNodes().getMaxNodeVersion()
        .minimumIndexCompatibilityVersion();
    for (IndexMetaData closedMetaData : indicesToOpen) {
        final String indexName = closedMetaData.getIndex().getName();
        IndexMetaData indexMetaData = IndexMetaData.builder(closedMetaData).state(IndexMetaData.State.OPEN).build();
        // The index might be closed because we couldn't import it due to old incompatible version
        // We need to check that this index can be upgraded to the current version
        indexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(indexMetaData, minIndexCompatibilityVersion);
        try {
            indicesService.verifyIndexMetadata(indexMetaData, indexMetaData);
        } catch (Exception e) {
            throw new ElasticsearchException("Failed to verify index " + indexMetaData.getIndex(), e);
        }

        mdBuilder.put(indexMetaData, true);
        blocksBuilder.removeIndexBlock(indexName, INDEX_CLOSED_BLOCK);
    }

    // remove closed flag at possible partitioned table template
    if (templateMetaData != null) {
        mdBuilder.put(updateOpenCloseOnPartitionTemplate(templateMetaData, true));
    }

    // The MetaData will always be overridden (and not merged!) when applying it on a cluster state builder.
    // So we must re-build the state with the latest modifications before we pass this state to possible modifiers.
    // Otherwise they would operate on the old MetaData and would just ignore any modifications.
    ClusterState updatedState = ClusterState.builder(currentState).metaData(mdBuilder).blocks(blocksBuilder).build();

    // call possible registered modifiers
    if (context.partitionName() != null) {
        updatedState = ddlClusterStateService.onOpenTablePartition(updatedState, context.partitionName());
    } else {
        updatedState = ddlClusterStateService.onOpenTable(updatedState, request.tableIdent());
    }

    RoutingTable.Builder rtBuilder = RoutingTable.builder(updatedState.routingTable());
    for (IndexMetaData index : indicesToOpen) {
        rtBuilder.addAsFromCloseToOpen(updatedState.metaData().getIndexSafe(index.getIndex()));
    }

    //no explicit wait for other nodes needed as we use AckedClusterStateUpdateTask
    return allocationService.reroute(
        ClusterState.builder(updatedState).routingTable(rtBuilder.build()).build(),
        "indices opened " + indicesToOpen);
}
 
Example 18
Source File: IndicesClusterStateService.java    From Elasticsearch with Apache License 2.0
private void applyMappings(ClusterChangedEvent event) {
    // go over and update mappings
    for (IndexMetaData indexMetaData : event.state().metaData()) {
        if (!indicesService.hasIndex(indexMetaData.getIndex())) {
            // we only create / update here
            continue;
        }
        List<String> typesToRefresh = new ArrayList<>();
        String index = indexMetaData.getIndex();
        IndexService indexService = indicesService.indexService(index);
        if (indexService == null) {
            // got deleted on us, ignore (closing the node)
            return;
        }
        try {
            MapperService mapperService = indexService.mapperService();
            // go over and add the relevant mappings (or update them)
            for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
                MappingMetaData mappingMd = cursor.value;
                String mappingType = mappingMd.type();
                CompressedXContent mappingSource = mappingMd.source();
                if (processMapping(index, mapperService, mappingType, mappingSource, mappingMd.mappingVersion())) {
                    typesToRefresh.add(mappingType);
                }
            }
            if (typesToRefresh.isEmpty() == false && sendRefreshMapping) {
                nodeMappingRefreshAction.nodeMappingRefresh(event.state(),
                        new NodeMappingRefreshAction.NodeMappingRefreshRequest(index, indexMetaData.getIndexUUID(),
                                typesToRefresh.toArray(new String[typesToRefresh.size()]), event.state().nodes().localNodeId())
                );
            }
        } catch (Throwable t) {
            // if we failed to apply the mappings anywhere, we need to fail the shards for this index; note that we safeguard
            // by processing the mappings on the master, or on the node the mapping was introduced on,
            // so this failure typically means wrong node-level configuration or something similar
            for (IndexShard indexShard : indexService) {
                ShardRouting shardRouting = indexShard.routingEntry();
                failAndRemoveShard(shardRouting, indexService, true, "failed to update mappings", t);
            }
        }
    }
}