Java Code Examples for org.elasticsearch.common.collect.Tuple

The following examples show how to use org.elasticsearch.common.collect.Tuple. They are extracted from open source projects. You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example 1
/**
 * Parses a raw source into its detected content type and a map representation.
 *
 * @param source the raw document source; may be null or empty
 * @return the (content type, parsed map) tuple, or null when there is nothing to parse
 * @throws ElasticsearchParseException when the source cannot be parsed
 */
protected Tuple<XContentType, Map<String, Object>> parseSource(BytesReference source) {
    // an absent or empty source yields no parse result
    if (source == null || source.length() == 0) {
        return null;
    }

    try {
        Tuple<XContentType, Map<String, Object>> parsed = XContentHelper.convertToMap(source, false);
        logger.debug("{}: Parsed source: {}", Thread.currentThread().getName(), parsed);
        return parsed;
    } catch (Throwable e) {
        // best-effort render of the raw source for the error message
        String rendered = "_na_";
        try {
            rendered = XContentHelper.convertToJson(source, false);
        } catch (Throwable ignored) {
            // keep the "_na_" placeholder when the source cannot be rendered either
        }
        throw new ElasticsearchParseException("Failed to parse source [" + rendered + "]", e);
    }
}
 
Example 2
Source Project: Elasticsearch   Source File: PartitionInfos.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds a column-name to value map for a partition from the table's
 * partitioned-by columns and the partition's stored values.
 *
 * @return the values map, keyed by the column's SQL fully-qualified name
 */
@Nullable
private static Map<String, Object> buildValuesMap(PartitionName partitionName, MappingMetaData mappingMetaData) throws Exception {
    Map<String, Object> valuesMap = new HashMap<>();
    int position = 0;
    Iterable<Tuple<ColumnIdent, DataType>> partitionColumns =
            PartitionedByMappingExtractor.extractPartitionedByColumns(mappingMetaData.sourceAsMap());
    for (Tuple<ColumnIdent, DataType> columnInfo : partitionColumns) {
        // produce string type values as string, not bytesref
        Object value = BytesRefs.toString(partitionName.values().get(position));
        DataType type = columnInfo.v2();
        if (!type.equals(DataTypes.STRING)) {
            value = type.value(value);
        }
        valuesMap.put(columnInfo.v1().sqlFqn(), value);
        position++;
    }
    return valuesMap;
}
 
Example 3
/**
 * Resolves the rescore context referenced by the log spec, verifies it is a
 * query rescore context wrapping a [sltr] query, and attaches a hit logger.
 *
 * @throws IllegalArgumentException when the index is out of bounds, the context
 *         has the wrong type, or the rescore query is not a [sltr] query
 */
private Tuple<RankerQuery, HitLogConsumer> extractRescore(LoggingSearchExtBuilder.LogSpec logSpec,
                                                          List<RescoreContext> contexts) {
    final int index = logSpec.getRescoreIndex();
    if (index >= contexts.size()) {
        throw new IllegalArgumentException("rescore index [" + index + "] is out of bounds, only " +
                "[" + contexts.size() + "] rescore context(s) are available");
    }
    RescoreContext context = contexts.get(index);
    if (!(context instanceof QueryRescorer.QueryRescoreContext)) {
        throw new IllegalArgumentException("Expected a [QueryRescoreContext] but found a " +
                "[" + context.getClass().getSimpleName() + "] " +
                "at index [" + index + "]");
    }
    QueryRescorer.QueryRescoreContext rescoreContext = (QueryRescorer.QueryRescoreContext) context;
    return toLogger(logSpec, inspectQuery(rescoreContext.query())
            .orElseThrow(() -> new IllegalArgumentException("Expected a [sltr] query but found a " +
                    "[" + rescoreContext.query().getClass().getSimpleName() + "] " +
                    "at index [" + index + "]")));
}
 
Example 4
Source Project: Elasticsearch   Source File: DocIndexMetaData.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Assembles this DocIndexMetaData from the collected builder state.
 *
 * NOTE(review): the call order below appears to matter — partitioned-by and
 * column-policy information is resolved before column definitions are created,
 * and generated expressions are initialized last; confirm before reordering.
 *
 * @return this instance, fully built
 */
public DocIndexMetaData build() {
    partitionedBy = getPartitionedBy();
    columnPolicy = getColumnPolicy();
    createColumnDefinitions();
    indices = createIndexDefinitions();
    columns = ImmutableList.copyOf(columnsBuilder.build());
    partitionedByColumns = partitionedByColumnsBuilder.build();

    // register the table's system columns (see DocSysColumns) as references
    for (Tuple<ColumnIdent, ReferenceInfo> sysColumn : DocSysColumns.forTable(ident)) {
        referencesBuilder.put(sysColumn.v1(), sysColumn.v2());
    }
    references = referencesBuilder.build();
    generatedColumnReferences = generatedColumnReferencesBuilder.build();
    primaryKey = getPrimaryKey();
    routingCol = getRoutingCol();

    initializeGeneratedExpressions();
    return this;
}
 
Example 5
Source Project: Elasticsearch   Source File: TransportPutChunkAction.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Continues the chunk transfer on the primary shard and builds the replica
 * request that carries the same chunk to the replicas.
 */
@Override
protected Tuple<PutChunkResponse, PutChunkReplicaRequest> shardOperationOnPrimary(MetaData metaData,
                                                                                  PutChunkRequest request) throws Throwable {
    PutChunkResponse response = newResponseInstance();
    transferTarget.continueTransfer(request, response);

    // mirror the primary request into a replica request
    final PutChunkReplicaRequest replica = new PutChunkReplicaRequest();
    replica.setShardId(request.shardId());
    replica.index(request.index());
    replica.transferId = request.transferId();
    replica.sourceNodeId = clusterService.localNode().id();
    replica.currentPos = request.currentPos();
    replica.content = request.content();
    replica.isLast = request.isLast();
    return new Tuple<>(response, replica);
}
 
Example 6
/**
 * Lists all feature-store indices with their store versions by issuing one
 * count search per store and collecting the results.
 *
 * Fix: index metadata is now looked up in the {@code state} snapshot this
 * operation was handed (the same one the concrete index names were resolved
 * against), instead of re-reading {@code clusterService.state()} mid-stream,
 * which could observe a newer, inconsistent cluster state.
 */
@Override
protected void masterOperation(ListStoresActionRequest request, ClusterState state,
                               ActionListener<ListStoresActionResponse> listener) throws Exception {
    String[] names = indexNameExpressionResolver.concreteIndexNames(state,
            new ClusterStateRequest().indices(IndexFeatureStore.DEFAULT_STORE, IndexFeatureStore.STORE_PREFIX + "*"));
    final MultiSearchRequestBuilder req = client.prepareMultiSearch();
    final List<Tuple<String, Integer>> versions = new ArrayList<>();
    Stream.of(names)
            .filter(IndexFeatureStore::isIndexStore)
            // use the provided state snapshot, not clusterService.state()
            .map((s) -> state.metaData().getIndices().get(s))
            .filter(Objects::nonNull)
            .filter((im) -> STORE_VERSION_PROP.exists(im.getSettings()))
            .forEach((m) -> {
                req.add(countSearchRequest(m));
                versions.add(tuple(m.getIndex().getName(), STORE_VERSION_PROP.get(m.getSettings())));
            });
    if (versions.isEmpty()) {
        // no stores found: answer immediately without a search round-trip
        listener.onResponse(new ListStoresActionResponse(Collections.emptyList()));
    } else {
        req.execute(wrap((r) -> listener.onResponse(toResponse(r, versions)), listener::onFailure));
    }
}
 
Example 7
Source Project: Elasticsearch   Source File: LuceneQueryBuilder.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds a regex query for the ~ operator: PCRE-style patterns go through
 * RegexQuery, everything else through Lucene's regexp query.
 *
 * @throws IllegalArgumentException when the pattern is not a string value
 */
@Override
public Query apply(Function input, Context context) throws IOException {
    Tuple<Reference, Literal> refAndLiteral = prepare(input);
    if (refAndLiteral == null) {
        return null;
    }
    String fieldName = refAndLiteral.v1().info().ident().columnIdent().fqn();
    Object value = refAndLiteral.v2().value();

    if (value instanceof BytesRef) {
        BytesRef pattern = (BytesRef) value;
        // PCRE-only constructs need RegexQuery; plain patterns use Lucene's engine
        if (isPcrePattern(pattern.utf8ToString())) {
            return new RegexQuery(new Term(fieldName, pattern));
        }
        return toLuceneRegexpQuery(fieldName, pattern, context);
    }
    throw new IllegalArgumentException("Can only use ~ with patterns of type string");
}
 
Example 8
Source Project: Elasticsearch   Source File: LuceneQueryBuilder.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds a case-insensitive regex query for the ~* operator using
 * java.util.regex semantics.
 *
 * @throws IllegalArgumentException when the pattern is not a string value
 */
@Override
public Query apply(Function input, Context context) throws IOException {
    Tuple<Reference, Literal> refAndLiteral = prepare(input);
    if (refAndLiteral == null) {
        return null;
    }
    String fieldName = refAndLiteral.v1().info().ident().columnIdent().fqn();
    Object value = refAndLiteral.v2().value();

    if (value instanceof BytesRef) {
        RegexQuery query = new RegexQuery(new Term(fieldName, BytesRefs.toBytesRef(value)));
        // case-insensitive, Unicode-aware matching via java.util.regex
        int flags = JavaUtilRegexCapabilities.FLAG_CASE_INSENSITIVE | JavaUtilRegexCapabilities.FLAG_UNICODE_CASE;
        query.setRegexImplementation(new JavaUtilRegexCapabilities(flags));
        return query;
    }
    throw new IllegalArgumentException("Can only use ~* with patterns of type string");
}
 
Example 9
/**
 * Zips the per-index (name, version) tuples with the corresponding
 * multi-search items into index store infos; failed searches contribute an
 * empty per-type count map.
 */
private ListStoresActionResponse toResponse(MultiSearchResponse response, List<Tuple<String, Integer>> versions) {
    assert versions.size() == response.getResponses().length;
    List<ListStoresAction.IndexStoreInfo> infos = new ArrayList<>(versions.size());
    Iterator<Tuple<String, Integer>> versionIt = versions.iterator();
    Iterator<MultiSearchResponse.Item> itemIt = response.iterator();
    while (versionIt.hasNext() && itemIt.hasNext()) {
        MultiSearchResponse.Item item = itemIt.next();
        Tuple<String, Integer> idxAndVersion = versionIt.next();
        Map<String, Integer> counts;
        if (item.isFailure()) {
            counts = Collections.emptyMap();
        } else {
            // bucket key = document type, bucket doc count = number of docs of that type
            Terms typeAgg = item.getResponse().getAggregations().get("type");
            counts = typeAgg.getBuckets()
                    .stream()
                    .collect(toMap(MultiBucketsAggregation.Bucket::getKeyAsString,
                            (b) -> (int) b.getDocCount()));
        }
        infos.add(new ListStoresAction.IndexStoreInfo(idxAndVersion.v1(), idxAndVersion.v2(), counts));
    }
    return new ListStoresActionResponse(infos);
}
 
Example 10
Source Project: Elasticsearch   Source File: ExecutionPhasesTask.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Starts execution: flattens all node-operation trees, groups the operations
 * by target server, and sets up the execution context. A failure during setup
 * is propagated to every pending task result future.
 */
@Override
public void start() {
    // flatten the per-tree node operations into one iterable
    FluentIterable<NodeOperation> nodeOperations = FluentIterable.from(nodeOperationTrees)
        .transformAndConcat(new Function<NodeOperationTree, Iterable<? extends NodeOperation>>() {
            @Nullable
            @Override
            public Iterable<? extends NodeOperation> apply(NodeOperationTree input) {
                return input.nodeOperations();
            }
        });

    // group the operations by the server that must execute them
    Map<String, Collection<NodeOperation>> operationByServer = NodeOperationGrouper.groupByServer(nodeOperations);
    InitializationTracker initializationTracker = new InitializationTracker(operationByServer.size());
    List<Tuple<ExecutionPhase, RowReceiver>> handlerPhases = createHandlerPhases(initializationTracker);
    try {
        setupContext(operationByServer, handlerPhases, initializationTracker);
    } catch (Throwable throwable) {
        // fail all pending results so callers are not left waiting
        for (SettableFuture<TaskResult> result : results) {
            result.setException(throwable);
        }
    }
}
 
Example 11
Source Project: Elasticsearch   Source File: ExecutionPhasesTask.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Sends one job request per target server. With direct responses the result
 * buckets are delivered through SetBucketAction; otherwise only failures are
 * reported via FailureOnlyResponseListener. The bucket index advances per server.
 */
private void sendJobRequests(String localNodeId,
                             Map<String, Collection<NodeOperation>> operationByServer,
                             List<PageDownstreamContext> pageDownstreamContexts,
                             List<Tuple<ExecutionPhase, RowReceiver>> handlerPhases,
                             int bucketIdx,
                             InitializationTracker initializationTracker) {
    for (Map.Entry<String, Collection<NodeOperation>> entry : operationByServer.entrySet()) {
        JobRequest request = new JobRequest(jobId(), localNodeId, entry.getValue());
        if (hasDirectResponse) {
            transportJobAction.execute(entry.getKey(), request,
                new SetBucketAction(pageDownstreamContexts, bucketIdx, initializationTracker));
        } else {
            transportJobAction.execute(entry.getKey(), request,
                new FailureOnlyResponseListener(handlerPhases, initializationTracker));
        }
        bucketIdx++;
    }
}
 
Example 12
Source Project: elasticsearch-sql   Source File: SQLFunctions.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds a script "coalesce" expression: an if/else-if chain yielding the
 * first non-null doc value among the given fields, e.g.
 * {@code if(doc['age2'].value != null){doc['age2'].value} else if(doc['age1'].value != null){doc['age1'].value}}.
 *
 * Fix: use StringBuilder instead of StringBuffer — the buffer is method-local,
 * so the synchronization StringBuffer adds is pure overhead.
 *
 * @param fieldName base name used to derive a unique result name
 * @param paramer   the candidate fields, in priority order
 * @return tuple of (generated name, script source)
 */
private static Tuple<String, String> coalesceTemplate(String fieldName, List<KVValue> paramer) {
    String name = fieldName + "_" + random();
    StringBuilder sb = new StringBuilder();
    int i = 0;
    for (KVValue kv : paramer) {
        String field = kv.value.toString();
        if (i > 0) {
            sb.append(" else ");
        }
        sb.append("if(doc['" + field + "'].value != null){doc['" + field + "'].value}");
        i++;
    }
    return new Tuple<>(name, sb.toString());
}
 
Example 13
/**
 * Loads the SearchGuard ACL documents (roles and role mappings) through the
 * config loader, waiting up to 30 seconds.
 *
 * @return the parsed ACL documents; keys other than the role and mapping
 *         types are silently skipped by the switch below
 * @throws Exception when loading the configuration fails
 */
@SuppressWarnings("rawtypes")
private Collection<SearchGuardACLDocument> loadAcls() throws Exception {
    LOGGER.debug("Loading SearchGuard ACL...waiting up to 30s");
    // each entry maps a document type key to (settings content, document version)
    Map<String, Tuple<Settings, Long>> loadedDocs = configLoader.load(CONFIG_DOCS, 30, TimeUnit.SECONDS);
    Collection<SearchGuardACLDocument> docs = new ArrayList<>(loadedDocs.size());
    for (Entry<String, Tuple<Settings, Long>> item : loadedDocs.entrySet()) {
        Settings settings = item.getValue().v1();
        Long version = item.getValue().v2();
        Map<String, Object> original = settings.getAsStructuredMap();
        if(LOGGER.isDebugEnabled()){
            logContent("Read in {}: {}", item.getKey(), settings);
        }
        // dispatch on the document type; unknown types are intentionally ignored
        switch (item.getKey()) {
        case SEARCHGUARD_ROLE_TYPE:
            docs.add(new SearchGuardRoles(version).load(original));
            break;
        case SEARCHGUARD_MAPPING_TYPE:
            docs.add(new SearchGuardRolesMapping(version).load(original));
            break;
        }
    }
    return docs;
}
 
Example 14
Source Project: Elasticsearch   Source File: TransportIndexAction.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Executes an index request on the primary shard and returns the response
 * together with the request to be forwarded to the replicas.
 *
 * @throws RoutingMissingException when the mapping requires routing but the
 *         request carries none
 */
@Override
protected Tuple<IndexResponse, IndexRequest> shardOperationOnPrimary(MetaData metaData, IndexRequest request) throws Throwable {

    // validate, if routing is required, that we got routing
    IndexMetaData indexMetaData = metaData.index(request.shardId().getIndex());
    MappingMetaData mappingMd = indexMetaData.mappingOrDefault(request.type());
    if (mappingMd != null && mappingMd.routing().required()) {
        if (request.routing() == null) {
            throw new RoutingMissingException(request.shardId().getIndex(), request.type(), request.id());
        }
    }

    IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
    IndexShard indexShard = indexService.shardSafe(request.shardId().id());
    // fail before writing if the node is short on disk space
    indexShard.checkDiskSpace(fsService);
    final WriteResult<IndexResponse> result = executeIndexRequestOnPrimary(null, request, indexShard, mappingUpdatedAction);
    final IndexResponse response = result.response;
    final Translog.Location location = result.location;
    // post-write handling (refresh if requested) uses the translog location of the write
    processAfterWrite(request.refresh(), indexShard, location);
    return new Tuple<>(response, request);
}
 
Example 15
/**
 * Records the request body in the audit info as JSON; on conversion failure
 * an error marker is stored instead. A null tuple is ignored.
 */
public void addTupleToRequestBody(Tuple<XContentType, BytesReference> xContentTuple) {
    if (xContentTuple == null) {
        return;
    }
    try {
        auditInfo.put(REQUEST_BODY, XContentHelper.convertToJson(xContentTuple.v2(), false, xContentTuple.v1()));
    } catch (Exception e) {
        auditInfo.put(REQUEST_BODY, "ERROR: Unable to convert to json because of " + e.toString());
    }
}
 
Example 16
/**
 * Pairs the given bytes with their content type, defaulting to JSON when no
 * explicit type was supplied.
 */
private static Tuple<XContentType, BytesReference> convertSource(XContentType type, BytesReference bytes) {
    XContentType resolved = (type == null) ? XContentType.JSON : type;
    return new Tuple<>(resolved, bytes);
}
 
Example 17
/**
 * Intercepts the _source binary field: parses it, runs HASH_CB over the map
 * via deepTraverseMap (presumably masking/hashing values — see MapUtils), and
 * writes the re-serialized result. All other fields pass through untouched.
 */
@Override
public void binaryField(final FieldInfo fieldInfo, final byte[] value) throws IOException {
    if (!fieldInfo.name.equals("_source")) {
        delegate.binaryField(fieldInfo, value);
        return;
    }
    final BytesReference bytesRef = new BytesArray(value);
    final Tuple<XContentType, Map<String, Object>> parsed = XContentHelper.convertToMap(bytesRef, false, XContentType.JSON);
    Map<String, Object> filteredSource = parsed.v2();
    MapUtils.deepTraverseMap(filteredSource, HASH_CB);
    // re-serialize with the same content type the source was parsed as
    final XContentBuilder builder = XContentBuilder.builder(parsed.v1().xContent()).map(filteredSource);
    delegate.binaryField(fieldInfo, BytesReference.toBytes(BytesReference.bytes(builder)));
}
 
Example 18
/**
 * Parses a dictionary location string of the form {@code "path[ nature]"}.
 *
 * @param location the configured location, optionally followed by a space and
 *                 a default part-of-speech (nature) tag
 * @return a tuple of (path, default nature); the nature defaults to Nature.n
 *         when no tag is present
 */
private Tuple<String, Nature> analysisDefaultInfo(String location) {
    int separator = location.indexOf(' ');
    if (separator <= 0) {
        // no tag (or nothing before the space): the whole string is the path
        return Tuple.tuple(location, Nature.n);
    }
    // everything after the first space is the default nature tag
    String path = location.substring(0, separator);
    Nature defaultNature = LexiconUtility.convertStringToNature(location.substring(separator + 1));
    return Tuple.tuple(path, defaultNature);
}
 
Example 19
Source Project: Elasticsearch   Source File: UserProperty.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Splits a possibly schema-qualified table name into (database, table).
 * A bare name falls back to the default schema; extra dot-separated segments
 * beyond the first two are ignored.
 */
public static Tuple<String, String> getDBAndTableName(String fullTableName) {
    String[] parts = fullTableName.split("\\.");
    if (parts.length == 1) {
        // unqualified name: assume the default schema
        return new Tuple<String, String>(Schemas.DEFAULT_SCHEMA_NAME, parts[0]);
    }
    if (parts.length > 1) {
        return new Tuple<String, String>(parts[0], parts[1]);
    }
    // e.g. fullTableName == "." splits into nothing
    return new Tuple<String, String>("", "");
}
 
Example 20
Source Project: Elasticsearch   Source File: PluginsService.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Instantiates the plugins contained in the given bundles. Each bundle is
 * first jar-hell-checked against the parent classpath, then loaded in its own
 * child classloader; JVM plugins are reflectively instantiated while site
 * plugins get a lightweight placeholder.
 *
 * @return an unmodifiable list of (plugin info, plugin instance) pairs
 * @throws IllegalStateException when a bundle fails the jar hell check
 */
private List<Tuple<PluginInfo,Plugin>> loadBundles(List<Bundle> bundles) {
    List<Tuple<PluginInfo, Plugin>> plugins = new ArrayList<>();

    for (Bundle bundle : bundles) {
        // jar-hell check the bundle against the parent classloader
        // pluginmanager does it, but we do it again, in case lusers mess with jar files manually
        try {
            final List<URL> jars = new ArrayList<>();
            jars.addAll(Arrays.asList(JarHell.parseClassPath()));
            jars.addAll(bundle.urls);
            JarHell.checkJarHell(jars.toArray(new URL[0]));
        } catch (Exception e) {
            throw new IllegalStateException("failed to load bundle " + bundle.urls + " due to jar hell", e);
        }

        // create a child to load the plugins in this bundle
        ClassLoader loader = URLClassLoader.newInstance(bundle.urls.toArray(new URL[0]), getClass().getClassLoader());
        for (PluginInfo pluginInfo : bundle.plugins) {
            final Plugin plugin;
            if (pluginInfo.isJvm()) {
                // reload lucene SPI with any new services from the plugin
                reloadLuceneSPI(loader);
                Class<? extends Plugin> pluginClass = loadPluginClass(pluginInfo.getClassname(), loader);
                plugin = loadPlugin(pluginClass, settings);
            } else {
                // non-JVM ("site") plugin: no code to load, just a descriptor wrapper
                plugin = new SitePlugin(pluginInfo.getName(), pluginInfo.getDescription());
            }
            plugins.add(new Tuple<>(pluginInfo, plugin));
        }
    }

    return Collections.unmodifiableList(plugins);
}
 
Example 21
Source Project: Elasticsearch   Source File: PluginsService.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Collects the lifecycle service classes contributed by every loaded plugin.
 */
public Collection<Class<? extends LifecycleComponent>> nodeServices() {
    List<Class<? extends LifecycleComponent>> services = new ArrayList<>();
    for (Tuple<PluginInfo, Plugin> entry : plugins) {
        services.addAll(entry.v2().nodeServices());
    }
    return services;
}
 
Example 22
Source Project: Elasticsearch   Source File: PageDownstreamFactory.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Creates a page downstream (and, when the merge node has projections, an
 * attached projector chain) for the given merge phase.
 *
 * NOTE(review): when a projector chain is built, rowReceiver is reassigned to
 * the chain's first projector BEFORE the page downstream is created — the
 * downstream must feed the chain, not the original receiver.
 *
 * @return tuple of (page downstream, projector chain); the chain is null when
 *         the merge node has no projections
 */
public Tuple<PageDownstream, FlatProjectorChain> createMergeNodePageDownstream(MergePhase mergeNode,
                                                                               RowReceiver rowReceiver,
                                                                               boolean requiresRepeatSupport,
                                                                               RamAccountingContext ramAccountingContext,
                                                                               Optional<Executor> executorOptional) {
    FlatProjectorChain projectorChain = null;
    if (!mergeNode.projections().isEmpty()) {
        projectorChain = FlatProjectorChain.withAttachedDownstream(
                projectionToProjectorVisitor,
                ramAccountingContext,
                mergeNode.projections(),
                rowReceiver,
                mergeNode.jobId()
        );
        // route rows through the projector chain first
        rowReceiver = projectorChain.firstProjector();
    }

    PagingIterator<Void, Row> pagingIterator;
    if (mergeNode.sortedInputOutput() && mergeNode.numUpstreams() > 1) {
        // multiple sorted upstreams: merge while preserving the sort order
        pagingIterator = new SortedPagingIterator<>(
                OrderingByPosition.rowOrdering(
                        mergeNode.orderByIndices(),
                        mergeNode.reverseFlags(),
                        mergeNode.nullsFirst()
                ),
                requiresRepeatSupport
        );
    } else {
        // single upstream or unsorted: pass rows straight through
        pagingIterator = requiresRepeatSupport ?
                PassThroughPagingIterator.<Void, Row>repeatable() : PassThroughPagingIterator.<Void, Row>oneShot();
    }
    PageDownstream pageDownstream = new IteratorPageDownstream(rowReceiver, pagingIterator, executorOptional);
    return new Tuple<>(pageDownstream, projectorChain);
}
 
Example 23
Source Project: Elasticsearch   Source File: BlobTableInfo.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Registers the table's static columns as reference infos. Every reference is
 * added to INFOS; only top-level columns are also added to the columns list.
 */
private void registerStaticColumns() {
    for (Tuple<String, DataType> column : staticColumns) {
        ReferenceIdent refIdent = new ReferenceIdent(ident(), column.v1(), null);
        ReferenceInfo info = new ReferenceInfo(refIdent, RowGranularity.DOC, column.v2());
        if (info.ident().isColumn()) {
            columns.add(info);
        }
        INFOS.put(info.ident().columnIdent(), info);
    }
}
 
Example 24
/**
 * Extracts the partitioned-by column declarations from an index mapping.
 * The declaration is expected under {@code _meta.partitioned_by} as a list of
 * [name, type] lists; anything missing or malformed yields an empty iterable.
 *
 * Fix: dropped the redundant null check before {@code instanceof} — the
 * operator is already null-safe.
 */
@SuppressWarnings("unchecked")
public static Iterable<Tuple<ColumnIdent, DataType>> extractPartitionedByColumns(Map<String, Object> mapping) {
    Map<String, Object> metaMap = (Map<String, Object>) mapping.get("_meta");
    if (metaMap != null) {
        Object partitionedBy = metaMap.get("partitioned_by");
        // instanceof is false for null, so no separate null check is needed
        if (partitionedBy instanceof List) {
            return extractPartitionedByColumns((List<List<String>>) partitionedBy);
        }
    }
    return ImmutableList.of();
}
 
Example 25
Source Project: elasticsearch-sql   Source File: SQLFunctions.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Builds a script snippet computing log_base(column) (change-of-base via
 * Math.log) and the generated variable name holding the result.
 *
 * Fix: construct the result with the diamond operator instead of the raw
 * {@code new Tuple(...)}, which defeated generic type checking.
 *
 * @param base      the logarithm base expression
 * @param strColumn the column (or expression) to take the logarithm of
 * @param valueName when non-null, a precomputed value name to log instead of
 *                  re-evaluating the column expression
 * @return tuple of (generated variable name, script source)
 */
public static Tuple<String, String> log(SQLExpr base, SQLExpr strColumn, String valueName) {
    String name = "log_" + random();
    String result;
    if (valueName == null) {
        if (isProperty(strColumn)) {
            // direct doc-value access for plain field references
            result = "def " + name + " = Math.log(doc['" + Util.expr2Object(strColumn).toString() + "'].value)/Math.log("+Util.expr2Object(base).toString()+")";
        } else {
            result = "def " + name + " = Math.log(" + Util.expr2Object(strColumn).toString() + ")/Math.log("+Util.expr2Object(base).toString()+")";
        }
    } else {
        result = Util.expr2Object(strColumn).toString()+";def "+name+" = Math.log("+valueName+")/Math.log("+Util.expr2Object(base).toString()+")";
    }
    return new Tuple<>(name, result);
}
 
Example 26
Source Project: Elasticsearch   Source File: TransportDeleteBlobAction.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Deletes the blob identified by the request on the primary shard and reports
 * whether anything was actually removed.
 */
@Override
protected Tuple<DeleteBlobResponse, DeleteBlobRequest> shardOperationOnPrimary(MetaData metaData,
                                                                               DeleteBlobRequest request) throws Throwable {
    logger.trace("shardOperationOnPrimary {}", request);
    BlobShard blobShard = blobIndices.blobShardSafe(request.index(), request.shardId().id());
    final boolean wasDeleted = blobShard.delete(request.id());
    return new Tuple<>(new DeleteBlobResponse(wasDeleted), request);
}
 
Example 27
Source Project: Elasticsearch   Source File: LuceneQueryBuilder.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Extracts the (reference, literal) pair from a two-argument function.
 *
 * @return the pair, or null when the first argument is not a column reference
 *         or the second is not a value symbol
 */
@Nullable
protected Tuple<Reference, Literal> prepare(Function input) {
    assert input != null;
    assert input.arguments().size() == 2;

    Symbol reference = input.arguments().get(0);
    Symbol value = input.arguments().get(1);

    if (reference instanceof Reference && value.symbolType().isValueSymbol()) {
        assert value.symbolType() == SymbolType.LITERAL;
        return new Tuple<>((Reference) reference, (Literal) value);
    }
    return null;
}
 
Example 28
Source Project: Elasticsearch   Source File: LuceneQueryBuilder.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Builds a query from the function's reference and literal value, or returns
 * null when the arguments cannot be prepared.
 */
@Override
public Query apply(Function input, Context context) {
    Tuple<Reference, Literal> refAndLiteral = prepare(input);
    return refAndLiteral == null
            ? null
            : toQuery(refAndLiteral.v1(), refAndLiteral.v2().value(), context);
}
 
Example 29
Source Project: Elasticsearch   Source File: LuceneQueryBuilder.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Builds a terms query for the prepared column/literal pair, or returns null
 * when the arguments cannot be prepared.
 */
@Override
public Query apply(Function input, Context context) throws IOException {
    Tuple<Reference, Literal> refAndLiteral = prepare(input);
    if (refAndLiteral == null) {
        return null;
    }
    String field = refAndLiteral.v1().info().ident().columnIdent().fqn();
    return termsQuery(field, refAndLiteral.v2());
}
 
Example 30
/**
 * Maps an index name to its (name, store version) pair.
 *
 * @return the pair, or null when the index is not a feature store, does not
 *         exist in the cluster state, or carries no store version property
 */
private Tuple<String, Integer> toVersion(String s) {
    if (!IndexFeatureStore.isIndexStore(s)) {
        return null;
    }
    IndexMetaData index = clusterService.state().metaData().getIndices().get(s);
    if (index == null || !STORE_VERSION_PROP.exists(index.getSettings())) {
        return null;
    }
    return new Tuple<>(index.getIndex().getName(), STORE_VERSION_PROP.get(index.getSettings()));
}