Java Code Examples for org.elasticsearch.common.collect.Tuple#v2()

The following examples show how to use org.elasticsearch.common.collect.Tuple#v2(). Each example notes the source file and project it was taken from.
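Before the project examples, here is a minimal sketch of the API these snippets rely on; the class name, variable names and values below are made up for illustration, while Tuple.tuple(), v1() and v2() are the calls the examples themselves use:

import org.elasticsearch.common.collect.Tuple;

public class TupleSketch {
    public static void main(String[] args) {
        // Tuple.tuple(v1, v2) builds an immutable pair; v1() returns the first
        // element, v2() the second.
        Tuple<String, Integer> pair = Tuple.tuple("shards", 5);
        String name = pair.v1();   // "shards"
        int count = pair.v2();     // 5
        System.out.println(name + " = " + count);
    }
}

In most of the examples below the tuple comes from XContentHelper.convertToMap, which pairs the detected XContentType (v1) with the parsed source map (v2); v2() is then used to read or rewrite the document source.
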
Example 1
Source File: TransportCoordinateMultiSearchAction.java    From siren-join with GNU Affero General Public License v3.0
private void doExecuteFilterJoins(final MultiSearchRequest request,
                                  final List<CoordinateSearchMetadata> metadatas) {
  FilterJoinCache cache = cacheService.getCacheInstance();

  for (int i = 0; i < request.requests().size(); i++) {
    // Parse query source
    Tuple<XContentType, Map<String, Object>> parsedSource = this.parseSource(request.requests().get(i).source());
    Map<String, Object> map = parsedSource.v2();

    // Unwrap "wrapper" queries
    WrapperQueryVisitor wrapperVisitor = new WrapperQueryVisitor(map);
    wrapperVisitor.traverse();

    // Query planning and execution of filter joins
    SourceMapVisitor mapVisitor = new SourceMapVisitor(map);
    mapVisitor.traverse();
    FilterJoinVisitor joinVisitor = new FilterJoinVisitor(client, mapVisitor.getFilterJoinTree(), cache, request);
    joinVisitor.traverse();
    metadatas.add(joinVisitor.getMetadata());

    // Filter joins have been replaced by a binary terms filter
    // Rebuild the query source, and delegate the execution of the search action
    request.requests().get(i).source(this.buildSource(parsedSource.v1().xContent(), map));
  }
}
 
Example 2
Source File: WrapperQueryVisitor.java    From siren-join with GNU Affero General Public License v3.0
protected Map<String, Object> parseQuery(BytesReference source) {
  // nothing to parse...
  if (source == null || source.length() == 0) {
    return null;
  }

  try {
    Tuple<XContentType, Map<String, Object>> parsedSource = XContentHelper.convertToMap(source, false);
    return parsedSource.v2();
  }
  catch (Throwable e) {
    String sSource = "_na_";
    try {
      sSource = XContentHelper.convertToJson(source, false);
    }
    catch (Throwable e1) { /* ignore  */ }
    throw new ElasticsearchParseException("Failed to parse source [" + sSource + "]", e);
  }
}
 
Example 3
Source File: DlsFlsFilterLeafReader.java    From deprecated-security-advanced-modules with Apache License 2.0
@Override
public void binaryField(final FieldInfo fieldInfo, final byte[] value) throws IOException {

    if (fieldInfo.name.equals("_source")) {
        final BytesReference bytesRef = new BytesArray(value);
        final Tuple<XContentType, Map<String, Object>> bytesRefTuple = XContentHelper.convertToMap(bytesRef, false, XContentType.JSON);
        Map<String, Object> filteredSource = bytesRefTuple.v2();
        MapUtils.deepTraverseMap(filteredSource, HASH_CB);
        final XContentBuilder xBuilder = XContentBuilder.builder(bytesRefTuple.v1().xContent()).map(filteredSource);
        delegate.binaryField(fieldInfo, BytesReference.toBytes(BytesReference.bytes(xBuilder)));
    } else {
        delegate.binaryField(fieldInfo, value);
    }
}
 
Example 4
Source File: BlobTableInfo.java    From Elasticsearch with Apache License 2.0
private void registerStaticColumns() {
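    // each entry of staticColumns pairs a column name (v1) with its DataType (v2)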
    for (Tuple<String, DataType> column : staticColumns) {
        ReferenceInfo info = new ReferenceInfo(new ReferenceIdent(ident(), column.v1(), null),
                RowGranularity.DOC, column.v2());
        if (info.ident().isColumn()) {
            columns.add(info);
        }
        INFOS.put(info.ident().columnIdent(), info);
    }
}
 
Example 5
Source File: LuceneQueryBuilder.java    From Elasticsearch with Apache License 2.0
@Override
public Query apply(Function input, Context context) throws IOException {
    Tuple<Reference, Literal> tuple = prepare(input);
    if (tuple == null) {
        return null;
    }
    String field = tuple.v1().info().ident().columnIdent().fqn();
    Literal literal = tuple.v2();
    return termsQuery(field, literal);
}
 
Example 6
Source File: LuceneQueryBuilder.java    From Elasticsearch with Apache License 2.0
@Override
public Query apply(Function input, Context context) {
    Tuple<Reference, Literal> tuple = super.prepare(input);
    if (tuple == null) {
        return null;
    }
    Reference reference = tuple.v1();
    Literal literal = tuple.v2();
    String columnName = reference.info().ident().columnIdent().fqn();
    if (DataTypes.isCollectionType(reference.valueType()) && DataTypes.isCollectionType(literal.valueType())) {
        List<Term> terms = getTerms(columnName, literal);
        if (terms.isEmpty()) {
            return genericFunctionFilter(input, context);
        }
        Query termsQuery = new TermsQuery(terms);

        // Wrap the terms query and the generic function filter in an additional BooleanQuery
        // to control the ordering of the clauses: the cheap terms query is applied first,
        // the more expensive genericFunctionFilter afterwards.
        BooleanQuery.Builder filterClauses = new BooleanQuery.Builder();
        filterClauses.add(termsQuery, BooleanClause.Occur.MUST);
        filterClauses.add(genericFunctionFilter(input, context), BooleanClause.Occur.MUST);
        return filterClauses.build();
    }
    QueryBuilderHelper builder = QueryBuilderHelper.forType(tuple.v1().valueType());
    return builder.eq(columnName, tuple.v2().value());
}
 
Example 7
Source File: ContextPreparer.java    From Elasticsearch with Apache License 2.0
@Nullable
private PageDownstreamContext pageDownstreamContextForNestedLoop(int nlPhaseId,
                                                                 PreparerContext ctx,
                                                                 byte inputId,
                                                                 @Nullable MergePhase mergePhase,
                                                                 RowReceiver rowReceiver,
                                                                 RamAccountingContext ramAccountingContext) {
    if (mergePhase == null) {
        ctx.phaseIdToRowReceivers.put(toKey(nlPhaseId, inputId), rowReceiver);
        return null;
    }
    Tuple<PageDownstream, FlatProjectorChain> pageDownstreamWithChain = pageDownstreamFactory.createMergeNodePageDownstream(
            mergePhase,
            rowReceiver,
            true,
            ramAccountingContext,
            Optional.of(threadPool.executor(ThreadPool.Names.SEARCH))
    );
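    // the factory returns the PageDownstream (v1) paired with its FlatProjectorChain (v2)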
    return new PageDownstreamContext(
            pageDownstreamContextLogger,
            nodeName(),
            mergePhase.executionPhaseId(),
            mergePhase.name(),
            pageDownstreamWithChain.v1(),
            StreamerVisitor.streamerFromOutputs(mergePhase),
            ramAccountingContext,
            mergePhase.numUpstreams(),
            pageDownstreamWithChain.v2()
    );
}
 
Example 8
Source File: ExecutionPhasesTask.java    From Elasticsearch with Apache License 2.0
private void setupContext(Map<String, Collection<NodeOperation>> operationByServer,
                          List<Tuple<ExecutionPhase, RowReceiver>> handlerPhases,
                          InitializationTracker initializationTracker) throws Throwable {

    String localNodeId = clusterService.localNode().id();
    Collection<NodeOperation> localNodeOperations = operationByServer.remove(localNodeId);
    if (localNodeOperations == null) {
        localNodeOperations = Collections.emptyList();
    }

    JobExecutionContext.Builder builder = jobContextService.newBuilder(jobId(), localNodeId);
    Tuple<List<ExecutionSubContext>, List<ListenableFuture<Bucket>>> onHandler =
        contextPreparer.prepareOnHandler(localNodeOperations, builder, handlerPhases, new SharedShardContexts(indicesService));
    JobExecutionContext localJobContext = jobContextService.createContext(builder);
    localJobContext.start();

    List<PageDownstreamContext> pageDownstreamContexts = getHandlerPageDownstreamContexts(onHandler);
    int bucketIdx = 0;
    List<ListenableFuture<Bucket>> directResponseFutures = onHandler.v2();

    if (!localNodeOperations.isEmpty()) {
        if (directResponseFutures.isEmpty()) {
            initializationTracker.jobInitialized(null);
        } else {
            Futures.addCallback(Futures.allAsList(directResponseFutures),
                new SetBucketAction(pageDownstreamContexts, bucketIdx, initializationTracker));
            bucketIdx++;
        }
    }

    sendJobRequests(localNodeId, operationByServer, pageDownstreamContexts, handlerPhases, bucketIdx, initializationTracker);
}
 
Example 9
Source File: DocumentMapperParser.java    From Elasticsearch with Apache License 2.0
public DocumentMapper parse(@Nullable String type, CompressedXContent source, String defaultSource) throws MapperParsingException {
    Map<String, Object> mapping = null;
    if (source != null) {
        Map<String, Object> root = XContentHelper.convertToMap(source.compressedReference(), true).v2();
        Tuple<String, Map<String, Object>> t = extractMapping(type, root);
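        // t.v1() is the resolved mapping type name, t.v2() the mapping definition itself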
        type = t.v1();
        mapping = t.v2();
    }
    if (mapping == null) {
        mapping = Maps.newHashMap();
    }
    return parse(type, mapping, defaultSource);
}
 
Example 10
Source File: KibanaSeed.java    From openshift-elasticsearch-plugin with Apache License 2.0
public void setDashboards(final OpenshiftRequestContext context, String kibanaVersion, final String projectPrefix) {
    if (!pluginClient.indexExists(defaultKibanaIndex)) {
        LOGGER.debug("Default Kibana index '{}' does not exist. Skipping Kibana seeding", defaultKibanaIndex);
        return;
    }

    LOGGER.debug("Begin setDashboards:  projectPrefix '{}' for user '{}' projects '{}' kibanaIndex '{}'",
            projectPrefix, context.getUser(), context.getProjects(), context.getKibanaIndex());

    // We want to seed the Kibana user index initially
    // since the logic from Kibana has changed to create before this plugin
    // starts...
    Tuple<Boolean, Project> action = Tuple.tuple(initialSeedKibanaIndex(context), Project.EMPTY);
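    // action.v1() flags whether the Kibana index changed (and needs a refresh below);
    // action.v2() carries the project whose index pattern may become the default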

    if (context.isOperationsUser()) {
        action = seedOperationsIndexPatterns(context, kibanaVersion);
    } else {
        action = seedUsersIndexPatterns(context, kibanaVersion);
    }

    if (action.v2() != null && !Project.EMPTY.equals(action.v2())) {
        boolean defaultIndexPatternExists = pluginClient.documentExists(context.getKibanaIndex(), INDICIES_TYPE, action.v2().getName());
        GetResponse config = pluginClient.getDocument(context.getKibanaIndex(), CONFIG_DOC_TYPE, kibanaVersion);
        if(!defaultIndexPatternExists || !config.isExists() || StringUtils.isBlank(kibanaUtils.getDefaultIndexPattern(config))){
            setDefaultProject(context.getKibanaIndex(), action.v2(), kibanaVersion);
            action =  Tuple.tuple(true, action.v2());
        }
    }

    if (action.v1()) {
        pluginClient.refreshIndices(context.getKibanaIndex());
    }
}
 
Example 11
Source File: TransportCoordinateSearchAction.java    From siren-join with GNU Affero General Public License v3.0
@Override
protected void doExecute(final SearchRequest request, final ActionListener<SearchResponse> listener) {
  logger.debug("{}: Execute coordinated search action", Thread.currentThread().getName());

  // A reference to the listener that will be used - can be overwritten to reference a CoordinateSearchListener
  ActionListener<SearchResponse> actionListener = listener;

  // Retrieve the singleton instance of the filterjoin cache
  FilterJoinCache cache = cacheService.getCacheInstance();

  // Parse query source
  Tuple<XContentType, Map<String, Object>> parsedSource = this.parseSource(request.source());
  if (parsedSource != null) { // can be null if this is a uri search (query parameter in extraSource)
    Map<String, Object> map = parsedSource.v2();

    // Unwrap "wrapper" queries
    WrapperQueryVisitor wrapperVisitor = new WrapperQueryVisitor(map);
    wrapperVisitor.traverse();

    // Query planning and execution of filter joins
    SourceMapVisitor mapVisitor = new SourceMapVisitor(map);
    mapVisitor.traverse();
    FilterJoinVisitor joinVisitor = new FilterJoinVisitor(client, mapVisitor.getFilterJoinTree(), cache, request);
    joinVisitor.traverse();

    // Wraps the listener with our own to inject metadata information in the response
    CoordinateSearchListener coordinateSearchListener = new CoordinateSearchListener(listener);
    coordinateSearchListener.setMetadata(joinVisitor.getMetadata());
    actionListener = coordinateSearchListener;

    // Filter joins have been replaced by a binary terms filter
    // Rebuild the query source, and delegate the execution of the search action
    request.source(this.buildSource(parsedSource.v1().xContent(), map));
  }

  // Delegate the execution of the request to the original search action
  this.searchAction.execute(request, actionListener);

  logger.debug("{}: Coordinated search action completed", Thread.currentThread().getName());
}
 
Example 12
Source File: UpdateConsumer.java    From Elasticsearch with Apache License 2.0
private Plan upsertByQuery(UpdateAnalyzedStatement.NestedAnalyzedStatement nestedAnalysis,
                           ConsumerContext consumerContext,
                           DocTableInfo tableInfo,
                           WhereClause whereClause) {

    Symbol versionSymbol = null;
    if(whereClause.hasVersions()){
        versionSymbol = VersionRewriter.get(whereClause.query());
        whereClause = new WhereClause(whereClause.query(), whereClause.docKeys().orNull(), whereClause.partitions());
    }


    if (!whereClause.noMatch() || !(tableInfo.isPartitioned() && whereClause.partitions().isEmpty())) {
        // for updates, we always need to collect the `_uid`
        Reference uidReference = new Reference(
                new ReferenceInfo(
                        new ReferenceIdent(tableInfo.ident(), "_uid"),
                        RowGranularity.DOC, DataTypes.STRING));

        Tuple<String[], Symbol[]> assignments = Assignments.convert(nestedAnalysis.assignments());
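        // assignments.v1() holds the updated column names, assignments.v2() the value symbols assigned to them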

        Long version = null;
        if (versionSymbol != null){
            version = ValueSymbolVisitor.LONG.process(versionSymbol);
        }

        UpdateProjection updateProjection = new UpdateProjection(
                new InputColumn(0, DataTypes.STRING),
                assignments.v1(),
                assignments.v2(),
                version);

        Planner.Context plannerContext = consumerContext.plannerContext();
        Routing routing = plannerContext.allocateRouting(tableInfo, whereClause, Preference.PRIMARY.type());
        RoutedCollectPhase collectPhase = new RoutedCollectPhase(
                plannerContext.jobId(),
                plannerContext.nextExecutionPhaseId(),
                "collect",
                routing,
                tableInfo.rowGranularity(),
                ImmutableList.<Symbol>of(uidReference),
                ImmutableList.<Projection>of(updateProjection),
                whereClause,
                DistributionInfo.DEFAULT_BROADCAST
        );
        MergePhase mergeNode = MergePhase.localMerge(
                plannerContext.jobId(),
                plannerContext.nextExecutionPhaseId(),
                ImmutableList.<Projection>of(CountAggregation.PARTIAL_COUNT_AGGREGATION_PROJECTION),
                collectPhase.executionNodes().size(),
                collectPhase.outputTypes()
        );
        return new CollectAndMerge(collectPhase, mergeNode);
    } else {
        return null;
    }
}
 
Example 13
Source File: ContextPreparer.java    From Elasticsearch with Apache License 2.0
@Override
public ExecutionSubContext visitMergePhase(final MergePhase phase, final PreparerContext context) {
    RamAccountingContext ramAccountingContext = RamAccountingContext.forExecutionPhase(circuitBreaker, phase);

    boolean upstreamOnSameNode = context.opCtx.upstreamsAreOnSameNode(phase.executionPhaseId());

    int pageSize = Paging.getWeightedPageSize(Paging.PAGE_SIZE, 1.0d / phase.executionNodes().size());
    RowReceiver rowReceiver = context.getRowReceiver(phase, pageSize);

    if (upstreamOnSameNode) {
        if (!phase.projections().isEmpty()) {
            ProjectorChainContext projectorChainContext = new ProjectorChainContext(
                    phase.executionPhaseId(),
                    phase.name(),
                    context.jobId,
                    pageDownstreamFactory.projectorFactory(),
                    phase.projections(),
                    rowReceiver,
                    ramAccountingContext);
            context.registerRowReceiver(phase.executionPhaseId(), projectorChainContext.rowReceiver());
            return projectorChainContext;
        }

        context.registerRowReceiver(phase.executionPhaseId(), rowReceiver);
        return null;
    }

    Tuple<PageDownstream, FlatProjectorChain> pageDownstreamProjectorChain =
            pageDownstreamFactory.createMergeNodePageDownstream(
                    phase,
                    rowReceiver,
                    false,
                    ramAccountingContext,
                    // no separate executor because TransportDistributedResultAction already runs in a threadPool
                    Optional.<Executor>absent());


    return new PageDownstreamContext(
            pageDownstreamContextLogger,
            nodeName(),
            phase.executionPhaseId(),
            phase.name(),
            pageDownstreamProjectorChain.v1(),
            DataTypes.getStreamer(phase.inputTypes()),
            ramAccountingContext,
            phase.numUpstreams(),
            pageDownstreamProjectorChain.v2());
}
 
Example 14
Source File: TransportShardUpsertAction.java    From Elasticsearch with Apache License 2.0
/**
 * Prepares an update request by converting it into an index request.
 * <p/>
 * TODO: detect a NOOP and return an update response if true
 */
@SuppressWarnings("unchecked")
private SourceAndVersion prepareUpdate(DocTableInfo tableInfo,
                                       ShardUpsertRequest request,
                                       ShardUpsertRequest.Item item,
                                       IndexShard indexShard) throws ElasticsearchException {
    final GetResult getResult = indexShard.getService().get(request.type(), item.id(),
            new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME, TTLFieldMapper.NAME},
            true, Versions.MATCH_ANY, VersionType.INTERNAL, FetchSourceContext.FETCH_SOURCE, false);

    if (!getResult.isExists()) {
        throw new DocumentMissingException(new ShardId(request.index(), request.shardId().id()), request.type(), item.id());
    }

    if (getResult.internalSourceRef() == null) {
        // no source, we can't do anything, so throw a failure
        throw new DocumentSourceMissingException(new ShardId(request.index(), request.shardId().id()), request.type(), item.id());
    }

    if (item.version() != Versions.MATCH_ANY && item.version() != getResult.getVersion()) {
        throw new VersionConflictEngineException(
                indexShard.shardId(), Constants.DEFAULT_MAPPING_TYPE, item.id(), getResult.getVersion(), item.version());
    }

    Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(getResult.internalSourceRef(), true);
    final Map<String, Object> updatedSourceAsMap;
    final XContentType updateSourceContentType = sourceAndContent.v1();

    updatedSourceAsMap = sourceAndContent.v2();

    SymbolToFieldExtractorContext ctx = new SymbolToFieldExtractorContext(functions, item.insertValues());

    Map<String, Object> pathsToUpdate = new LinkedHashMap<>();
    Map<String, Object> updatedGeneratedColumns = new LinkedHashMap<>();
    for (int i = 0; i < request.updateColumns().length; i++) {
        /**
         * NOTE: mapping isn't applied. So if an Insert was done using the ES Rest Endpoint
         * the data might be returned in the wrong format (date as string instead of long)
         */
        String columnPath = request.updateColumns()[i];
        Object value = SYMBOL_TO_FIELD_EXTRACTOR.convert(item.updateAssignments()[i], ctx).apply(getResult);
        ReferenceInfo referenceInfo = tableInfo.getReferenceInfo(ColumnIdent.fromPath(columnPath));
        if (referenceInfo instanceof GeneratedReferenceInfo) {
            updatedGeneratedColumns.put(columnPath, value);

        } else {
            pathsToUpdate.put(columnPath, value);
        }
    }

    processGeneratedColumns(tableInfo, pathsToUpdate, updatedGeneratedColumns, request.validateGeneratedColumns(), getResult);

    updateSourceByPaths(updatedSourceAsMap, pathsToUpdate);

    try {
        XContentBuilder builder = XContentFactory.contentBuilder(updateSourceContentType);
        builder.map(updatedSourceAsMap);
        return new SourceAndVersion(builder.bytes(), getResult.getVersion());
    } catch (IOException e) {
        throw new ElasticsearchGenerationException("Failed to generate [" + updatedSourceAsMap + "]", e);
    }
}
 
Example 15
Source File: DocumentMapperParser.java    From Elasticsearch with Apache License 2.0
@SuppressWarnings({"unchecked"})
private DocumentMapper parse(String type, Map<String, Object> mapping, String defaultSource) throws MapperParsingException {
    if (type == null) {
        throw new MapperParsingException("Failed to derive type");
    }

    if (defaultSource != null) {
        Tuple<String, Map<String, Object>> t = extractMapping(MapperService.DEFAULT_MAPPING, defaultSource);
        if (t.v2() != null) {
            XContentHelper.mergeDefaults(mapping, t.v2());
        }
    }


    Mapper.TypeParser.ParserContext parserContext = parserContext(type);
    // parse RootObjectMapper
    DocumentMapper.Builder docBuilder = doc((RootObjectMapper.Builder) rootObjectTypeParser.parse(type, mapping, parserContext), mapperService);
    Iterator<Map.Entry<String, Object>> iterator = mapping.entrySet().iterator();
    // parse DocumentMapper
    while(iterator.hasNext()) {
        Map.Entry<String, Object> entry = iterator.next();
        String fieldName = Strings.toUnderscoreCase(entry.getKey());
        Object fieldNode = entry.getValue();

        if ("transform".equals(fieldName)) {
            deprecationLogger.deprecated("Mapping transform is deprecated and will be removed in the next major version");
            if (fieldNode instanceof Map) {
                parseTransform(docBuilder, (Map<String, Object>) fieldNode, parserContext.indexVersionCreated());
            } else if (fieldNode instanceof List) {
                for (Object transformItem: (List)fieldNode) {
                    if (!(transformItem instanceof Map)) {
                        throw new MapperParsingException("Elements of transform list must be objects but one was:  " + fieldNode);
                    }
                    parseTransform(docBuilder, (Map<String, Object>) transformItem, parserContext.indexVersionCreated());
                }
            } else {
                throw new MapperParsingException("Transform must be an object or an array but was:  " + fieldNode);
            }
            iterator.remove();
        } else {
            MetadataFieldMapper.TypeParser typeParser = rootTypeParsers.get(fieldName);
            if (typeParser != null) {
                iterator.remove();
                Map<String, Object> fieldNodeMap = (Map<String, Object>) fieldNode;
                docBuilder.put((MetadataFieldMapper.Builder)typeParser.parse(fieldName, fieldNodeMap, parserContext));
                fieldNodeMap.remove("type");
                checkNoRemainingFields(fieldName, fieldNodeMap, parserContext.indexVersionCreated());
            }
        }
    }

    ImmutableMap<String, Object> attributes = ImmutableMap.of();
    if (mapping.containsKey("_meta")) {
        attributes = ImmutableMap.copyOf((Map<String, Object>) mapping.remove("_meta"));
    }
    docBuilder.meta(attributes);

    checkNoRemainingFields(mapping, parserContext.indexVersionCreated(), "Root mapping definition has unsupported parameters: ");

    return docBuilder.build(mapperService);
}