Java Code Examples for org.elasticsearch.common.collect.Tuple#v1()

The following examples show how to use org.elasticsearch.common.collect.Tuple#v1(). The source project and license for each example are noted above it.
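Before the examples, here is a minimal, self-contained sketch of the two-element Tuple API, using only the members visible in the examples below (the static Tuple.tuple() factory and the v1()/v2() accessors); the TupleDemo class name is ours:

import org.elasticsearch.common.collect.Tuple;

public class TupleDemo {
    public static void main(String[] args) {
        // Build a two-element tuple with the static factory.
        Tuple<String, Integer> pair = Tuple.tuple("shards", 5);

        // v1() returns the first element, v2() the second.
        String name = pair.v1();   // "shards"
        int count = pair.v2();     // 5

        System.out.println(name + " = " + count);
    }
}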
Example 1
Source File: BlobTableInfo.java    From Elasticsearch with Apache License 2.0
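In this snippet each static column is modeled as a Tuple of column name and data type: v1() supplies the name for the ReferenceIdent and v2() the DataType for the ReferenceInfo.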
private void registerStaticColumns() {
    for (Tuple<String, DataType> column : staticColumns) {
        ReferenceInfo info = new ReferenceInfo(new ReferenceIdent(ident(), column.v1(), null),
                RowGranularity.DOC, column.v2());
        if (info.ident().isColumn()) {
            columns.add(info);
        }
        INFOS.put(info.ident().columnIdent(), info);
    }
}
 
Example 2
Source File: LuceneQueryBuilder.java    From Elasticsearch with Apache License 2.0
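Here super.prepare(input) pairs a Reference with a Literal; v1() and v2() unpack them to build either a terms query or a generic equality query.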
@Override
public Query apply(Function input, Context context) {
    Tuple<Reference, Literal> tuple = super.prepare(input);
    if (tuple == null) {
        return null;
    }
    Reference reference = tuple.v1();
    Literal literal = tuple.v2();
    String columnName = reference.info().ident().columnIdent().fqn();
    if (DataTypes.isCollectionType(reference.valueType()) && DataTypes.isCollectionType(literal.valueType())) {
        List<Term> terms = getTerms(columnName, literal);
        if (terms.isEmpty()) {
            return genericFunctionFilter(input, context);
        }
        Query termsQuery = new TermsQuery(terms);

        // wrap the terms query and the generic function filter in a BooleanQuery
        // to control evaluation order: the cheap terms query is applied first,
        // the more expensive genericFunctionFilter afterwards
        BooleanQuery.Builder filterClauses = new BooleanQuery.Builder();
        filterClauses.add(termsQuery, BooleanClause.Occur.MUST);
        filterClauses.add(genericFunctionFilter(input, context), BooleanClause.Occur.MUST);
        return filterClauses.build();
    }
    QueryBuilderHelper builder = QueryBuilderHelper.forType(tuple.v1().valueType());
    return builder.eq(columnName, tuple.v2().value());
}
 
Example 3
Source File: ContextPreparer.java    From Elasticsearch with Apache License 2.0
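createMergeNodePageDownstream returns a PageDownstream together with its FlatProjectorChain; v1() and v2() hand both parts to the PageDownstreamContext constructor.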
@Nullable
private PageDownstreamContext pageDownstreamContextForNestedLoop(int nlPhaseId,
                                                                 PreparerContext ctx,
                                                                 byte inputId,
                                                                 @Nullable MergePhase mergePhase,
                                                                 RowReceiver rowReceiver,
                                                                 RamAccountingContext ramAccountingContext) {
    if (mergePhase == null) {
        ctx.phaseIdToRowReceivers.put(toKey(nlPhaseId, inputId), rowReceiver);
        return null;
    }
    Tuple<PageDownstream, FlatProjectorChain> pageDownstreamWithChain = pageDownstreamFactory.createMergeNodePageDownstream(
            mergePhase,
            rowReceiver,
            true,
            ramAccountingContext,
            Optional.of(threadPool.executor(ThreadPool.Names.SEARCH))
    );
    return new PageDownstreamContext(
            pageDownstreamContextLogger,
            nodeName(),
            mergePhase.executionPhaseId(),
            mergePhase.name(),
            pageDownstreamWithChain.v1(),
            StreamerVisitor.streamerFromOutputs(mergePhase),
            ramAccountingContext,
            mergePhase.numUpstreams(),
            pageDownstreamWithChain.v2()
    );
}
 
Example 4
Source File: ExecutionPhasesTask.java    From Elasticsearch with Apache License 2.0
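v1() selects the list of ExecutionSubContexts from the handler tuple, first to size the result list and then to iterate over it.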
private List<PageDownstreamContext> getHandlerPageDownstreamContexts(Tuple<List<ExecutionSubContext>, List<ListenableFuture<Bucket>>> onHandler) {
    final List<PageDownstreamContext> pageDownstreamContexts = new ArrayList<>(onHandler.v1().size());
    for (ExecutionSubContext handlerExecutionSubContext : onHandler.v1()) {
        if (handlerExecutionSubContext instanceof DownstreamExecutionSubContext) {
            PageDownstreamContext pageDownstreamContext = ((DownstreamExecutionSubContext) handlerExecutionSubContext).pageDownstreamContext((byte) 0);
            pageDownstreamContexts.add(pageDownstreamContext);
        }
    }
    return pageDownstreamContexts;
}
 
Example 5
Source File: ScriptService.java    From Elasticsearch with Apache License 2.0
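scriptNameExt(file) yields the script name and file extension as a tuple: v2() resolves the script engine from the extension, while v1() names the script for the cache key and the compiled script.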
@Override
public void onFileInit(Path file) {
    if (logger.isTraceEnabled()) {
        logger.trace("Loading script file : [{}]", file);
    }
    Tuple<String, String> scriptNameExt = scriptNameExt(file);
    if (scriptNameExt != null) {
        ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2());
        if (engineService == null) {
            logger.warn("no script engine found for [{}]", scriptNameExt.v2());
        } else {
            try {
                // we don't know yet what the script will be used for, but if all operations
                // for this lang with file scripts are disabled, it makes no sense to compile and cache it.
                if (isAnyScriptContextEnabled(engineService.types()[0], engineService, ScriptType.FILE)) {
                    logger.info("compiling script file [{}]", file.toAbsolutePath());
                    try (InputStreamReader reader = new InputStreamReader(Files.newInputStream(file), Charsets.UTF_8)) {
                        String script = Streams.copyToString(reader);
                        CacheKey cacheKey = new CacheKey(engineService, scriptNameExt.v1(), null, Collections.<String, String>emptyMap());
                        staticCache.put(cacheKey, new CompiledScript(ScriptType.FILE, scriptNameExt.v1(), engineService.types()[0], engineService.compile(script, Collections.<String, String>emptyMap())));
                        scriptMetrics.onCompilation();
                    }
                } else {
                    logger.warn("skipping compile of script file [{}] as all scripted operations are disabled for file scripts", file.toAbsolutePath());
                }
            } catch (Throwable e) {
                logger.warn("failed to load/compile script [{}]", e, scriptNameExt.v1());
            }
        }
    }
}
 
Example 6
Source File: DocumentMapperParser.java    From Elasticsearch with Apache License 2.0
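extractMapping returns the mapping type and the mapping body as a tuple; v1() and v2() unpack them into the method's locals.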
public DocumentMapper parse(@Nullable String type, CompressedXContent source, String defaultSource) throws MapperParsingException {
    Map<String, Object> mapping = null;
    if (source != null) {
        Map<String, Object> root = XContentHelper.convertToMap(source.compressedReference(), true).v2();
        Tuple<String, Map<String, Object>> t = extractMapping(type, root);
        type = t.v1();
        mapping = t.v2();
    }
    if (mapping == null) {
        mapping = Maps.newHashMap();
    }
    return parse(type, mapping, defaultSource);
}
 
Example 7
Source File: KibanaSeed.java    From openshift-elasticsearch-plugin with Apache License 2.0
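The action tuple carries whether the Kibana index changed (v1(), used at the end to decide on a refresh) and which project should become the default index pattern (v2()).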
public void setDashboards(final OpenshiftRequestContext context, String kibanaVersion, final String projectPrefix) {
    if (!pluginClient.indexExists(defaultKibanaIndex)) {
        LOGGER.debug("Default Kibana index '{}' does not exist. Skipping Kibana seeding", defaultKibanaIndex);
        return;
    }

    LOGGER.debug("Begin setDashboards:  projectPrefix '{}' for user '{}' projects '{}' kibanaIndex '{}'",
            projectPrefix, context.getUser(), context.getProjects(), context.getKibanaIndex());

    // Seed the Kibana user index up front, since newer Kibana versions
    // create it before this plugin starts. The tuple is reassigned below;
    // initialSeedKibanaIndex(context) runs here for its side effect.
    Tuple<Boolean, Project> action = Tuple.tuple(initialSeedKibanaIndex(context), Project.EMPTY);

    if (context.isOperationsUser()) {
        action = seedOperationsIndexPatterns(context, kibanaVersion);
    } else {
        action = seedUsersIndexPatterns(context, kibanaVersion);
    }

    if (action.v2() != null && !Project.EMPTY.equals(action.v2())) {
        boolean defaultIndexPatternExists = pluginClient.documentExists(context.getKibanaIndex(), INDICIES_TYPE, action.v2().getName());
        GetResponse config = pluginClient.getDocument(context.getKibanaIndex(), CONFIG_DOC_TYPE, kibanaVersion);
        if (!defaultIndexPatternExists || !config.isExists() || StringUtils.isBlank(kibanaUtils.getDefaultIndexPattern(config))) {
            setDefaultProject(context.getKibanaIndex(), action.v2(), kibanaVersion);
            action = Tuple.tuple(true, action.v2());
        }
    }

    if (action.v1()) {
        pluginClient.refreshIndices(context.getKibanaIndex());
    }
}
 
Example 8
Source File: UpdateConsumer.java    From Elasticsearch with Apache License 2.0
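Assignments.convert packs the assignment column names and their value symbols into a tuple of parallel arrays; v1() and v2() feed the UpdateProjection.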
private Plan upsertByQuery(UpdateAnalyzedStatement.NestedAnalyzedStatement nestedAnalysis,
                           ConsumerContext consumerContext,
                           DocTableInfo tableInfo,
                           WhereClause whereClause) {

    Symbol versionSymbol = null;
    if (whereClause.hasVersions()) {
        versionSymbol = VersionRewriter.get(whereClause.query());
        whereClause = new WhereClause(whereClause.query(), whereClause.docKeys().orNull(), whereClause.partitions());
    }

    if (!whereClause.noMatch() || !(tableInfo.isPartitioned() && whereClause.partitions().isEmpty())) {
        // for updates, we always need to collect the `_uid`
        Reference uidReference = new Reference(
                new ReferenceInfo(
                        new ReferenceIdent(tableInfo.ident(), "_uid"),
                        RowGranularity.DOC, DataTypes.STRING));

        Tuple<String[], Symbol[]> assignments = Assignments.convert(nestedAnalysis.assignments());

        Long version = null;
        if (versionSymbol != null) {
            version = ValueSymbolVisitor.LONG.process(versionSymbol);
        }

        UpdateProjection updateProjection = new UpdateProjection(
                new InputColumn(0, DataTypes.STRING),
                assignments.v1(),
                assignments.v2(),
                version);

        Planner.Context plannerContext = consumerContext.plannerContext();
        Routing routing = plannerContext.allocateRouting(tableInfo, whereClause, Preference.PRIMARY.type());
        RoutedCollectPhase collectPhase = new RoutedCollectPhase(
                plannerContext.jobId(),
                plannerContext.nextExecutionPhaseId(),
                "collect",
                routing,
                tableInfo.rowGranularity(),
                ImmutableList.<Symbol>of(uidReference),
                ImmutableList.<Projection>of(updateProjection),
                whereClause,
                DistributionInfo.DEFAULT_BROADCAST
        );
        MergePhase mergeNode = MergePhase.localMerge(
                plannerContext.jobId(),
                plannerContext.nextExecutionPhaseId(),
                ImmutableList.<Projection>of(CountAggregation.PARTIAL_COUNT_AGGREGATION_PROJECTION),
                collectPhase.executionNodes().size(),
                collectPhase.outputTypes()
        );
        return new CollectAndMerge(collectPhase, mergeNode);
    } else {
        return null;
    }
}
 
Example 9
Source File: ContextPreparer.java    From Elasticsearch with Apache License 2.0
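As in Example 3, the factory returns a PageDownstream paired with its FlatProjectorChain; v1() and v2() populate the PageDownstreamContext.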
@Override
public ExecutionSubContext visitMergePhase(final MergePhase phase, final PreparerContext context) {
    RamAccountingContext ramAccountingContext = RamAccountingContext.forExecutionPhase(circuitBreaker, phase);

    boolean upstreamOnSameNode = context.opCtx.upstreamsAreOnSameNode(phase.executionPhaseId());

    int pageSize = Paging.getWeightedPageSize(Paging.PAGE_SIZE, 1.0d / phase.executionNodes().size());
    RowReceiver rowReceiver = context.getRowReceiver(phase, pageSize);

    if (upstreamOnSameNode) {
        if (!phase.projections().isEmpty()) {
            ProjectorChainContext projectorChainContext = new ProjectorChainContext(
                    phase.executionPhaseId(),
                    phase.name(),
                    context.jobId,
                    pageDownstreamFactory.projectorFactory(),
                    phase.projections(),
                    rowReceiver,
                    ramAccountingContext);
            context.registerRowReceiver(phase.executionPhaseId(), projectorChainContext.rowReceiver());
            return projectorChainContext;
        }

        context.registerRowReceiver(phase.executionPhaseId(), rowReceiver);
        return null;
    }

    Tuple<PageDownstream, FlatProjectorChain> pageDownstreamProjectorChain =
            pageDownstreamFactory.createMergeNodePageDownstream(
                    phase,
                    rowReceiver,
                    false,
                    ramAccountingContext,
                    // no separate executor because TransportDistributedResultAction already runs in a threadPool
                    Optional.<Executor>absent());

    return new PageDownstreamContext(
            pageDownstreamContextLogger,
            nodeName(),
            phase.executionPhaseId(),
            phase.name(),
            pageDownstreamProjectorChain.v1(),
            DataTypes.getStreamer(phase.inputTypes()),
            ramAccountingContext,
            phase.numUpstreams(),
            pageDownstreamProjectorChain.v2());
}
 
Example 10
Source File: TransportShardUpsertAction.java    From Elasticsearch with Apache License 2.0
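XContentHelper.convertToMap returns the detected XContentType and the parsed source map; v1() preserves the content type so the updated source can be re-serialized in the same format.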
/**
 * Prepares an update request by converting it into an index request.
 * <p/>
 * TODO: detect a NOOP and return an update response if true
 */
@SuppressWarnings("unchecked")
private SourceAndVersion prepareUpdate(DocTableInfo tableInfo,
                                       ShardUpsertRequest request,
                                       ShardUpsertRequest.Item item,
                                       IndexShard indexShard) throws ElasticsearchException {
    final GetResult getResult = indexShard.getService().get(request.type(), item.id(),
            new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME, TTLFieldMapper.NAME},
            true, Versions.MATCH_ANY, VersionType.INTERNAL, FetchSourceContext.FETCH_SOURCE, false);

    if (!getResult.isExists()) {
        throw new DocumentMissingException(new ShardId(request.index(), request.shardId().id()), request.type(), item.id());
    }

    if (getResult.internalSourceRef() == null) {
        // no source, so there is nothing we can do; throw a failure
        throw new DocumentSourceMissingException(new ShardId(request.index(), request.shardId().id()), request.type(), item.id());
    }

    if (item.version() != Versions.MATCH_ANY && item.version() != getResult.getVersion()) {
        throw new VersionConflictEngineException(
                indexShard.shardId(), Constants.DEFAULT_MAPPING_TYPE, item.id(), getResult.getVersion(), item.version());
    }

    Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(getResult.internalSourceRef(), true);
    final XContentType updateSourceContentType = sourceAndContent.v1();
    final Map<String, Object> updatedSourceAsMap = sourceAndContent.v2();

    SymbolToFieldExtractorContext ctx = new SymbolToFieldExtractorContext(functions, item.insertValues());

    Map<String, Object> pathsToUpdate = new LinkedHashMap<>();
    Map<String, Object> updatedGeneratedColumns = new LinkedHashMap<>();
    for (int i = 0; i < request.updateColumns().length; i++) {
        /*
         * NOTE: the mapping isn't applied, so if an insert was done via the ES REST endpoint,
         * the data might be returned in the wrong format (a date as a string instead of a long).
         */
        String columnPath = request.updateColumns()[i];
        Object value = SYMBOL_TO_FIELD_EXTRACTOR.convert(item.updateAssignments()[i], ctx).apply(getResult);
        ReferenceInfo referenceInfo = tableInfo.getReferenceInfo(ColumnIdent.fromPath(columnPath));
        if (referenceInfo instanceof GeneratedReferenceInfo) {
            updatedGeneratedColumns.put(columnPath, value);

        } else {
            pathsToUpdate.put(columnPath, value);
        }
    }

    processGeneratedColumns(tableInfo, pathsToUpdate, updatedGeneratedColumns, request.validateGeneratedColumns(), getResult);

    updateSourceByPaths(updatedSourceAsMap, pathsToUpdate);

    try {
        XContentBuilder builder = XContentFactory.contentBuilder(updateSourceContentType);
        builder.map(updatedSourceAsMap);
        return new SourceAndVersion(builder.bytes(), getResult.getVersion());
    } catch (IOException e) {
        throw new ElasticsearchGenerationException("Failed to generate [" + updatedSourceAsMap + "]", e);
    }
}