Java Code Examples for org.elasticsearch.common.lucene.Lucene

The following examples show how to use org.elasticsearch.common.lucene.Lucene. The examples are extracted from open source projects; the originating project, source file, and license are noted above each example.
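Before the extracted examples, here is a minimal sketch of the simplest helpers on this class (the class name and index path below are illustrative; Lucene.indexExists, Lucene.readSegmentInfos, and Lucene.getNumDocs are the actual utilities, shown in context in Examples 20, 10, and 30):

import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.elasticsearch.common.lucene.Lucene;

import java.nio.file.Paths;

public class LuceneUtilsDemo {
    public static void main(String[] args) throws Exception {
        // the path is hypothetical; point it at any existing Lucene index
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/demo-index"))) {
            if (Lucene.indexExists(dir)) { // true once at least one commit point exists
                SegmentInfos infos = Lucene.readSegmentInfos(dir);
                System.out.println("live docs: " + Lucene.getNumDocs(infos));
            }
        }
    }
}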
Example 1
Source Project: vscode-extension   Source File: test.java    License: MIT License
private static LocalCheckpointTracker createLocalCheckpointTracker(EngineConfig engineConfig, SegmentInfos lastCommittedSegmentInfos,
    Logger logger, Supplier<Searcher> searcherSupplier, BiFunction<Long, Long, LocalCheckpointTracker> localCheckpointTrackerSupplier) {
    try {
        final SequenceNumbers.CommitInfo seqNoStats =
            SequenceNumbers.loadSeqNoInfoFromLuceneCommit(lastCommittedSegmentInfos.userData.entrySet());
        final long maxSeqNo = seqNoStats.maxSeqNo;
        final long localCheckpoint = seqNoStats.localCheckpoint;
        logger.trace("recovered maximum sequence number [{}] and local checkpoint [{}]", maxSeqNo, localCheckpoint);
        final LocalCheckpointTracker tracker = localCheckpointTrackerSupplier.apply(maxSeqNo, localCheckpoint);
        // Operations that are optimized using max_seq_no_of_updates optimization must not be processed twice; otherwise, they will
        // create duplicates in Lucene. To avoid this we check the LocalCheckpointTracker to see if an operation was already processed.
        // Thus, we need to restore the LocalCheckpointTracker bit by bit to ensure the consistency between LocalCheckpointTracker and
        // Lucene index. This is not the only solution since we can bootstrap max_seq_no_of_updates with max_seq_no of the commit to
        // disable the MSU optimization during recovery. Here we prefer to maintain the consistency of LocalCheckpointTracker.
        if (localCheckpoint < maxSeqNo && engineConfig.getIndexSettings().isSoftDeleteEnabled()) {
            try (Searcher searcher = searcherSupplier.get()) {
                Lucene.scanSeqNosInReader(searcher.getDirectoryReader(), localCheckpoint + 1, maxSeqNo,
                    tracker::markSeqNoAsCompleted);
            }
        }
        return tracker;
    } catch (IOException ex) {
        throw new EngineCreationFailureException(engineConfig.getShardId(), "failed to create local checkpoint tracker", ex);
    }
}
 
Example 2
Source Project: vscode-extension   Source File: test.java    License: MIT License
private void refreshLastCommittedSegmentInfos() {
    /*
     * we have to inc-ref the store here since if the engine is closed by a tragic event
     * we don't acquire the write lock and wait until we have exclusive access. This might also
     * dec the store reference which can essentially close the store and unless we can inc the reference
     * we can't use it.
     */
    store.incRef();
    try {
        // reread the last committed segment infos
        lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
    } catch (Exception e) {
        if (isClosed.get() == false) {
            try {
                logger.warn("failed to read latest segment infos on flush", e);
            } catch (Exception inner) {
                e.addSuppressed(inner);
            }
            if (Lucene.isCorruptionException(e)) {
                throw new FlushFailedEngineException(shardId, e);
            }
        }
    } finally {
        store.decRef();
    }
}
 
Example 3
Source Project: Elasticsearch   Source File: RecoveryFileChunkRequest.java    License: Apache License 2.0
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    recoveryId = in.readLong();
    shardId = ShardId.readShardId(in);
    String name = in.readString();
    position = in.readVLong();
    long length = in.readVLong();
    String checksum = in.readOptionalString();
    content = in.readBytesReference();
    Version writtenBy = null;
    String versionString = in.readOptionalString();
    writtenBy = Lucene.parseVersionLenient(versionString, null);
    metaData = new StoreFileMetaData(name, length, checksum, writtenBy);
    lastChunk = in.readBoolean();
    totalTranslogOps = in.readVInt();
    sourceThrottleTimeInNanos = in.readLong();
}
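Lucene.parseVersionLenient above tolerates missing or malformed version strings read from older wire formats. A small sketch of the expected fallback behavior (hedged: assumes the two-argument parseVersionLenient(String, Version) overload used in this example; the class name is illustrative):

import org.apache.lucene.util.Version;
import org.elasticsearch.common.lucene.Lucene;

public class VersionParseDemo {
    public static void main(String[] args) {
        // a well-formed version string parses normally
        Version known = Lucene.parseVersionLenient("5.5.0", Version.LATEST);
        // a null string (e.g. the optional field was absent on the wire) falls back to the default
        Version fallback = Lucene.parseVersionLenient(null, Version.LATEST);
        System.out.println(known + " / " + fallback);
    }
}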
 
Example 4
Source Project: Elasticsearch   Source File: QueryCollector.java    License: Apache License 2.0
@Override
public void collect(int doc) throws IOException {
    final Query query = getQuery(doc);
    if (query == null) {
        // log???
        return;
    }
    Query existsQuery = query;
    if (isNestedDoc) {
        existsQuery = new BooleanQuery.Builder()
            .add(existsQuery, Occur.MUST)
            .add(Queries.newNonNestedFilter(), Occur.FILTER)
            .build();
    }
    // run the query
    try {
        if (Lucene.exists(searcher, existsQuery)) {
            topDocsLeafCollector.collect(doc);
            postMatch(doc);
        }
    } catch (IOException e) {
        logger.warn("[" + current.utf8ToString() + "] failed to execute query", e);
    }
}
 
Example 5
Source Project: Elasticsearch   Source File: QueryCollector.java    License: Apache License 2.0
@Override
public void collect(int doc) throws IOException {
    final Query query = getQuery(doc);
    if (query == null) {
        // log???
        return;
    }
    Query existsQuery = query;
    if (isNestedDoc) {
        existsQuery = new BooleanQuery.Builder()
            .add(existsQuery, Occur.MUST)
            .add(Queries.newNonNestedFilter(), Occur.FILTER)
            .build();
    }
    // run the query
    try {
        if (Lucene.exists(searcher, existsQuery)) {
            counter++;
            postMatch(doc);
        }
    } catch (IOException e) {
        logger.warn("[" + current.utf8ToString() + "] failed to execute query", e);
    }
}
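Both collectors above hinge on Lucene.exists(searcher, query), which only asks whether at least one document matches rather than collecting hits. A self-contained sketch (hedged: assumes a Lucene version with ByteBuffersDirectory, as used in Example 14 below; the class name is illustrative):

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.elasticsearch.common.lucene.Lucene;

public class LuceneExistsDemo {
    public static void main(String[] args) throws Exception {
        try (ByteBuffersDirectory dir = new ByteBuffersDirectory()) {
            try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER))) {
                Document doc = new Document();
                doc.add(new TextField("body", "hello world", Field.Store.NO));
                writer.addDocument(doc);
            }
            try (DirectoryReader reader = DirectoryReader.open(dir)) {
                IndexSearcher searcher = new IndexSearcher(reader);
                // exists short-circuits after the first hit instead of counting all matches
                System.out.println(Lucene.exists(searcher, new TermQuery(new Term("body", "hello"))));  // true
                System.out.println(Lucene.exists(searcher, new TermQuery(new Term("body", "absent")))); // false
            }
        }
    }
}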
 
Example 6
Source Project: Elasticsearch   Source File: FiltersAggregator.java    License: Apache License 2.0
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
        final LeafBucketCollector sub) throws IOException {
    // no need to provide deleted docs to the filter
    final Bits[] bits = new Bits[filters.length];
    for (int i = 0; i < filters.length; ++i) {
        bits[i] = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), filters[i].scorer(ctx));
    }
    return new LeafBucketCollectorBase(sub, null) {
        @Override
        public void collect(int doc, long bucket) throws IOException {
            boolean matched = false;
            for (int i = 0; i < bits.length; i++) {
                if (bits[i].get(doc)) {
                    collectBucket(sub, doc, bucketOrd(bucket, i));
                    matched = true;
                }
            }
            if (showOtherBucket && !matched) {
                collectBucket(sub, doc, bucketOrd(bucket, bits.length));
            }
        }
    };
}
 
Example 7
Source Project: Elasticsearch   Source File: ShardFetchRequest.java    License: Apache License 2.0
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    id = in.readLong();
    size = in.readVInt();
    docIds = new int[size];
    for (int i = 0; i < size; i++) {
        docIds[i] = in.readVInt();
    }
    byte flag = in.readByte();
    if (flag == 1) {
        lastEmittedDoc = Lucene.readFieldDoc(in);
    } else if (flag == 2) {
        lastEmittedDoc = Lucene.readScoreDoc(in);
    } else if (flag != 0) {
        throw new IOException("Unknown flag: " + flag);
    }
}
 
Example 8
Source Project: Elasticsearch   Source File: ShardFetchRequest.java    License: Apache License 2.0
@Override
public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);
    out.writeLong(id);
    out.writeVInt(size);
    for (int i = 0; i < size; i++) {
        out.writeVInt(docIds[i]);
    }
    if (lastEmittedDoc == null) {
        out.writeByte((byte) 0);
    } else if (lastEmittedDoc instanceof FieldDoc) {
        out.writeByte((byte) 1);
        Lucene.writeFieldDoc(out, (FieldDoc) lastEmittedDoc);
    } else {
        out.writeByte((byte) 2);
        Lucene.writeScoreDoc(out, lastEmittedDoc);
    }
}
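Examples 7 and 8 form a matched pair: the flag byte written by writeTo tells readFrom whether to decode a FieldDoc (1), a plain ScoreDoc (2), or nothing (0). A minimal round-trip sketch of the underlying helpers (hedged: assumes Elasticsearch's BytesStreamOutput buffer and its bytes().streamInput() accessor; the class name is illustrative):

import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.lucene.Lucene;

public class ScoreDocRoundTrip {
    public static void main(String[] args) throws Exception {
        BytesStreamOutput out = new BytesStreamOutput();
        Lucene.writeScoreDoc(out, new ScoreDoc(42, 1.5f));
        // read back what was just serialized
        StreamInput in = out.bytes().streamInput();
        ScoreDoc back = Lucene.readScoreDoc(in);
        System.out.println(back.doc + " @ " + back.score); // 42 @ 1.5
    }
}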
 
Example 9
Source Project: Elasticsearch   Source File: Segment.java    License: Apache License 2.0
@Override
public void readFrom(StreamInput in) throws IOException {
    name = in.readString();
    generation = Long.parseLong(name.substring(1), Character.MAX_RADIX);
    committed = in.readBoolean();
    search = in.readBoolean();
    docCount = in.readInt();
    delDocCount = in.readInt();
    sizeInBytes = in.readLong();
    version = Lucene.parseVersionLenient(in.readOptionalString(), null);
    compound = in.readOptionalBoolean();
    mergeId = in.readOptionalString();
    memoryInBytes = in.readLong();
    if (in.readBoolean()) {
        // verbose mode
        ramTree = readRamTree(in);
    }
}
 
Example 10
Source Project: Elasticsearch   Source File: Engine.java    License: Apache License 2.0
/**
 * Read the last segments info from the commit pointed to by the searcher manager
 */
protected static SegmentInfos readLastCommittedSegmentInfos(final SearcherManager sm, final Store store) throws IOException {
    IndexSearcher searcher = sm.acquire();
    try {
        IndexCommit latestCommit = ((DirectoryReader) searcher.getIndexReader()).getIndexCommit();
        return Lucene.readSegmentInfos(latestCommit);
    } catch (IOException e) {
        // Fall back to reading from the store if reading from the commit fails
        try {
            return store.readLastCommittedSegmentsInfo();
        } catch (IOException e2) {
            e2.addSuppressed(e);
            throw e2;
        }
    } finally {
        sm.release(searcher);
    }
}
 
Example 11
Source Project: Elasticsearch   Source File: ParentQuery.java    License: Apache License 2.0
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    DocIdSet childrenDocSet = childrenFilter.getDocIdSet(context, null);
    // we forcefully apply live docs here so that deleted children don't give matching parents
    childrenDocSet = BitsFilteredDocIdSet.wrap(childrenDocSet, context.reader().getLiveDocs());
    if (Lucene.isEmpty(childrenDocSet)) {
        return null;
    }
    final DocIdSetIterator childIterator = childrenDocSet.iterator();
    if (childIterator == null) {
        return null;
    }
    SortedDocValues bytesValues = globalIfd.load(context).getOrdinalsValues(parentType);
    if (bytesValues == null) {
        return null;
    }

    return new ChildScorer(this, parentIdxs, scores, childIterator, bytesValues);
}
 
Example 12
Source Project: Elasticsearch   Source File: ParentConstantScoreQuery.java    License: Apache License 2.0
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    DocIdSet childrenDocIdSet = childrenFilter.getDocIdSet(context, null);
    if (Lucene.isEmpty(childrenDocIdSet)) {
        return null;
    }

    SortedDocValues globalValues = globalIfd.load(context).getOrdinalsValues(parentType);
    if (globalValues != null) {
        // we forcefully apply live docs here so that deleted children don't give matching parents
        childrenDocIdSet = BitsFilteredDocIdSet.wrap(childrenDocIdSet, context.reader().getLiveDocs());
        DocIdSetIterator innerIterator = childrenDocIdSet.iterator();
        if (innerIterator != null) {
            ChildrenDocIdIterator childrenDocIdIterator = new ChildrenDocIdIterator(
                    innerIterator, parentOrds, globalValues
            );
            return ConstantScorer.create(childrenDocIdIterator, this, queryWeight);
        }
    }
    return null;
}
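Both parent/child scorers above bail out early via Lucene.isEmpty(docIdSet) and still null-check the iterator afterwards, since isEmpty only screens out the trivially empty sets. A tiny sketch of the null-handling these call sites rely on (hedged: assumes a null DocIdSet counts as empty, which is what the early returns above imply; the class name is illustrative):

import org.apache.lucene.search.DocIdSet;
import org.elasticsearch.common.lucene.Lucene;

public class IsEmptyDemo {
    public static void main(String[] args) {
        DocIdSet none = null;
        // a null set counts as empty, so scorer(...) can return early without its own null check
        System.out.println(Lucene.isEmpty(none)); // expected: true
    }
}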
 
Example 13
Source Project: Elasticsearch   Source File: FieldMapper.java    License: Apache License 2.0
protected void setupFieldType(BuilderContext context) {
    fieldType.setNames(buildNames(context));
    if (fieldType.indexAnalyzer() == null && fieldType.tokenized() == false && fieldType.indexOptions() != IndexOptions.NONE) {
        fieldType.setIndexAnalyzer(Lucene.KEYWORD_ANALYZER);
        fieldType.setSearchAnalyzer(Lucene.KEYWORD_ANALYZER);
    }
    if (fieldDataSettings != null) {
        Settings settings = Settings.builder().put(fieldType.fieldDataType().getSettings()).put(fieldDataSettings).build();
        fieldType.setFieldDataType(new FieldDataType(fieldType.fieldDataType().getType(), settings));
    }
    boolean defaultDocValues = false; // pre 2.0
    if (context.indexCreatedVersion().onOrAfter(Version.V_2_0_0_beta1)) {
        defaultDocValues = fieldType.tokenized() == false && fieldType.indexOptions() != IndexOptions.NONE;
    }
    // backcompat for "fielddata: format: docvalues" for now...
    boolean fieldDataDocValues = fieldType.fieldDataType() != null
        && FieldDataType.DOC_VALUES_FORMAT_VALUE.equals(fieldType.fieldDataType().getFormat(context.indexSettings()));
    if (fieldDataDocValues && docValuesSet && fieldType.hasDocValues() == false) {
        // this forces the doc_values setting to be written, so fielddata does not mask the original setting
        defaultDocValues = true;
    }
    defaultFieldType.setHasDocValues(defaultDocValues);
    if (docValuesSet == false) {
        fieldType.setHasDocValues(defaultDocValues || fieldDataDocValues);
    }
}
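Lucene.KEYWORD_ANALYZER, assigned above for untokenized fields, emits the whole input as a single token. A quick illustration (hedged: plain Lucene TokenStream consumption, nothing Elasticsearch-specific beyond the shared analyzer constant; the class name is illustrative):

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.elasticsearch.common.lucene.Lucene;

public class KeywordAnalyzerDemo {
    public static void main(String[] args) throws Exception {
        try (TokenStream ts = Lucene.KEYWORD_ANALYZER.tokenStream("field", "New York")) {
            CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
            ts.reset();
            while (ts.incrementToken()) {
                System.out.println(term); // prints "New York" once: the input is not split
            }
            ts.end();
        }
    }
}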
 
Example 14
@Before
public void setupIndex() throws Exception {
    dir = new ByteBuffersDirectory();

    try(IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER))) {
        for (int i = 0; i < docs.length; i++) {
            Document doc = new Document();
            doc.add(new Field("_id", Integer.toString(i + 1), StoredField.TYPE));
            doc.add(newTextField("text", docs[i], Field.Store.YES));
            indexWriter.addDocument(doc);
        }
    }

    reader = DirectoryReader.open(dir);
    searcher = new IndexSearcher(reader);
}
 
Example 15
@Inject
public AnnotationIndicesAnalysis(Settings settings,
                                 IndicesAnalysisService indicesAnalysisService) {
    super(settings);
    indicesAnalysisService.analyzerProviderFactories().put(
        "default",
        new PreBuiltAnalyzerProviderFactory("default",
            AnalyzerScope.INDICES,
            new AnnotationAnalyzer(Lucene.ANALYZER_VERSION)));

    indicesAnalysisService.tokenFilterFactories().put("annotation_filter",
        new PreBuiltTokenFilterFactoryFactory(new TokenFilterFactory() {
            @Override
            public String name() {
                return "annotation_filter";
            }

            @Override
            public TokenStream create(TokenStream tokenStream) {
                return new InlineAnnotationFilter(tokenStream);
            }
        }));
}
 
Example 16
Source Project: crate   Source File: Engine.java    License: Apache License 2.0
protected final DocsStats docsStats(IndexReader indexReader) {
    long numDocs = 0;
    long numDeletedDocs = 0;
    long sizeInBytes = 0;
    // we don't wait for pending refreshes here since it's a stats call; instead we just mark it as
    // accessed, which will cause the next scheduled refresh to go through and refresh the stats as well
    for (LeafReaderContext readerContext : indexReader.leaves()) {
        // we go on the segment level here to get accurate numbers
        final SegmentReader segmentReader = Lucene.segmentReader(readerContext.reader());
        SegmentCommitInfo info = segmentReader.getSegmentInfo();
        numDocs += readerContext.reader().numDocs();
        numDeletedDocs += readerContext.reader().numDeletedDocs();
        try {
            sizeInBytes += info.sizeInBytes();
        } catch (IOException e) {
            logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
        }
    }
    return new DocsStats(numDocs, numDeletedDocs, sizeInBytes);
}
 
Example 17
Source Project: crate   Source File: InternalEngine.java    License: Apache License 2.0
private void refreshLastCommittedSegmentInfos() {
    /*
     * we have to inc-ref the store here since if the engine is closed by a tragic event
     * we don't acquire the write lock and wait until we have exclusive access. This might also
     * dec the store reference which can essentially close the store and unless we can inc the reference
     * we can't use it.
     */
    store.incRef();
    try {
        // reread the last committed segment infos
        lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
    } catch (Exception e) {
        if (isClosed.get() == false) {
            try {
                logger.warn("failed to read latest segment infos on flush", e);
            } catch (Exception inner) {
                e.addSuppressed(inner);
            }
            if (Lucene.isCorruptionException(e)) {
                throw new FlushFailedEngineException(shardId, e);
            }
        }
    } finally {
        store.decRef();
    }
}
 
Example 18
Source Project: crate   Source File: KeywordFieldMapper.java    License: Apache License 2.0
@Override
protected BytesRef indexedValueForSearch(Object value) {
    if (searchAnalyzer() == Lucene.KEYWORD_ANALYZER) {
        // keyword analyzer with the default attribute source, which encodes terms using UTF8;
        // in that case we skip normalization, which can be slow when many terms need to be
        // normalized (e.g. a large terms query) since Analyzer.normalize involves things like
        // creating attributes through reflection.
        // This branch is taken whenever a normalizer is NOT configured.
        return super.indexedValueForSearch(value);
    }

    if (value == null) {
        return null;
    }
    if (value instanceof BytesRef) {
        value = ((BytesRef) value).utf8ToString();
    }
    return searchAnalyzer().normalize(name(), value.toString());
}
 
Example 19
Source Project: crate   Source File: RestoreService.java    License: Apache License 2.0
@Override
public void shardFailed(ShardRouting failedShard, UnassignedInfo unassignedInfo) {
    if (failedShard.primary() && failedShard.initializing()) {
        RecoverySource recoverySource = failedShard.recoverySource();
        if (recoverySource.getType() == RecoverySource.Type.SNAPSHOT) {
            Snapshot snapshot = ((SnapshotRecoverySource) recoverySource).snapshot();
            // mark the restore entry for this shard as failed when the failure is due to file corruption. There is
            // no need to wait on retries to restore this shard on another node if the snapshot files are corrupt.
            // In the case where a node just left or crashed, however, we only want to acknowledge the restore
            // operation once the shard has been successfully restored on another node.
            if (unassignedInfo.getFailure() != null && Lucene.isCorruptionException(unassignedInfo.getFailure().getCause())) {
                changes(snapshot).shards.put(failedShard.shardId(), new ShardRestoreStatus(failedShard.currentNodeId(),
                    RestoreInProgress.State.FAILURE, unassignedInfo.getFailure().getCause().getMessage()));
            }
        }
    }
}
 
Example 20
Source Project: crate   Source File: EngineTestCase.java    License: Apache License 2.0
protected InternalEngine createEngine(@Nullable IndexWriterFactory indexWriterFactory,
                                      @Nullable BiFunction<Long, Long, LocalCheckpointTracker> localCheckpointTrackerSupplier,
                                      @Nullable ToLongBiFunction<Engine, Engine.Operation> seqNoForOperation,
                                      EngineConfig config) throws IOException {
    final Store store = config.getStore();
    final Directory directory = store.directory();
    if (Lucene.indexExists(directory) == false) {
        store.createEmpty();
        final String translogUuid = Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(),
            SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
        store.associateIndexWithNewTranslog(translogUuid);

    }
    InternalEngine internalEngine = createInternalEngine(indexWriterFactory, localCheckpointTrackerSupplier, seqNoForOperation, config);
    internalEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
    return internalEngine;
}
 
Example 21
Source Project: vscode-extension   Source File: test.java    License: MIT License
private IndexWriterConfig getIndexWriterConfig() {
    final IndexWriterConfig iwc = new IndexWriterConfig(engineConfig.getAnalyzer());
    iwc.setCommitOnClose(false); // we by default don't commit on close
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    iwc.setIndexDeletionPolicy(combinedDeletionPolicy);
    // with tests.verbose, lucene sets this up: plumb to align with filesystem stream
    boolean verbose = false;
    try {
        verbose = Boolean.parseBoolean(System.getProperty("tests.verbose"));
    } catch (Exception ignore) {
    }
    iwc.setInfoStream(verbose ? InfoStream.getDefault() : new LoggerInfoStream(logger));
    iwc.setMergeScheduler(mergeScheduler);
    // Give us the opportunity to upgrade old segments while performing
    // background merges
    MergePolicy mergePolicy = config().getMergePolicy();
    // always configure soft-deletes field so an engine with soft-deletes disabled can open a Lucene index with soft-deletes.
    iwc.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD);
    if (softDeleteEnabled) {
        mergePolicy = new RecoverySourcePruneMergePolicy(SourceFieldMapper.RECOVERY_SOURCE_NAME, softDeletesPolicy::getRetentionQuery,
            new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, softDeletesPolicy::getRetentionQuery, mergePolicy));
    }
    iwc.setMergePolicy(new ElasticsearchMergePolicy(mergePolicy));
    iwc.setSimilarity(engineConfig.getSimilarity());
    iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().getMbFrac());
    iwc.setCodec(engineConfig.getCodec());
    iwc.setUseCompoundFile(true); // always use compound on flush - reduces # of file-handles on refresh
    if (config().getIndexSort() != null) {
        iwc.setIndexSort(config().getIndexSort());
    }
    return iwc;
}
 
Example 22
Source Project: Elasticsearch   Source File: QueryCollector.java    License: Apache License 2.0
@Override
public void collect(int doc) throws IOException {
    final Query query = getQuery(doc);
    if (query == null) {
        // log???
        return;
    }
    Query existsQuery = query;
    if (isNestedDoc) {
        existsQuery = new BooleanQuery.Builder()
            .add(existsQuery, Occur.MUST)
            .add(Queries.newNonNestedFilter(), Occur.FILTER)
            .build();
    }
    // run the query
    try {
        if (context.highlight() != null) {
            context.parsedQuery(new ParsedQuery(query));
            context.hitContext().cache().clear();
        }

        if (Lucene.exists(searcher, existsQuery)) {
            if (!limit || counter < size) {
                matches.add(BytesRef.deepCopyOf(current));
                if (context.highlight() != null) {
                    highlightPhase.hitExecute(context, context.hitContext());
                    hls.add(context.hitContext().hit().getHighlightFields());
                }
            }
            counter++;
            postMatch(doc);
        }
    } catch (IOException e) {
        logger.warn("[" + current.utf8ToString() + "] failed to execute query", e);
    }
}
 
Example 23
Source Project: Elasticsearch   Source File: QueryCollector.java    License: Apache License 2.0
@Override
public void collect(int doc) throws IOException {
    final Query query = getQuery(doc);
    if (query == null) {
        // log???
        return;
    }
    Query existsQuery = query;
    if (isNestedDoc) {
        existsQuery = new BooleanQuery.Builder()
            .add(existsQuery, Occur.MUST)
            .add(Queries.newNonNestedFilter(), Occur.FILTER)
            .build();
    }
    // run the query
    try {
        if (context.highlight() != null) {
            context.parsedQuery(new ParsedQuery(query));
            context.hitContext().cache().clear();
        }
        if (Lucene.exists(searcher, existsQuery)) {
            if (!limit || counter < size) {
                matches.add(BytesRef.deepCopyOf(current));
                scores.add(scorer.score());
                if (context.highlight() != null) {
                    highlightPhase.hitExecute(context, context.hitContext());
                    hls.add(context.hitContext().hit().getHighlightFields());
                }
            }
            counter++;
            postMatch(doc);
        }
    } catch (IOException e) {
        logger.warn("[" + current.utf8ToString() + "] failed to execute query", e);
    }
}
 
Example 24
Source Project: Elasticsearch   Source File: InternalTopHits.java    License: Apache License 2.0
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
    out.writeVInt(from);
    out.writeVInt(size);
    Lucene.writeTopDocs(out, topDocs);
    searchHits.writeTo(out);
}
 
Example 25
Source Project: Elasticsearch   Source File: FilterAggregator.java    License: Apache License 2.0
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
        final LeafBucketCollector sub) throws IOException {
    // no need to provide deleted docs to the filter
    final Bits bits = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), filter.scorer(ctx));
    return new LeafBucketCollectorBase(sub, null) {
        @Override
        public void collect(int doc, long bucket) throws IOException {
            if (bits.get(doc)) {
                collectBucket(sub, doc, bucket);
            }
        }
    };
}
 
Example 26
Source Project: Elasticsearch   Source File: InnerHitsContext.java    License: Apache License 2.0
@Override
public TopDocs topDocs(SearchContext context, FetchSubPhase.HitContext hitContext) throws IOException {
    Query rawParentFilter;
    if (parentObjectMapper == null) {
        rawParentFilter = Queries.newNonNestedFilter();
    } else {
        rawParentFilter = parentObjectMapper.nestedTypeFilter();
    }
    BitSetProducer parentFilter = context.bitsetFilterCache().getBitSetProducer(rawParentFilter);
    Query childFilter = childObjectMapper.nestedTypeFilter();
    Query q = Queries.filtered(query.query(), new NestedChildrenQuery(parentFilter, childFilter, hitContext));

    if (size() == 0) {
        return new TopDocs(context.searcher().count(q), Lucene.EMPTY_SCORE_DOCS, 0);
    } else {
        int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc());
        TopDocsCollector topDocsCollector;
        if (sort() != null) {
            try {
                topDocsCollector = TopFieldCollector.create(sort(), topN, true, trackScores(), trackScores());
            } catch (IOException e) {
                throw ExceptionsHelper.convertToElastic(e);
            }
        } else {
            topDocsCollector = TopScoreDocCollector.create(topN);
        }
        try {
            context.searcher().search(q, topDocsCollector);
        } finally {
            clearReleasables(Lifetime.COLLECTION);
        }
        return topDocsCollector.topDocs(from(), size());
    }
}
 
Example 27
Source Project: Elasticsearch   Source File: SearchService.java    License: Apache License 2.0
private void processFailure(SearchContext context, Throwable t) {
    freeContext(context.id());
    try {
        if (Lucene.isCorruptionException(t)) {
            context.indexShard().failShard("search execution corruption failure", t);
        }
    } catch (Throwable e) {
        logger.warn("failed to process shard failure to (potentially) send back shard failure on corruption", e);
    }
}
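Lucene.isCorruptionException recurs throughout these examples (Examples 2, 19, 28, and 29 as well): it unwraps the cause chain and reports whether a Lucene corruption-type exception is buried anywhere inside. A small sketch (hedged: assumes CorruptIndexException's (message, resourceDescription) constructor; the class name is illustrative):

import org.apache.lucene.index.CorruptIndexException;
import org.elasticsearch.common.lucene.Lucene;

public class CorruptionCheckDemo {
    public static void main(String[] args) {
        Exception wrapped = new RuntimeException(
            new CorruptIndexException("checksum mismatch", "segments_1"));
        // the corruption exception is found even when nested inside another exception
        System.out.println(Lucene.isCorruptionException(wrapped));                      // true
        System.out.println(Lucene.isCorruptionException(new RuntimeException("boom"))); // false
    }
}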
 
Example 28
Source Project: Elasticsearch   Source File: Engine.java    License: Apache License 2.0
/**
 * fail engine due to some error. the engine will also be closed.
 * The underlying store is marked corrupted iff failure is caused by index corruption
 */
public void failEngine(String reason, @Nullable Throwable failure) {
    if (failEngineLock.tryLock()) {
        store.incRef();
        try {
            try {
                // we just go and close this engine - no way to recover
                closeNoLock("engine failed on: [" + reason + "]");
            } finally {
                if (failedEngine != null) {
                    logger.debug("tried to fail engine but engine is already failed. ignoring. [{}]", failure, reason);
                    return;
                }
                logger.warn("failed engine [{}]", failure, reason);
                // we must set a failure exception, generate one if not supplied
                failedEngine = (failure != null) ? failure : new IllegalStateException(reason);
                // we first mark the store as corrupted before we notify any listeners
                // this must happen first otherwise we might try to reallocate so quickly
                // on the same node that we don't see the corrupted marker file when
                // the shard is initializing
                if (Lucene.isCorruptionException(failure)) {
                    try {
                        store.markStoreCorrupted(new IOException("failed engine (reason: [" + reason + "])", ExceptionsHelper.unwrapCorruption(failure)));
                    } catch (IOException e) {
                        logger.warn("Couldn't mark store corrupted", e);
                    }
                }
                failedEngineListener.onFailedEngine(shardId, reason, failure);
            }
        } catch (Throwable t) {
            // don't bubble up these exceptions up
            logger.warn("failEngine threw exception", t);
        } finally {
            store.decRef();
        }
    } else {
        logger.debug("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", failure, reason);
    }
}
 
Example 29
Source Project: Elasticsearch   Source File: Engine.java    License: Apache License 2.0
/** Check whether the engine should be failed */
protected boolean maybeFailEngine(String source, Throwable t) {
    if (Lucene.isCorruptionException(t)) {
        failEngine("corrupt file (source: [" + source + "])", t);
        return true;
    } else if (ExceptionsHelper.isOOM(t)) {
        failEngine("out of memory (source: [" + source + "])", t);
        return true;
    } else if (t instanceof RecoveryFromDistributedLogFailedException) {
        failEngine("recovery from distributed log service failed", t);
        return true;
    }
    return false;
}
 
Example 30
Source Project: Elasticsearch   Source File: CommitStats.java    License: Apache License 2.0
public CommitStats(SegmentInfos segmentInfos) {
    // clone the map to protect against concurrent changes
    userData = MapBuilder.<String, String>newMapBuilder().putAll(segmentInfos.getUserData()).immutableMap();
    // lucene calls the current generation the "last generation"
    generation = segmentInfos.getLastGeneration();
    if (segmentInfos.getId() != null) { // id is only written starting with Lucene 5.0
        id = Base64.encodeBytes(segmentInfos.getId());
    }
    numDocs = Lucene.getNumDocs(segmentInfos);
}