org.apache.lucene.util.Version Java Examples
The following examples show how to use org.apache.lucene.util.Version.
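Before the project examples, here is a minimal, self-contained sketch of the core Version API itself. This snippet was written for this page (the class name VersionBasics is hypothetical, not taken from any project below) and assumes a Lucene 8.x jar on the classpath, where per-release constants such as LUCENE_8_0_0 are still present:

import java.text.ParseException;

import org.apache.lucene.util.Version;

public class VersionBasics {

    public static void main(String[] args) throws ParseException {
        // Version.LATEST is the newest version known to the Lucene jar in use.
        Version latest = Version.LATEST;
        System.out.println("latest = " + latest + " (major=" + latest.major + ")");

        // parse() accepts a strict "major.minor.bugfix" string and throws
        // ParseException for anything else.
        Version strict = Version.parse("8.6.2");

        // parseLeniently() also accepts constant-style names such as "LUCENE_8_6_2".
        Version lenient = Version.parseLeniently("LUCENE_8_6_2");

        // onOrAfter() is the usual way to gate version-dependent behavior,
        // as several of the token filter factory examples below do.
        if (strict.onOrAfter(Version.LUCENE_8_0_0)) {
            System.out.println(strict + " is on or after 8.0.0");
        }
        System.out.println("lenient = " + lenient);
    }
}

Most of the examples below simply pass a Version constant to an analyzer or IndexWriterConfig constructor. Which constants exist (LUCENE_30, LUCENE_43, LUCENE_CURRENT, and so on) depends on the Lucene release each project compiles against, so the snippets are not interchangeable across versions.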
Example #1
Source File: IndexWriterWorker.java From olat with Apache License 2.0
/**
 * @param id
 *            Unique index ID. Is used to generate unique directory name.
 * @param tempIndexDir
 *            Absolute directory-path where the temporary index can be generated.
 * @param fullIndexer
 *            Reference to full-index
 */
public IndexWriterWorker(final int id, final File tempIndexDir, final OlatFullIndexer fullIndexer) {
    this.id = id;
    this.indexPartDir = new File(tempIndexDir, "part" + id);
    this.fullIndexer = fullIndexer;
    try {
        final Directory luceneIndexPartDir = FSDirectory.open(indexPartDir);
        indexWriter = new IndexWriter(luceneIndexPartDir, new StandardAnalyzer(Version.LUCENE_30), true,
                IndexWriter.MaxFieldLength.UNLIMITED);
        indexWriter.setMergeFactor(fullIndexer.getSearchModuleConfig().getIndexerWriterMergeFactor());
        log.info("IndexWriter config MergeFactor=" + indexWriter.getMergeFactor());
        indexWriter.setRAMBufferSizeMB(fullIndexer.getSearchModuleConfig().getIndexerWriterRamBuffer());
        log.info("IndexWriter config RAMBufferSizeMB=" + indexWriter.getRAMBufferSizeMB());
        indexWriter.setUseCompoundFile(false);
    } catch (final IOException e) {
        log.warn("Can not create IndexWriter");
    }
}
Example #2
Source File: FastHdfsKeyValueDirectoryTest.java From incubator-retired-blur with Apache License 2.0
@Test
public void testMultipleWritersOpenOnSameDirectory() throws IOException {
    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_43, new KeywordAnalyzer());
    FastHdfsKeyValueDirectory directory = new FastHdfsKeyValueDirectory(false, _timer, _configuration,
            new Path(_path, "test_multiple"));
    IndexWriter writer1 = new IndexWriter(directory, config.clone());
    addDoc(writer1, getDoc(1));
    IndexWriter writer2 = new IndexWriter(directory, config.clone());
    addDoc(writer2, getDoc(2));
    writer1.close();
    writer2.close();

    DirectoryReader reader = DirectoryReader.open(directory);
    int maxDoc = reader.maxDoc();
    assertEquals(1, maxDoc);
    Document document = reader.document(0);
    assertEquals("2", document.get("id"));
    reader.close();
}
Example #3
Source File: RecoveryFileChunkRequest.java From Elasticsearch with Apache License 2.0
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    recoveryId = in.readLong();
    shardId = ShardId.readShardId(in);
    String name = in.readString();
    position = in.readVLong();
    long length = in.readVLong();
    String checksum = in.readOptionalString();
    content = in.readBytesReference();
    Version writtenBy = null;
    String versionString = in.readOptionalString();
    writtenBy = Lucene.parseVersionLenient(versionString, null);
    metaData = new StoreFileMetaData(name, length, checksum, writtenBy);
    lastChunk = in.readBoolean();
    totalTranslogOps = in.readVInt();
    sourceThrottleTimeInNanos = in.readLong();
}
Example #4
Source File: TestIndexWriterOnOldIndex.java From lucene-solr with Apache License 2.0
public void testOpenModeAndCreatedVersion() throws IOException {
    assumeTrue("Reenable when 8.0 is released", false);
    InputStream resource = getClass().getResourceAsStream("index.single-empty-doc.8.0.0.zip");
    assertNotNull(resource);
    Path path = createTempDir();
    TestUtil.unzip(resource, path);
    Directory dir = newFSDirectory(path);
    for (OpenMode openMode : OpenMode.values()) {
        Directory tmpDir = newDirectory(dir);
        assertEquals(7 /* 7.0.0 */, SegmentInfos.readLatestCommit(tmpDir).getIndexCreatedVersionMajor());
        IndexWriter w = new IndexWriter(tmpDir, newIndexWriterConfig().setOpenMode(openMode));
        w.commit();
        w.close();
        switch (openMode) {
            case CREATE:
                assertEquals(Version.LATEST.major, SegmentInfos.readLatestCommit(tmpDir).getIndexCreatedVersionMajor());
                break;
            default:
                assertEquals(7 /* 7.0.0 */, SegmentInfos.readLatestCommit(tmpDir).getIndexCreatedVersionMajor());
        }
        tmpDir.close();
    }
    dir.close();
}
Example #5
Source File: BaseSegmentInfoFormatTestCase.java From lucene-solr with Apache License 2.0
/** Test versions */
public void testVersions() throws Exception {
    Codec codec = getCodec();
    for (Version v : getVersions()) {
        for (Version minV : new Version[] { v, null }) {
            Directory dir = newDirectory();
            byte id[] = StringHelper.randomId();
            SegmentInfo info = new SegmentInfo(dir, v, minV, "_123", 1, false, codec,
                    Collections.<String,String>emptyMap(), id, Collections.emptyMap(), null);
            info.setFiles(Collections.<String>emptySet());
            codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT);
            SegmentInfo info2 = codec.segmentInfoFormat().read(dir, "_123", id, IOContext.DEFAULT);
            assertEquals(info2.getVersion(), v);
            if (supportsMinVersion()) {
                assertEquals(info2.getMinVersion(), minV);
            } else {
                assertEquals(info2.getMinVersion(), null);
            }
            dir.close();
        }
    }
}
Example #6
Source File: AutoCompleter.java From webdsl with Apache License 2.0
/**
 * Use a different index as the auto completer index or re-open
 * the existing index if <code>autocompleteIndex</code> is the same value
 * as given in the constructor.
 * @param autocompleteIndexDir the autocomplete directory to use
 * @throws AlreadyClosedException if the Autocompleter is already closed
 * @throws IOException if the autocompleter cannot open the directory
 */
// TODO: we should make this final as it is called in the constructor
public void setAutoCompleteIndex(Directory autocompleteIndexDir) throws IOException {
    // this could be the same directory as the current autocompleteIndex
    // modifications to the directory should be synchronized
    synchronized (modifyCurrentIndexLock) {
        ensureOpen();
        if (!IndexReader.indexExists(autocompleteIndexDir)) {
            IndexWriter writer = new IndexWriter(autocompleteIndexDir,
                    new IndexWriterConfig(Version.LUCENE_CURRENT, new WhitespaceAnalyzer(Version.LUCENE_CURRENT)));
            writer.close();
        }
        swapSearcher(autocompleteIndexDir);
    }
}
Example #7
Source File: LuceneHelper.java From dexter with Apache License 2.0
/**
 * Opens or creates a Lucene index in the given directory.
 *
 * @param wikiIdtToLuceneIdSerialization
 *            - the file containing the serialized mapping between wiki-id
 *            and Lucene document ids
 * @param indexPath
 *            - the path of the directory with the Lucene index
 */
protected LuceneHelper(File wikiIdtToLuceneIdSerialization, File indexPath) {
    logger.info("opening lucene index in folder {}", indexPath);
    config = new IndexWriterConfig(Version.LUCENE_41, ANALYZER);
    this.wikiIdtToLuceneIdSerialization = wikiIdtToLuceneIdSerialization;
    BooleanQuery.setMaxClauseCount(1000);

    try {
        index = FSDirectory.open(indexPath);
        // writer.commit();
    } catch (Exception e) {
        logger.error("opening the index: {}", e.toString());
        System.exit(1);
    }

    summarizer = new ArticleSummarizer();
    writer = getWriter();
    collectionSize = writer.numDocs();
    wikiIdToLuceneId = Collections.emptyMap();
}
Example #8
Source File: AlfrescoSolrDataModel.java From SearchServices with GNU Lesser General Public License v3.0
public Solr4QueryParser getLuceneQueryParser(SearchParameters searchParameters, SolrQueryRequest req,
        FTSQueryParser.RerankPhase rerankPhase) {
    Analyzer analyzer = req.getSchema().getQueryAnalyzer();
    Solr4QueryParser parser = new Solr4QueryParser(req, Version.LATEST, searchParameters.getDefaultFieldName(),
            analyzer, rerankPhase);
    parser.setNamespacePrefixResolver(namespaceDAO);
    parser.setDictionaryService(getDictionaryService(CMISStrictDictionaryService.DEFAULT));
    parser.setTenantService(tenantService);
    parser.setSearchParameters(searchParameters);
    parser.setAllowLeadingWildcard(true);

    Properties props = new CoreDescriptorDecorator(req.getCore().getCoreDescriptor()).getProperties();
    int topTermSpanRewriteLimit = Integer.parseInt(props.getProperty("alfresco.topTermSpanRewriteLimit", "1000"));
    parser.setTopTermSpanRewriteLimit(topTermSpanRewriteLimit);

    return parser;
}
Example #9
Source File: ContextAnalyzerIndex.java From modernmt with Apache License 2.0
public ContextAnalyzerIndex(Directory directory, Rescorer rescorer) throws IOException {
    this.indexDirectory = directory;
    this.analyzer = new CorpusAnalyzer();
    this.rescorer = rescorer;

    // Index writer setup
    IndexWriterConfig indexConfig = new IndexWriterConfig(Version.LUCENE_4_10_4, this.analyzer);
    indexConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    indexConfig.setSimilarity(new DefaultSimilarity() {
        @Override
        public float lengthNorm(FieldInvertState state) {
            return 1.f;
        }
    });

    this.indexWriter = new IndexWriter(this.indexDirectory, indexConfig);

    // Ensure index exists
    if (!DirectoryReader.indexExists(directory))
        this.indexWriter.commit();
}
Example #10
Source File: Blur024CodecTest.java From incubator-retired-blur with Apache License 2.0
@Test
public void testDocValuesFormat() throws IOException {
    RAMDirectory directory = new RAMDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new WhitespaceAnalyzer(Version.LUCENE_43));
    conf.setCodec(new Blur024Codec());
    IndexWriter writer = new IndexWriter(directory, conf);

    Document doc = new Document();
    doc.add(new StringField("f", "v", Store.YES));
    doc.add(new SortedDocValuesField("f", new BytesRef("v")));
    writer.addDocument(doc);
    writer.close();

    DirectoryReader reader = DirectoryReader.open(directory);
    AtomicReaderContext context = reader.leaves().get(0);
    AtomicReader atomicReader = context.reader();
    SortedDocValues sortedDocValues = atomicReader.getSortedDocValues("f");
    assertTrue(sortedDocValues.getClass().getName().startsWith(DiskDocValuesProducer.class.getName()));
    reader.close();
}
Example #11
Source File: SearchInputController.java From olat with Apache License 2.0
protected Set<String> getHighlightWords(final String searchString) {
    try {
        final Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
        final TokenStream stream = analyzer.tokenStream("content", new StringReader(searchString));
        final TermAttribute termAtt = stream.addAttribute(TermAttribute.class);
        for (boolean next = stream.incrementToken(); next; next = stream.incrementToken()) {
            final String term = termAtt.term();
            if (log.isDebugEnabled()) {
                log.debug(term);
            }
        }
    } catch (final IOException e) {
        log.error("", e);
    }
    return null;
}
Example #12
Source File: TestSegmentTermDocs.java From lucene-solr with Apache License 2.0
public void testTermDocs() throws IOException {
    // After adding the document, we should be able to read it back in
    SegmentReader reader = new SegmentReader(info, Version.LATEST.major, newIOContext(random()));
    assertTrue(reader != null);

    TermsEnum terms = reader.terms(DocHelper.TEXT_FIELD_2_KEY).iterator();
    terms.seekCeil(new BytesRef("field"));
    PostingsEnum termDocs = TestUtil.docs(random(), terms, null, PostingsEnum.FREQS);
    if (termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
        int docId = termDocs.docID();
        assertTrue(docId == 0);
        int freq = termDocs.freq();
        assertTrue(freq == 3);
    }
    reader.close();
}
Example #13
Source File: SegmentInfo.java From lucene-solr with Apache License 2.0
/**
 * Construct a new complete SegmentInfo instance from input.
 * <p>Note: this is public only to allow access from
 * the codecs package.</p>
 */
public SegmentInfo(Directory dir, Version version, Version minVersion, String name, int maxDoc,
        boolean isCompoundFile, Codec codec, Map<String,String> diagnostics, byte[] id,
        Map<String,String> attributes, Sort indexSort) {
    assert !(dir instanceof TrackingDirectoryWrapper);
    this.dir = Objects.requireNonNull(dir);
    this.version = Objects.requireNonNull(version);
    this.minVersion = minVersion;
    this.name = Objects.requireNonNull(name);
    this.maxDoc = maxDoc;
    this.isCompoundFile = isCompoundFile;
    this.codec = codec;
    this.diagnostics = Map.copyOf(Objects.requireNonNull(diagnostics));
    this.id = id;
    if (id.length != StringHelper.ID_LENGTH) {
        throw new IllegalArgumentException("invalid id: " + Arrays.toString(id));
    }
    this.attributes = Map.copyOf(Objects.requireNonNull(attributes));
    this.indexSort = indexSort;
}
Example #14
Source File: LuceneContentSvcImpl.java From Lottery with GNU General Public License v2.0
@Transactional(readOnly = true)
public void createIndex(Content content, Directory dir) throws IOException {
    boolean exist = IndexReader.indexExists(dir);
    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(Version.LUCENE_30), !exist,
            IndexWriter.MaxFieldLength.LIMITED);
    try {
        writer.addDocument(LuceneContent.createDocument(content));
    } finally {
        writer.close();
    }
}
Example #15
Source File: AutoCompleter.java From webdsl with Apache License 2.0
/**
 * Removes all terms from the auto complete index.
 * @throws IOException
 * @throws AlreadyClosedException if the Autocompleter is already closed
 */
public void clearIndex() throws IOException {
    synchronized (modifyCurrentIndexLock) {
        ensureOpen();
        final Directory dir = this.autoCompleteIndex;
        final IndexWriter writer = new IndexWriter(dir,
                new IndexWriterConfig(Version.LUCENE_CURRENT, new WhitespaceAnalyzer(Version.LUCENE_CURRENT))
                        .setOpenMode(OpenMode.CREATE));
        writer.close();
        swapSearcher(dir);
    }
}
Example #16
Source File: IndexBuilder.java From exhibitor with Apache License 2.0
public void open() throws Exception {
    if ( !directory.exists() && !directory.mkdirs() ) {
        throw new IOException("Could not make: " + directory);
    }

    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_35, new KeywordAnalyzer())
            .setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    niofsDirectory = new NIOFSDirectory(directory, new SingleInstanceLockFactory());
    writer = new IndexWriter(niofsDirectory, conf);
}
Example #17
Source File: TestCustomAnalyzer.java From lucene-solr with Apache License 2.0
public void testIncorrectOrder() throws Exception {
    expectThrows(IllegalStateException.class, () -> {
        CustomAnalyzer.builder()
            .addCharFilter("htmlstrip")
            .withDefaultMatchVersion(Version.LATEST)
            .withTokenizer("whitespace")
            .build();
    });
}
Example #18
Source File: IndexSchema.java From lucene-solr with Apache License 2.0
/**
 * Constructs a schema using the specified resource name and stream.
 * By default, this follows the normal config path directory searching rules.
 * @see SolrResourceLoader#openResource
 */
public IndexSchema(String name, InputSource is, Version luceneVersion, SolrResourceLoader resourceLoader,
        Properties substitutableProperties) {
    this(luceneVersion, resourceLoader, substitutableProperties);
    this.resourceName = Objects.requireNonNull(name);
    try {
        readSchema(is);
        loader.inform(loader);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
Example #19
Source File: LengthTokenFilterFactory.java From Elasticsearch with Apache License 2.0
@Inject
public LengthTokenFilterFactory(Index index, IndexSettingsService indexSettingsService,
        @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);
    min = settings.getAsInt("min", 0);
    max = settings.getAsInt("max", Integer.MAX_VALUE);
    if (version.onOrAfter(Version.LUCENE_4_4) && settings.get(ENABLE_POS_INC_KEY) != null) {
        throw new IllegalArgumentException(ENABLE_POS_INC_KEY
                + " is not supported anymore. Please fix your analysis chain or use"
                + " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs.");
    }
    enablePositionIncrements = version.onOrAfter(Version.LUCENE_4_4)
            ? true : settings.getAsBoolean(ENABLE_POS_INC_KEY, true);
}
Example #20
Source File: TestSegmentMerger.java From lucene-solr with Apache License 2.0
@Override
public void setUp() throws Exception {
    super.setUp();
    mergedDir = newDirectory();
    merge1Dir = newDirectory();
    merge2Dir = newDirectory();
    DocHelper.setupDoc(doc1);
    SegmentCommitInfo info1 = DocHelper.writeDoc(random(), merge1Dir, doc1);
    DocHelper.setupDoc(doc2);
    SegmentCommitInfo info2 = DocHelper.writeDoc(random(), merge2Dir, doc2);
    reader1 = new SegmentReader(info1, Version.LATEST.major, newIOContext(random()));
    reader2 = new SegmentReader(info2, Version.LATEST.major, newIOContext(random()));
}
Example #21
Source File: EdgeNGramTokenFilterFactory.java From Elasticsearch with Apache License 2.0
EdgeNGramTokenFilterFactory(Index index, Settings indexSettings, String name, Settings settings) {
    super(index, indexSettings, name, settings);
    this.minGram = settings.getAsInt("min_gram", NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE);
    this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE);
    this.side = parseSide(settings.get("side", "front"));
    this.esVersion = org.elasticsearch.Version.indexCreated(indexSettings);
}
Example #22
Source File: SolrSchemaUtilTest.java From jesterj with Apache License 2.0
@Test
public void testLoadDualAnalyzerType() throws Exception {
    SolrSchemaUtil solrSchemaUtil = new SolrSchemaUtil();
    Document schemaDocument = solrSchemaUtil.getSchemaDocument(SOLR_SCHEMA_XML, loader);
    assertNotNull(schemaDocument);
    // note we want to avoid any examples with substitution because substitution is not supported.
    FieldType textField = solrSchemaUtil.getFieldType(schemaDocument, "teststop",
            Version.LUCENE_7_6_0.toString(), 1.0f, loader);
    assertNotNull(textField);
    assertEquals("teststop", textField.getTypeName());
    assertNotNull(textField.getIndexAnalyzer());
    assertNotNull(textField.getQueryAnalyzer());
    assertNotSame(textField.getIndexAnalyzer(), textField.getQueryAnalyzer());
}
Example #23
Source File: OLuceneFullTextIndexManager.java From orientdb-lucene with Apache License 2.0
@Override
public IndexWriter createIndexWriter(Directory directory, ODocument metadata) throws IOException {
    Analyzer analyzer = getAnalyzer(metadata);
    Version version = getLuceneVersion(metadata);
    IndexWriterConfig iwc = new IndexWriterConfig(version, analyzer);
    iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    facetManager = new OLuceneFacetManager(this, metadata);
    OLogManager.instance().debug(this, "Creating Lucene index in '%s'...", directory);
    return new IndexWriter(directory, iwc);
}
Example #24
Source File: DictionaryCompoundWordTokenFilterFactory.java From Elasticsearch with Apache License 2.0
@Override
public TokenStream create(TokenStream tokenStream) {
    if (version.onOrAfter(Version.LUCENE_4_4_0)) {
        return new DictionaryCompoundWordTokenFilter(tokenStream, wordList, minWordSize,
                minSubwordSize, maxSubwordSize, onlyLongestMatch);
    } else {
        return new Lucene43DictionaryCompoundWordTokenFilter(tokenStream, wordList, minWordSize,
                minSubwordSize, maxSubwordSize, onlyLongestMatch);
    }
}
Example #25
Source File: EdgeNGramTokenizerFactory.java From Elasticsearch with Apache License 2.0
public EdgeNGramTokenizerFactory(Index index, Settings indexSettings, String name, Settings settings) {
    super(index, indexSettings, name, settings);
    this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
    this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);
    this.side = Lucene43EdgeNGramTokenizer.Side.getSide(
            settings.get("side", Lucene43EdgeNGramTokenizer.DEFAULT_SIDE.getLabel()));
    this.matcher = parseTokenChars(settings.getAsArray("token_chars"));
    this.esVersion = org.elasticsearch.Version.indexCreated(indexSettings);
}
Example #26
Source File: TrimTokenFilterFactory.java From Elasticsearch with Apache License 2.0
@Override
public TokenStream create(TokenStream tokenStream) {
    if (version.onOrAfter(Version.LUCENE_4_4_0)) {
        return new TrimFilter(tokenStream);
    } else {
        @SuppressWarnings("deprecation")
        final TokenStream filter = new Lucene43TrimFilter(tokenStream, updateOffsets);
        return filter;
    }
}
Example #27
Source File: FieldTypePluginLoader.java From lucene-solr with Apache License 2.0
private Version parseConfiguredVersion(String configuredVersion, String pluginClassName) {
    Version version = (configuredVersion != null)
            ? SolrConfig.parseLuceneVersionString(configuredVersion)
            : schema.getDefaultLuceneMatchVersion();

    if (!version.onOrAfter(Version.LUCENE_8_0_0)) {
        log.warn("{} is using deprecated {}"
                + " emulation. You should at some point declare and reindex to at least 8.0, because "
                + "7.x emulation is deprecated and will be removed in 9.0", pluginClassName, version);
    }
    return version;
}
Example #28
Source File: TestTieredMergePolicy.java From lucene-solr with Apache License 2.0
public void testForcedMergeWithPending() throws Exception {
    final TieredMergePolicy tmp = new TieredMergePolicy();
    final double maxSegmentSize = 10.0D;
    tmp.setMaxMergedSegmentMB(maxSegmentSize);

    SegmentInfos infos = new SegmentInfos(Version.LATEST.major);
    for (int j = 0; j < 30; ++j) {
        infos.add(makeSegmentCommitInfo("_" + j, 1000, 0, 1.0D, IndexWriter.SOURCE_MERGE));
    }
    final MockMergeContext mergeContext = new MockMergeContext(SegmentCommitInfo::getDelCount);
    mergeContext.setMergingSegments(Collections.singleton(infos.asList().get(0)));
    final int expectedCount = random().nextInt(10) + 3;
    final MergeSpecification specification =
        tmp.findForcedMerges(infos, expectedCount, segmentsToMerge(infos), mergeContext);
    // Since we have fewer than 30 (the max merge count) segments more than the final size,
    // this would have been the final merge, so we check that it was prevented.
    assertNull(specification);

    SegmentInfos manySegmentsInfos = new SegmentInfos(Version.LATEST.major);
    final int manySegmentsCount = atLeast(500);
    for (int j = 0; j < manySegmentsCount; ++j) {
        manySegmentsInfos.add(makeSegmentCommitInfo("_" + j, 1000, 0, 0.1D, IndexWriter.SOURCE_MERGE));
    }
    // We set one merge to be ongoing. Since we have more than 30 (the max merge count) times
    // the number of segments we want to merge to, this is not the final merge, and hence the
    // returned specification must not be null.
    mergeContext.setMergingSegments(Collections.singleton(manySegmentsInfos.asList().get(0)));
    final MergeSpecification specificationManySegments =
        tmp.findForcedMerges(manySegmentsInfos, expectedCount, segmentsToMerge(manySegmentsInfos), mergeContext);
    assertMaxSize(specificationManySegments, maxSegmentSize);
    for (OneMerge merge : specificationManySegments.merges) {
        assertEquals("No merges of less than the max merge count are permitted while another merge is in progress",
            merge.segments.size(), tmp.getMaxMergeAtOnceExplicit());
    }
    final int resultingCountManySegments = manySegmentsInfos.size() + specificationManySegments.merges.size()
        - specificationManySegments.merges.stream().mapToInt(spec -> spec.segments.size()).sum();
    assertTrue(resultingCountManySegments >= expectedCount);
}
Example #29
Source File: AclDiscoverFieldTypeDefinitionTest.java From incubator-retired-blur with Apache License 2.0
private void test(int expected, boolean rowQuery, Collection<String> discoverAuthorizations)
        throws IOException, ParseException {
    DirectoryReader reader = DirectoryReader.open(_dir);
    SuperParser parser = new SuperParser(Version.LUCENE_43, _fieldManager, rowQuery, null, ScoreType.SUPER,
            new Term(BlurConstants.PRIME_DOC, BlurConstants.PRIME_DOC_VALUE));
    Query query = parser.parse("fam.string:value");

    Collection<String> readAuthorizations = null;
    Set<String> discoverableFields = new HashSet<String>();
    discoverableFields.add("rowid");
    discoverableFields.add("recordid");
    discoverableFields.add("family");
    IndexSearcher searcher = new SecureIndexSearcher(reader, getAccessControlFactory(), readAuthorizations,
            discoverAuthorizations, discoverableFields, null);

    TopDocs topDocs = searcher.search(query, 10);
    assertEquals(expected, topDocs.totalHits);
    for (int i = 0; i < expected; i++) {
        int doc = topDocs.scoreDocs[i].doc;
        Document document = searcher.doc(doc);
        List<IndexableField> fields = document.getFields();
        for (IndexableField field : fields) {
            assertTrue(discoverableFields.contains(field.name()));
        }
    }
    reader.close();
}
Example #30
Source File: Indexer.java From sql-layer with GNU Affero General Public License v3.0
public Indexer(FullTextIndexShared index, Analyzer analyzer) throws IOException {
    IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_40, analyzer);
    iwc.setMaxBufferedDeleteTerms(1); // The deletion needs to be reflected immediately (on disk)
    this.index = index;
    this.writer = new IndexWriter(index.open(), iwc);
}