Java Code Examples for org.apache.lucene.index.DirectoryReader#maxDoc()
The following examples show how to use
org.apache.lucene.index.DirectoryReader#maxDoc().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: FastHdfsKeyValueDirectoryTest.java From incubator-retired-blur with Apache License 2.0 | 6 votes |
@Test
public void testMultipleWritersOpenOnSameDirectory() throws IOException {
  // Open two IndexWriters against the same directory, add one document through
  // each, then verify what the index looks like after both commit on close.
  IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_43, new KeywordAnalyzer());
  FastHdfsKeyValueDirectory directory =
      new FastHdfsKeyValueDirectory(false, _timer, _configuration, new Path(_path, "test_multiple"));

  IndexWriter firstWriter = new IndexWriter(directory, config.clone());
  addDoc(firstWriter, getDoc(1));
  IndexWriter secondWriter = new IndexWriter(directory, config.clone());
  addDoc(secondWriter, getDoc(2));
  firstWriter.close();
  secondWriter.close();

  // The test asserts that exactly one document survives and that it is the one
  // written by the second writer (id == "2").
  DirectoryReader reader = DirectoryReader.open(directory);
  int maxDoc = reader.maxDoc();
  assertEquals(1, maxDoc);
  Document document = reader.document(0);
  assertEquals("2", document.get("id"));
  reader.close();
}
Example 2
Source File: TestPerfTasksLogic.java From lucene-solr with Apache License 2.0 | 5 votes |
public void testDocMakerThreadSafety() throws Exception { // 1. alg definition (required in every "logic" test) String algLines[] = { "# ----- properties ", "content.source=org.apache.lucene.benchmark.byTask.feeds.SortableSingleDocSource", "doc.term.vector=false", "log.step.AddDoc=10000", "content.source.forever=true", "directory=ByteBuffersDirectory", "doc.reuse.fields=false", "doc.stored=true", "doc.tokenized=false", "doc.index.props=true", "# ----- alg ", "CreateIndex", "[ { AddDoc > : 250 ] : 4", "CloseIndex", }; // 2. we test this value later CountingSearchTestTask.numSearches = 0; // 3. execute the algorithm (required in every "logic" test) Benchmark benchmark = execBenchmark(algLines); DirectoryReader r = DirectoryReader.open(benchmark.getRunData().getDirectory()); final int maxDoc = r.maxDoc(); assertEquals(1000, maxDoc); for(int i=0;i<1000;i++) { assertNotNull("doc " + i + " has null country", r.document(i).getField("country")); } r.close(); }
Example 3
Source File: TestOrdinalMappingLeafReader.java From lucene-solr with Apache License 2.0 | 5 votes |
private void verifyResults(Directory indexDir, Directory taxoDir) throws IOException { DirectoryReader indexReader = DirectoryReader.open(indexDir); DirectoryTaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir); IndexSearcher searcher = newSearcher(indexReader); FacetsCollector collector = new FacetsCollector(); FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, collector); // tag facets Facets tagFacets = new FastTaxonomyFacetCounts("$tags", taxoReader, facetConfig, collector); FacetResult result = tagFacets.getTopChildren(10, "tag"); for (LabelAndValue lv: result.labelValues) { if (VERBOSE) { System.out.println(lv); } assertEquals(NUM_DOCS, lv.value.intValue()); } // id facets Facets idFacets = new FastTaxonomyFacetCounts(taxoReader, facetConfig, collector); FacetResult idResult = idFacets.getTopChildren(10, "id"); assertEquals(NUM_DOCS, idResult.childCount); assertEquals(NUM_DOCS * 2, idResult.value); // each "id" appears twice BinaryDocValues bdv = MultiDocValues.getBinaryValues(indexReader, "bdv"); BinaryDocValues cbdv = MultiDocValues.getBinaryValues(indexReader, "cbdv"); for (int i = 0; i < indexReader.maxDoc(); i++) { assertEquals(i, bdv.nextDoc()); assertEquals(i, cbdv.nextDoc()); assertEquals(Integer.parseInt(cbdv.binaryValue().utf8ToString()), Integer.parseInt(bdv.binaryValue().utf8ToString())*2); } IOUtils.close(indexReader, taxoReader); }
Example 4
Source File: FastHdfsKeyValueDirectoryTest.java From incubator-retired-blur with Apache License 2.0 | 5 votes |
/**
 * Returns the total document count ({@code maxDoc}) of the index stored in
 * {@code directory}, or {@code 0} if no index exists there.
 *
 * @param directory the directory to inspect
 * @return the reader's {@code maxDoc()}, or 0 when the directory holds no index
 * @throws IOException if the index cannot be opened or read
 */
private int getDocumentCount(Directory directory) throws IOException {
  if (!DirectoryReader.indexExists(directory)) {
    return 0;
  }
  // try-with-resources: the original explicit close() leaked the reader
  // whenever maxDoc() (or anything between open and close) threw.
  try (DirectoryReader reader = DirectoryReader.open(directory)) {
    return reader.maxDoc();
  }
}
Example 5
Source File: IndexLoader.java From solr-autocomplete with Apache License 2.0 | 4 votes |
public static void main(String[] args) throws CorruptIndexException, IOException, SolrServerException { if (args.length < 3) { System.err.println("Usage: java -Dfile.encoding=UTF8 -Dclient.encoding.override=UTF-8 -Xmx256m -Xms256m -server " + IndexLoader.class.getName() + " </path/to/index> <AutoCompleteSolrUrl> <indexField1,acField1> [indexField2,acField2 ... ]"); System.exit(0); } Map<String,String> fieldMap = getFieldMapping(args, 2); DirectoryReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(args[0]))); int docs = reader.maxDoc(); SolrClient solr = new ConcurrentUpdateSolrClient.Builder(args[1]).withQueueSize(10000).withThreadCount(2).build(); Set<SolrInputDocument> batch = new HashSet<SolrInputDocument>(1000); Bits liveDocs = MultiFields.getLiveDocs(reader); // go through all docs in the index for (int i = 0; i < docs; i++) { // process doc only if not deleted if (liveDocs == null || liveDocs.get(i)) { // loop through all fields to be looked at SolrInputDocument doc = new SolrInputDocument(); Iterator<String> iter = fieldMap.keySet().iterator(); boolean phraseFieldEmpty = false; while (iter.hasNext()) { String indexField = iter.next(); String acField = fieldMap.get(indexField); IndexableField field = reader.document(i).getField(indexField); String value = field != null ? 
reader.document(i).getField(indexField).stringValue() : null; if (field != null && value != null && !value.isEmpty()) { doc.addField(acField, value); } else { // not very relevant piece of info // System.err.println("Field is null or empty, skipping: " + indexField); if (acField.equalsIgnoreCase("phrase")) { System.err.println("Since AC phrase field would be null, this doc will not be created: " + reader.document(i)); phraseFieldEmpty = true; break; } } } if (!phraseFieldEmpty) { solr.add(doc); if (docs % 1000 == 0) { System.out.println("Docs: " + docs); } } } } if (!batch.isEmpty()) solr.add(batch); reader.close(); System.out.println("Optimizing..."); solr.optimize(); solr.close(); }