org.apache.lucene.store.ByteBuffersDirectory Java Examples
The following examples show how to use org.apache.lucene.store.ByteBuffersDirectory, a heap-resident Directory implementation introduced as the replacement for the deprecated RAMDirectory. The source project and license are noted above each example.
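For context before the project-specific examples below, here is a minimal, self-contained sketch of the pattern most of them share: create the heap-resident directory, index a document, open a reader, and search. The field name, document text, and query term are illustrative placeholders, not taken from any project below.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class ByteBuffersDirectoryDemo {
  public static void main(String[] args) throws Exception {
    // The whole index lives on the heap; nothing is written to disk.
    try (Directory dir = new ByteBuffersDirectory()) {
      IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));
      Document doc = new Document();
      doc.add(new TextField("body", "an in-memory lucene index", Field.Store.YES)); // field name is illustrative
      writer.addDocument(doc);
      writer.close(); // commits and releases the write lock

      try (DirectoryReader reader = DirectoryReader.open(dir)) {
        IndexSearcher searcher = new IndexSearcher(reader);
        int hits = searcher.count(new TermQuery(new Term("body", "lucene")));
        System.out.println("hits: " + hits); // prints "hits: 1"
      }
    }
  }
}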
Example #1
Source File: TestSizeBoundedForceMerge.java (from lucene-solr, Apache License 2.0)
public void testLastSegmentTooLarge() throws Exception {
  Directory dir = new ByteBuffersDirectory();
  IndexWriterConfig conf = newWriterConfig();
  IndexWriter writer = new IndexWriter(dir, conf);

  addDocs(writer, 3);
  addDocs(writer, 3);
  addDocs(writer, 3);
  addDocs(writer, 5);

  writer.close();

  conf = newWriterConfig();
  LogMergePolicy lmp = new LogDocMergePolicy();
  lmp.setMaxMergeDocs(3);
  conf.setMergePolicy(lmp);

  writer = new IndexWriter(dir, conf);
  writer.forceMerge(1);
  writer.close();

  // The 5-doc segment exceeds maxMergeDocs, so only the three 3-doc
  // segments are merged into one: two segments remain.
  SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
  assertEquals(2, sis.size());
}
Example #2
Source File: SingleFieldTestDb.java (from lucene-solr, Apache License 2.0)
public SingleFieldTestDb(Random random, String[] documents, String fName) {
  try {
    db = new MockDirectoryWrapper(random, new ByteBuffersDirectory());
    docs = documents;
    fieldName = fName;
    IndexWriter writer = new IndexWriter(db, new IndexWriterConfig(new MockAnalyzer(random)));
    for (int j = 0; j < docs.length; j++) {
      Document d = new Document();
      d.add(new TextField(fieldName, docs[j], Field.Store.NO));
      writer.addDocument(d);
    }
    writer.close();
  } catch (java.io.IOException ioe) {
    throw new Error(ioe);
  }
}
Example #3
Source File: QueryAutoStopWordAnalyzerTest.java (from lucene-solr, Apache License 2.0)
@Override
public void setUp() throws Exception {
  super.setUp();
  dir = new ByteBuffersDirectory();
  appAnalyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(appAnalyzer));
  int numDocs = 200;
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    String variedFieldValue = variedFieldValues[i % variedFieldValues.length];
    String repetitiveFieldValue = repetitiveFieldValues[i % repetitiveFieldValues.length];
    doc.add(new TextField("variedField", variedFieldValue, Field.Store.YES));
    doc.add(new TextField("repetitiveField", repetitiveFieldValue, Field.Store.YES));
    writer.addDocument(doc);
  }
  writer.close();
  reader = DirectoryReader.open(dir);
}
Example #4
Source File: PayloadHelper.java (from lucene-solr, Apache License 2.0)
/**
 * Sets up a RAM-resident Directory, and adds documents (using English.intToEnglish())
 * with two fields: field and multiField, and analyzes them using the PayloadAnalyzer.
 * @param similarity The Similarity class to use in the Searcher
 * @param numDocs The num docs to add
 * @return An IndexSearcher
 */
// TODO: randomize
public IndexSearcher setUp(Random random, Similarity similarity, int numDocs) throws IOException {
  Directory directory = new MockDirectoryWrapper(random, new ByteBuffersDirectory());
  PayloadAnalyzer analyzer = new PayloadAnalyzer(); // TODO: randomize this
  IndexWriter writer = new IndexWriter(directory,
      new IndexWriterConfig(analyzer).setSimilarity(similarity));
  // writer.infoStream = System.out;
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    doc.add(new TextField(FIELD, English.intToEnglish(i), Field.Store.YES));
    doc.add(new TextField(MULTI_FIELD, English.intToEnglish(i) + " " + English.intToEnglish(i), Field.Store.YES));
    doc.add(new TextField(NO_PAYLOAD_FIELD, English.intToEnglish(i), Field.Store.YES));
    writer.addDocument(doc);
  }
  writer.forceMerge(1);
  reader = DirectoryReader.open(writer);
  writer.close();

  IndexSearcher searcher = LuceneTestCase.newSearcher(LuceneTestCase.getOnlyLeafReader(reader));
  searcher.setSimilarity(similarity);
  return searcher;
}
Example #5
Source File: BaseCompressingDocValuesFormatTestCase.java (from lucene-solr, Apache License 2.0)
public void testSingleBigValueCompression() throws IOException {
  try (final Directory dir = new ByteBuffersDirectory()) {
    final IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
    final IndexWriter iwriter = new IndexWriter(dir, iwc);

    final Document doc = new Document();
    final NumericDocValuesField dvf = new NumericDocValuesField("dv", 0);
    doc.add(dvf);
    for (int i = 0; i < 20000; ++i) {
      dvf.setLongValue(i & 1023);
      iwriter.addDocument(doc);
    }
    iwriter.forceMerge(1);
    final long size1 = dirSize(dir);

    dvf.setLongValue(Long.MAX_VALUE);
    iwriter.addDocument(doc);
    iwriter.forceMerge(1);
    final long size2 = dirSize(dir);

    // make sure the new value did not grow the bpv for every other value
    assertTrue(size2 < size1 + (20000 * (63 - 10)) / 8);
  }
}
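The final assertion encodes a worked bound: the 20,000 small values fit in 10 bits each (i & 1023 is at most 1023), so if the single Long.MAX_VALUE outlier forced every value up to 63 bits, the segment would grow by about 20000 * (63 - 10) / 8 = 132,500 bytes. The test asserts growth stays strictly below that, i.e. the outlier did not widen the bits-per-value for all other values.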
Example #6
Source File: SweetSpotSimilarityTest.java (from lucene-solr, Apache License 2.0)
private static float computeNorm(Similarity sim, String field, int length) throws IOException {
  String value = IntStream.range(0, length).mapToObj(i -> "a").collect(Collectors.joining(" "));
  Directory dir = new ByteBuffersDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig().setSimilarity(sim));
  w.addDocument(Collections.singleton(newTextField(field, value, Store.NO)));
  DirectoryReader reader = DirectoryReader.open(w);
  w.close();
  IndexSearcher searcher = new IndexSearcher(reader);
  searcher.setSimilarity(sim);
  Explanation expl = searcher.explain(new TermQuery(new Term(field, "a")), 0);
  reader.close();
  dir.close();
  Explanation norm = findExplanation(expl, "fieldNorm");
  assertNotNull(norm);
  return norm.getValue().floatValue();
}
Example #7
Source File: StemmerTestBase.java (from lucene-solr, Apache License 2.0)
static void init(boolean ignoreCase, String affix, String... dictionaries) throws IOException, ParseException {
  if (dictionaries.length == 0) {
    throw new IllegalArgumentException("there must be at least one dictionary");
  }

  InputStream affixStream = StemmerTestBase.class.getResourceAsStream(affix);
  if (affixStream == null) {
    throw new FileNotFoundException("file not found: " + affix);
  }

  InputStream[] dictStreams = new InputStream[dictionaries.length];
  for (int i = 0; i < dictionaries.length; i++) {
    dictStreams[i] = StemmerTestBase.class.getResourceAsStream(dictionaries[i]);
    if (dictStreams[i] == null) {
      // report the missing resource name, not the null stream
      throw new FileNotFoundException("file not found: " + dictionaries[i]);
    }
  }

  try {
    Dictionary dictionary = new Dictionary(new ByteBuffersDirectory(), "dictionary",
        affixStream, Arrays.asList(dictStreams), ignoreCase);
    stemmer = new Stemmer(dictionary);
  } finally {
    IOUtils.closeWhileHandlingException(affixStream);
    IOUtils.closeWhileHandlingException(dictStreams);
  }
}
Example #8
Source File: TestPointValues.java (from lucene-solr, Apache License 2.0)
public void testMergedStatsOneSegmentWithoutPoints() throws IOException {
  Directory dir = new ByteBuffersDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null).setMergePolicy(NoMergePolicy.INSTANCE));
  w.addDocument(new Document());
  DirectoryReader.open(w).close();
  Document doc = new Document();
  doc.add(new IntPoint("field", Integer.MIN_VALUE));
  w.addDocument(doc);
  IndexReader reader = DirectoryReader.open(w);

  assertArrayEquals(new byte[4], PointValues.getMinPackedValue(reader, "field"));
  assertArrayEquals(new byte[4], PointValues.getMaxPackedValue(reader, "field"));
  assertEquals(1, PointValues.getDocCount(reader, "field"));
  assertEquals(1, PointValues.size(reader, "field"));

  assertNull(PointValues.getMinPackedValue(reader, "field2"));
  assertNull(PointValues.getMaxPackedValue(reader, "field2"));
  assertEquals(0, PointValues.getDocCount(reader, "field2"));
  assertEquals(0, PointValues.size(reader, "field2"));
}
Example #9
Source File: TestPointValues.java (from lucene-solr, Apache License 2.0)
public void testMergedStatsAllPointsDeleted() throws IOException {
  Directory dir = new ByteBuffersDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
  w.addDocument(new Document());
  Document doc = new Document();
  doc.add(new IntPoint("field", Integer.MIN_VALUE));
  doc.add(new StringField("delete", "yes", Store.NO));
  w.addDocument(doc);
  w.forceMerge(1);
  w.deleteDocuments(new Term("delete", "yes"));
  w.addDocument(new Document());
  w.forceMerge(1);
  IndexReader reader = DirectoryReader.open(w);

  assertNull(PointValues.getMinPackedValue(reader, "field"));
  assertNull(PointValues.getMaxPackedValue(reader, "field"));
  assertEquals(0, PointValues.getDocCount(reader, "field"));
  assertEquals(0, PointValues.size(reader, "field"));
}
Example #10
Source File: TestPendingDeletes.java (from lucene-solr, Apache License 2.0)
public void testIsFullyDeleted() throws IOException {
  Directory dir = new ByteBuffersDirectory();
  SegmentInfo si = new SegmentInfo(dir, Version.LATEST, Version.LATEST, "test", 3, false,
      Codec.getDefault(), Collections.emptyMap(), StringHelper.randomId(), new HashMap<>(), null);
  SegmentCommitInfo commitInfo = new SegmentCommitInfo(si, 0, 0, -1, -1, -1, StringHelper.randomId());
  FieldInfos fieldInfos = FieldInfos.EMPTY;
  si.getCodec().fieldInfosFormat().write(dir, si, "", fieldInfos, IOContext.DEFAULT);
  PendingDeletes deletes = newPendingDeletes(commitInfo);
  for (int i = 0; i < 3; i++) {
    assertTrue(deletes.delete(i));
    if (random().nextBoolean()) {
      assertTrue(deletes.writeLiveDocs(dir));
    }
    assertEquals(i == 2, deletes.isFullyDeleted(() -> null));
  }
}
Example #11
Source File: TestIndexWriterReader.java (from lucene-solr, Apache License 2.0)
/** Make sure if all we do is open NRT reader against
 *  writer, we don't see merge starvation. */
public void testTooManySegments() throws Exception {
  Directory dir = getAssertNoDeletesDirectory(new ByteBuffersDirectory());
  // Don't use newIndexWriterConfig, because we need a
  // "sane" mergePolicy:
  IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
  IndexWriter w = new IndexWriter(dir, iwc);
  // Create 500 segments:
  for (int i = 0; i < 500; i++) {
    Document doc = new Document();
    doc.add(newStringField("id", "" + i, Field.Store.NO));
    w.addDocument(doc);
    IndexReader r = DirectoryReader.open(w);
    // Make sure segment count never exceeds 100:
    assertTrue(r.leaves().size() < 100);
    r.close();
  }
  w.close();
  dir.close();
}
Example #12
Source File: TestIndexWriter.java (from lucene-solr, Apache License 2.0)
public void testDeleteAllNRTLeftoverFiles() throws Exception {
  MockDirectoryWrapper d = new MockDirectoryWrapper(random(), new ByteBuffersDirectory());
  IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random())));
  Document doc = new Document();
  for (int i = 0; i < 20; i++) {
    for (int j = 0; j < 100; ++j) {
      w.addDocument(doc);
    }
    w.commit();
    DirectoryReader.open(w).close();

    w.deleteAll();
    w.commit();

    // Make sure we accumulate no files except for empty
    // segments_N and segments.gen:
    assertTrue(d.listAll().length <= 2);
  }

  w.close();
  d.close();
}
Example #13
Source File: TestIndexWriter.java (from lucene-solr, Apache License 2.0)
public void testNRTReaderVersion() throws Exception {
  Directory d = new MockDirectoryWrapper(random(), new ByteBuffersDirectory());
  IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random())));
  Document doc = new Document();
  doc.add(newStringField("id", "0", Field.Store.YES));
  w.addDocument(doc);
  DirectoryReader r = w.getReader();
  long version = r.getVersion();
  r.close();

  w.addDocument(doc);
  r = w.getReader();
  long version2 = r.getVersion();
  r.close();
  assert version2 > version;

  w.deleteDocuments(new Term("id", "0"));
  r = w.getReader();
  w.close();
  long version3 = r.getVersion();
  r.close();
  assert version3 > version2;
  d.close();
}
Example #14
Source File: LuceneEngine.java (from jstarcraft-core, Apache License 2.0)
public LuceneEngine(IndexWriterConfig config, Path path) {
  try {
    this.config = config;
    Directory transienceDirectory = new ByteBuffersDirectory();
    this.transienceManager = new TransienceManager((IndexWriterConfig) BeanUtils.cloneBean(config), transienceDirectory);
    Directory persistenceDirectory = FSDirectory.open(path);
    this.persistenceManager = new PersistenceManager((IndexWriterConfig) BeanUtils.cloneBean(config), persistenceDirectory);
    this.searcher = new LuceneSearcher(this.transienceManager, this.persistenceManager);

    this.semaphore = new AtomicInteger();
    ReadWriteLock lock = new ReentrantReadWriteLock();
    this.readLock = lock.readLock();
    this.writeLock = lock.writeLock();
  } catch (Exception exception) {
    throw new StorageException(exception);
  }
}
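The two directories in this constructor reflect a two-tier design: recent writes go to the heap-resident ByteBuffersDirectory behind the TransienceManager for fast visibility, while the FSDirectory behind the PersistenceManager holds the durable on-disk index, and the LuceneSearcher queries across both. This reading of the managers' roles is inferred from their names, not spelled out in the snippet itself.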
Example #15
Source File: TestAddIndexes.java (from lucene-solr, Apache License 2.0)
public RunAddIndexesThreads(int numCopy) throws Throwable {
  NUM_COPY = numCopy;
  dir = new MockDirectoryWrapper(random(), new ByteBuffersDirectory());
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random()))
      .setMaxBufferedDocs(2));
  for (int i = 0; i < NUM_INIT_DOCS; i++) {
    addDoc(writer);
  }
  writer.close();

  dir2 = newDirectory();
  writer2 = new IndexWriter(dir2, new IndexWriterConfig(new MockAnalyzer(random())));
  writer2.commit();

  readers = new DirectoryReader[NUM_COPY];
  for (int i = 0; i < NUM_COPY; i++) {
    readers[i] = DirectoryReader.open(dir);
  }
  int numThreads = TEST_NIGHTLY ? 5 : 2;
  threads = new Thread[numThreads];
}
Example #16
Source File: TestTryDelete.java (from lucene-solr, Apache License 2.0)
private static Directory createIndex() throws IOException {
  Directory directory = new ByteBuffersDirectory();
  IndexWriter writer = getWriter(directory);
  for (int i = 0; i < 10; i++) {
    Document doc = new Document();
    doc.add(new StringField("foo", String.valueOf(i), Store.YES));
    writer.addDocument(doc);
  }
  writer.commit();
  writer.close();
  return directory;
}
Example #17
Source File: TestSizeBoundedForceMerge.java (from lucene-solr, Apache License 2.0)
public void testFirstSegmentTooLarge() throws Exception {
  Directory dir = new ByteBuffersDirectory();
  IndexWriterConfig conf = newWriterConfig();
  IndexWriter writer = new IndexWriter(dir, conf);

  addDocs(writer, 5);
  addDocs(writer, 3);
  addDocs(writer, 3);
  addDocs(writer, 3);

  writer.close();

  conf = newWriterConfig();
  LogMergePolicy lmp = new LogDocMergePolicy();
  lmp.setMaxMergeDocs(3);
  conf.setMergePolicy(lmp);

  writer = new IndexWriter(dir, conf);
  writer.forceMerge(1);
  writer.close();

  SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
  assertEquals(2, sis.size());
}
Example #18
Source File: TestSizeBoundedForceMerge.java (from lucene-solr, Apache License 2.0)
public void testAllSegmentsSmall() throws Exception {
  Directory dir = new ByteBuffersDirectory();
  IndexWriterConfig conf = newWriterConfig();
  IndexWriter writer = new IndexWriter(dir, conf);

  addDocs(writer, 3);
  addDocs(writer, 3);
  addDocs(writer, 3);
  addDocs(writer, 3);

  writer.close();

  conf = newWriterConfig();
  LogMergePolicy lmp = new LogDocMergePolicy();
  lmp.setMaxMergeDocs(3);
  conf.setMergePolicy(lmp);

  writer = new IndexWriter(dir, conf);
  writer.forceMerge(1);
  writer.close();

  SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
  assertEquals(1, sis.size());
}
Example #19
Source File: TestSizeBoundedForceMerge.java (from lucene-solr, Apache License 2.0)
public void testAllSegmentsLarge() throws Exception {
  Directory dir = new ByteBuffersDirectory();
  IndexWriterConfig conf = newWriterConfig();
  IndexWriter writer = new IndexWriter(dir, conf);

  addDocs(writer, 3);
  addDocs(writer, 3);
  addDocs(writer, 3);

  writer.close();

  conf = newWriterConfig();
  LogMergePolicy lmp = new LogDocMergePolicy();
  lmp.setMaxMergeDocs(2);
  conf.setMergePolicy(lmp);

  writer = new IndexWriter(dir, conf);
  writer.forceMerge(1);
  writer.close();

  SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
  assertEquals(3, sis.size());
}
Example #20
Source File: TestSizeBoundedForceMerge.java (from lucene-solr, Apache License 2.0)
public void testOneLargeOneSmall() throws Exception {
  Directory dir = new ByteBuffersDirectory();
  IndexWriterConfig conf = newWriterConfig();
  IndexWriter writer = new IndexWriter(dir, conf);

  addDocs(writer, 3);
  addDocs(writer, 5);
  addDocs(writer, 3);
  addDocs(writer, 5);

  writer.close();

  conf = newWriterConfig();
  LogMergePolicy lmp = new LogDocMergePolicy();
  lmp.setMaxMergeDocs(3);
  conf.setMergePolicy(lmp);

  writer = new IndexWriter(dir, conf);
  writer.forceMerge(1);
  writer.close();

  SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
  assertEquals(4, sis.size());
}
Example #21
Source File: TestSizeBoundedForceMerge.java (from lucene-solr, Apache License 2.0)
public void testSingleNonMergeableSegment() throws Exception {
  Directory dir = new ByteBuffersDirectory();
  IndexWriterConfig conf = newWriterConfig();
  IndexWriter writer = new IndexWriter(dir, conf);

  addDocs(writer, 3, true);

  writer.close();

  conf = newWriterConfig();
  LogMergePolicy lmp = new LogDocMergePolicy();
  lmp.setMaxMergeDocs(3);
  conf.setMergePolicy(lmp);

  writer = new IndexWriter(dir, conf);
  writer.forceMerge(1);
  writer.close();

  // Verify that the last segment does not have deletions.
  SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
  assertEquals(1, sis.size());
}
Example #22
Source File: TestMultiTermsEnum.java (from lucene-solr, Apache License 2.0)
public void testNoTermsInField() throws Exception {
  Directory directory = new ByteBuffersDirectory();
  IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(new MockAnalyzer(random())));
  Document document = new Document();
  document.add(new StringField("deleted", "0", Field.Store.YES));
  writer.addDocument(document);

  DirectoryReader reader = DirectoryReader.open(writer);
  writer.close();

  Directory directory2 = new ByteBuffersDirectory();
  writer = new IndexWriter(directory2, new IndexWriterConfig(new MockAnalyzer(random())));

  List<LeafReaderContext> leaves = reader.leaves();
  CodecReader[] codecReaders = new CodecReader[leaves.size()];
  for (int i = 0; i < leaves.size(); i++) {
    codecReaders[i] = new MigratingCodecReader((CodecReader) leaves.get(i).reader());
  }

  writer.addIndexes(codecReaders); // <- bang

  IOUtils.close(writer, reader, directory);
}
Example #23
Source File: HdfsDirectoryTest.java (from lucene-solr, Apache License 2.0)
@Test
public void testRandomAccessWrites() throws IOException {
  int i = 0;
  try {
    Set<String> names = new HashSet<>();
    for (; i < 10; i++) {
      Directory fsDir = new ByteBuffersDirectory();
      String name = getName();
      System.out.println("Working on pass [" + i + "] contains [" + names.contains(name) + "]");
      names.add(name);
      createFile(name, fsDir, directory);
      assertInputsEquals(name, fsDir, directory);
      fsDir.close();
    }
  } catch (Exception e) {
    e.printStackTrace();
    fail("Test failed on pass [" + i + "]");
  }
}
Example #24
Source File: ExplorerQueryTests.java (from elasticsearch-learning-to-rank, Apache License 2.0)
@Before
public void setupIndex() throws Exception {
  dir = new ByteBuffersDirectory();

  try (IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER))) {
    for (int i = 0; i < docs.length; i++) {
      Document doc = new Document();
      doc.add(new Field("_id", Integer.toString(i + 1), StoredField.TYPE));
      doc.add(newTextField("text", docs[i], Field.Store.YES));
      indexWriter.addDocument(doc);
    }
  }

  reader = DirectoryReader.open(dir);
  searcher = new IndexSearcher(reader);
}
Example #25
Source File: LuceneBatchIteratorTest.java (from crate, Apache License 2.0)
@Before
public void prepareSearcher() throws Exception {
  IndexWriter iw = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig(new StandardAnalyzer()));
  String columnName = "x";
  expectedResult = new ArrayList<>(20);
  for (long i = 0; i < 20; i++) {
    Document doc = new Document();
    doc.add(new NumericDocValuesField(columnName, i));
    iw.addDocument(doc);
    expectedResult.add(new Object[]{i});
  }
  iw.commit();
  indexSearcher = new IndexSearcher(DirectoryReader.open(iw));
  LongColumnReference columnReference = new LongColumnReference(columnName);
  columnRefs = Collections.singletonList(columnReference);
}
Example #26
Source File: LuceneOrderedDocCollectorTest.java (from crate, Apache License 2.0)
@Test
public void testSearchNoScores() throws Exception {
  IndexWriter w = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig(new KeywordAnalyzer()));
  KeywordFieldMapper.KeywordFieldType fieldType = new KeywordFieldMapper.KeywordFieldType();
  fieldType.setName("x");
  fieldType.freeze();

  for (int i = 0; i < 3; i++) {
    addDoc(w, fieldType, "Arthur");
  }
  addDoc(w, fieldType, "Arthur"); // not "Arthur" to lower score
  w.commit();

  IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(w, true, true));

  List<LuceneCollectorExpression<?>> columnReferences = Collections.singletonList(new ScoreCollectorExpression());
  Query query = fieldType.termsQuery(Collections.singletonList("Arthur"), null);
  LuceneOrderedDocCollector collector = collector(searcher, columnReferences, query, null, false);
  KeyIterable<ShardId, Row> result = collector.collect();

  assertThat(Iterables.size(result), is(2));

  Iterator<Row> values = result.iterator();
  assertThat(values.next().get(0), Matchers.is(Float.NaN));
  assertThat(values.next().get(0), Matchers.is(Float.NaN));
}
Example #27
Source File: LuceneOrderedDocCollectorTest.java (from crate, Apache License 2.0)
@Test
public void testSearchWithScores() throws Exception {
  IndexWriter w = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig(new KeywordAnalyzer()));
  KeywordFieldMapper.KeywordFieldType fieldType = new KeywordFieldMapper.KeywordFieldType();
  fieldType.setName("x");
  fieldType.freeze();

  for (int i = 0; i < 3; i++) {
    addDoc(w, fieldType, "Arthur");
  }
  addDoc(w, fieldType, "Arthur"); // not "Arthur" to lower score
  w.commit();

  IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(w, true, true));

  List<LuceneCollectorExpression<?>> columnReferences = Collections.singletonList(new ScoreCollectorExpression());
  Query query = fieldType.termsQuery(Collections.singletonList("Arthur"), null);
  LuceneOrderedDocCollector collector = collector(searcher, columnReferences, query, null, true);
  KeyIterable<ShardId, Row> result = collector.collect();

  assertThat(Iterables.size(result), is(2));

  Iterator<Row> values = result.iterator();
  assertThat(values.next().get(0), Matchers.is(1.0F));
  assertThat(values.next().get(0), Matchers.is(1.0F));
}
Example #28
Source File: GroupByOptimizedIteratorTest.java (from crate, Apache License 2.0)
@Before
public void prepare() throws Exception {
  IndexWriter iw = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig(new StandardAnalyzer()));
  columnName = "x";
  expectedResult = new ArrayList<>(20);
  for (long i = 0; i < 20; i++) {
    Document doc = new Document();
    String val = "val_" + i;
    doc.add(new SortedSetDocValuesField(columnName, new BytesRef(val)));
    iw.addDocument(doc);
    expectedResult.add(new Object[]{val, 1L});
  }
  iw.commit();
  indexSearcher = new IndexSearcher(DirectoryReader.open(iw));

  inExpr = new InputCollectExpression(0);
  CountAggregation aggregation = (CountAggregation) getFunctions().getQualified(
      CountAggregation.COUNT_STAR_SIGNATURE,
      Collections.emptyList(),
      CountAggregation.COUNT_STAR_SIGNATURE.getReturnType().createType()
  );
  aggregationContexts = List.of(new AggregationContext(aggregation, () -> true, List.of()));
}
Example #29
Source File: GroupByOptimizedIteratorTest.java (from crate, Apache License 2.0)
@Test
public void testHighCardinalityRatioReturnsTrueForHighCardinality() throws Exception {
  IndexWriter iw = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig(new StandardAnalyzer()));
  String columnName = "x";
  for (int i = 0; i < 10; i++) {
    Document doc = new Document();
    BytesRef value = new BytesRef(Integer.toString(i));
    doc.add(new Field(columnName, value, KeywordFieldMapper.Defaults.FIELD_TYPE.clone()));
    iw.addDocument(doc);
  }
  iw.commit();

  IndexSearcher indexSearcher = new IndexSearcher(DirectoryReader.open(iw));
  assertThat(
      GroupByOptimizedIterator.hasHighCardinalityRatio(() -> new Engine.Searcher("dummy", indexSearcher, () -> {}), "x"),
      is(true)
  );
}
Example #30
Source File: GroupByOptimizedIteratorTest.java (from crate, Apache License 2.0)
@Test
public void testHighCardinalityRatioReturnsFalseForLowCardinality() throws Exception {
  IndexWriter iw = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig(new StandardAnalyzer()));
  String columnName = "x";
  for (int i = 0; i < 10; i++) {
    Document doc = new Document();
    BytesRef value = new BytesRef("1"); // every document gets the same value
    doc.add(new Field(columnName, value, KeywordFieldMapper.Defaults.FIELD_TYPE.clone()));
    iw.addDocument(doc);
  }
  iw.commit();

  IndexSearcher indexSearcher = new IndexSearcher(DirectoryReader.open(iw));
  assertThat(
      GroupByOptimizedIterator.hasHighCardinalityRatio(() -> new Engine.Searcher("dummy", indexSearcher, () -> {}), "x"),
      is(false)
  );
}