Java Code Examples for org.apache.lucene.util.ArrayUtil

The following examples show how to use org.apache.lucene.util.ArrayUtil. They are extracted from open source projects; the originating project, source file, and license are noted above each example where available.
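
Most of the examples below revolve around two ArrayUtil methods, grow and oversize. As a quick orientation, here is a minimal, self-contained sketch of their contracts (the class name is ours; it assumes lucene-core on the classpath):

import org.apache.lucene.util.ArrayUtil;

public class ArrayUtilGrowDemo {
  public static void main(String[] args) {
    // grow() returns the array unchanged when it is already large enough,
    // otherwise a larger copy whose capacity comes from oversize().
    int[] values = new int[4];
    values = ArrayUtil.grow(values, 10);
    System.out.println(values.length >= 10); // true

    // oversize() returns a capacity >= the requested minimum, adding roughly
    // 1/8 headroom so that repeated growth stays amortized O(1).
    System.out.println(ArrayUtil.oversize(10, Integer.BYTES)); // >= 10
  }
}
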
Example 1
Source Project: crate    Source File: BigByteArray.java    License: Apache License 2.0
/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
@Override
public void resize(long newSize) {
    final int numPages = numPages(newSize);
    if (numPages > pages.length) {
        pages = Arrays.copyOf(pages, ArrayUtil.oversize(numPages, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
    }
    for (int i = numPages - 1; i >= 0 && pages[i] == null; --i) {
        pages[i] = newBytePage(i);
    }
    for (int i = numPages; i < pages.length && pages[i] != null; ++i) {
        pages[i] = null;
        releasePage(i);
    }
    this.size = newSize;
}
 
Example 2
Source Project: lucene-solr    Source File: TestByteBuffersDataOutput.java    License: Apache License 2.0
@Test
public void testLargeArrayAdd() {
  ByteBuffersDataOutput o = new ByteBuffersDataOutput();
  int MB = 1024 * 1024;
  final byte [] bytes;
  if (LuceneTestCase.TEST_NIGHTLY) {
    bytes = randomBytesOfLength(5 * MB, 15 * MB);
  } else {
    bytes = randomBytesOfLength(MB/2, MB);
  }
  int offset = randomIntBetween(0, 100);
  int len = bytes.length - offset;
  o.writeBytes(bytes, offset, len);
  assertEquals(len, o.size());
  Assert.assertArrayEquals(ArrayUtil.copyOfSubArray(bytes, offset, offset + len), o.toArrayCopy());
}
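
The assertion relies on ArrayUtil.copyOfSubArray(array, from, to), which copies the half-open range [from, to), exactly like Arrays.copyOfRange. A small illustrative sketch (the data values are ours):

import java.util.Arrays;
import org.apache.lucene.util.ArrayUtil;

public class CopyOfSubArrayDemo {
  public static void main(String[] args) {
    byte[] src = {10, 20, 30, 40, 50};
    byte[] sub = ArrayUtil.copyOfSubArray(src, 1, 4); // 'to' is exclusive
    System.out.println(Arrays.toString(sub)); // [20, 30, 40]
  }
}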
 
Example 3
Source Project: lucene-solr    Source File: DaciukMihovAutomatonBuilder.java    License: Apache License 2.0
/**
 * Build a minimal, deterministic automaton from a sorted list of {@link BytesRef} representing
 * strings in UTF-8. These strings must be binary-sorted.
 */
public static Automaton build(Collection<BytesRef> input) {
  final DaciukMihovAutomatonBuilder builder = new DaciukMihovAutomatonBuilder();
  
  char[] chars = new char[0];
  CharsRef ref = new CharsRef();
  for (BytesRef b : input) {
    chars = ArrayUtil.grow(chars, b.length);
    final int len = UnicodeUtil.UTF8toUTF16(b, chars);
    ref.chars = chars;
    ref.length = len;
    builder.add(ref);
  }
  
  Automaton.Builder a = new Automaton.Builder();
  convert(a,
      builder.complete(), 
      new IdentityHashMap<State,Integer>());

  return a.finish();
}
 
Example 4
@Override
public void setDocument(int docId) {
    bytes = values.get(docId);
    in.reset(bytes.bytes, bytes.offset, bytes.length);
    if (!in.eof()) {
        // first value uses vLong on top of zig-zag encoding, then deltas are encoded using vLong
        long previousValue = longs[0] = ByteUtils.zigZagDecode(ByteUtils.readVLong(in));
        count = 1;
        while (!in.eof()) {
            longs = ArrayUtil.grow(longs, count + 1);
            previousValue = longs[count++] = previousValue + ByteUtils.readVLong(in);
        }
    } else {
        count = 0;
    }
}
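
ByteUtils here is an Elasticsearch helper, not part of Lucene's ArrayUtil. The zig-zag step it decodes maps signed values onto unsigned ones so that numbers near zero need few vLong bytes regardless of sign. A hedged sketch of the two transforms (method and class names are ours):

public class ZigZagSketch {
  // Maps ... -2 -> 3, -1 -> 1, 0 -> 0, 1 -> 2, 2 -> 4 ...
  static long encode(long v) {
    return (v << 1) ^ (v >> 63); // arithmetic shift smears the sign bit
  }

  static long decode(long v) {
    return (v >>> 1) ^ -(v & 1); // exact inverse of encode()
  }

  public static void main(String[] args) {
    for (long v : new long[] {-2, -1, 0, 1, 2}) {
      System.out.println(v + " -> " + encode(v) + " -> " + decode(encode(v)));
    }
  }
}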
 
Example 5
Source Project: Elasticsearch    Source File: BigByteArray.java    License: Apache License 2.0
/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
@Override
public void resize(long newSize) {
    final int numPages = numPages(newSize);
    if (numPages > pages.length) {
        pages = Arrays.copyOf(pages, ArrayUtil.oversize(numPages, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
    }
    for (int i = numPages - 1; i >= 0 && pages[i] == null; --i) {
        pages[i] = newBytePage(i);
    }
    for (int i = numPages; i < pages.length && pages[i] != null; ++i) {
        pages[i] = null;
        releasePage(i);
    }
    this.size = newSize;
}
 
Example 6
Source Project: Elasticsearch    Source File: BigObjectArray.java    License: Apache License 2.0
/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
@Override
public void resize(long newSize) {
    final int numPages = numPages(newSize);
    if (numPages > pages.length) {
        pages = Arrays.copyOf(pages, ArrayUtil.oversize(numPages, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
    }
    for (int i = numPages - 1; i >= 0 && pages[i] == null; --i) {
        pages[i] = newObjectPage(i);
    }
    for (int i = numPages; i < pages.length && pages[i] != null; ++i) {
        pages[i] = null;
        releasePage(i);
    }
    this.size = newSize;
}
 
Example 7
Source Project: Elasticsearch    Source File: BigLongArray.java    License: Apache License 2.0
/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
@Override
public void resize(long newSize) {
    final int numPages = numPages(newSize);
    if (numPages > pages.length) {
        pages = Arrays.copyOf(pages, ArrayUtil.oversize(numPages, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
    }
    for (int i = numPages - 1; i >= 0 && pages[i] == null; --i) {
        pages[i] = newLongPage(i);
    }
    for (int i = numPages; i < pages.length && pages[i] != null; ++i) {
        pages[i] = null;
        releasePage(i);
    }
    this.size = newSize;
}
 
Example 8
Source Project: lucene-solr    Source File: SortingLeafReader.java    License: Apache License 2.0
@Override
public int nextPosition() throws IOException {
  final int token = postingInput.readVInt();
  pos += token >>> 1;
  if (storeOffsets) {
    startOffset = endOffset + postingInput.readVInt();
    endOffset = startOffset + postingInput.readVInt();
  }
  if ((token & 1) != 0) {
    payload.offset = 0;
    payload.length = postingInput.readVInt();
    if (payload.length > payload.bytes.length) {
      payload.bytes = new byte[ArrayUtil.oversize(payload.length, 1)];
    }
    postingInput.readBytes(payload.bytes, 0, payload.length);
  } else {
    payload.length = 0;
  }
  return pos;
}
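
The token layout can be read off from this decoder: the position delta occupies the high bits and the lowest bit flags a payload. A sketch of the matching writer side, inferred from the reader rather than taken from SortingLeafReader itself:

import java.io.IOException;
import org.apache.lucene.store.DataOutput;

public class PositionTokenSketch {
  // Inferred from the reader above; not the actual Lucene writer code.
  static void writePositionToken(DataOutput out, int posDelta, boolean hasPayload)
      throws IOException {
    out.writeVInt((posDelta << 1) | (hasPayload ? 1 : 0));
  }
}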
 
Example 9
Source Project: lucene-solr    Source File: SearchImpl.java    License: Apache License 2.0
@Override
public Optional<SearchResults> prevPage() {
  if (currentPage < 0 || query == null) {
    throw new LukeException(new IllegalStateException("Search session not started."));
  }

  // return to previous page
  currentPage -= 1;

  if (currentPage < 0) {
    log.warn("No more previous search results are available.");
    return Optional.empty();
  }

  try {
    // there should be cached results for this page
    int from = currentPage * pageSize;
    int to = Math.min(from + pageSize, docs.length);
    ScoreDoc[] part = ArrayUtil.copyOfSubArray(docs, from, to);
    return Optional.of(SearchResults.of(totalHits, part, from, searcher, fieldsToLoad));
  } catch (IOException e) {
    throw new LukeException("Search Failed.", e);
  }
}
 
Example 10
void startBlock(FieldReader.SegmentTermsEnum.Frame frame, boolean isFloor) {
  totalBlockCount++;
  if (isFloor) {
    if (frame.fp == frame.fpOrig) {
      floorBlockCount++;
    }
    floorSubBlockCount++;
  } else {
    nonFloorBlockCount++;
  }

  if (blockCountByPrefixLen.length <= frame.prefix) {
    blockCountByPrefixLen = ArrayUtil.grow(blockCountByPrefixLen, 1+frame.prefix);
  }
  blockCountByPrefixLen[frame.prefix]++;
  startBlockCount++;
  totalBlockSuffixBytes += frame.suffixesReader.length();
  totalBlockStatsBytes += frame.statsReader.length();
}
 
Example 11
/**
 * Saves the existing attribute states
 */
private void saveState() {
    // otherwise, we have delimiters, save state
    savedStartOffset = offsetAttribute.startOffset();
    savedEndOffset = offsetAttribute.endOffset();
    // if length by start + end offsets doesn't match the term text then assume this is a synonym and don't adjust the offsets.
    hasIllegalOffsets = savedEndOffset - savedStartOffset != termAttribute.length();
    savedType = typeAttribute.type();

    if (savedBuffer.length < termAttribute.length()) {
        savedBuffer = new char[ArrayUtil.oversize(termAttribute.length(), Character.BYTES)];
    }

    System.arraycopy(termAttribute.buffer(), 0, savedBuffer, 0, termAttribute.length());
    iterator.text = savedBuffer;

    hasSavedState = true;
}
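
The second argument to oversize is the element width in bytes (Character.BYTES for the char buffer above). oversize uses it to round the returned length so the array's total byte footprint stays 8-byte aligned, which is why the same minimum length can yield different capacities for different element widths. A small sketch, assuming lucene-core:

import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.RamUsageEstimator;

public class OversizeWidths {
  public static void main(String[] args) {
    // Same requested minimum, three element widths; the rounding differs
    // because oversize aligns the byte size of the array, not its length.
    System.out.println(ArrayUtil.oversize(1000, Byte.BYTES));
    System.out.println(ArrayUtil.oversize(1000, Character.BYTES));
    System.out.println(ArrayUtil.oversize(1000, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
  }
}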
 
Example 12
Source Project: lucene-solr    Source File: SimpleTextBKDWriter.java    License: Apache License 2.0
public static void verifyParams(int numDims, int numIndexDims, int maxPointsInLeafNode, double maxMBSortInHeap, long totalPointCount) {
  // We encode dim in a single byte in the splitPackedValues, but we only expose 4 bits for it now, in case we want to use
  // remaining 4 bits for another purpose later
  if (numDims < 1 || numDims > MAX_DIMS) {
    throw new IllegalArgumentException("numDims must be 1 .. " + MAX_DIMS + " (got: " + numDims + ")");
  }
  if (numIndexDims < 1 || numIndexDims > MAX_INDEX_DIMS) {
    throw new IllegalArgumentException("numIndexDims must be 1 .. " + MAX_INDEX_DIMS + " (got: " + numIndexDims + ")");
  }
  if (numIndexDims > numDims) {
    throw new IllegalArgumentException("numIndexDims cannot exceed numDims (" + numDims + ") (got: " + numIndexDims + ")");
  }
  if (maxPointsInLeafNode <= 0) {
    throw new IllegalArgumentException("maxPointsInLeafNode must be > 0; got " + maxPointsInLeafNode);
  }
  if (maxPointsInLeafNode > ArrayUtil.MAX_ARRAY_LENGTH) {
    throw new IllegalArgumentException("maxPointsInLeafNode must be <= ArrayUtil.MAX_ARRAY_LENGTH (= " + ArrayUtil.MAX_ARRAY_LENGTH + "); got " + maxPointsInLeafNode);
  }
  if (maxMBSortInHeap < 0.0) {
    throw new IllegalArgumentException("maxMBSortInHeap must be >= 0.0 (got: " + maxMBSortInHeap + ")");
  }
  if (totalPointCount < 0) {
    throw new IllegalArgumentException("totalPointCount must be >=0 (got: " + totalPointCount + ")");
  }
}
 
Example 13
Source Project: lucene-solr    Source File: BKDWriter.java    License: Apache License 2.0
public static void verifyParams(int numDims, int numIndexDims, int maxPointsInLeafNode, double maxMBSortInHeap, long totalPointCount) {
  // We encode dim in a single byte in the splitPackedValues, but we only expose 4 bits for it now, in case we want to use
  // remaining 4 bits for another purpose later
  if (numDims < 1 || numDims > MAX_DIMS) {
    throw new IllegalArgumentException("numDims must be 1 .. " + MAX_DIMS + " (got: " + numDims + ")");
  }
  if (numIndexDims < 1 || numIndexDims > MAX_INDEX_DIMS) {
    throw new IllegalArgumentException("numIndexDims must be 1 .. " + MAX_INDEX_DIMS + " (got: " + numIndexDims + ")");
  }
  if (numIndexDims > numDims) {
    throw new IllegalArgumentException("numIndexDims cannot exceed numDims (" + numDims + ") (got: " + numIndexDims + ")");
  }
  if (maxPointsInLeafNode <= 0) {
    throw new IllegalArgumentException("maxPointsInLeafNode must be > 0; got " + maxPointsInLeafNode);
  }
  if (maxPointsInLeafNode > ArrayUtil.MAX_ARRAY_LENGTH) {
    throw new IllegalArgumentException("maxPointsInLeafNode must be <= ArrayUtil.MAX_ARRAY_LENGTH (= " + ArrayUtil.MAX_ARRAY_LENGTH + "); got " + maxPointsInLeafNode);
  }
  if (maxMBSortInHeap < 0.0) {
    throw new IllegalArgumentException("maxMBSortInHeap must be >= 0.0 (got: " + maxMBSortInHeap + ")");
  }
  if (totalPointCount < 0) {
    throw new IllegalArgumentException("totalPointCount must be >=0 (got: " + totalPointCount + ")");
  }
}
 
Example 14
Source Project: lucene-solr    Source File: TestBufferedIndexInput.java    License: Apache License 2.0
private void checkReadBytes(IndexInput input, int size, int pos) throws IOException{
  // Just to see that "offset" is treated properly in readBytes(), we
  // add an arbitrary offset at the beginning of the array
  int offset = size % 10; // arbitrary
  buffer = ArrayUtil.grow(buffer, offset+size);
  assertEquals(pos, input.getFilePointer());
  long left = TEST_FILE_LENGTH - input.getFilePointer();
  if (left <= 0) {
    return;
  } else if (left < size) {
    size = (int) left;
  }
  input.readBytes(buffer, offset, size);
  assertEquals(pos+size, input.getFilePointer());
  for(int i=0; i<size; i++) {
    assertEquals("pos=" + i + " filepos=" + (pos+i), byten(pos+i), buffer[offset+i]);
  }
}
 
Example 15
Source Project: lucene-solr    Source File: BufferedInputIterator.java    License: Apache License 2.0
/** Creates a new iterator, buffering entries from the specified iterator */
public BufferedInputIterator(InputIterator source) throws IOException {
  BytesRef spare;
  int freqIndex = 0;
  hasPayloads = source.hasPayloads();
  hasContexts = source.hasContexts();
  while((spare = source.next()) != null) {
    entries.append(spare);
    if (hasPayloads) {
      payloads.append(source.payload());
    }
    if (hasContexts) {
      contextSets.add(source.contexts());
    }
    if (freqIndex >= freqs.length) {
      freqs = ArrayUtil.grow(freqs, freqs.length+1);
    }
    freqs[freqIndex++] = source.weight();
  }
}
 
Example 16
Source Project: lucene-solr    Source File: DirectMonotonicWriter.java    License: Apache License 2.0
DirectMonotonicWriter(IndexOutput metaOut, IndexOutput dataOut, long numValues, int blockShift) {
  if (blockShift < MIN_BLOCK_SHIFT || blockShift > MAX_BLOCK_SHIFT) {
    throw new IllegalArgumentException("blockShift must be in [" + MIN_BLOCK_SHIFT + "-" + MAX_BLOCK_SHIFT + "], got " + blockShift);
  }
  if (numValues < 0) {
    throw new IllegalArgumentException("numValues can't be negative, got " + numValues);
  }
  final long numBlocks = numValues == 0 ? 0 : ((numValues - 1) >>> blockShift) + 1;
  if (numBlocks > ArrayUtil.MAX_ARRAY_LENGTH) {
    throw new IllegalArgumentException("blockShift is too low for the provided number of values: blockShift=" + blockShift +
        ", numValues=" + numValues + ", MAX_ARRAY_LENGTH=" + ArrayUtil.MAX_ARRAY_LENGTH);
  }
  this.meta = metaOut;
  this.data = dataOut;
  this.numValues = numValues;
  final int blockSize = 1 << blockShift;
  this.buffer = new long[(int) Math.min(numValues, blockSize)];
  this.bufferSize = 0;
  this.baseDataPointer = dataOut.getFilePointer();
}
 
Example 17
Source Project: lucene-solr    Source File: TermsWithScoreCollector.java    License: Apache License 2.0
@Override
public void collect(int doc) throws IOException {
  if (docValues.advanceExact(doc)) {
    long ord;
    while ((ord = docValues.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
      int termID = collectedTerms.add(docValues.lookupOrd(ord));
      if (termID < 0) {
        termID = -termID - 1;
      } else {
        if (termID >= scoreSums.length) {
          scoreSums = ArrayUtil.grow(scoreSums);
          scoreCounts = ArrayUtil.grow(scoreCounts);
        }
      }
    
      scoreSums[termID] += scorer.score();
      scoreCounts[termID]++;
    }
  }
}
 
Example 18
Source Project: lucene-solr    Source File: DocValuesTermsQuery.java    License: Apache License 2.0
public DocValuesTermsQuery(String field, Collection<BytesRef> terms) {
  this.field = Objects.requireNonNull(field);
  Objects.requireNonNull(terms, "Collection of terms must not be null");
  BytesRef[] sortedTerms = terms.toArray(new BytesRef[terms.size()]);
  ArrayUtil.timSort(sortedTerms);
  PrefixCodedTerms.Builder builder = new PrefixCodedTerms.Builder();
  BytesRef previous = null;
  for (BytesRef term : sortedTerms) {
    if (term.equals(previous) == false) {
      builder.add(field, term);
    }
    previous = term;
  }
  termData = builder.finish();
  termDataHashCode = termData.hashCode();
}
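
ArrayUtil.timSort gives a stable merge sort that is cheap on partially ordered input; once the terms are sorted, equal terms sit next to each other, so comparing each term with its predecessor is enough to deduplicate. The same pattern on plain strings (the data values are ours):

import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.util.ArrayUtil;

public class SortDedupSketch {
  public static void main(String[] args) {
    String[] terms = {"b", "a", "b", "c", "a"};
    ArrayUtil.timSort(terms); // duplicates are now adjacent
    List<String> unique = new ArrayList<>();
    String previous = null;
    for (String term : terms) {
      if (term.equals(previous) == false) {
        unique.add(term);
      }
      previous = term;
    }
    System.out.println(unique); // [a, b, c]
  }
}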
 
Example 19
Source Project: lucene-solr    Source File: JapaneseTokenizer.java    License: Apache License 2.0
private void reserve(int n) {
  if (capacity < n) {
    int oversize = ArrayUtil.oversize(n, Integer.BYTES);
    nodeDicType = new Type[oversize];
    nodeWordID = new int[oversize];
    nodeMark = new int[oversize];
    nodeLeftID = new int[oversize];
    nodeRightID = new int[oversize];
    nodeWordCost = new int[oversize];
    nodeLeftCost = new int[oversize];
    nodeRightCost = new int[oversize];
    nodeLeftNode = new int[oversize];
    nodeRightNode = new int[oversize];
    nodeLeft = new int[oversize];
    nodeRight = new int[oversize];
    nodeLeftChain = new int[oversize];
    nodeRightChain = new int[oversize];
    capacity = oversize;
  }
}
 
Example 20
Source Project: lucene-solr    Source File: BinaryDictionaryWriter.java    License: Apache License 2.0
void addMapping(int sourceId, int wordId) {
  if (wordId <= lastWordId) {
    throw new IllegalStateException("words out of order: " + wordId + " vs lastID: " + lastWordId);
  }
  
  if (sourceId > lastSourceId) {
    targetMapOffsets = ArrayUtil.grow(targetMapOffsets, sourceId + 1);
    for (int i = lastSourceId + 1; i <= sourceId; i++) {
      targetMapOffsets[i] = targetMapEndOffset;
    }
  } else if (sourceId != lastSourceId) {
    throw new IllegalStateException("source ids not in increasing order: lastSourceId=" + lastSourceId + " vs sourceId=" + sourceId);
  }

  targetMap = ArrayUtil.grow(targetMap, targetMapEndOffset + 1);
  targetMap[targetMapEndOffset] = wordId;
  targetMapEndOffset++;

  lastSourceId = sourceId;
  lastWordId = wordId;
}
 
Example 21
Source Project: lucene-solr    Source File: SortedSetDocValuesWriter.java    License: Apache License 2.0
private void addOneValue(BytesRef value) {
  int termID = hash.add(value);
  if (termID < 0) {
    termID = -termID-1;
  } else {
    // reserve additional space for each unique value:
    // 1. when indexing, when hash is 50% full, rehash() suddenly needs 2*size ints.
    //    TODO: can this same OOM happen in THPF?
    // 2. when flushing, we need 1 int per value (slot in the ordMap).
    iwBytesUsed.addAndGet(2 * Integer.BYTES);
  }
  
  if (currentUpto == currentValues.length) {
    currentValues = ArrayUtil.grow(currentValues, currentValues.length+1);
    iwBytesUsed.addAndGet((currentValues.length - currentUpto) * Integer.BYTES);
  }
  
  currentValues[currentUpto] = termID;
  currentUpto++;
}
 
Example 22
/**
 * Adjusts <code>expDocNrs</code> based on the filler docs injected in the index, 
 * and if necessary wraps the <code>q</code> in a BooleanQuery that will filter out all 
 * filler docs using the {@link #EXTRA} field.
 * 
 * @see #replaceIndex
 */
@Override
public void qtest(Query q, int[] expDocNrs) throws Exception {

  expDocNrs = ArrayUtil.copyOfSubArray(expDocNrs, 0, expDocNrs.length);
  for (int i=0; i < expDocNrs.length; i++) {
    expDocNrs[i] = PRE_FILLER_DOCS + ((NUM_FILLER_DOCS + 1) * expDocNrs[i]);
  }

  if (null != EXTRA) {
    BooleanQuery.Builder builder = new BooleanQuery.Builder();
    builder.add(new BooleanClause(q, BooleanClause.Occur.MUST));
    builder.add(new BooleanClause(new TermQuery(new Term(EXTRA, EXTRA)), BooleanClause.Occur.MUST_NOT));
    q = builder.build();
  }
  super.qtest(q, expDocNrs);
}
 
Example 23
/**
 * Writes the specified byte to this byte array output stream.
 *
 * @param b the byte to be written.
 */
public void write(int b) {
    int newcount = count + 1;
    if (newcount > buf.length) {
        buf = Arrays.copyOf(buf, ArrayUtil.oversize(newcount, 1));
    }
    buf[count] = (byte) b;
    count = newcount;
}
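
Sizing the replacement buffer with oversize(newcount, 1) instead of exactly newcount is what makes repeated single-byte writes cheap: each reallocation adds proportional headroom, so the number of copies grows logarithmically rather than linearly. A micro-benchmark-style sketch (the class name is ours):

import java.util.Arrays;
import org.apache.lucene.util.ArrayUtil;

public class AmortizedGrowth {
  public static void main(String[] args) {
    byte[] buf = new byte[0];
    int count = 0;
    int reallocations = 0;
    for (int i = 0; i < 1_000_000; i++) {
      if (count + 1 > buf.length) {
        buf = Arrays.copyOf(buf, ArrayUtil.oversize(count + 1, Byte.BYTES));
        reallocations++;
      }
      buf[count++] = (byte) i;
    }
    // Prints a small number (on the order of a hundred), not one million.
    System.out.println("reallocations: " + reallocations);
  }
}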
 
Example 24
/**
 * Stem a word contained in a portion of a char[] array. Returns true if the
 * stemming process resulted in a word different from the input. You can
 * retrieve the result with getResultLength()/getResultBuffer() or
 * toString().
 */
public boolean stem(char[] wordBuffer, int offset, int wordLen)
{
    reset();
    if (b.length < wordLen)
    {
        b = new char[ArrayUtil.oversize(wordLen, Character.BYTES)];
    }
    System.arraycopy(wordBuffer, offset, b, 0, wordLen);
    i = wordLen;
    return stem(0);
}
 
Example 25
Source Project: lucene-solr    Source File: TestPhraseQuery.java    License: Apache License 2.0
public void testTopPhrases() throws IOException {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
  String[] docs = ArrayUtil.copyOfSubArray(DOCS, 0, DOCS.length);
  Collections.shuffle(Arrays.asList(docs), random());
  for (String value : docs) {
    Document doc = new Document();
    doc.add(new TextField("f", value, Store.NO));
    w.addDocument(doc);
  }
  IndexReader r = DirectoryReader.open(w);
  w.close();
  IndexSearcher searcher = newSearcher(r);
  for (Query query : Arrays.asList(
      new PhraseQuery("f", "b", "c"), // common phrase
      new PhraseQuery("f", "e", "f"), // always appear next to each other
      new PhraseQuery("f", "d", "d")  // repeated term
      )) {
    for (int topN = 1; topN <= 2; ++topN) {
      TopScoreDocCollector collector1 = TopScoreDocCollector.create(topN, null, Integer.MAX_VALUE);
      searcher.search(query, collector1);
      ScoreDoc[] hits1 = collector1.topDocs().scoreDocs;
      TopScoreDocCollector collector2 = TopScoreDocCollector.create(topN, null, 1);
      searcher.search(query, collector2);
      ScoreDoc[] hits2 = collector2.topDocs().scoreDocs;
      assertTrue("" + query, hits1.length > 0);
      CheckHits.checkEqual(query, hits1, hits2);
    }
  }
  r.close();
  dir.close();
}
 
Example 26
Source Project: lucene-solr    Source File: BlendedTermQuery.java    License: Apache License 2.0
/**
 * Expert: Add a {@link Term} with the provided boost and context.
 * This method is useful if you already have a {@link TermStates}
 * object constructed for the given term.
 */
public Builder add(Term term, float boost, TermStates context) {
  if (numTerms >= IndexSearcher.getMaxClauseCount()) {
    throw new IndexSearcher.TooManyClauses();
  }
  terms = ArrayUtil.grow(terms, numTerms + 1);
  boosts = ArrayUtil.grow(boosts, numTerms + 1);
  contexts = ArrayUtil.grow(contexts, numTerms + 1);
  terms[numTerms] = term;
  boosts[numTerms] = boost;
  contexts[numTerms] = context;
  numTerms += 1;
  return this;
}
 
Example 27
Source Project: Elasticsearch    Source File: GeoPointDVAtomicFieldData.java    License: Apache License 2.0
@Override
public MultiGeoPointValues getGeoPointValues() {
    return new MultiGeoPointValues() {
        GeoPoint[] points = new GeoPoint[0];
        private int count = 0;

        @Override
        public void setDocument(int docId) {
            values.setDocument(docId);
            count = values.count();
            if (count > points.length) {
                final int previousLength = points.length;
                points = Arrays.copyOf(points, ArrayUtil.oversize(count, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
                for (int i = previousLength; i < points.length; ++i) {
                    points[i] = new GeoPoint(Double.NaN, Double.NaN);
                }
            }
            for (int i=0; i<count; ++i) {
                points[i].resetFromIndexHash(values.valueAt(i));
            }
        }

        @Override
        public int count() {
            return count;
        }

        @Override
        public GeoPoint valueAt(int index) {
            return points[index];
        }
    };
}
 
Example 28
Source Project: lucene-solr    Source File: FiniteStringsIterator.java    License: Apache License 2.0
/**
 * Grow path stack, if required.
 */
private void growStack(int depth) {
  if (nodes.length == depth) {
    PathNode[] newNodes = new PathNode[ArrayUtil.oversize(nodes.length + 1, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
    System.arraycopy(nodes, 0, newNodes, 0, nodes.length);
    for (int i = depth, end = newNodes.length; i < end; i++) {
      newNodes[i] = new PathNode();
    }
    nodes = newNodes;
  }
}
 
Example 29
Source Project: lucene-solr    Source File: CompressingStoredFieldsWriter.java    License: Apache License 2.0
@Override
public void finishDocument() throws IOException {
  if (numBufferedDocs == this.numStoredFields.length) {
    final int newLength = ArrayUtil.oversize(numBufferedDocs + 1, 4);
    this.numStoredFields = ArrayUtil.growExact(this.numStoredFields, newLength);
    endOffsets = ArrayUtil.growExact(endOffsets, newLength);
  }
  this.numStoredFields[numBufferedDocs] = numStoredFieldsInDoc;
  numStoredFieldsInDoc = 0;
  endOffsets[numBufferedDocs] = Math.toIntExact(bufferedDocs.size());
  ++numBufferedDocs;
  if (triggerFlush()) {
    flush();
  }
}
 
Example 30
Source Project: Elasticsearch    Source File: AbstractBigArray.java    License: Apache License 2.0
private static <T> T[] grow(T[] array, int minSize) {
    if (array.length < minSize) {
        final int newLen = ArrayUtil.oversize(minSize, RamUsageEstimator.NUM_BYTES_OBJECT_REF);
        array = Arrays.copyOf(array, newLen);
    }
    return array;
}