Java Code Examples for org.apache.lucene.index.DirectoryReader#listCommits()

The following examples show how to use org.apache.lucene.index.DirectoryReader#listCommits(). You can go to the original project or source file by following the links above each example.
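
Before the project examples, here is a minimal, self-contained sketch of the call itself; the class name and index path are placeholders, not taken from any project below. DirectoryReader.listCommits(Directory) returns the commit points still present in the index, sorted from oldest to newest, and throws IndexNotFoundException when the directory holds no commit.

import java.nio.file.Paths;
import java.util.List;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexNotFoundException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class ListCommitsSketch {
  public static void main(String[] args) throws Exception {
    // The path is a placeholder for an existing index directory.
    try (Directory dir = FSDirectory.open(Paths.get("/path/to/index"))) {
      // Commits are sorted from oldest to newest.
      List<IndexCommit> commits = DirectoryReader.listCommits(dir);
      for (IndexCommit commit : commits) {
        System.out.println("generation=" + commit.getGeneration()
            + " segmentsFile=" + commit.getSegmentsFileName()
            + " userData=" + commit.getUserData());
      }
    } catch (IndexNotFoundException e) {
      // listCommits throws rather than returning an empty list.
      System.out.println("no commits in directory");
    }
  }
}

Note that with IndexWriter's default deletion policy only the most recent commit survives; a short note on retaining older commits follows Example 11.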
Example 1
Source File: IndexReplicationHandler.java    From lucene-solr with Apache License 2.0
/**
 * Constructor with the given index directory and callback to notify when the
 * indexes were updated.
 */
public IndexReplicationHandler(Directory indexDir, Callable<Boolean> callback) throws IOException {
  this.callback = callback;
  this.indexDir = indexDir;
  currentRevisionFiles = null;
  currentVersion = null;
  if (DirectoryReader.indexExists(indexDir)) {
    final List<IndexCommit> commits = DirectoryReader.listCommits(indexDir);
    final IndexCommit commit = commits.get(commits.size() - 1);
    currentRevisionFiles = IndexRevision.revisionFiles(commit);
    currentVersion = IndexRevision.revisionVersion(commit);
    final InfoStream infoStream = InfoStream.getDefault();
    if (infoStream.isEnabled(INFO_STREAM_COMPONENT)) {
      infoStream.message(INFO_STREAM_COMPONENT, "constructor(): currentVersion=" + currentVersion
          + " currentRevisionFiles=" + currentRevisionFiles);
      infoStream.message(INFO_STREAM_COMPONENT, "constructor(): commit=" + commit);
    }
  }
}
 
Example 2
Source File: FastHdfsKeyValueDirectoryTest.java    From incubator-retired-blur with Apache License 2.0
private void assertFiles(Set<String> expected, int run, int commit, FastHdfsKeyValueDirectory directory)
    throws IOException {
  Set<String> actual;
  if (DirectoryReader.indexExists(directory)) {
    List<IndexCommit> listCommits = DirectoryReader.listCommits(directory);
    // assertEquals(1, listCommits.size());
    IndexCommit indexCommit = listCommits.get(0);
    actual = new TreeSet<String>(indexCommit.getFileNames());
  } else {
    actual = new TreeSet<String>();
  }

  Set<String> missing = new TreeSet<String>(expected);
  missing.removeAll(actual);
  Set<String> extra = new TreeSet<String>(actual);
  extra.removeAll(expected);
  assertEquals("Pass [" + run + "] Missing Files " + " Extra Files " + extra + "", expected, actual);
}
 
Example 3
Source File: OpenReaderTask.java    From lucene-solr with Apache License 2.0
public static IndexCommit findIndexCommit(Directory dir, String userData) throws IOException {
  Collection<IndexCommit> commits = DirectoryReader.listCommits(dir);
  for (final IndexCommit ic : commits) {
    Map<String,String> map = ic.getUserData();
    String ud = null;
    if (map != null) {
      ud = map.get(USER_DATA);
    }
    if (ud != null && ud.equals(userData)) {
      return ic;
    }
  }

  throw new IOException("index does not contain commit with userData: " + userData);
}
 
Example 4
Source File: IndexReplicationHandler.java    From lucene-solr with Apache License 2.0
/**
 * Returns the last {@link IndexCommit} found in the {@link Directory}, or
 * {@code null} if there are no commits.
 */
public static IndexCommit getLastCommit(Directory dir) throws IOException {
  try {
    if (DirectoryReader.indexExists(dir)) {
      List<IndexCommit> commits = DirectoryReader.listCommits(dir);
      // listCommits guarantees that we get at least one commit back, or
      // IndexNotFoundException which we handle below
      return commits.get(commits.size() - 1);
    }
  } catch (IndexNotFoundException e) {
    // ignore the exception and return null
  }
  return null;
}
 
Example 5
Source File: CommitsImpl.java    From lucene-solr with Apache License 2.0
private Map<Long, IndexCommit> initCommitMap() {
  try {
    List<IndexCommit> indexCommits = DirectoryReader.listCommits(dir);
    Map<Long, IndexCommit> map = new TreeMap<>();
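    // Key each commit by its generation; the TreeMap keeps entries sorted by generation.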
    for (IndexCommit ic : indexCommits) {
      map.put(ic.getGeneration(), ic);
    }
    return map;
  } catch (IOException e) {
    throw new LukeException("Failed to get commits list.", e);
  }
}
 
Example 6
Source File: TestSolrCoreSnapshots.java    From lucene-solr with Apache License 2.0
private List<IndexCommit> listCommits(String directory) throws Exception {
  Directory dir = new NIOFSDirectory(Paths.get(directory));
  try {
    return DirectoryReader.listCommits(dir);
  } catch (IndexNotFoundException ex) {
    // This can happen when the delete snapshot functionality cleans up the index files (when the directory
    // storing these files is not the *current* index directory).
    return Collections.emptyList();
  }
}
 
Example 7
Source File: MergeSortRowIdMatcher.java    From incubator-retired-blur with Apache License 2.0
public MergeSortRowIdMatcher(Directory directory, long generation, Configuration configuration, Path cachePath,
    Progressable progressable) throws IOException {
  List<IndexCommit> listCommits = DirectoryReader.listCommits(directory);
  _indexCommit = findIndexCommit(listCommits, generation);
  _configuration = configuration;
  _cachePath = cachePath;
  _directory = directory;
  _progressable = progressable == null ? NO_OP : progressable;
  _readers = openReaders();
}
 
Example 8
Source File: LookupBuilderReducer.java    From incubator-retired-blur with Apache License 2.0
private MergeSortRowIdMatcher getMergeSortRowIdMatcher(Text rowId,
    Reducer<Text, NullWritable, Text, BooleanWritable>.Context context) throws IOException {
  BlurPartitioner blurPartitioner = new BlurPartitioner();
  int shard = blurPartitioner.getShard(rowId, _numberOfShardsInTable);
  String shardName = ShardUtil.getShardName(shard);

  Path shardPath = new Path(_tablePath, shardName);
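  // Open the shard's HDFS directory and look up the commit generation recorded for the snapshot.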
  HdfsDirectory hdfsDirectory = new HdfsDirectory(_configuration, shardPath);
  SnapshotIndexDeletionPolicy policy = new SnapshotIndexDeletionPolicy(_configuration,
      SnapshotIndexDeletionPolicy.getGenerationsPath(shardPath));
  Long generation = policy.getGeneration(_snapshot);
  if (generation == null) {
    hdfsDirectory.close();
    throw new IOException("Snapshot [" + _snapshot + "] not found in shard [" + shardPath + "]");
  }

  BlurConfiguration bc = new BlurConfiguration();
  BlockCacheDirectoryFactoryV2 blockCacheDirectoryFactoryV2 = new BlockCacheDirectoryFactoryV2(bc,
      _totalNumberOfBytes);
  _closer.register(blockCacheDirectoryFactoryV2);
  Directory dir = blockCacheDirectoryFactoryV2.newDirectory("table", "shard", hdfsDirectory, null);
  List<IndexCommit> listCommits = DirectoryReader.listCommits(dir);
  IndexCommit indexCommit = ExistingDataIndexLookupMapper.findIndexCommit(listCommits, generation, shardPath);
  DirectoryReader reader = DirectoryReader.open(indexCommit);
  _rowIdsFromIndex.setValue(getTotalNumberOfRowIds(reader));

  Path cachePath = MergeSortRowIdMatcher.getCachePath(_cachePath, _table, shardName);
  return new MergeSortRowIdMatcher(dir, generation, _configuration, cachePath, context);
}
 
Example 9
Source File: ExistingDataIndexLookupMapper.java    From incubator-retired-blur with Apache License 2.0
private IndexSearcher getIndexSearcher(String rowId) throws IOException {
  int shard = _blurPartitioner.getShard(rowId, _numberOfShardsInTable);
  if (_indexSearcher != null) {
    if (shard != _indexShard) {
      throw new IOException("Input data is not partitioned correctly.");
    }
    return _indexSearcher;
  } else {
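    // First call for this mapper: open the shard index at the snapshot's recorded commit generation.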
    _indexShard = shard;
    Path shardPath = new Path(_tablePath, ShardUtil.getShardName(_indexShard));
    HdfsDirectory hdfsDirectory = new HdfsDirectory(_configuration, shardPath);
    SnapshotIndexDeletionPolicy policy = new SnapshotIndexDeletionPolicy(_configuration,
        SnapshotIndexDeletionPolicy.getGenerationsPath(shardPath));
    Long generation = policy.getGeneration(_snapshot);
    if (generation == null) {
      hdfsDirectory.close();
      throw new IOException("Snapshot [" + _snapshot + "] not found in shard [" + shardPath + "]");
    }

    BlurConfiguration bc = new BlurConfiguration();
    BlockCacheDirectoryFactoryV2 blockCacheDirectoryFactoryV2 = new BlockCacheDirectoryFactoryV2(bc,
        _totalNumberOfBytes);
    _closer.register(blockCacheDirectoryFactoryV2);
    Directory dir = blockCacheDirectoryFactoryV2.newDirectory("table", "shard", hdfsDirectory, null);

    List<IndexCommit> listCommits = DirectoryReader.listCommits(dir);
    IndexCommit indexCommit = findIndexCommit(listCommits, generation, shardPath);
    _reader = DirectoryReader.open(indexCommit);
    return _indexSearcher = new IndexSearcher(_reader);
  }
}
 
Example 10
Source File: EngineTestCase.java    From crate with Apache License 2.0
/**
 * Asserts that the max_seq_no stored in the commit's user_data is never smaller than seq_no of any document in the commit.
 */
public static void assertMaxSeqNoInCommitUserData(Engine engine) throws Exception {
    List<IndexCommit> commits = DirectoryReader.listCommits(engine.store.directory());
    for (IndexCommit commit : commits) {
        try (DirectoryReader reader = DirectoryReader.open(commit)) {
            assertThat(Long.parseLong(commit.getUserData().get(SequenceNumbers.MAX_SEQ_NO)),
                greaterThanOrEqualTo(maxSeqNosInReader(reader)));
        }
    }
}
 
Example 11
Source File: FastHdfsKeyValueDirectoryTest.java    From incubator-retired-blur with Apache License 2.0
@Test
public void testMulipleCommitsAndReopens() throws IOException {
  IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new KeywordAnalyzer());
  conf.setMergeScheduler(new SerialMergeScheduler());
  TieredMergePolicy mergePolicy = (TieredMergePolicy) conf.getMergePolicy();
  mergePolicy.setUseCompoundFile(false);

  Set<String> fileSet = new TreeSet<String>();
  long seed = new Random().nextLong();
  System.out.println("Seed:" + seed);
  Random random = new Random(seed);
  int docCount = 0;
  int passes = 10;
  byte[] segmentsGenContents = null;
  for (int run = 0; run < passes; run++) {
    final FastHdfsKeyValueDirectory directory = new FastHdfsKeyValueDirectory(false, _timer, _configuration,
        new Path(_path, "test_multiple_commits_reopens"));
    if (segmentsGenContents != null) {
      byte[] segmentsGenContentsCurrent = readSegmentsGen(directory);
      assertTrue(Arrays.equals(segmentsGenContents, segmentsGenContentsCurrent));
    }
    assertFiles(fileSet, run, -1, directory);
    assertEquals(docCount, getDocumentCount(directory));
    IndexWriter writer = new IndexWriter(directory, conf.clone());
    int numberOfCommits = random.nextInt(100);
    for (int i = 0; i < numberOfCommits; i++) {
      assertFiles(fileSet, run, i, directory);
      addDocuments(writer, random.nextInt(100));
      // Before Commit
      writer.commit();
      // After Commit

      // Set files after commit
      {
        fileSet.clear();
        List<IndexCommit> listCommits = DirectoryReader.listCommits(directory);
        assertEquals(1, listCommits.size());
        IndexCommit indexCommit = listCommits.get(0);
        fileSet.addAll(indexCommit.getFileNames());
      }
      segmentsGenContents = readSegmentsGen(directory);
    }
    writer.close();
    docCount = getDocumentCount(directory);
  }
}
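 
A closing note on commit retention: the assertEquals(1, listCommits.size()) in Example 11 works because IndexWriter's default deletion policy, KeepOnlyLastCommitDeletionPolicy, removes older commit points as soon as a new commit succeeds. To make listCommits() return more than one entry, an IndexDeletionPolicy that retains commits must be configured. Below is a minimal sketch (the class name is illustrative, and it assumes Lucene 8+ for ByteBuffersDirectory); NoDeletionPolicy.INSTANCE is used purely to keep every commit alive.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.NoDeletionPolicy;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class RetainCommitsSketch {
  public static void main(String[] args) throws Exception {
    try (Directory dir = new ByteBuffersDirectory()) {
      IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
      // Keep every commit point instead of only the latest one.
      conf.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
      try (IndexWriter writer = new IndexWriter(dir, conf)) {
        writer.addDocument(new Document());
        writer.commit(); // commit point 1
        writer.addDocument(new Document());
        writer.commit(); // commit point 2
      }
      // Both commit points are still listed.
      System.out.println(DirectoryReader.listCommits(dir).size()); // expected: 2
    }
  }
}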