org.rocksdb.FlushOptions Java Examples

The following examples show how to use org.rocksdb.FlushOptions. They are drawn from open-source projects; the source file, project, and license are noted above each example.
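
Before the project examples, here is a minimal sketch of the basic pattern most of them follow: create a FlushOptions instance, optionally call setWaitForFlush(true) so the flush call blocks until the memtable has been persisted, and pass it to RocksDB#flush. The class name and database path below are hypothetical and used only for illustration.

import org.rocksdb.FlushOptions;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class FlushOptionsBasicExample {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (Options options = new Options().setCreateIfMissing(true);
         RocksDB db = RocksDB.open(options, "/tmp/flush-options-example"); // hypothetical path
         FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
      db.put("key".getBytes(), "value".getBytes());
      // Blocks until the memtable contents have been written out to SST files on disk.
      db.flush(flushOptions);
    }
  }
}

FlushOptions wraps a native object, so closing it (here via try-with-resources, as in Examples #5 and #16 below) is the tidier pattern; several of the examples below create it inline without closing it.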
Example #1
Source File: TraceIndexExtractor.java    From act with GNU General Public License v3.0
private void writeTracesToDB(RocksDBAndHandles<COLUMN_FAMILIES> dbAndHandles,
                             List<Double> times,
                             List<List<Double>> allTraces) throws RocksDBException, IOException {

  LOGGER.info("Writing timepoints to on-disk index (%d points)", times.size());
  dbAndHandles.put(COLUMN_FAMILIES.TIMEPOINTS, TIMEPOINTS_KEY, serializeDoubleList(times));
  for (int i = 0; i < allTraces.size(); i++) {
    byte[] keyBytes = serializeObject(i);
    byte[] valBytes = serializeDoubleList(allTraces.get(i));
    dbAndHandles.put(COLUMN_FAMILIES.ID_TO_TRACE, keyBytes, valBytes);
    if (i % 1000 == 0) {
      LOGGER.info("Finished writing %d traces", i);
    }

    // Drop this trace as soon as it's written so the GC can pick it up and hopefully reduce memory pressure.
    allTraces.set(i, Collections.emptyList());
  }

  dbAndHandles.getDb().flush(new FlushOptions());
  LOGGER.info("Done writing trace data to index");
}
 
Example #2
Source File: RocksDbHdfsState.java    From jstorm with Apache License 2.0
/**
 * Flush the data in the RocksDB memtable to disk, and then create a checkpoint.
 *
 * @param batchId id of the batch to checkpoint
 */
@Override
public void checkpoint(long batchId) {
    long startTime = System.currentTimeMillis();
    try {
        rocksDb.flush(new FlushOptions());
        Checkpoint cp = Checkpoint.create(rocksDb);
        cp.createCheckpoint(getLocalCheckpointPath(batchId));
    } catch (RocksDBException e) {
        LOG.error("Failed to create checkpoint for batch-" + batchId, e);
        throw new RuntimeException(e.getMessage());
    }

    if (JStormMetrics.enabled)
        rocksDbFlushAndCpLatency.update(System.currentTimeMillis() - startTime);
}
 
Example #3
Source File: SamzaTimerInternalsFactoryTest.java    From beam with Apache License 2.0
private static KeyValueStore<ByteArray, byte[]> createStore(String name) {
  final Options options = new Options();
  options.setCreateIfMissing(true);

  RocksDbKeyValueStore rocksStore =
      new RocksDbKeyValueStore(
          new File(System.getProperty("java.io.tmpdir") + "/" + name),
          options,
          new MapConfig(),
          false,
          "beamStore",
          new WriteOptions(),
          new FlushOptions(),
          new KeyValueStoreMetrics("beamStore", new MetricsRegistryMap()));

  return new SerializedKeyValueStore<>(
      rocksStore,
      new ByteArraySerdeFactory.ByteArraySerde(),
      new ByteSerde(),
      new SerializedKeyValueStoreMetrics("beamStore", new MetricsRegistryMap()));
}
 
Example #4
Source File: PubchemTTLMerger.java    From act with GNU General Public License v3.0
@Override
public void endRDF() throws RDFHandlerException {
  super.endRDF();
  DateTime endTime = new DateTime().withZone(DateTimeZone.UTC);
  Long runtimeInMilis = endTime.getMillis() - startTime.getMillis();
  Long numProcessedVal = numProcessed.get();
  LOGGER.info("PCRDFHandler reached end of RDF with %d events in %.3fs, at %.3f ms per event",
      numProcessedVal,
      runtimeInMilis.floatValue() / MS_PER_S,
      runtimeInMilis.doubleValue() / numProcessedVal.doubleValue()
  );
  try {
    db.flush(new FlushOptions().setWaitForFlush(true));
  } catch (RocksDBException e) {
    LOGGER.error("Caught RocksDB exception when flushing after completing RDF processing: %s", e.getMessage());
    throw new RDFHandlerException(e);
  }
}
 
Example #5
Source File: RocksDBStore.java    From dremio-oss with Apache License 2.0
@Override
public void close() throws IOException {
  if (!closed.compareAndSet(false, true)) {
    return;
  }
  unregisterMetrics();
  exclusively((deferred) -> {
    deleteAllIterators(deferred);
    try(FlushOptions options = new FlushOptions()){
      options.setWaitForFlush(true);
      db.flush(options, handle);
    } catch (RocksDBException ex) {
      deferred.addException(ex);
    }
    deferred.suppressingClose(handle);
  });
}
 
Example #6
Source File: JRocksDB.java    From snowblossom with Apache License 2.0
@Override
public void close()
{
  super.close();

  logger.info("RocksDB flush started");
  try
  {
    FlushOptions fl = new FlushOptions();
    fl.setWaitForFlush(true);
    if (shared_db != null)
    {
      shared_db.flush(fl);
    }
    if (separate_db_map != null)
    {
      for(RocksDB db : separate_db_map.values())
      {
        db.flush(fl);
      }
    }
  }
  catch(Exception e)
  {
    logger.log(Level.WARNING, "rocks flush", e);
  }

  logger.info("RocksDB flush completed");

}
 
Example #7
Source File: BrendaSupportingEntries.java    From act with GNU General Public License v3.0
/**
 * Create an on-disk index of reaction-supporting data from BRENDA using RocksDB.  This DB will contain a number
 * of `column families` (i.e. per-table namespaces); each FromBrendaDB instance corresponds to one column family.
 *
 * All index rows are keyed on EC number, literature reference (individually; lists are split during construction),
 * and organism name.  Values are serialized (via Serializable) lists of FromBrendaDB objects; each column family
 * contains one type of object.
 *
 * Creating this data on an in-office MBP with a BRENDA MySQL instance running in EC2 takes just a couple of minutes.
 * Looking up supporting data locally, rather than running a MySQL query for every data type and every reaction,
 * results in a ~30x speedup of reaction processing.
 *
 * @param pathToIndex The local path where the index should be built.  This will become a directory containing
 *                    RocksDB files.
 * @param conn A connection to the BRENDA MySQL DB (`brenda` database) from which data will be read.
 * @throws IOException
 * @throws ClassNotFoundException
 * @throws RocksDBException
 * @throws SQLException
 */
public void constructOnDiskBRENDAIndex(File pathToIndex, Connection conn)
    throws IOException, ClassNotFoundException, RocksDBException, SQLException {
  if (pathToIndex.exists()) {
    System.out.println("Index already exists, not recreating.");
    return;
  }

  RocksDB db = null; // Not auto-closable.
  List<? extends FromBrendaDB> instances = allFromBrendaDBInstances();
  try {
    Options options = new Options().setCreateIfMissing(true);
    System.out.println("Opening index at " + pathToIndex.getAbsolutePath());
    db = RocksDB.open(options, pathToIndex.getAbsolutePath());

    for (FromBrendaDB instance : instances) {
      System.out.println("Writing index for " + instance.getColumnFamilyName());
      ColumnFamilyHandle cfh =
          db.createColumnFamily(new ColumnFamilyDescriptor(instance.getColumnFamilyName().getBytes(UTF8)));
      IndexWriter writer = new IndexWriter(cfh, db, instance);
      writer.run(conn);
      db.flush(new FlushOptions());
    }
  } finally {
    if (db != null) {
      db.close();
    }
  }
}
 
Example #8
Source File: TraceIndexExtractor.java    From act with GNU General Public License v3.0
private void writeWindowsToDB(RocksDBAndHandles<COLUMN_FAMILIES> dbAndHandles, List<MZWindow> windows)
    throws RocksDBException, IOException {
  for (MZWindow window : windows) {
    byte[] keyBytes = serializeObject(window.getTargetMZ());
    byte[] valBytes = serializeObject(window);

    dbAndHandles.put(COLUMN_FAMILIES.TARGET_TO_WINDOW, keyBytes, valBytes);
  }

  dbAndHandles.getDb().flush(new FlushOptions());
  LOGGER.info("Done writing window data to index");
}
 
Example #9
Source File: RocksDBKVStore.java    From journalkeeper with Apache License 2.0
@Override
public void flush() {
    try {
        rocksDB.flush(new FlushOptions());
    } catch (RocksDBException e) {
        throw new CoordinatingException(e);
    }
}
 
Example #10
Source File: RocksDBSegmentStore.java    From kylin-on-parquet-v2 with Apache License 2.0
@Override
public void persist() {
    try {
        db.flush(new FlushOptions());
    } catch (RocksDBException e) {
        e.printStackTrace();
    }
}
 
Example #11
Source File: TestRocksDbKeyValueStoreJava.java    From samza with Apache License 2.0
@Test
public void testIterate() throws Exception {
  Config config = new MapConfig();
  Options options = new Options();
  options.setCreateIfMissing(true);

  File dbDir = new File(System.getProperty("java.io.tmpdir") + "/dbStore" + System.currentTimeMillis());
  RocksDbKeyValueStore store = new RocksDbKeyValueStore(dbDir, options, config, false, "dbStore",
      new WriteOptions(), new FlushOptions(), new KeyValueStoreMetrics("dbStore", new MetricsRegistryMap()));

  ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
  String prefix = "prefix";
  for (int i = 0; i < 100; i++) {
    store.put(genKey(outputStream, prefix, i), genValue());
  }

  byte[] firstKey = genKey(outputStream, prefix, 0);
  byte[] lastKey = genKey(outputStream, prefix, 1000);
  KeyValueSnapshot<byte[], byte[]> snapshot = store.snapshot(firstKey, lastKey);
  // Make sure the cached Iterable won't change when new elements are added
  store.put(genKey(outputStream, prefix, 200), genValue());
  KeyValueIterator<byte[], byte[]> iterator = snapshot.iterator();
  assertTrue(Iterators.size(iterator) == 100);
  iterator.close();
  List<Integer> keys = new ArrayList<>();
  KeyValueIterator<byte[], byte[]> iterator2 = snapshot.iterator();
  while (iterator2.hasNext()) {
    Entry<byte[], byte[]> entry = iterator2.next();
    int key = Ints.fromByteArray(Arrays.copyOfRange(entry.getKey(), prefix.getBytes().length, entry.getKey().length));
    keys.add(key);
  }
  assertEquals(keys, IntStream.rangeClosed(0, 99).boxed().collect(Collectors.toList()));
  iterator2.close();

  outputStream.close();
  snapshot.close();
  store.close();
}
 
Example #12
Source File: TestKeyValueSizeHistogramMetric.java    From samza with Apache License 2.0
@Before
public void setup() {

  Config config = new MapConfig();
  Options options = new Options();
  options.setCreateIfMissing(true);

  File dbDir = new File(System.getProperty("java.io.tmpdir") + "/dbStore" + System.currentTimeMillis());
  RocksDbKeyValueStore kvStore = new RocksDbKeyValueStore(dbDir, options, config, false, "dbStore",
      new WriteOptions(), new FlushOptions(), new KeyValueStoreMetrics("dbStore", new MetricsRegistryMap()));
  KeyValueStore<String, String> serializedStore =
      new SerializedKeyValueStore<>(kvStore, stringSerde, stringSerde, serializedKeyValueStoreMetrics);
  store = new NullSafeKeyValueStore<>(serializedStore);
}
 
Example #13
Source File: RocksDBSegmentStore.java    From kylin with Apache License 2.0
@Override
public void persist() {
    try {
        db.flush(new FlushOptions());
    } catch (RocksDBException e) {
        e.printStackTrace();
    }
}
 
Example #14
Source File: JRocksDB.java    From jelectrum with MIT License
@Override
protected void dbShutdownHandler()
  throws Exception
{
  log.alarm("RocksDB: flushing");
  FlushOptions fl = new FlushOptions();
  fl.setWaitForFlush(true);
  db.flush(fl);
  log.alarm("RocksDB: flush complete");


}
 
Example #15
Source File: RocksDbUnitTest.java    From jstorm with Apache License 2.0
private static void rocksDbTest(RocksDB db, List<ColumnFamilyHandle> handlers) {
    try {
        ColumnFamilyHandle handler1 = null;
        ColumnFamilyHandle handler2 = null;
        if (handlers.size() > 0) {
            // skip default column family
            handler1 = handlers.get(1);
            handler2 = handlers.get(2);
        } else {
            handler1 = db.createColumnFamily(new ColumnFamilyDescriptor("test1".getBytes()));
            handler2 = db.createColumnFamily(new ColumnFamilyDescriptor("test2".getBytes()));
        }
        int startValue1 = getStartValue(db, handler1);
        int startValue2 = getStartValue(db, handler2);

        Checkpoint cp = Checkpoint.create(db);
   
        if (isCompaction) {
            db.compactRange();
            LOG.info("Compaction!");
        }

        long flushWaitTime = System.currentTimeMillis() + flushInterval;
        for (int i = 0; i < putNum || putNum == -1; i++) {
            db.put(handler1, String.valueOf(i % 1000).getBytes(), String.valueOf(startValue1 + i).getBytes());
            db.put(handler2, String.valueOf(i % 1000).getBytes(), String.valueOf(startValue2 + i).getBytes());
            if (isFlush && flushWaitTime <= System.currentTimeMillis()) {
                db.flush(new FlushOptions());
                if (isCheckpoint) {
                    cp.createCheckpoint(cpPath + "/" + i);
                }
                flushWaitTime = System.currentTimeMillis() + flushInterval;
            }
        }
    } catch (RocksDBException e) {
        LOG.error("Failed to put or flush", e);
    }
}
 
Example #16
Source File: RDBStore.java    From hadoop-ozone with Apache License 2.0
@Override
public void flush() throws IOException {
  try (FlushOptions flushOptions = new FlushOptions()) {
    flushOptions.setWaitForFlush(true);
    db.flush(flushOptions);
  } catch (RocksDBException e) {
    throw toIOException("Unable to Flush RocksDB data", e);
  }
}
 
Example #17
Source File: RocksDBCache.java    From kcache with Apache License 2.0
private void openDB() {
    // initialize the default rocksdb options

    final DBOptions dbOptions = new DBOptions();
    final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
    userSpecifiedOptions = new RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter(dbOptions, columnFamilyOptions);
    userSpecifiedOptions.setComparator(new RocksDBKeySliceComparator<>(keySerde, comparator));

    final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
    cache = new LRUCache(BLOCK_CACHE_SIZE);
    tableConfig.setBlockCache(cache);
    tableConfig.setBlockSize(BLOCK_SIZE);

    filter = new BloomFilter();
    tableConfig.setFilterPolicy(filter);

    userSpecifiedOptions.optimizeFiltersForHits();
    userSpecifiedOptions.setTableFormatConfig(tableConfig);
    userSpecifiedOptions.setWriteBufferSize(WRITE_BUFFER_SIZE);
    userSpecifiedOptions.setCompressionType(COMPRESSION_TYPE);
    userSpecifiedOptions.setCompactionStyle(COMPACTION_STYLE);
    userSpecifiedOptions.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
    userSpecifiedOptions.setCreateIfMissing(true);
    userSpecifiedOptions.setErrorIfExists(false);
    userSpecifiedOptions.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL);
    // this is the recommended way to increase parallelism in RocksDb
    // note that the current implementation of setIncreaseParallelism affects the number
    // of compaction threads but not flush threads (the latter remains one). Also
    // the parallelism value needs to be at least two because of the code in
    // https://github.com/facebook/rocksdb/blob/62ad0a9b19f0be4cefa70b6b32876e764b7f3c11/util/options.cc#L580
    // subtracts one from the value passed to determine the number of compaction threads
    // (this could be a bug in the RocksDB code and their devs have been contacted).
    userSpecifiedOptions.setIncreaseParallelism(Math.max(Runtime.getRuntime().availableProcessors(), 2));

    wOptions = new WriteOptions();
    wOptions.setDisableWAL(true);

    fOptions = new FlushOptions();
    fOptions.setWaitForFlush(true);

    dbDir = new File(new File(rootDir, parentDir), name);

    try {
        Files.createDirectories(dbDir.getParentFile().toPath());
        Files.createDirectories(dbDir.getAbsoluteFile().toPath());
    } catch (final IOException fatal) {
        throw new CacheInitializationException("Could not create directories", fatal);
    }

    openRocksDB(dbOptions, columnFamilyOptions);
    open = true;
}
 
Example #18
Source File: RocksDBAndHandles.java    From act with GNU General Public License v3.0
public void flush(boolean waitForFlush) throws RocksDBException {
  FlushOptions options = new FlushOptions();
  options.setWaitForFlush(waitForFlush);
  db.flush(options);
}
 
Example #19
Source File: PubchemTTLMergerTest.java    From act with GNU General Public License v3.0
@Test
public void testIndexConstructionAndMerge() throws Exception {
  PubchemTTLMerger merger = new PubchemTTLMerger();
  Pair<RocksDB, Map<PubchemTTLMerger.COLUMN_FAMILIES, ColumnFamilyHandle>> dbAndHandles =
      PubchemTTLMerger.createNewRocksDB(tempDirPath.toFile());

  // Alas, we can't swap this with a JAR-safe stream as we must list the files.
  File testSynonymFileDir = new File(this.getClass().getResource(TEST_RDF_PATH).getFile());
  List<File> testFiles = Arrays.asList(testSynonymFileDir.listFiles());
  Collections.sort(testFiles);

  Set<String> expectedValues, actualValues;

  merger.buildIndex(dbAndHandles, testFiles);

  dbAndHandles.getLeft().flush(new FlushOptions());

  // Check the hash-to-synonym index.
  expectedValues = new HashSet<>(Arrays.asList("test1"));
  actualValues = new HashSet<>(getValForKey(dbAndHandles, PubchemTTLMerger.COLUMN_FAMILIES.HASH_TO_SYNONYMS, MD51));
  assertEquals("First hash-to-synonyms returns expected value(s)", expectedValues, actualValues);
  expectedValues = new HashSet<>(Arrays.asList("test2"));
  actualValues = new HashSet<>(getValForKey(dbAndHandles, PubchemTTLMerger.COLUMN_FAMILIES.HASH_TO_SYNONYMS, MD52));
  assertEquals("Second hash-to-synonyms returns expected value(s)", expectedValues, actualValues);
  expectedValues = new HashSet<>(Arrays.asList("TEST3", "test3"));
  actualValues = new HashSet<>(getValForKey(dbAndHandles, PubchemTTLMerger.COLUMN_FAMILIES.HASH_TO_SYNONYMS, MD53));
  assertEquals("Third hash-to-synonyms returns expected value(s)", expectedValues, actualValues);

  // Now check the MESH index.
  expectedValues = new HashSet<>(Arrays.asList("M01"));
  actualValues = new HashSet<>(getValForKey(dbAndHandles, PubchemTTLMerger.COLUMN_FAMILIES.HASH_TO_MESH, MD51));
  assertEquals("First hash-to-synonyms returns expected value(s)", expectedValues, actualValues);
  expectedValues = new HashSet<>(Arrays.asList("M02"));
  actualValues = new HashSet<>(getValForKey(dbAndHandles, PubchemTTLMerger.COLUMN_FAMILIES.HASH_TO_MESH, MD52));
  assertEquals("Second hash-to-synonyms returns expected value(s)", expectedValues, actualValues);

  // Finally (before merging) check the CID to hash index
  expectedValues = new HashSet<>(Arrays.asList(MD51));
  actualValues = new HashSet<>(getValForKey(dbAndHandles, PubchemTTLMerger.COLUMN_FAMILIES.CID_TO_HASHES, "CID01"));
  assertEquals("First hash-to-synonyms returns expected value(s)", expectedValues, actualValues);
  expectedValues = new HashSet<>(Arrays.asList(MD52, MD53));
  actualValues = new HashSet<>(getValForKey(dbAndHandles, PubchemTTLMerger.COLUMN_FAMILIES.CID_TO_HASHES, "CID02"));
  assertEquals("Second hash-to-synonyms returns expected value(s)", expectedValues, actualValues);
  expectedValues = new HashSet<>(Arrays.asList(MD53));
  actualValues = new HashSet<>(getValForKey(dbAndHandles, PubchemTTLMerger.COLUMN_FAMILIES.CID_TO_HASHES, "CID03"));
  assertEquals("Third hash-to-synonyms returns expected value(s)", expectedValues, actualValues);

  merger.merge(dbAndHandles);

  PubchemSynonyms expectedSynonyms, actualSynonyms;

  expectedSynonyms = new PubchemSynonyms("CID01");
  expectedSynonyms.addMeSHId("M01");
  expectedSynonyms.addSynonym(PubchemTTLMerger.PC_SYNONYM_TYPES.TRIVIAL_NAME, "test1");
  actualSynonyms = getPCSyonymsForKey(dbAndHandles, "CID01");
  assertEquals("First CID-to-synonyms entry has expected PubchemSynonyms value", expectedSynonyms, actualSynonyms);
  expectedSynonyms = new PubchemSynonyms("CID02");
  expectedSynonyms.addMeSHId("M02");
  expectedSynonyms.addSynonyms(PubchemTTLMerger.PC_SYNONYM_TYPES.UNKNOWN, new HashSet<>(Arrays.asList("test2")));
  expectedSynonyms.addSynonyms(PubchemTTLMerger.PC_SYNONYM_TYPES.INTL_NONPROPRIETARY_NAME,
      new HashSet<>(Arrays.asList("test3", "TEST3")));
  actualSynonyms = getPCSyonymsForKey(dbAndHandles, "CID02");
  assertEquals("Second CID-to-synonyms entry has expected PubchemSynonyms value", expectedSynonyms, actualSynonyms);
  expectedSynonyms = new PubchemSynonyms("CID03");
  expectedSynonyms.addSynonyms(PubchemTTLMerger.PC_SYNONYM_TYPES.INTL_NONPROPRIETARY_NAME,
      new HashSet<>(Arrays.asList("test3", "TEST3")));
  actualSynonyms = getPCSyonymsForKey(dbAndHandles, "CID03");
  assertEquals("ThirdCID-to-synonyms entry has expected PubchemSynonyms value", expectedSynonyms, actualSynonyms);

  dbAndHandles.getLeft().flush(new FlushOptions());
  dbAndHandles.getLeft().close();
}
 
Example #20
Source File: TestRocksDbKeyValueStoreJava.java    From samza with Apache License 2.0
@Test
public void testPerf() throws Exception {
  Config config = new MapConfig();
  Options options = new Options();
  options.setCreateIfMissing(true);

  File dbDir = new File(System.getProperty("java.io.tmpdir") + "/dbStore" + System.currentTimeMillis());
  RocksDbKeyValueStore store = new RocksDbKeyValueStore(dbDir, options, config, false, "dbStore",
      new WriteOptions(), new FlushOptions(), new KeyValueStoreMetrics("dbStore", new MetricsRegistryMap()));

  ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
  String prefix = "this is the key prefix";
  Random r = new Random();
  for (int i = 0; i < 100000; i++) {
    store.put(genKey(outputStream, prefix, r.nextInt()), genValue());
  }

  byte[] firstKey = genKey(outputStream, prefix, 0);
  byte[] lastKey = genKey(outputStream, prefix, Integer.MAX_VALUE);

  long start;

  start = System.currentTimeMillis();
  KeyValueIterator<byte[], byte[]> iterator1 = store.range(firstKey, lastKey);
  long rangeTime = System.currentTimeMillis() - start;
  start = System.currentTimeMillis();
  Iterators.size(iterator1);
  long rangeIterTime = System.currentTimeMillis() - start;
  System.out.println("range iter create time: " + rangeTime + ", iterate time: " + rangeIterTime);
  iterator1.close();
  // Please comment out range query part in order to do an accurate perf test for snapshot
  start = System.currentTimeMillis();
  KeyValueSnapshot<byte[], byte[]> snapshot = store.snapshot(firstKey, lastKey);
  KeyValueIterator<byte[], byte[]> iterator2 = snapshot.iterator();
  long snapshotTime = System.currentTimeMillis() - start;
  start = System.currentTimeMillis();
  Iterators.size(iterator2);
  long snapshotIterTime = System.currentTimeMillis() - start;
  System.out.println("snapshot iter create time: " + snapshotTime + ", iterate time: " + snapshotIterTime);
  iterator2.close();
  snapshot.close();
  store.close();
}