Java Code Examples for org.rocksdb.RocksDB#loadLibrary()

The following examples show how to use org.rocksdb.RocksDB#loadLibrary(). Each snippet is taken from an open-source project; the source file, project, and license are noted above each example.
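For orientation, here is a minimal, self-contained sketch of the pattern most of these examples follow: call RocksDB.loadLibrary() to load the native JNI library (the projects below call it freely before opening databases), then open a database. The class name and the path "/tmp/rocksdb-example" are illustrative placeholders, not taken from any of the projects below.

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class LoadLibraryExample {

  public static void main(String[] args) throws RocksDBException {
    // Load the native RocksDB library before making any other RocksDB call.
    RocksDB.loadLibrary();

    // Options and RocksDB wrap native resources, so close them when done
    // (try-with-resources handles this). The path below is illustrative.
    try (Options options = new Options().setCreateIfMissing(true);
         RocksDB db = RocksDB.open(options, "/tmp/rocksdb-example")) {
      db.put("key".getBytes(), "value".getBytes());
      System.out.println(new String(db.get("key".getBytes())));
    }
  }
}
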
Example 1
Source File: RocksDbDataStoreFactory.java    From biomedicus with Apache License 2.0
@Override
public KnownWordsDataStore openKnownWordDataStore(int id) {
  RocksDB.loadLibrary();
  try {
    LOGGER.info("Opening TnT model known word model: {}", id);
    RocksDB rocksDB = RocksDB.openReadOnly(dbPath.resolve(getWordsName(id)).toString());
    RocksDB candidatesDB = RocksDB.openReadOnly(dbPath.resolve(getCandidatesName(id)).toString());

    RocksDbKnownWordsDataStore rocksDbKnownWordsDataStore = new RocksDbKnownWordsDataStore(
        rocksDB, candidatesDB);
    if (inMemory) {
      LOGGER.info("Loading TnT known word model into memory: {}", id);
      InMemoryKnownWordDataStore inMemoryKnownWordDataStore = rocksDbKnownWordsDataStore
          .inMemory();
      LOGGER.info("Done loading TnT known word model into memory: {}", id);
      rocksDB.close();
      candidatesDB.close();
      return inMemoryKnownWordDataStore;
    }
    rocksDBS.add(rocksDB);
    rocksDBS.add(candidatesDB);
    return rocksDbKnownWordsDataStore;
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  }
}
 
Example 2
Source File: RocksDBClient.java    From geowave with Apache License 2.0
public synchronized RocksDBIndexTable getIndexTable(
    final String tableName,
    final short adapterId,
    final byte[] partition,
    final boolean requiresTimestamp) {
  if (indexWriteOptions == null) {
    RocksDB.loadLibrary();
    final int cores = Runtime.getRuntime().availableProcessors();
    indexWriteOptions =
        new Options().setCreateIfMissing(true).prepareForBulkLoad().setIncreaseParallelism(cores);
    indexReadOptions = new Options().setIncreaseParallelism(cores);
    batchWriteOptions =
        new WriteOptions().setDisableWAL(false).setNoSlowdown(false).setSync(false);
  }
  final String directory = subDirectory + "/" + tableName;
  return indexTableCache.get(
      (IndexCacheKey) keyCache.get(
          directory,
          d -> new IndexCacheKey(d, adapterId, partition, requiresTimestamp)));
}
 
Example 3
Source File: RocksDBClient.java    From geowave with Apache License 2.0
public synchronized RocksDBDataIndexTable getDataIndexTable(
    final String tableName,
    final short adapterId) {
  if (indexWriteOptions == null) {
    RocksDB.loadLibrary();
    final int cores = Runtime.getRuntime().availableProcessors();
    indexWriteOptions =
        new Options().setCreateIfMissing(true).prepareForBulkLoad().setIncreaseParallelism(cores);
    indexReadOptions = new Options().setIncreaseParallelism(cores);
    batchWriteOptions =
        new WriteOptions().setDisableWAL(false).setNoSlowdown(false).setSync(false);
  }
  final String directory = subDirectory + "/" + tableName;
  return dataIndexTableCache.get(
      (DataIndexCacheKey) keyCache.get(directory, d -> new DataIndexCacheKey(d, adapterId)));
}
 
Example 4
Source File: RocksDbKeyValueReader.java    From samza with Apache License 2.0
/**
 * Construct the <code>RocksDbKeyValueReader</code> with the store's name,
 * the database's path, and Samza's config.
 *
 * @param storeName name of the RocksDb defined in the config file
 * @param dbPath path to the db directory
 * @param config Samza's config
 */
public RocksDbKeyValueReader(String storeName, String dbPath, Config config) {
  // get the key serde and value serde from the config
  StorageConfig storageConfig = new StorageConfig(config);
  SerializerConfig serializerConfig = new SerializerConfig(config);

  keySerde = getSerdeFromName(storageConfig.getStorageKeySerde(storeName).orElse(null), serializerConfig);
  valueSerde = getSerdeFromName(storageConfig.getStorageMsgSerde(storeName).orElse(null), serializerConfig);

  // get db options
  Options options = RocksDbOptionsHelper.options(config, 1, new File(dbPath), StorageEngineFactory.StoreMode.ReadWrite);

  // open the db
  RocksDB.loadLibrary();
  try {
    db = RocksDB.openReadOnly(options, dbPath);
  } catch (RocksDBException e) {
    throw new SamzaException("can not open the rocksDb in " + dbPath, e);
  }
}
 
Example 5
Source File: RocksDbDataStoreFactory.java    From biomedicus with Apache License 2.0
@Override
public SuffixDataStore openSuffixDataStore(int id) {
  RocksDB.loadLibrary();
  try {
    LOGGER.info("Opening TnT suffix model: {}", id);
    RocksDB rocksDB = RocksDB.openReadOnly(dbPath.resolve(getSuffixesName(id)).toString());
    RocksDbSuffixDataStore rocksDbSuffixDataStore = new RocksDbSuffixDataStore(rocksDB);
    if (inMemory) {
      LOGGER.info("Loading TnT suffix model into memory: {}", id);
      InMemorySuffixDataStore inMemorySuffixDataStore = rocksDbSuffixDataStore.inMemory();
      LOGGER.info("Done loading TnT suffix model into memory: {}", id);
      rocksDB.close();
      return inMemorySuffixDataStore;
    }
    rocksDBS.add(rocksDB);
    return rocksDbSuffixDataStore;
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  }
}
 
Example 6
Source File: DbInitConfig.java    From md_blockchain with Apache License 2.0
@Bean
@ConditionalOnProperty("db.rocksDB")
public RocksDB rocksDB() {
    RocksDB.loadLibrary();

    Options options = new Options().setCreateIfMissing(true);
    try {
        return RocksDB.open(options, "./rocksDB");
    } catch (RocksDBException e) {
        e.printStackTrace();
        return null;
    }
}
 
Example 7
Source File: RocksDbDataStoreFactory.java    From biomedicus with Apache License 2.0
@Override
public SuffixDataStore createSuffixDataStore(int id) {
  RocksDB.loadLibrary();
  try (Options options = new Options().setCreateIfMissing(true).prepareForBulkLoad()) {
    Files.createDirectories(dbPath);
    RocksDB rocksDB = RocksDB.open(options, dbPath.resolve(getSuffixesName(id)).toString());
    rocksDBS.add(rocksDB);

    return new RocksDbSuffixDataStore(rocksDB);
  } catch (RocksDBException | IOException e) {
    throw new RuntimeException(e);
  }
}
 
Example 8
Source File: RocksDBNormalizerModel.java    From biomedicus with Apache License 2.0
RocksDBNormalizerModel(Path dbPath) {
  RocksDB.loadLibrary();

  try (Options options = new Options().setInfoLogLevel(InfoLogLevel.ERROR_LEVEL)) {
    db = RocksDB.openReadOnly(options, dbPath.toString());
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  }
}
 
Example 9
Source File: TraceIndexExtractor.java    From act with GNU General Public License v3.0
public void processScan(List<Double> targetMZs, File scanFile, File rocksDBFile)
    throws RocksDBException, ParserConfigurationException, XMLStreamException, IOException {
  LOGGER.info("Accessing scan file at %s", scanFile.getAbsolutePath());
  LCMSNetCDFParser parser = new LCMSNetCDFParser();
  Iterator<LCMSSpectrum> spectrumIterator = parser.getIterator(scanFile.getAbsolutePath());

  LOGGER.info("Opening index at %s", rocksDBFile.getAbsolutePath());
  RocksDB.loadLibrary();
  RocksDBAndHandles<COLUMN_FAMILIES> dbAndHandles = null;

  try {
    // TODO: add to existing DB instead of complaining if the DB already exists.  That'll enable one index per scan.
    dbAndHandles = DBUtil.createNewRocksDB(rocksDBFile, COLUMN_FAMILIES.values());

    // TODO: split targetMZs into batches of ~100k and extract incrementally to allow huge input sets.

    LOGGER.info("Extracting traces");
    IndexedTraces windowsTimesAndTraces = runSweepLine(targetMZs, spectrumIterator);

    LOGGER.info("Writing search targets to on-disk index");
    writeWindowsToDB(dbAndHandles, windowsTimesAndTraces.getWindows());

    LOGGER.info("Writing trace data to on-disk index");
    writeTracesToDB(dbAndHandles, windowsTimesAndTraces.getTimes(), windowsTimesAndTraces.getAllTraces());
  } finally {
    if (dbAndHandles != null) {
      dbAndHandles.getDb().close();
    }
  }

  LOGGER.info("Done");
}
 
Example 10
Source File: RocksDbStrings.java    From biomedicus with Apache License 2.0
public RocksDbStrings(Path termsPath) {
  RocksDB.loadLibrary();

  try {
    terms = RocksDB.openReadOnly(termsPath.toString());
  } catch (RocksDBException e) {
    // says "if error happens in underlying native library", can't possible hope to handle that.
    throw new RuntimeException(e);
  }
}
 
Example 11
Source File: RocksDbIdentifiers.java    From biomedicus with Apache License 2.0
public RocksDbIdentifiers(Path identifiersPath) {
  RocksDB.loadLibrary();

  try (Options options = new Options().setInfoLogLevel(InfoLogLevel.ERROR_LEVEL)) {
    indices = RocksDB.openReadOnly(options, identifiersPath.toString());
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  }
}
 
Example 12
Source File: RocksDBClient.java    From geowave with Apache License 2.0
public synchronized RocksDBMetadataTable getMetadataTable(final MetadataType type) {
  if (metadataOptions == null) {
    RocksDB.loadLibrary();
    metadataOptions = new Options().setCreateIfMissing(true).optimizeForSmallDb();
  }
  final String directory = subDirectory + "/" + type.name();
  return metadataTableCache.get(
      keyCache.get(directory, d -> new CacheKey(d, type.equals(MetadataType.STATS))));
}
 
Example 13
Source File: JRocksDB.java    From snowblossom with Apache License 2.0
public JRocksDB(Config config)
  throws Exception
{
  super(config);

  use_separate_dbs=config.getBoolean("db_separate");
  
  config.require("db_path");
  
  String path = config.get("db_path");

  base_path = new File(path);

  base_path.mkdirs();

  logger.info(String.format("Loading RocksDB with path %s", path));

  RocksDB.loadLibrary();
  sharedWriteOptions = new WriteOptions();
  sharedWriteOptions.setDisableWAL(false);
  sharedWriteOptions.setSync(false);


  // Separate DBs should only be used when you don't care about syncing between
  // the databases. If you are fine with writes to them being preserved out of order
  // relative to each other, it should be fine.
  // For example, in combined DBs if you write a to A then b to B, you will either get {}, {a}, or {a,b} 
  // on a bad shutdown.  If you use separate, you could very well get {b}.

  if (use_separate_dbs)
  {
    separate_db_map = new TreeMap<>();
  }
  else
  {
    shared_db = openRocksDB(path);
  }

}
 
Example 14
Source File: JRocksDB.java    From jelectrum with MIT License
public JRocksDB(Config config, EventLog log)
  throws Exception
{
  super(config);

  this.log = log;

  config.require("rocksdb_path");

  String path = config.get("rocksdb_path");

  RocksDB.loadLibrary();
  Options options = new Options();

  options.setIncreaseParallelism(16);
  options.setCreateIfMissing(true);
  options.setAllowMmapReads(true);
  //options.setAllowMmapWrites(true);

  sharedWriteOptions = new WriteOptions();
  sharedWriteOptions.setDisableWAL(true);
  sharedWriteOptions.setSync(false);

  db = RocksDB.open(options, path);

  open();
}
 
Example 15
Source File: RocksDao.java    From fasten with Apache License 2.0
/**
 * Constructor of RocksDao (Database Access Object).
 *
 * @param dbDir Directory where RocksDB data will be stored
 * @throws RocksDBException if there is an error loading or opening RocksDB instance
 */
public RocksDao(final String dbDir) throws RocksDBException {
    RocksDB.loadLibrary();
    final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions();
    final DBOptions dbOptions = new DBOptions()
            .setCreateIfMissing(true)
            .setCreateMissingColumnFamilies(true);
    final List<ColumnFamilyDescriptor> cfDescriptors = Collections.singletonList(
            new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOptions));
    final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
    this.rocksDb = RocksDB.open(dbOptions, dbDir, cfDescriptors, columnFamilyHandles);
    this.defaultHandle = columnFamilyHandles.get(0);
    initKryo();
}
 
Example 16
Source File: RocksDBDataInterfaceFactory.java    From count-db with MIT License
public RocksDBDataInterfaceFactory(ApplicationContext applicationContext, boolean usePatch) {
    super(applicationContext);
    this.directory = applicationContext.getProperty("data_directory");
    this.usePatch = usePatch;
    File libFile = findLibFile();
    if (libFile == null) {
        throw new RuntimeException("Could not find librocksdbjni.so");
    }
    Utils.addLibraryPath(libFile.getParentFile().getAbsolutePath());
    RocksDB.loadLibrary();
}
 
Example 17
Source File: KafkaStreamsRecorder.java    From quarkus with Apache License 2.0
public void loadRocksDb() {
    RocksDB.loadLibrary();
}
 
Example 18
Source File: Builder.java    From act with GNU General Public License v3.0
public static Builder makeBuilder(File indexDir) throws RocksDBException {
  RocksDB.loadLibrary();
  LOGGER.info("Creating index at %s", indexDir.getAbsolutePath());
  RocksDBAndHandles<ColumnFamilies> dbAndHandles = DBUtil.createNewRocksDB(indexDir, ColumnFamilies.values());
  return new Builder(dbAndHandles);
}
 
Example 19
Source File: TestReadThrouputRocksDB.java    From PalDB with Apache License 2.0
@BeforeClass
public void loadLibrary() {
  RocksDB.loadLibrary();
}
 
Example 20
Source File: RocksDBStateBackend.java    From Flink-CEPplus with Apache License 2.0
private void ensureRocksDBIsLoaded(String tempDirectory) throws IOException {
	synchronized (RocksDBStateBackend.class) {
		if (!rocksDbInitialized) {

			final File tempDirParent = new File(tempDirectory).getAbsoluteFile();
			LOG.info("Attempting to load RocksDB native library and store it under '{}'", tempDirParent);

			Throwable lastException = null;
			for (int attempt = 1; attempt <= ROCKSDB_LIB_LOADING_ATTEMPTS; attempt++) {
				try {
					// when multiple instances of this class and RocksDB exist in different
					// class loaders, then we can see the following exception:
					// "java.lang.UnsatisfiedLinkError: Native Library /path/to/temp/dir/librocksdbjni-linux64.so
					// already loaded in another class loader"

					// to avoid that, we need to add a random element to the library file path
					// (I know, seems like an unnecessary hack, since the JVM obviously can handle multiple
					//  instances of the same JNI library being loaded in different class loaders, but
					//  apparently not when coming from the same file path, so there we go)

					final File rocksLibFolder = new File(tempDirParent, "rocksdb-lib-" + new AbstractID());

					// make sure the temp path exists
					LOG.debug("Attempting to create RocksDB native library folder {}", rocksLibFolder);
					// noinspection ResultOfMethodCallIgnored
					rocksLibFolder.mkdirs();

					// explicitly load the JNI dependency if it has not been loaded before
					NativeLibraryLoader.getInstance().loadLibrary(rocksLibFolder.getAbsolutePath());

					// this initialization here should validate that the loading succeeded
					RocksDB.loadLibrary();

					// seems to have worked
					LOG.info("Successfully loaded RocksDB native library");
					rocksDbInitialized = true;
					return;
				}
				catch (Throwable t) {
					lastException = t;
					LOG.debug("RocksDB JNI library loading attempt {} failed", attempt, t);

					// try to force RocksDB to attempt reloading the library
					try {
						resetRocksDBLoadedFlag();
					} catch (Throwable tt) {
						LOG.debug("Failed to reset 'initialized' flag in RocksDB native code loader", tt);
					}
				}
			}

			throw new IOException("Could not load the native RocksDB library", lastException);
		}
	}
}