Java Code Examples for org.rocksdb.RocksDBException

The following examples show how to use org.rocksdb.RocksDBException. These examples are extracted from open source projects; the source project, file, and license are listed above each example.
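Before the project-specific examples, here is a minimal, self-contained sketch of the pattern they all share: RocksDB's Java API signals failures by throwing the checked org.rocksdb.RocksDBException, which callers must either handle or wrap. The database path below is purely illustrative.

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class RocksDbExceptionDemo {
  public static void main(String[] args) {
    RocksDB.loadLibrary();
    // try-with-resources closes the native Options and DB handles, even on failure
    try (Options options = new Options().setCreateIfMissing(true);
         RocksDB db = RocksDB.open(options, "/tmp/rocksdb-demo")) {
      db.put("key".getBytes(), "value".getBytes());
      System.out.println(new String(db.get("key".getBytes())));
    } catch (RocksDBException e) {
      // e.getStatus() carries the native status code; see Example 26 below
      // for a null-safe conversion of it into an IOException message
      System.err.println("RocksDB operation failed: " + e.getMessage());
    }
  }
}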
Example 1
Source Project: benchmarks   Source File: RocksDb.java    License: Apache License 2.0
@Override
@SuppressWarnings("PMD.CloseResource")
public void setup(final BenchmarkParams b) throws IOException {
  super.setup(b);
  wkb = new UnsafeBuffer(new byte[keySize]);
  wvb = new UnsafeBuffer(new byte[valSize]);
  loadLibrary();
  final Options options = new Options();
  options.setCreateIfMissing(true);
  options.setCompressionType(NO_COMPRESSION);
  try {
    db = open(options, tmp.getAbsolutePath());
  } catch (final RocksDBException ex) {
    throw new IOException(ex);
  }
}
 
Example 2
Source Project: hadoop-ozone   Source File: RocksDBStore.java    License: Apache License 2.0
@Override
public void writeBatch(BatchOperation operation)
    throws IOException {
  List<BatchOperation.SingleOperation> operations =
      operation.getOperations();
  if (!operations.isEmpty()) {
    try (WriteBatch writeBatch = new WriteBatch()) {
      for (BatchOperation.SingleOperation opt : operations) {
        switch (opt.getOpt()) {
        case DELETE:
          writeBatch.delete(opt.getKey());
          break;
        case PUT:
          writeBatch.put(opt.getKey(), opt.getValue());
          break;
        default:
          throw new IllegalArgumentException("Invalid operation "
              + opt.getOpt());
        }
      }
      db.write(writeOptions, writeBatch);
    } catch (RocksDBException e) {
      throw toIOException("Batch write operation failed", e);
    }
  }
}
 
Example 3
Source Project: hugegraph   Source File: RocksDBPerfTest.java    License: Apache License 2.0
@Test
public void testGet1KeyWithMultiValues() throws RocksDBException {

    put("person:1gname", "James");
    put("person:1gage", "19");
    put("person:1gcity", "Beijing");

    put("person:2gname", "Lisa");
    put("person:2gage", "20");
    put("person:2gcity", "Beijing");

    put("person:2all", "name=Lisa,age=20,city=Beijing");

    Session session = this.rocks.session();
    for (int i = 0; i < TIMES; i++) {
        s(session.get(TABLE, b("person:2all")));
    }
}
 
Example 4
Source Project: dremio-oss   Source File: RocksDBStore.java    License: Apache License 2.0
private String stats() {
  try {
    StringBuilder sb = new StringBuilder();
    append(sb, "rocksdb.estimate-num-keys", "Estimated Number of Keys");
    append(sb, "rocksdb.estimate-live-data-size", "Estimated Live Data Size");
    append(sb, "rocksdb.total-sst-files-size", "Total SST files size");
    append(sb, "rocksdb.estimate-pending-compaction-bytes", "Pending Compaction Bytes");

    final BlobStats blobStats = metaManager.getStats();
    if (blobStats != null) {
      blobStats.append(sb);
    }
    return sb.toString();
  } catch(RocksDBException e) {
    throw new RuntimeException(e);
  }
}
 
Example 5
Source Project: fasten   Source File: KnowledgeBase.java    License: Apache License 2.0
@SuppressWarnings("resource")
public static KnowledgeBase getInstance(final String kbDir, final String kbMetadataPathname, final boolean readOnly) throws RocksDBException, ClassNotFoundException, IOException {
	final boolean metadataExists = new File(kbMetadataPathname).exists();
	final boolean kbDirExists = new File(kbDir).exists();
	if (metadataExists != kbDirExists) throw new IllegalArgumentException("Either both or none of the knowledge-base directory and metadata must exist");

	RocksDB.loadLibrary();
	final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions().setCompressionType(CompressionType.LZ4_COMPRESSION);
	final DBOptions dbOptions = new DBOptions().setCreateIfMissing(true).setCreateMissingColumnFamilies(true);
	final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOptions), new ColumnFamilyDescriptor(GID2URI, cfOptions), new ColumnFamilyDescriptor(URI2GID, cfOptions));

	final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
	final RocksDB db = readOnly ? RocksDB.openReadOnly(dbOptions, kbDir, cfDescriptors, columnFamilyHandles) : RocksDB.open(dbOptions, kbDir, cfDescriptors, columnFamilyHandles);

	final KnowledgeBase kb;
	if (metadataExists) {
		kb = (KnowledgeBase) BinIO.loadObject(kbMetadataPathname);
		kb.readOnly = readOnly;
		kb.callGraphDB = db;
		kb.defaultHandle = columnFamilyHandles.get(0);
		kb.gid2uriFamilyHandle = columnFamilyHandles.get(1);
		kb.uri2gidFamilyHandle = columnFamilyHandles.get(2);
	} else kb = new KnowledgeBase(db, columnFamilyHandles.get(0), columnFamilyHandles.get(1), columnFamilyHandles.get(2), kbMetadataPathname, readOnly);
	return kb;
}
 
Example 6
Source Project: hugegraph   Source File: RocksDBStdSessions.java    License: Apache License 2.0
@Override
public synchronized void createTable(String table) throws RocksDBException {
    if (this.cfs.containsKey(table)) {
        return;
    }

    this.checkValid();

    // Should we use options.setCreateMissingColumnFamilies() to create CF
    ColumnFamilyDescriptor cfd = new ColumnFamilyDescriptor(encode(table));
    ColumnFamilyOptions options = cfd.getOptions();
    initOptions(this.config(), null, null, options, options);
    this.cfs.put(table, new CFHandle(this.rocksdb.createColumnFamily(cfd)));

    ingestExternalFile();
}
 
Example 7
Source Project: sofa-jraft   Source File: RocksRawKVStore.java    License: Apache License 2.0
void ingestSstFiles(final EnumMap<SstColumnFamily, File> sstFileTable) {
    final Timer.Context timeCtx = getTimeContext("INGEST_SST_FILE");
    final Lock readLock = this.readWriteLock.readLock();
    readLock.lock();
    try {
        for (final Map.Entry<SstColumnFamily, File> entry : sstFileTable.entrySet()) {
            final SstColumnFamily sstColumnFamily = entry.getKey();
            final File sstFile = entry.getValue();
            final ColumnFamilyHandle columnFamilyHandle = findColumnFamilyHandle(sstColumnFamily);
            try (final IngestExternalFileOptions ingestOptions = new IngestExternalFileOptions()) {
                if (FileUtils.sizeOf(sstFile) == 0L) {
                    return;
                }
                final String filePath = sstFile.getAbsolutePath();
                LOG.info("Start ingest sst file {}.", filePath);
                this.db.ingestExternalFile(columnFamilyHandle, Collections.singletonList(filePath), ingestOptions);
            } catch (final RocksDBException e) {
                throw new StorageException("Fail to ingest sst file at path: " + sstFile, e);
            }
        }
    } finally {
        readLock.unlock();
        timeCtx.stop();
    }
}
 
Example 8
Source Project: biomedicus   Source File: RocksDBSenseVectors.java    License: Apache License 2.0
@Override
public void removeWord(int index) {
  try (WriteBatch writeBatch = new WriteBatch()) {
    try (RocksIterator rocksIterator = rocksDB.newIterator()) {
      rocksIterator.seekToFirst();
      while (rocksIterator.isValid()) {
        SparseVector sparseVector = new SparseVector(rocksIterator.value());
        sparseVector.remove(index);
        writeBatch.put(rocksIterator.key(), sparseVector.toBytes());
        // advance the iterator; without this the loop never terminates
        rocksIterator.next();
      }
    }
    rocksDB.write(new WriteOptions(), writeBatch);
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  }
}
 
Example 9
Source Project: aion   Source File: RocksDBWrapper.java    License: MIT License
@Override
public void commit() {
    check();

    if (batch != null) {
        try {
            db.write(writeOptions, batch);
        } catch (RocksDBException e) {
            LOG.error(
                    "Unable to execute batch put/update/delete operation on "
                            + this.toString()
                            + ".",
                    e);
        }
        batch.close();
        batch = null;
    }
}
 
Example 10
Source Project: jstorm   Source File: RocksDbHdfsState.java    License: Apache License 2.0
/**
 * Flush the data in the RocksDB memtable to disk, and then create a checkpoint.
 *
 * @param batchId the id of the batch to checkpoint
 */
@Override
public void checkpoint(long batchId) {
    long startTime = System.currentTimeMillis();
    try {
        rocksDb.flush(new FlushOptions());
        Checkpoint cp = Checkpoint.create(rocksDb);
        cp.createCheckpoint(getLocalCheckpointPath(batchId));
    } catch (RocksDBException e) {
        LOG.error("Failed to create checkpoint for batch-" + batchId, e);
        throw new RuntimeException(e.getMessage());
    }

    if (JStormMetrics.enabled)
        rocksDbFlushAndCpLatency.update(System.currentTimeMillis() - startTime);
}
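A checkpoint produced this way is itself a complete RocksDB database, so it can later be reopened directly. A minimal sketch of restoring from one; the path is hypothetical, standing in for the value returned by getLocalCheckpointPath(batchId):

try (Options opts = new Options();
     RocksDB restored = RocksDB.openReadOnly(opts, "/tmp/checkpoints/batch-42")) {
    // read state back from the checkpointed instance
    byte[] value = restored.get("some-key".getBytes());
} catch (RocksDBException e) {
    throw new RuntimeException("Failed to open checkpoint", e);
}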
 
Example 11
Source Project: kylin-on-parquet-v2   Source File: RocksDBLookupBuilder.java    License: Apache License 2.0
public void build(ILookupTable srcLookupTable) {
    File dbFolder = new File(dbPath);
    if (dbFolder.exists()) {
        logger.info("remove rocksdb folder:{} to rebuild table cache:{}", dbPath, tableDesc.getIdentity());
        FileUtils.deleteQuietly(dbFolder);
    } else {
        logger.info("create new rocksdb folder:{} for table cache:{}", dbPath, tableDesc.getIdentity());
        dbFolder.mkdirs();
    }
    logger.info("start to build lookup table:{} to rocks db:{}", tableDesc.getIdentity(), dbPath);
    try (RocksDB rocksDB = RocksDB.open(options, dbPath)) {
        // todo use batch may improve write performance
        for (String[] row : srcLookupTable) {
            KV kv = encoder.encode(row);
            rocksDB.put(kv.getKey(), kv.getValue());
        }
    } catch (RocksDBException e) {
        logger.error("error when put data to rocksDB", e);
        throw new RuntimeException("error when write data to rocks db", e);
    }

    logger.info("source table:{} has been written to rocks db:{}", tableDesc.getIdentity(), dbPath);
}
 
Example 12
/**
 * Delete the records that fall into [beginKeyBytes, endKeyBytes) in the given db.
 *
 * @param db the target db to be clipped.
 * @param columnFamilyHandles the column families to be clipped.
 * @param beginKeyBytes the inclusive begin key bytes
 * @param endKeyBytes the exclusive end key bytes
 */
private static void deleteRange(
	RocksDB db,
	List<ColumnFamilyHandle> columnFamilyHandles,
	byte[] beginKeyBytes,
	byte[] endKeyBytes) throws RocksDBException {

	for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) {
		try (RocksIteratorWrapper iteratorWrapper = RocksDBOperationUtils.getRocksIterator(db, columnFamilyHandle);
			RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(db)) {

			iteratorWrapper.seek(beginKeyBytes);

			while (iteratorWrapper.isValid()) {
				final byte[] currentKey = iteratorWrapper.key();
				if (beforeThePrefixBytes(currentKey, endKeyBytes)) {
					writeBatchWrapper.remove(columnFamilyHandle, currentKey);
				} else {
					break;
				}
				iteratorWrapper.next();
			}
		}
	}
}
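Worth noting: newer RocksDB versions also expose a native range delete in the Java API, which can replace the iterate-and-batch loop above when writing a range tombstone is acceptable. A sketch, assuming a RocksDB version whose Java API provides deleteRange:

for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) {
	// drops all keys in [beginKeyBytes, endKeyBytes) in one call
	db.deleteRange(columnFamilyHandle, beginKeyBytes, endKeyBytes);
}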
 
Example 13
Source Project: hudi   Source File: RocksDBDAO.java    License: Apache License 2.0
/**
 * Helper to load managed column family descriptors.
 */
private List<ColumnFamilyDescriptor> loadManagedColumnFamilies(DBOptions dbOptions) throws RocksDBException {
  final List<ColumnFamilyDescriptor> managedColumnFamilies = new ArrayList<>();
  final Options options = new Options(dbOptions, new ColumnFamilyOptions());
  List<byte[]> existing = RocksDB.listColumnFamilies(options, rocksDBBasePath);

  if (existing.isEmpty()) {
    LOG.info("No column family found. Loading default");
    managedColumnFamilies.add(getColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
  } else {
    LOG.info("Loading column families :" + existing.stream().map(String::new).collect(Collectors.toList()));
    managedColumnFamilies
        .addAll(existing.stream().map(RocksDBDAO::getColumnFamilyDescriptor).collect(Collectors.toList()));
  }
  return managedColumnFamilies;
}
 
Example 14
Source Project: fasten   Source File: RocksDaoTest.java    License: Apache License 2.0
@Test
public void databaseTest5() throws IOException, RocksDBException {
    var json = new JSONObject("{" +
            "\"index\": 2," +
            "\"product\": \"test\"," +
            "\"version\": \"0.0.1\"," +
            "\"nodes\": [9223372036854775804, 9223372036854775805, 9223372036854775806, 9223372036854775807]," +
            "\"numInternalNodes\": 3," +
            "\"edges\": [[9223372036854775804, 9223372036854775805], [9223372036854775804, 9223372036854775807], [9223372036854775805, 9223372036854775806], [9223372036854775806, 9223372036854775807]]" +
            "}");
    var graph = GidGraph.getGraph(json);
    rocksDao.saveToRocksDb(graph.getIndex(), graph.getNodes(), graph.getNumInternalNodes(), graph.getEdges());
    var graphData = rocksDao.getGraphData(graph.getIndex());
    assertEquals(graph.getNumInternalNodes(), graphData.nodes().size() - graphData.externalNodes().size());
    assertEquals(graph.getNodes().size(), graphData.nodes().size());
    assertEquals(new LongOpenHashSet(graph.getNodes()), graphData.nodes());
    assertEquals(new LongArrayList(List.of(9223372036854775805L, 9223372036854775807L)), graphData.successors(9223372036854775804L));
    assertEquals(new LongArrayList(List.of(9223372036854775806L)), graphData.successors(9223372036854775805L));
    assertEquals(new LongArrayList(List.of(9223372036854775807L)), graphData.successors(9223372036854775806L));
    assertEquals(new LongArrayList(), graphData.predecessors(9223372036854775804L));
    assertEquals(new LongArrayList(List.of(9223372036854775804L)), graphData.predecessors(9223372036854775805L));
    assertEquals(new LongArrayList(List.of(9223372036854775805L)), graphData.predecessors(9223372036854775806L));
    assertEquals(new LongArrayList(List.of(9223372036854775804L, 9223372036854775806L)), graphData.predecessors(9223372036854775807L));
    assertEquals(graph.getEdges().size(), graphData.numArcs());
    assertEquals(new LongOpenHashSet(List.of(9223372036854775807L)), graphData.externalNodes());
}
 
Example 15
Source Project: iot-mqtt   Source File: RDB.java    License: Apache License 2.0
public boolean put(final ColumnFamilyHandle cfh, final WriteOptions writeOptions, final byte[] key, final byte[] value) {
    try {
        this.DB.put(cfh, writeOptions, key, value);
        log.debug("[RocksDB] -> success put value");
    } catch (RocksDBException e) {
        log.error("[RocksDB] -> error while put, columnFamilyHandle:{}, key:{}, err:{}",
                cfh.isOwningHandle(), new String(key), e.getMessage(), e);
        return false;
    }
    return true;
}
 
Example 16
Source Project: samza   Source File: RocksDbReadingTool.java    License: Apache License 2.0
public static void main(String[] args) throws RocksDBException {
  RocksDbReadingTool tool = new RocksDbReadingTool();
  OptionSet options = tool.parser().parse(args);
  Config config = tool.loadConfig(options);
  String path = tool.getDbPath();
  String dbName = tool.getDbName();
  RocksDbKeyValueReader kvReader = new RocksDbKeyValueReader(dbName, path, config);

  for (Object obj : tool.getKeys()) {
    Object result = kvReader.get(obj);
    tool.outputResult(obj, result);
  }

  kvReader.stop();
}
 
Example 17
Source Project: act   Source File: Builder.java    License: GNU General Public License v3.0
public void processScan(List<Double> targetMZs, File scanFile)
    throws RocksDBException, ParserConfigurationException, XMLStreamException, IOException {
  DateTime start = DateTime.now();
  LOGGER.info("Accessing scan file at %s", scanFile.getAbsolutePath());
  LCMSNetCDFParser parser = new LCMSNetCDFParser();
  Iterator<LCMSSpectrum> spectrumIterator = parser.getIterator(scanFile.getAbsolutePath());

  WriteOptions writeOptions = new WriteOptions();
  /* The write-ahead log and disk synchronization features are useful when we need our writes to be durable (i.e. to
   * survive a crash).  However, our index construction is effectively a one-shot deal: if it doesn't succeed, we'll
   * just start from scratch.  Since we don't care about durability while we're constructing the index, the WAL and
   * sync features eat a lot of disk space and I/O bandwidth, which slows us down.  So long as we cleanly close the
   * index once it's built, nobody has to know that we disabled these features. */
  writeOptions.setDisableWAL(true);
  writeOptions.setSync(false);
  dbAndHandles.setWriteOptions(writeOptions);

  // TODO: split targetMZs into batches of ~100k and extract incrementally to allow huge input sets.

  LOGGER.info("Extracting traces");
  List<MZWindow> windows = targetsToWindows(targetMZs);
  extractTriples(spectrumIterator, windows);

  LOGGER.info("Writing search targets to on-disk index");
  writeWindowsToDB(windows);

  DateTime end = DateTime.now();
  LOGGER.info("Index construction completed in %dms", end.getMillis() - start.getMillis());
}
 
Example 18
Source Project: dremio-oss   Source File: TestByteStoreManager.java    License: Apache License 2.0
@Test
public void testNoDBOpenRetry() throws Exception {
  String dbPath = temporaryFolder.newFolder().getAbsolutePath();

  try (ByteStoreManager bsm = new ByteStoreManager(dbPath, false)) {
    bsm.start();
    ByteStoreManager bsm2 = new ByteStoreManager(dbPath, false, true);
    bsm2.start();

    fail("ByteStoreManager shouldn't have been able to open a locked instance");
  } catch (RocksDBException e) {
    assertTrue("RocksDBException isn't IOError type", Status.Code.IOError.equals(e.getStatus().getCode()));
    assertTrue("Incorrect error message", e.getStatus().getState().contains("While lock"));
  }
}
 
Example 19
Source Project: hugegraph   Source File: RocksDBStdSessions.java    License: Apache License 2.0
/**
 * Add a KV record to a table
 */
@Override
public void put(String table, byte[] key, byte[] value) {
    try (CFHandle cf = cf(table)) {
        this.batch.put(cf.get(), key, value);
    } catch (RocksDBException e) {
        throw new BackendException(e);
    }
}
 
Example 20
Source Project: hadoop-ozone   Source File: RocksDBStore.java    License: Apache License 2.0
public RocksDBStore(File dbFile, Options options) throws IOException {
  Preconditions.checkNotNull(dbFile, "DB file location cannot be null");
  RocksDB.loadLibrary();
  dbOptions = options;
  dbLocation = dbFile;
  writeOptions = new WriteOptions();
  try {
    db = RocksDB.open(dbOptions, dbLocation.getAbsolutePath());
    if (dbOptions.statistics() != null) {
      Map<String, String> jmxProperties = new HashMap<String, String>();
      jmxProperties.put("dbName", dbFile.getName());
      statMBeanName = HddsUtils.registerWithJmxProperties(
          "Ozone", "RocksDbStore", jmxProperties,
          RocksDBStoreMBean.create(dbOptions.statistics(), dbFile.getName()));
      if (statMBeanName == null) {
        LOG.warn("jmx registration failed during RocksDB init, db path :{}",
            dbFile.getAbsolutePath());
      }
    }
  } catch (RocksDBException e) {
    String msg = "Failed init RocksDB, db path : " + dbFile.getAbsolutePath()
        + ", " + "exception :" + (e.getCause() == null ?
        e.getClass().getCanonicalName() + " " + e.getMessage() :
        e.getCause().getClass().getCanonicalName() + " " +
            e.getCause().getMessage());
    throw new IOException(msg, e);
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("RocksDB successfully opened.");
    LOG.debug("[Option] dbLocation= {}", dbLocation.getAbsolutePath());
    LOG.debug("[Option] createIfMissing = {}", options.createIfMissing());
    LOG.debug("[Option] compactionPriority= {}", options.compactionStyle());
    LOG.debug("[Option] compressionType= {}", options.compressionType());
    LOG.debug("[Option] maxOpenFiles= {}", options.maxOpenFiles());
    LOG.debug("[Option] writeBufferSize= {}", options.writeBufferSize());
  }
}
 
Example 21
Source Project: jstorm   Source File: RocksDbUnitTest.java    License: Apache License 2.0
private static void rocksDbTest(RocksDB db, List<ColumnFamilyHandle> handlers) {
    try {
        ColumnFamilyHandle handler1 = null;
        ColumnFamilyHandle handler2 = null;
        if (handlers.size() > 0) {
            // skip default column family
            handler1 = handlers.get(1);
            handler2 = handlers.get(2);
        } else {
            handler1 = db.createColumnFamily(new ColumnFamilyDescriptor("test1".getBytes()));
            handler2 = db.createColumnFamily(new ColumnFamilyDescriptor("test2".getBytes()));
        }
        int startValue1 = getStartValue(db, handler1);
        int startValue2 = getStartValue(db, handler2);

        Checkpoint cp = Checkpoint.create(db);
   
        if (isCompaction) {
            db.compactRange();
            LOG.info("Compaction!");
        }

        long flushWaitTime = System.currentTimeMillis() + flushInterval;
        for (int i = 0; i < putNum || putNum == -1; i++) {
            db.put(handler1, String.valueOf(i % 1000).getBytes(), String.valueOf(startValue1 + i).getBytes());
            db.put(handler2, String.valueOf(i % 1000).getBytes(), String.valueOf(startValue2 + i).getBytes());
            if (isFlush && flushWaitTime <= System.currentTimeMillis()) {
                db.flush(new FlushOptions());
                if (isCheckpoint) {
                    cp.createCheckpoint(cpPath + "/" + i);
                }
                flushWaitTime = System.currentTimeMillis() + flushInterval;
            }
        }
    } catch (RocksDBException e) {
        LOG.error("Failed to put or flush", e);
    }
}
 
Example 22
Source Project: aion   Source File: RocksDBWrapper.java    License: MIT License
@Override
public void deleteBatchInternal(Collection<byte[]> keys) {
    try (WriteBatch batch = new WriteBatch()) {
        // add delete operations to batch
        for (byte[] key : keys) {
            batch.delete(key);
        }

        // bulk atomic update
        db.write(writeOptions, batch);
    } catch (RocksDBException e) {
        LOG.error("Unable to execute batch delete operation on " + this.toString() + ".", e);
    }
}
 
Example 23
private void addToRocksDB(@Nonnull byte[] toAddBytes) {
	try {
		batchWrapper.put(columnFamilyHandle, toAddBytes, DUMMY_BYTES);
	} catch (RocksDBException e) {
		throw new FlinkRuntimeException(e);
	}
}
 
Example 24
Source Project: hadoop-ozone   Source File: RDBBatchOperation.java    License: Apache License 2.0
public void delete(ColumnFamilyHandle handle, byte[] key) throws IOException {
  try {
    writeBatch.delete(handle, key);
  } catch (RocksDBException e) {
    throw new IOException("Can't record batch delete operation.", e);
  }
}
 
Example 25
Source Project: Flink-CEPplus   Source File: RocksDBWriteBatchWrapper.java    License: Apache License 2.0
public void flush() throws RocksDBException {
	if (options != null) {
		db.write(options, batch);
	} else {
		// use the default WriteOptions, if wasn't provided.
		try (WriteOptions writeOptions = new WriteOptions()) {
			db.write(writeOptions, batch);
		}
	}
	batch.clear();
}
 
Example 26
Source Project: hadoop-ozone   Source File: RDBStore.java    License: Apache License 2.0
public static IOException toIOException(String msg, RocksDBException e) {
  String statusCode = e.getStatus() == null ? "N/A" :
      e.getStatus().getCodeString();
  String errMessage = e.getMessage() == null ? "Unknown error" :
      e.getMessage();
  String output = msg + "; status : " + statusCode
      + "; message : " + errMessage;
  return new IOException(output, e);
}
 
Example 27
Source Project: outbackcdx   Source File: Index.java    License: Apache License 2.0
public String resolveAlias(String surt) {
    try {
        byte[] resolved = db.get(aliasCF, surt.getBytes(StandardCharsets.US_ASCII));
        if (resolved != null) {
            return new String(resolved, StandardCharsets.US_ASCII);
        } else {
            return surt;
        }
    } catch (RocksDBException e) {
        throw new RuntimeException(e);
    }
}
 
Example 28
Source Project: jelectrum   Source File: RocksDBMapMutationSet.java    License: MIT License
public void remove(ByteString key, ByteString value)
{
  try
  {
    ByteString w = getDBKey(key, value);
    db.remove(jdb.getWriteOption(), w.toByteArray());
  }
  catch (RocksDBException e)
  {
    throw new RuntimeException(e);
  }
}
 
Example 29
Source Project: Lealone-Plugins   Source File: RocksdbMap.java    License: Apache License 2.0
@Override
public V get(K key) {
    byte[] value;
    try {
        value = db.get(k(key));
    } catch (RocksDBException e) {
        throw ioe(e, "Failed to get " + key);
    }
    return v(value);
}
 
Example 30
Source Project: DDMQ   Source File: RDB.java    License: Apache License 2.0
public static boolean delete(final ColumnFamilyHandle cfh, final byte[] key) {
    try {
        DB.delete(cfh, key);
        LOGGER.debug("succ delete key, columnFamilyHandle:{}, key:{}", cfh.toString(), new String(key));
    } catch (RocksDBException e) {
        LOGGER.error("error while delete key, columnFamilyHandle:{}, key:{}, err:{}",
                cfh.toString(), new String(key), e.getMessage(), e);
    }
    return true;
}