org.rocksdb.WriteBatch Java Examples

The following examples show how to use org.rocksdb.WriteBatch. They are drawn from open source projects; the source file, originating project, and license are listed above each example.
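Before the examples, here is a minimal, self-contained sketch of the typical WriteBatch lifecycle that most of them follow: buffer several mutations, then apply them atomically with a single RocksDB.write call. The database path and the keys are illustrative only, not taken from any example below. Because WriteBatch and WriteOptions wrap native memory, opening them in try-with-resources blocks (as many examples below do) releases that memory deterministically.

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

public class WriteBatchSketch {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(options, "/tmp/writebatch-demo"); // illustrative path
             WriteBatch batch = new WriteBatch();
             WriteOptions writeOptions = new WriteOptions()) {
            // Buffer mutations; nothing reaches the database yet.
            batch.put("k1".getBytes(), "v1".getBytes());
            batch.put("k2".getBytes(), "v2".getBytes());
            batch.delete("k1".getBytes());
            // Apply every buffered operation atomically in one write.
            db.write(writeOptions, batch);
        }
    }
}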
Example #1
Source File: RocksDBSenseVectors.java    From biomedicus with Apache License 2.0
@Override
public void removeWord(int index) {
  try (WriteBatch writeBatch = new WriteBatch()) {
    try (RocksIterator rocksIterator = rocksDB.newIterator()) {
      rocksIterator.seekToFirst();
      while (rocksIterator.isValid()) {
        SparseVector sparseVector = new SparseVector(rocksIterator.value());
        sparseVector.remove(index);
        writeBatch.put(rocksIterator.key(), sparseVector.toBytes());
        // Advance to the next entry; without this the loop never terminates.
        rocksIterator.next();
      }
    }
    rocksDB.write(new WriteOptions(), writeBatch);
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  }
}
 
Example #2
Source File: RocksDBSenseVectors.java    From biomedicus with Apache License 2.0
@Override
public void removeWords(Collection<Integer> indexes) {
  try (WriteBatch writeBatch = new WriteBatch()) {
    try (RocksIterator rocksIterator = rocksDB.newIterator()) {
      rocksIterator.seekToFirst();
      while (rocksIterator.isValid()) {
        SparseVector sparseVector = new SparseVector(rocksIterator.value());
        sparseVector.removeAll(indexes);
        writeBatch.put(rocksIterator.key(), sparseVector.toBytes());
        // Advance to the next entry; without this the loop never terminates.
        rocksIterator.next();
      }
    }
    rocksDB.write(new WriteOptions(), writeBatch);
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  }
}
 
Example #3
Source File: RDB.java    From iot-mqtt with Apache License 2.0
public boolean deleteByPrefix(final ColumnFamilyHandle cfh, final byte[] prefixKey, boolean sync){
    // Close the iterator and batch deterministically; both wrap native memory.
    try(RocksIterator iterator = this.newIterator(cfh);
        WriteBatch writeBatch = new WriteBatch()){
        int item = 0;
        for(iterator.seek(prefixKey); iterator.isValid(); iterator.next()){
            if(new String(iterator.key()).startsWith(new String(prefixKey))) {
                writeBatch.delete(cfh, iterator.key());
                item++;
            }
        }
        if(item > 0){
            this.DB.write(sync ? WRITE_OPTIONS_SYNC : WRITE_OPTIONS_ASYNC, writeBatch);
        }
        log.debug("[RocksDB] -> succ while delete by prefix,columnFamilyHandle:{}, prefixKey:{}, nums:{}",
                cfh.toString(), new String(prefixKey), item);
    }catch(RocksDBException e){
        log.error("[RocksDB] -> error while delete by prefix, columnFamilyHandle:{}, prefixKey:{}, err:{}",
                cfh.toString(), new String(prefixKey), e);
        return false;
    }
    return true;
}
 
Example #4
Source File: RocksRawKVStore.java    From sofa-jraft with Apache License 2.0
@Override
public void put(final List<KVEntry> entries, final KVStoreClosure closure) {
    final Timer.Context timeCtx = getTimeContext("PUT_LIST");
    final Lock readLock = this.readWriteLock.readLock();
    readLock.lock();
    try (final WriteBatch batch = new WriteBatch()) {
        for (final KVEntry entry : entries) {
            batch.put(entry.getKey(), entry.getValue());
        }
        this.db.write(this.writeOptions, batch);
        setSuccess(closure, Boolean.TRUE);
    } catch (final Exception e) {
        LOG.error("Failed to [PUT_LIST], [size = {}], {}.", entries.size(), StackTraceUtil.stackTrace(e));
        setCriticalError(closure, "Fail to [PUT_LIST]", e);
    } finally {
        readLock.unlock();
        timeCtx.stop();
    }
}
 
Example #5
Source File: RocksRawKVStore.java    From sofa-jraft with Apache License 2.0
@Override
public void delete(final List<byte[]> keys, final KVStoreClosure closure) {
    final Timer.Context timeCtx = getTimeContext("DELETE_LIST");
    final Lock readLock = this.readWriteLock.readLock();
    readLock.lock();
    try (final WriteBatch batch = new WriteBatch()) {
        for (final byte[] key : keys) {
            batch.delete(key);
        }
        this.db.write(this.writeOptions, batch);
        setSuccess(closure, Boolean.TRUE);
    } catch (final Exception e) {
        LOG.error("Failed to [DELETE_LIST], [size = {}], {}.", keys.size(), StackTraceUtil.stackTrace(e));
        setCriticalError(closure, "Fail to [DELETE_LIST]", e);
    } finally {
        readLock.unlock();
        timeCtx.stop();
    }
}
 
Example #6
Source File: RDB.java    From iot-mqtt with Apache License 2.0
public List<byte[]> pollByPrefix(final ColumnFamilyHandle cfh, final byte[] prefixKey, int nums){
    List<byte[]> values = new ArrayList<>();
    int count = 0;
    // Close the iterator and batch deterministically; both wrap native memory.
    try(RocksIterator iterator = this.newIterator(cfh);
        WriteBatch writeBatch = new WriteBatch()){
        for(iterator.seek(prefixKey); iterator.isValid(); iterator.next()){
            if(new String(iterator.key()).startsWith(new String(prefixKey))) {
                values.add(iterator.value());
                writeBatch.delete(cfh, iterator.key());
                count++;
            }
            if(count >= nums) {
                break;
            }
        }
        if(count > 0){
            this.DB.write(WRITE_OPTIONS_SYNC, writeBatch);
        }
        log.debug("[RocksDB] -> succ while get by prefix,columnFamilyHandle:{}, pollByPrefix:{}",
                cfh.toString(), new String(prefixKey));
    }catch(Exception e){
        log.error("[RocksDB] -> error while get by prefix, columnFamilyHandle:{}, pollByPrefix:{}, err:{}",
                cfh.toString(), new String(prefixKey), e);
    }
    return values;
}
 
Example #7
Source File: RocksDbDataSourceImpl.java    From gsc-core with GNU Lesser General Public License v3.0
private void updateByBatchInner(Map<byte[], byte[]> rows, WriteOptions options)
        throws Exception {
    if (quitIfNotAlive()) {
        return;
    }
    try (WriteBatch batch = new WriteBatch()) {
        for (Map.Entry<byte[], byte[]> entry : rows.entrySet()) {
            if (entry.getValue() == null) {
                batch.delete(entry.getKey());
            } else {
                batch.put(entry.getKey(), entry.getValue());
            }
        }
        // Use the caller-supplied options rather than allocating (and leaking) a fresh WriteOptions.
        database.write(options, batch);
    }
}
 
Example #8
Source File: RocksDBMapMutationSet.java    From snowblossom with Apache License 2.0
@Override
public void addAll(TreeMultimap<ByteString, ByteString> map)
{
  try(WriteBatch batch = new WriteBatch())
  {
    byte[] b = new byte[0]; // values are empty; set membership is encoded entirely in the key

    for(Map.Entry<ByteString, ByteString> me : map.entries())
    {
      ByteString w = getDBKey(me.getKey(), me.getValue());
      batch.put(w.toByteArray(), b);
    }

    db.write(jdb.getWriteOption(), batch);
  }
  catch(RocksDBException e)
  {
    throw new RuntimeException(e);
  }

}
 
Example #9
Source File: RocksDBBlockHeaderStorage.java    From WeCross with Apache License 2.0
@Override
public void writeBlockHeader(long blockNumber, byte[] blockHeader) {
    if (dbClosed) {
        logger.warn("Write RocksDB error: RocksDB has been closed");
        return;
    }

    String key = blockKeyPrefix + String.valueOf(blockNumber);

    // Close the batch and options deterministically; both wrap native memory.
    try (WriteBatch writeBatch = new WriteBatch();
         WriteOptions writeOptions = new WriteOptions()) {
        writeBatch.put(numberKey.getBytes(), String.valueOf(blockNumber).getBytes());
        writeBatch.put(key.getBytes(), blockHeader);

        rocksDB.write(writeOptions, writeBatch);
        onBlockHeader(blockNumber, blockHeader);
    } catch (RocksDBException e) {
        logger.error("RocksDB write error", e);
    }
}
 
Example #10
Source File: RocksDBStore.java    From hadoop-ozone with Apache License 2.0
@Override
public void writeBatch(BatchOperation operation)
    throws IOException {
  List<BatchOperation.SingleOperation> operations =
      operation.getOperations();
  if (!operations.isEmpty()) {
    try (WriteBatch writeBatch = new WriteBatch()) {
      for (BatchOperation.SingleOperation opt : operations) {
        switch (opt.getOpt()) {
        case DELETE:
          writeBatch.delete(opt.getKey());
          break;
        case PUT:
          writeBatch.put(opt.getKey(), opt.getValue());
          break;
        default:
          throw new IllegalArgumentException("Invalid operation "
              + opt.getOpt());
        }
      }
      db.write(writeOptions, writeBatch);
    } catch (RocksDBException e) {
      throw toIOException("Batch write operation failed", e);
    }
  }
}
 
Example #11
Source File: RocksDbDataSourceImpl.java    From gsc-core with GNU Lesser General Public License v3.0
private void updateByBatchInner(Map<byte[], byte[]> rows) throws Exception {
    if (quitIfNotAlive()) {
        return;
    }
    try (WriteBatch batch = new WriteBatch();
         WriteOptions writeOptions = new WriteOptions()) {
        for (Map.Entry<byte[], byte[]> entry : rows.entrySet()) {
            if (entry.getValue() == null) {
                batch.delete(entry.getKey());
            } else {
                batch.put(entry.getKey(), entry.getValue());
            }
        }
        database.write(writeOptions, batch);
    }
}
 
Example #12
Source File: TimerStore.java    From KitDB with Apache License 2.0
public static List<TData> rangeDel(DB db, String head, int start, int end, int limit) throws RocksDBException {
    List<TData> entries = new ArrayList<>();
    List<byte[]> dels = new ArrayList<>();

    try (final RocksIterator iterator = db.newIterator(SstColumnFamily.DEFAULT)) {
        iterator.seek(ArrayKits.addAll(getHead(head), ArrayKits.intToBytes(start)));
        long index = 0;
        int count = 0;
        while (iterator.isValid() && index <= end && count < limit) {
            byte[] key_bs = iterator.key();
            if (!BytesUtil.checkHead(getHead(head), key_bs)) break;
            TData tData = new TData();
            tData.setTime(ArrayKits.bytesToInt(ArrayKits.sub(key_bs, 3, 7), 0));
            tData.setValue(ArrayKits.sub(key_bs, 7, key_bs.length));
            index = tData.getTime();
            if (index > end) {
                break;
            }
            entries.add(tData);
            dels.add(key_bs);
            count++;
            iterator.next();
        }
    }

    if (dels.isEmpty()) {
        return entries;
    }
    try (final WriteBatch batch = new WriteBatch()) {
        for (byte[] del : dels) {
            batch.delete(db.defHandle, del);
        }
        db.rocksDB().write(db.writeOptions, batch);
    }

    return entries;

}
 
Example #13
Source File: RocksDBDAO.java    From hudi with Apache License 2.0
/**
 * Helper to add delete operation in batch.
 *
 * @param batch Batch Handle
 * @param columnFamilyName Column Family
 * @param key Key
 */
public <K extends Serializable> void deleteInBatch(WriteBatch batch, String columnFamilyName, K key) {
  try {
    batch.delete(managedHandlesMap.get(columnFamilyName), SerializationUtils.serialize(key));
  } catch (Exception e) {
    throw new HoodieException(e);
  }
}
 
Example #14
Source File: RocksDBDAO.java    From hudi with Apache License 2.0
/**
 * Helper to add delete operation in batch.
 *
 * @param batch Batch Handle
 * @param columnFamilyName Column Family
 * @param key Key
 */
public void deleteInBatch(WriteBatch batch, String columnFamilyName, String key) {
  try {
    batch.delete(managedHandlesMap.get(columnFamilyName), key.getBytes());
  } catch (RocksDBException e) {
    throw new HoodieException(e);
  }
}
 
Example #15
Source File: RocksDBCache.java    From kcache with Apache License 2.0
@Override
public void prepareBatch(final Map<byte[], byte[]> entries,
                         final WriteBatch batch) throws RocksDBException {
    for (final Map.Entry<byte[], byte[]> entry : entries.entrySet()) {
        Objects.requireNonNull(entry.getKey(), "key cannot be null");
        addToBatch(entry.getKey(), entry.getValue(), batch);
    }
}
 
Example #16
Source File: RocksDBCache.java    From kcache with Apache License 2.0
@Override
public void addToBatch(final byte[] key,
                       final byte[] value,
                       final WriteBatch batch) throws RocksDBException {
    if (value == null) {
        batch.delete(columnFamily, key);
    } else {
        batch.put(columnFamily, key, value);
    }
}
 
Example #17
Source File: RocksDBStdSessions.java    From hugegraph with Apache License 2.0
public StdSession(HugeConfig conf) {
    boolean bulkload = conf.get(RocksDBOptions.BULKLOAD_MODE);
    this.batch = new WriteBatch();
    this.writeOptions = new WriteOptions();
    this.writeOptions.setDisableWAL(bulkload);
    //this.writeOptions.setSync(false);
}
 
Example #18
Source File: RocksDBDAO.java    From hudi with Apache License 2.0
/**
 * Helper to add put operation in batch.
 *
 * @param batch Batch Handle
 * @param columnFamilyName Column Family
 * @param key Key
 * @param value Payload
 * @param <T> Type of payload
 */
public <K extends Serializable, T extends Serializable> void putInBatch(WriteBatch batch, String columnFamilyName,
    K key, T value) {
  try {
    byte[] keyBytes = SerializationUtils.serialize(key);
    byte[] payload = SerializationUtils.serialize(value);
    batch.put(managedHandlesMap.get(columnFamilyName), keyBytes, payload);
  } catch (Exception e) {
    throw new HoodieException(e);
  }
}
 
Example #19
Source File: RocksDBDAO.java    From hudi with Apache License 2.0 5 votes vote down vote up
/**
 * Perform a batch write operation.
 */
public void writeBatch(BatchHandler handler) {
  try (WriteBatch batch = new WriteBatch();
       WriteOptions writeOptions = new WriteOptions()) {
    handler.apply(batch);
    getRocksDB().write(writeOptions, batch);
  } catch (RocksDBException re) {
    throw new HoodieException(re);
  }
}
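A minimal usage sketch for this helper, assuming a RocksDBDAO instance named dao, a registered column family "view_state", and that BatchHandler is a functional interface over WriteBatch (all three are assumptions, not taken from the hudi source):

// Hypothetical caller; "dao" and the column family name are assumptions.
dao.writeBatch(batch -> {
    dao.putInBatch(batch, "view_state", "partition-0001", "payload"); // key and value must be Serializable
    dao.deleteInBatch(batch, "view_state", "partition-0000");
});

Every put and delete registered by the handler is then applied atomically by the single write call above.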
 
Example #20
Source File: RDB.java    From DDMQ with Apache License 2.0
private static boolean write(final WriteOptions writeOptions, final WriteBatch writeBatch) {
    try {
        DB.write(writeOptions, writeBatch);
        LOGGER.debug("succ write writeBatch, size:{}", writeBatch.count());
    } catch (RocksDBException e) {
        // TODO: 2017/11/8 report the write failure
        LOGGER.error("error while write batch, err:{}", e.getMessage(), e);
        return false;
    }
    return true;
}
 
Example #21
Source File: RocksDBBatchOperation.java    From nuls-v2 with MIT License
RocksDBBatchOperation(String table) {
    this.table = table;
    db = RocksDBManager.getTable(table);
    if (db != null) {
        batch = new WriteBatch();
    }
}
 
Example #22
Source File: RocksDBWrapper.java    From aion with MIT License
@Override
public void deleteBatchInternal(Collection<byte[]> keys) {
    try (WriteBatch batch = new WriteBatch()) {
        // add delete operations to batch
        for (byte[] key : keys) {
            batch.delete(key);
        }

        // bulk atomic update
        db.write(writeOptions, batch);
    } catch (RocksDBException e) {
        LOG.error("Unable to execute batch delete operation on " + this.toString() + ".", e);
    }
}
 
Example #23
Source File: RDB.java    From iot-mqtt with Apache License 2.0
public boolean write(final WriteOptions writeOptions,final WriteBatch writeBatch){
    try {
        this.DB.write(writeOptions,writeBatch);
        log.debug("[RocksDB] -> success write writeBatch, size:{}", writeBatch.count());
    } catch (RocksDBException e) {
        log.error("[RocksDB] -> error while write batch, err:{}", e.getMessage(), e);
        return false;
    }
    return true;
}
 
Example #24
Source File: ChangePollingThread.java    From outbackcdx with Apache License 2.0
private void commitWriteBatch(Index index, long sequenceNumber, String writeBatch) throws RocksDBException {
    Base64.Decoder decoder = Base64.getDecoder();
    byte[] decodedWriteBatch = decoder.decode(writeBatch);
    try (WriteBatch batch = new WriteBatch(decodedWriteBatch)){
        batch.put(SEQ_NUM_KEY, String.valueOf(sequenceNumber).getBytes("ASCII"));
        index.commitBatch(batch);
    } catch (UnsupportedEncodingException e){
        throw new RuntimeException(e); // ASCII is everywhere; this shouldn't happen.
    }
}
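Example #24 relies on the WriteBatch(byte[]) constructor, which rebuilds a batch from its serialized form; the inverse direction is WriteBatch.data(). A minimal round-trip sketch, assuming an open RocksDB handle db (the key and value are illustrative):

try (WriteBatch original = new WriteBatch();
     WriteOptions writeOptions = new WriteOptions()) {
    original.put("k".getBytes(), "v".getBytes());
    byte[] serialized = original.data();          // serialized batch, e.g. for replication
    try (WriteBatch restored = new WriteBatch(serialized)) {
        db.write(writeOptions, restored);         // replays the same put("k", "v")
    }
} catch (RocksDBException e) {
    throw new RuntimeException(e);
}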
 
Example #25
Source File: Index.java    From outbackcdx with Apache License 2.0
private void updateExistingRecordsWithNewAliases() throws IOException {
    try (WriteBatch wb = new WriteBatch()) {
        for (Map.Entry<String, String> entry : newAliases.entrySet()) {
            updateExistingRecordsWithNewAlias(wb, entry.getKey(), entry.getValue());
        }
        try {
            commitBatch(wb);
        } catch (RocksDBException e) {
            throw new RuntimeException(e);
        }
    }
}
 
Example #26
Source File: RocksDBWriteBatchWrapper.java    From flink with Apache License 2.0
public RocksDBWriteBatchWrapper(@Nonnull RocksDB rocksDB, @Nullable WriteOptions options, int capacity) {
	Preconditions.checkArgument(capacity >= MIN_CAPACITY && capacity <= MAX_CAPACITY,
		"capacity should be between " + MIN_CAPACITY + " and " + MAX_CAPACITY);

	this.db = rocksDB;
	this.options = options;
	this.capacity = capacity;
	this.batch = new WriteBatch(this.capacity * PER_RECORD_BYTES);
}
 
Example #27
Source File: RocksDBWriteBatchWrapper.java    From flink with Apache License 2.0
public RocksDBWriteBatchWrapper(@Nonnull RocksDB rocksDB, @Nullable WriteOptions options, int capacity, long batchSize) {
	Preconditions.checkArgument(capacity >= MIN_CAPACITY && capacity <= MAX_CAPACITY,
		"capacity should be between " + MIN_CAPACITY + " and " + MAX_CAPACITY);
	Preconditions.checkArgument(batchSize >= 0, "Max batch size have to be no negative.");

	this.db = rocksDB;
	this.options = options;
	this.capacity = capacity;
	this.batchSize = batchSize;
	if (this.batchSize > 0) {
		this.batch = new WriteBatch((int) Math.min(this.batchSize, this.capacity * PER_RECORD_BYTES));
	} else {
		this.batch = new WriteBatch(this.capacity * PER_RECORD_BYTES);
	}
}
 
Example #28
Source File: AbstractRocksDBTable.java    From geowave with Apache License 2.0
private BatchWriter(
    final WriteBatch dataToWrite,
    final RocksDB db,
    final WriteOptions options,
    final Semaphore writeSemaphore) {
  super();
  this.dataToWrite = dataToWrite;
  this.db = db;
  this.options = options;
  this.writeSemaphore = writeSemaphore;
}
 
Example #29
Source File: EzRocksDbBatch.java    From ezdb with Apache License 2.0
public EzRocksDbBatch(RocksDB db, Serde<H> hashKeySerde, Serde<R> rangeKeySerde,
		Serde<V> valueSerde) {
	this.writeOptions = new WriteOptions();
	this.db = db;
	this.writeBatch = new WriteBatch();
	this.hashKeySerde = hashKeySerde;
	this.rangeKeySerde = rangeKeySerde;
	this.valueSerde = valueSerde;
}