Java Code Examples for org.iq80.leveldb.WriteBatch#put()

The following examples show how to use org.iq80.leveldb.WriteBatch#put(). They are taken from open-source projects; the source file, project, and license are noted above each example.
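Before the project examples, here is a minimal sketch of the basic pattern they all share: stage one or more put() (and optionally delete()) calls on a WriteBatch, then apply them atomically with DB.write() and close the batch. It assumes the pure-Java Iq80DBFactory implementation; the database path, class name, and keys are illustrative only.

import java.io.File;
import java.io.IOException;

import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.WriteBatch;

import static org.iq80.leveldb.impl.Iq80DBFactory.bytes;
import static org.iq80.leveldb.impl.Iq80DBFactory.factory;

public class WriteBatchPutSketch {
    public static void main(String[] args) throws IOException {
        Options options = new Options().createIfMissing(true);
        // "example-db" is an arbitrary path chosen for this sketch.
        try (DB db = factory.open(new File("example-db"), options)) {
            // Stage several mutations; nothing is persisted until db.write(batch).
            try (WriteBatch batch = db.createWriteBatch()) {
                batch.put(bytes("key-1"), bytes("value-1"));
                batch.put(bytes("key-2"), bytes("value-2"));
                batch.delete(bytes("stale-key"));
                db.write(batch); // all staged operations are applied atomically
            }
        }
    }
}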
Example 1
Source File: NMLeveldbStateStoreService.java    From hadoop with Apache License 2.0
@Override
public void finishResourceLocalization(String user, ApplicationId appId,
    LocalizedResourceProto proto) throws IOException {
  String localPath = proto.getLocalPath();
  String startedKey = getResourceStartedKey(user, appId, localPath);
  String completedKey = getResourceCompletedKey(user, appId, localPath);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Storing localized resource to " + completedKey);
  }
  try {
    WriteBatch batch = db.createWriteBatch();
    try {
      batch.delete(bytes(startedKey));
      batch.put(bytes(completedKey), proto.toByteArray());
      db.write(batch);
    } finally {
      batch.close();
    }
  } catch (DBException e) {
    throw new IOException(e);
  }
}
 
Example 2
Source File: NMLeveldbStateStoreService.java    From big-c with Apache License 2.0
@Override
public void finishResourceLocalization(String user, ApplicationId appId,
    LocalizedResourceProto proto) throws IOException {
  String localPath = proto.getLocalPath();
  String startedKey = getResourceStartedKey(user, appId, localPath);
  String completedKey = getResourceCompletedKey(user, appId, localPath);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Storing localized resource to " + completedKey);
  }
  try {
    WriteBatch batch = db.createWriteBatch();
    try {
      batch.delete(bytes(startedKey));
      batch.put(bytes(completedKey), proto.toByteArray());
      db.write(batch);
    } finally {
      batch.close();
    }
  } catch (DBException e) {
    throw new IOException(e);
  }
}
 
Example 3
Source File: LevelDbUtil.java    From mcg-helper with Apache License 2.0
public static void batchPut() throws IOException {
    WriteBatch writeBatch = db.createWriteBatch();
    writeBatch.put("key-03".getBytes(Constants.CHARSET), "value-03".getBytes(Constants.CHARSET));
    //writeBatch.delete("key-01".getBytes(charset));
    db.write(writeBatch);
    writeBatch.close();
}
 
Example 4
Source File: LevelDBStorage.java    From greycat with Apache License 2.0
@Override
public final void putSilent(Buffer stream, Callback<Buffer> callback) {
    if (!isConnected) {
        throw new RuntimeException(_connectedError);
    }
    try {
        Buffer result = graph.newBuffer();
        WriteBatch batch = db.createWriteBatch();
        BufferIterator it = stream.iterator();
        boolean isFirst = true;
        while (it.hasNext()) {
            Buffer keyView = it.next();
            Buffer valueView = it.next();
            if (valueView != null) {
                batch.put(keyView.data(), valueView.data());
            }
            if (isFirst) {
                isFirst = false;
            } else {
                result.write(Constants.KEY_SEP);
            }
            result.writeAll(keyView.data());
            result.write(Constants.KEY_SEP);
            Base64.encodeLongToBuffer(HashHelper.hashBuffer(valueView, 0, valueView.length()), result);
        }
        db.write(batch);
        for (int i = 0; i < updates.size(); i++) {
            final Callback<Buffer> explicit = updates.get(i);
            explicit.on(result);
        }
        callback.on(result);
    } catch (Exception e) {
        e.printStackTrace();
        if (callback != null) {
            callback.on(null);
        }
    }
}
 
Example 5
Source File: LeveldbRMStateStore.java    From hadoop with Apache License 2.0
private void storeOrUpdateRMDT(RMDelegationTokenIdentifier tokenId,
    Long renewDate, boolean isUpdate) throws IOException {
  String tokenKey = getRMDTTokenNodeKey(tokenId);
  RMDelegationTokenIdentifierData tokenData =
      new RMDelegationTokenIdentifierData(tokenId, renewDate);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Storing token to " + tokenKey);
  }
  try {
    WriteBatch batch = db.createWriteBatch();
    try {
      batch.put(bytes(tokenKey), tokenData.toByteArray());
      if(!isUpdate) {
        ByteArrayOutputStream bs = new ByteArrayOutputStream();
        try (DataOutputStream ds = new DataOutputStream(bs)) {
          ds.writeInt(tokenId.getSequenceNumber());
        }
        if (LOG.isDebugEnabled()) {
          LOG.debug("Storing " + tokenId.getSequenceNumber() + " to "
              + RM_DT_SEQUENCE_NUMBER_KEY);   
        }
        batch.put(bytes(RM_DT_SEQUENCE_NUMBER_KEY), bs.toByteArray());
      }
      db.write(batch);
    } finally {
      batch.close();
    }
  } catch (DBException e) {
    throw new IOException(e);
  }
}
 
Example 6
Source File: LevelDb.java    From benchmarks with Apache License 2.0
@SuppressWarnings("PMD.CloseResource")
void write(final int batchSize) throws IOException {
  final int rndByteMax = RND_MB.length - valSize;
  int rndByteOffset = 0;
  WriteBatch batch = db.createWriteBatch();
  for (int i = 0; i < keys.length; i++) {
    final int key = keys[i];
    if (intKey) {
      wkb.putInt(0, key, LITTLE_ENDIAN);
    } else {
      wkb.putStringWithoutLengthUtf8(0, padKey(key));
    }
    if (valRandom) {
      wvb.putBytes(0, RND_MB, rndByteOffset, valSize);
      rndByteOffset += valSize;
      if (rndByteOffset >= rndByteMax) {
        rndByteOffset = 0;
      }
    } else {
      wvb.putInt(0, key);
    }
    batch.put(wkb.byteArray(), wvb.byteArray());
    if (i % batchSize == 0) {
      db.write(batch);
      batch.close();
      batch = db.createWriteBatch();
    }
  }
  db.write(batch); // possible partial batch
  batch.close();
}
 
Example 7
Source File: LeveldbRMStateStore.java    From big-c with Apache License 2.0
private void storeOrUpdateRMDT(RMDelegationTokenIdentifier tokenId,
    Long renewDate, boolean isUpdate) throws IOException {
  String tokenKey = getRMDTTokenNodeKey(tokenId);
  RMDelegationTokenIdentifierData tokenData =
      new RMDelegationTokenIdentifierData(tokenId, renewDate);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Storing token to " + tokenKey);
  }
  try {
    WriteBatch batch = db.createWriteBatch();
    try {
      batch.put(bytes(tokenKey), tokenData.toByteArray());
      if(!isUpdate) {
        ByteArrayOutputStream bs = new ByteArrayOutputStream();
        try (DataOutputStream ds = new DataOutputStream(bs)) {
          ds.writeInt(tokenId.getSequenceNumber());
        }
        if (LOG.isDebugEnabled()) {
          LOG.debug("Storing " + tokenId.getSequenceNumber() + " to "
              + RM_DT_SEQUENCE_NUMBER_KEY);   
        }
        batch.put(bytes(RM_DT_SEQUENCE_NUMBER_KEY), bs.toByteArray());
      }
      db.write(batch);
    } finally {
      batch.close();
    }
  } catch (DBException e) {
    throw new IOException(e);
  }
}
 
Example 8
Source File: StandaloneDirectoryClient.java    From warp10-platform with Apache License 2.0
private void store(byte[] key, byte[] value) throws IOException {
  
  if (null == this.db) {
    return;
  }
  
  WriteBatch batch = perThreadWriteBatch.get();

  AtomicLong size = perThreadWriteBatchSize.get();
  
  boolean written = false;
  
  WriteOptions options = new WriteOptions().sync(null == key || null == value || 1.0 == syncrate);
  
  try {
    if (null != key && null != value) {
      batch.put(key, value);
      size.addAndGet(key.length + value.length);
    }
    
    if (null == key || null == value || size.get() > MAX_BATCH_SIZE) {
      
      if (syncwrites && !options.sync()) {
        options = new WriteOptions().sync(Math.random() < syncrate);
      }
      
      this.db.write(batch, options);
      size.set(0L);
      perThreadWriteBatch.remove();
      written = true;
    }
  } finally {
    if (written) {
      batch.close();
    }
  }    
}
 
Example 9
Source File: StandaloneStoreClient.java    From warp10-platform with Apache License 2.0
private void store(List<byte[][]> kvs) throws IOException {

  //WriteBatch batch = this.db.createWriteBatch();
  
  WriteBatch batch = perThreadWriteBatch.get();

  AtomicLong size = perThreadWriteBatchSize.get();
  
  boolean written = false;
  
  try {
    if (null != kvs) {
      for (byte[][] kv: kvs) {
        batch.put(kv[0], kv[1]);
        size.addAndGet(kv[0].length + kv[1].length);
      }        
    }
    
    if (null == kvs || size.get() > MAX_ENCODER_SIZE) {
      
      WriteOptions options = new WriteOptions().sync(null == kvs || 1.0 == syncrate);
      
      if (syncwrites && !options.sync()) {
        options = new WriteOptions().sync(Math.random() < syncrate);
      }

      this.db.write(batch, options);
      size.set(0L);
      perThreadWriteBatch.remove();
      written = true;
    }
    //this.db.write(batch);
  } finally {
    if (written) {
      batch.close();
    }
  }
}
 
Example 10
Source File: LevelDBStorage.java    From greycat with Apache License 2.0
@Override
public void put(Buffer stream, Callback<Boolean> callback) {
    if (!isConnected) {
        throw new RuntimeException(_connectedError);
    }
    try {
        Buffer result = null;
        if (updates.size() != 0) {
            result = graph.newBuffer();
        }
        WriteBatch batch = db.createWriteBatch();
        BufferIterator it = stream.iterator();
        boolean isFirst = true;
        while (it.hasNext()) {
            Buffer keyView = it.next();
            Buffer valueView = it.next();
            if (valueView != null) {
                batch.put(keyView.data(), valueView.data());
            }
            if (result != null) {
                if (isFirst) {
                    isFirst = false;
                } else {
                    result.write(Constants.KEY_SEP);
                }
                result.writeAll(keyView.data());
                result.write(Constants.KEY_SEP);
                Base64.encodeLongToBuffer(HashHelper.hashBuffer(valueView, 0, valueView.length()), result);
            }
        }
        db.write(batch);
        batch.close();
        for (int i = 0; i < updates.size(); i++) {
            final Callback<Buffer> explicit = updates.get(i);
            explicit.on(result);
        }
        if (callback != null) {
            callback.on(true);
        }
    } catch (Exception e) {
        e.printStackTrace();
        if (callback != null) {
            callback.on(false);
        }
    }
}
 
Example 11
Source File: LevelDB.java    From SPADE with GNU General Public License v3.0
private boolean flushBulkEntries(boolean forcedFlush)
{
    try
    {
        if((globalTxCount % GLOBAL_TX_SIZE == 0) || forcedFlush)
        {
            /*
                processing child vertex
             */
            WriteBatch childBatch = childDatabase.createWriteBatch();
            for(Map.Entry<String, StringBuilder> childCacheEntry : childListCache.entrySet())
            {
                String childVertexHash = childCacheEntry.getKey();
                byte[] childVertexHashBytes = bytes(childVertexHash);
                byte[] currentChildEntryBytes = childDatabase.get(childVertexHashBytes);
                String newChildEntry;
                if(currentChildEntryBytes == null)
                {
                    newChildEntry = childCacheEntry.getValue().toString();
                } else
                {
                    String currentChildEntry = asString(currentChildEntryBytes);
                    newChildEntry = currentChildEntry + HASH_SEPARATOR + childCacheEntry.getValue();
                }
                childBatch.put(childVertexHashBytes, bytes(newChildEntry));
            }
            childDatabase.write(childBatch);
            childBatch.close();
            childListCache.clear();

            /*
                processing parent vertex
             */
            WriteBatch parentBatch = parentDatabase.createWriteBatch();
            for(Map.Entry<String, StringBuilder> parentCacheEntry : parentListCache.entrySet())
            {
                String parentVertexHash = parentCacheEntry.getKey();
                byte[] parentVertexHashBytes = bytes(parentVertexHash);
                byte[] currentParentEntryBytes = parentDatabase.get(parentVertexHashBytes);
                String newParentEntry;
                if(currentParentEntryBytes == null)
                {
                    newParentEntry = parentCacheEntry.getValue().toString();
                } else
                {
                    String currentParentEntry = asString(currentParentEntryBytes);
                    newParentEntry = currentParentEntry + HASH_SEPARATOR + parentCacheEntry.getValue().toString();
                }
                parentBatch.put(parentVertexHashBytes, bytes(newParentEntry));
            }
            parentDatabase.write(parentBatch);
            parentBatch.close();
            parentListCache.clear();
        }
    }
    catch(Exception ex)
    {
        logger.log(Level.SEVERE, "Error bulk flushing cached entries to scaffold!", ex);
        return false;
    }

    return true;
}
 
Example 12
Source File: LevelDB.java    From SPADE with GNU General Public License v3.0
public boolean flushBulkEntries1(boolean forcedFlush)
{
    try
    {
        if((globalTxCount % GLOBAL_TX_SIZE == 0) || forcedFlush)
        {
            WriteBatch batch = scaffoldDatabase.createWriteBatch();
            Set<String> hashList = new HashSet<>(childListCache.keySet());
            hashList.addAll(parentListCache.keySet());
            for(String hash: hashList)
            {
                byte[] hashBytes = bytes(hash);
                StringBuilder childCacheBuilder = childListCache.get(hash);
                String childCache = "";
                if(childCacheBuilder != null)
                {
                    childCache = childCacheBuilder.toString();
                }
                StringBuilder parentCacheBuilder = parentListCache.get(hash);
                String parentCache = "";
                if(parentCacheBuilder != null)
                {
                    parentCache = parentCacheBuilder.toString();
                }
                byte[] currentScaffoldEntryBytes = scaffoldDatabase.get(hashBytes);
                String newScaffoldEntry;
                if(currentScaffoldEntryBytes == null)
                {
                    Set<String> childHashSet = new HashSet<>(Arrays.asList(childCache.split(HASH_SEPARATOR)));
                    Set<String> parentHashSet = new HashSet<>(Arrays.asList(parentCache.split(HASH_SEPARATOR)));
                    newScaffoldEntry = StringUtils.join(childHashSet, HASH_SEPARATOR) + LIST_SEPARATOR +
                            StringUtils.join(parentHashSet, HASH_SEPARATOR);
                }
                else
                {
                    String currentScaffoldEntry = asString(currentScaffoldEntryBytes);
                    String[] neighborHashList = currentScaffoldEntry.split(LIST_SEPARATOR, -1);
                    String childrenHashList = neighborHashList[0];
                    String parentHashList = neighborHashList[1];
                    Set<String> cachedChildrenHashSet = new HashSet<>(Arrays.asList(childCache.split(HASH_SEPARATOR)));
                    Set<String> cachedParentHashSet = new HashSet<>(Arrays.asList(parentCache.split(HASH_SEPARATOR)));

                    Set<String> currentChildrenHashSet = new HashSet<>(Arrays.asList(childrenHashList.split(HASH_SEPARATOR)));
                    Set<String> currentParentHashSet = new HashSet<>(Arrays.asList(parentHashList.split(HASH_SEPARATOR)));
                    currentChildrenHashSet.addAll(cachedChildrenHashSet);
                    currentParentHashSet.addAll(cachedParentHashSet);

                    newScaffoldEntry = StringUtils.join(currentChildrenHashSet, HASH_SEPARATOR) + LIST_SEPARATOR +
                            StringUtils.join(currentParentHashSet, HASH_SEPARATOR);
                }
                batch.put(hashBytes, bytes(newScaffoldEntry));
            }
            childListCache.clear();
            parentListCache.clear();
            scaffoldDatabase.write(batch);
            batch.close();
        }
    }
    catch(Exception ex)
    {
        logger.log(Level.SEVERE, "Error bulk flushing cached entries to scaffold!", ex);
        return false;
    }

    return true;
}