org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker.RamDiskReplica Java Examples

The following examples show how to use org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker.RamDiskReplica. Each example notes its source file and the project it was taken from.
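
Before diving into the examples, note that RamDiskReplica instances are created and owned by a RamDiskReplicaTracker; every example below first obtains a replica from the tracker (or is handed one) and then acts on it. The sketch below illustrates that lookup pattern using only methods that appear in the examples on this page. It is a minimal, hypothetical sketch: the class and method names of the sketch itself are invented for illustration, and RamDiskReplicaTracker and RamDiskReplica are internal DataNode classes, so depending on your Hadoop version they may not be accessible outside the org.apache.hadoop.hdfs.server.datanode.fsdataset.impl package.

import java.io.File;

import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker.RamDiskReplica;

public class RamDiskReplicaLookupSketch {

  /**
   * Look up the tracker entry for a block and print its state.
   * Returns null if the tracker has no entry for the block (for example,
   * because it was never written to RAM disk or was already discarded).
   */
  static RamDiskReplica describeReplica(RamDiskReplicaTracker tracker,
                                        String bpid, long blockId) {
    RamDiskReplica replica = tracker.getReplica(bpid, blockId);
    if (replica == null) {
      return null;
    }
    // Accessors used throughout the examples below.
    System.out.println("Block pool: " + replica.getBlockPoolId()
        + ", block id: " + replica.getBlockId()
        + ", persisted: " + replica.getIsPersisted());
    if (replica.getIsPersisted()) {
      // Once the lazy persist pass has run, the saved on-disk copies
      // are reachable through the replica (see Example #1).
      File savedBlockFile = replica.getSavedBlockFile();
      File savedMetaFile = replica.getSavedMetaFile();
      System.out.println("Saved copy: " + savedBlockFile + " / " + savedMetaFile);
    }
    return replica;
  }
}
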
Example #1
Source File: BlockPoolSlice.java    From lucene-solr with Apache License 2.0
/**
 * Move a persisted replica from the lazypersist directory to a
 * subdirectory under finalized.
 */
ReplicaInfo activateSavedReplica(ReplicaInfo replicaInfo,
                                 RamDiskReplica replicaState) throws IOException {
  File metaFile = replicaState.getSavedMetaFile();
  File blockFile = replicaState.getSavedBlockFile();
  final long blockId = replicaInfo.getBlockId();
  final File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, blockId);
  final File targetBlockFile = new File(blockDir, blockFile.getName());
  final File targetMetaFile = new File(blockDir, metaFile.getName());
  fileIoProvider.moveFile(volume, blockFile, targetBlockFile);
  FsDatasetImpl.LOG.info("Moved " + blockFile + " to " + targetBlockFile);
  fileIoProvider.moveFile(volume, metaFile, targetMetaFile);
  FsDatasetImpl.LOG.info("Moved " + metaFile + " to " + targetMetaFile);

  ReplicaInfo newReplicaInfo =
      new ReplicaBuilder(ReplicaState.FINALIZED)
          .setBlockId(blockId)
          .setLength(replicaInfo.getBytesOnDisk())
          .setGenerationStamp(replicaInfo.getGenerationStamp())
          .setFsVolume(replicaState.getLazyPersistVolume())
          .setDirectoryToUse(targetBlockFile.getParentFile())
          .build();
  return newReplicaInfo;
}
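Note the two overloads of activateSavedReplica on this page: this one takes the RamDiskReplica itself and returns the rebuilt ReplicaInfo, while the one invoked in Example #5 below takes the saved block and meta files as separate arguments and returns the new block File.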
 
Example #2
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0 (an identical example appears in the big-c project)
@Override
public void onFailLazyPersist(String bpId, long blockId) {
  // Look up the tracker entry for this block; it may already be gone.
  RamDiskReplica block = ramDiskReplicaTracker.getReplica(bpId, blockId);
  if (block != null) {
    LOG.warn("Failed to save replica " + block + ". re-enqueueing it.");
    ramDiskReplicaTracker.reenqueueReplicaNotPersisted(block);
  }
}
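This callback is the failure path of the lazy writer: Example #4 below shows saveNextReplica() invoking onFailLazyPersist() from its finally block, so a replica whose persist attempt failed is re-queued and retried later rather than dropped.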
 
Example #3
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0 (an identical example appears in the big-c project)
/**
 * We're informed that a block is no longer valid.  We
 * could lazily garbage-collect the block, but why bother?
 * Just get rid of it.
 */
@Override // FsDatasetSpi
public void invalidate(String bpid, Block[] invalidBlks) throws IOException {
  final List<String> errors = new ArrayList<String>();
  for (int i = 0; i < invalidBlks.length; i++) {
    final File f;
    final FsVolumeImpl v;
    synchronized (this) {
      final ReplicaInfo info = volumeMap.get(bpid, invalidBlks[i]);
      if (info == null) {
        // It is okay if the block is not found -- it may have been deleted earlier.
        LOG.info("Failed to delete replica " + invalidBlks[i]
            + ": ReplicaInfo not found.");
        continue;
      }
      if (info.getGenerationStamp() != invalidBlks[i].getGenerationStamp()) {
        errors.add("Failed to delete replica " + invalidBlks[i]
            + ": GenerationStamp not matched, info=" + info);
        continue;
      }
      f = info.getBlockFile();
      v = (FsVolumeImpl)info.getVolume();
      if (v == null) {
        errors.add("Failed to delete replica " + invalidBlks[i]
            +  ". No volume for this replica, file=" + f);
        continue;
      }
      File parent = f.getParentFile();
      if (parent == null) {
        errors.add("Failed to delete replica " + invalidBlks[i]
            +  ". Parent not found for file " + f);
        continue;
      }
      ReplicaInfo removing = volumeMap.remove(bpid, invalidBlks[i]);
      addDeletingBlock(bpid, removing.getBlockId());
      if (LOG.isDebugEnabled()) {
        LOG.debug("Block file " + removing.getBlockFile().getName()
            + " is to be deleted");
      }
    }

    if (v.isTransientStorage()) {
      RamDiskReplica replicaInfo =
        ramDiskReplicaTracker.getReplica(bpid, invalidBlks[i].getBlockId());
      if (replicaInfo != null) {
        if (!replicaInfo.getIsPersisted()) {
          datanode.getMetrics().incrRamDiskBlocksDeletedBeforeLazyPersisted();
        }
        ramDiskReplicaTracker.discardReplica(replicaInfo.getBlockPoolId(),
          replicaInfo.getBlockId(), true);
      }
    }

    // If a DFSClient has the replica in its cache of short-circuit file
    // descriptors (and the client is using ShortCircuitShm), invalidate it.
    datanode.getShortCircuitRegistry().processBlockInvalidation(
              new ExtendedBlockId(invalidBlks[i].getBlockId(), bpid));

    // If the block is cached, start uncaching it.
    cacheManager.uncacheBlock(bpid, invalidBlks[i].getBlockId());

    // Delete the block asynchronously to make sure we can do it fast enough.
    // It's ok to unlink the block file before the uncache operation
    // finishes.
    try {
      asyncDiskService.deleteAsync(v.obtainReference(), f,
          FsDatasetUtil.getMetaFile(f, invalidBlks[i].getGenerationStamp()),
          new ExtendedBlock(bpid, invalidBlks[i]),
          dataStorage.getTrashDirectoryForBlockFile(bpid, f));
    } catch (ClosedChannelException e) {
      LOG.warn("Volume " + v + " is closed, ignore the deletion task for " +
          "block " + invalidBlks[i]);
    }
  }
  if (!errors.isEmpty()) {
    StringBuilder b = new StringBuilder("Failed to delete ")
      .append(errors.size()).append(" (out of ").append(invalidBlks.length)
      .append(") replica(s):");
    for(int i = 0; i < errors.size(); i++) {
      b.append("\n").append(i).append(") ").append(errors.get(i));
    }
    throw new IOException(b.toString());
  }
}
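Two details are worth noting here. First, per-block failures are collected in the errors list so that one bad block does not abort invalidation of the rest; a single IOException summarizing every failure is thrown after the loop. Second, for replicas on transient (RAM disk) storage, the RamDiskReplica entry is discarded from the tracker, and a metric is incremented when a replica is deleted before it could be lazily persisted.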
 
Example #4
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0 (an identical example appears in the big-c project)
/**
 * Checkpoint a pending replica to persistent storage now.
 * If we fail, move the replica to the end of the queue.
 * @return true if there is more work to be done, false otherwise.
 */
private boolean saveNextReplica() {
  RamDiskReplica block = null;
  FsVolumeReference targetReference;
  FsVolumeImpl targetVolume;
  ReplicaInfo replicaInfo;
  boolean succeeded = false;

  try {
    block = ramDiskReplicaTracker.dequeueNextReplicaToPersist();
    if (block != null) {
      synchronized (FsDatasetImpl.this) {
        replicaInfo = volumeMap.get(block.getBlockPoolId(), block.getBlockId());

        // If replicaInfo is null, the block was either deleted before
        // it could be checkpointed or it is already on persistent storage.
        // This can occur if a second replica on persistent storage was found
        // after the lazy write was scheduled.
        if (replicaInfo != null &&
            replicaInfo.getVolume().isTransientStorage()) {
          // Pick a target volume to persist the block.
          targetReference = volumes.getNextVolume(
              StorageType.DEFAULT, replicaInfo.getNumBytes());
          targetVolume = (FsVolumeImpl) targetReference.getVolume();

          ramDiskReplicaTracker.recordStartLazyPersist(
              block.getBlockPoolId(), block.getBlockId(), targetVolume);

          if (LOG.isDebugEnabled()) {
            LOG.debug("LazyWriter: Start persisting RamDisk block:"
                + " block pool Id: " + block.getBlockPoolId()
                + " block id: " + block.getBlockId()
                + " on target volume " + targetVolume);
          }

          asyncLazyPersistService.submitLazyPersistTask(
              block.getBlockPoolId(), block.getBlockId(),
              replicaInfo.getGenerationStamp(), block.getCreationTime(),
              replicaInfo.getMetaFile(), replicaInfo.getBlockFile(),
              targetReference);
        }
      }
    }
    succeeded = true;
  } catch(IOException ioe) {
    LOG.warn("Exception saving replica " + block, ioe);
  } finally {
    if (!succeeded && block != null) {
      LOG.warn("Failed to save replica " + block + ". re-enqueueing it.");
      onFailLazyPersist(block.getBlockPoolId(), block.getBlockId());
    }
  }
  return succeeded;
}
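The happy path dequeues the next RamDiskReplica, records the start of the lazy persist against a target volume, and hands the work to the async lazy persist service; succeeded becomes true only after that submission. If an IOException occurs first, the finally block routes the replica back through onFailLazyPersist() (Example #2), which re-enqueues it in the tracker.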
 
Example #5
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0 (an identical example appears in the big-c project)
/**
 * Attempt to evict one or more transient block replicas until we have at
 * least spaceNeeded bytes free.
 */
private void evictBlocks() throws IOException {
  int iterations = 0;

  while (iterations++ < MAX_BLOCK_EVICTIONS_PER_ITERATION &&
         transientFreeSpaceBelowThreshold()) {
    RamDiskReplica replicaState = ramDiskReplicaTracker.getNextCandidateForEviction();

    if (replicaState == null) {
      break;
    }

    if (LOG.isDebugEnabled()) {
      LOG.debug("Evicting block " + replicaState);
    }

    ReplicaInfo replicaInfo, newReplicaInfo;
    File blockFile, metaFile;
    long blockFileUsed, metaFileUsed;
    final String bpid = replicaState.getBlockPoolId();

    synchronized (FsDatasetImpl.this) {
      replicaInfo = getReplicaInfo(replicaState.getBlockPoolId(), replicaState.getBlockId());
      Preconditions.checkState(replicaInfo.getVolume().isTransientStorage());
      blockFile = replicaInfo.getBlockFile();
      metaFile = replicaInfo.getMetaFile();
      blockFileUsed = blockFile.length();
      metaFileUsed = metaFile.length();
      ramDiskReplicaTracker.discardReplica(replicaState.getBlockPoolId(),
          replicaState.getBlockId(), false);

      // Move the replica from lazyPersist/ to finalized/ on target volume
      BlockPoolSlice bpSlice =
          replicaState.getLazyPersistVolume().getBlockPoolSlice(bpid);
      File newBlockFile = bpSlice.activateSavedReplica(
          replicaInfo, replicaState.getSavedMetaFile(),
          replicaState.getSavedBlockFile());

      newReplicaInfo =
          new FinalizedReplica(replicaInfo.getBlockId(),
                               replicaInfo.getBytesOnDisk(),
                               replicaInfo.getGenerationStamp(),
                               replicaState.getLazyPersistVolume(),
                               newBlockFile.getParentFile());

      // Update the volumeMap entry.
      volumeMap.add(bpid, newReplicaInfo);

      // Update metrics
      datanode.getMetrics().incrRamDiskBlocksEvicted();
      datanode.getMetrics().addRamDiskBlocksEvictionWindowMs(
          Time.monotonicNow() - replicaState.getCreationTime());
      if (replicaState.getNumReads() == 0) {
        datanode.getMetrics().incrRamDiskBlocksEvictedWithoutRead();
      }
    }

    removeOldReplica(replicaInfo, newReplicaInfo, blockFile, metaFile,
        blockFileUsed, metaFileUsed, bpid);
  }
}
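Eviction does not recopy any data: the files already written by the lazy persist pass are activated under finalized/ via BlockPoolSlice.activateSavedReplica() (compare Example #1, which shows another overload of that method), the volumeMap entry is replaced with a FinalizedReplica on the persistent volume, and the old RAM disk copy is removed outside the synchronized block by removeOldReplica().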
 