Java Code Examples for org.apache.hadoop.hdfs.server.datanode.ReplicaInfo#getVolume()

The following examples show how to use org.apache.hadoop.hdfs.server.datanode.ReplicaInfo#getVolume(). All of them are drawn from FsDatasetImpl.java in the Hadoop project (the big-c fork carries identical copies of the same methods), and each header notes the source file and license.
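The examples share one defensive idiom around getVolume(): fetch the ReplicaInfo from the dataset's volume map, cast the returned FsVolumeSpi to the concrete FsVolumeImpl, and null-check the result before touching disk state. A minimal sketch of that idiom, reusing the volumeMap, bpid, and blockId names from the examples (illustrative fragment only, not a complete method):

// Recurring pattern (names mirror the examples below).
ReplicaInfo info = volumeMap.get(bpid, blockId);
if (info == null) {
  return;                       // replica unknown on this datanode
}
// getVolume() returns the FsVolumeSpi interface; FsDatasetImpl
// works with its concrete volume type, hence the cast.
FsVolumeImpl v = (FsVolumeImpl) info.getVolume();
if (v == null) {
  throw new IOException("No volume for block " + info);
}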
Example 1
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
private synchronized FinalizedReplica finalizeReplica(String bpid,
    ReplicaInfo replicaInfo) throws IOException {
  FinalizedReplica newReplicaInfo = null;
  if (replicaInfo.getState() == ReplicaState.RUR &&
     ((ReplicaUnderRecovery)replicaInfo).getOriginalReplica().getState() == 
       ReplicaState.FINALIZED) {
    newReplicaInfo = (FinalizedReplica)
           ((ReplicaUnderRecovery)replicaInfo).getOriginalReplica();
  } else {
    FsVolumeImpl v = (FsVolumeImpl)replicaInfo.getVolume();
    File f = replicaInfo.getBlockFile();
    if (v == null) {
      throw new IOException("No volume for temporary file " + f + 
          " for block " + replicaInfo);
    }

    File dest = v.addFinalizedBlock(
        bpid, replicaInfo, f, replicaInfo.getBytesReserved());
    newReplicaInfo = new FinalizedReplica(replicaInfo, v, dest.getParentFile());

    if (v.isTransientStorage()) {
      ramDiskReplicaTracker.addReplica(bpid, replicaInfo.getBlockId(), v);
      datanode.getMetrics().addRamDiskBytesWrite(replicaInfo.getNumBytes());
    }
  }
  volumeMap.add(bpid, newReplicaInfo);

  return newReplicaInfo;
}
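The null check here is not decorative: a replica can remain in the volume map while its temporary file has lost its backing volume, and finalizing would otherwise try to move the block file onto a missing disk. Note also the RUR (replica-under-recovery) shortcut at the top, which reuses the already-finalized original replica and never needs to resolve a volume at all.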
 
Example 2
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
@Override
public synchronized FsVolumeImpl getVolume(final ExtendedBlock b) {
  final ReplicaInfo r = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
  return r != null ? (FsVolumeImpl) r.getVolume() : null;
}
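This accessor is the dataset's inverse lookup: given an ExtendedBlock it resolves the volume that holds the replica, or null when the block is unknown. A hypothetical caller sketch (fsDataset, bpid, and blockId are illustrative names, not from this page):

// Hypothetical usage: detect RAM-backed volumes before acting on a block.
ExtendedBlock blk = new ExtendedBlock(bpid, blockId);
FsVolumeImpl vol = fsDataset.getVolume(blk);
if (vol != null && vol.isTransientStorage()) {
  // skip work that transient (RAM-backed) storage does not support
}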
 
Example 3
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
/**
 * We're informed that a block is no longer valid.  We
 * could lazily garbage-collect the block, but why bother?
 * just get rid of it.
 */
@Override // FsDatasetSpi
public void invalidate(String bpid, Block invalidBlks[]) throws IOException {
  final List<String> errors = new ArrayList<String>();
  for (int i = 0; i < invalidBlks.length; i++) {
    final File f;
    final FsVolumeImpl v;
    synchronized (this) {
      final ReplicaInfo info = volumeMap.get(bpid, invalidBlks[i]);
      if (info == null) {
        // It is okay if the block is not found -- it may be deleted earlier.
        LOG.info("Failed to delete replica " + invalidBlks[i]
            + ": ReplicaInfo not found.");
        continue;
      }
      if (info.getGenerationStamp() != invalidBlks[i].getGenerationStamp()) {
        errors.add("Failed to delete replica " + invalidBlks[i]
            + ": GenerationStamp not matched, info=" + info);
        continue;
      }
      f = info.getBlockFile();
      v = (FsVolumeImpl)info.getVolume();
      if (v == null) {
        errors.add("Failed to delete replica " + invalidBlks[i]
            +  ". No volume for this replica, file=" + f);
        continue;
      }
      File parent = f.getParentFile();
      if (parent == null) {
        errors.add("Failed to delete replica " + invalidBlks[i]
            +  ". Parent not found for file " + f);
        continue;
      }
      ReplicaInfo removing = volumeMap.remove(bpid, invalidBlks[i]);
      addDeletingBlock(bpid, removing.getBlockId());
      if (LOG.isDebugEnabled()) {
        LOG.debug("Block file " + removing.getBlockFile().getName()
            + " is to be deleted");
      }
    }

    if (v.isTransientStorage()) {
      RamDiskReplica replicaInfo =
        ramDiskReplicaTracker.getReplica(bpid, invalidBlks[i].getBlockId());
      if (replicaInfo != null) {
        if (!replicaInfo.getIsPersisted()) {
          datanode.getMetrics().incrRamDiskBlocksDeletedBeforeLazyPersisted();
        }
        ramDiskReplicaTracker.discardReplica(replicaInfo.getBlockPoolId(),
          replicaInfo.getBlockId(), true);
      }
    }

    // If a DFSClient has the replica in its cache of short-circuit file
    // descriptors (and the client is using ShortCircuitShm), invalidate it.
    datanode.getShortCircuitRegistry().processBlockInvalidation(
              new ExtendedBlockId(invalidBlks[i].getBlockId(), bpid));

    // If the block is cached, start uncaching it.
    cacheManager.uncacheBlock(bpid, invalidBlks[i].getBlockId());

    // Delete the block asynchronously to make sure we can do it fast enough.
    // It's ok to unlink the block file before the uncache operation
    // finishes.
    try {
      asyncDiskService.deleteAsync(v.obtainReference(), f,
          FsDatasetUtil.getMetaFile(f, invalidBlks[i].getGenerationStamp()),
          new ExtendedBlock(bpid, invalidBlks[i]),
          dataStorage.getTrashDirectoryForBlockFile(bpid, f));
    } catch (ClosedChannelException e) {
      LOG.warn("Volume " + v + " is closed, ignore the deletion task for " +
          "block " + invalidBlks[i]);
    }
  }
  if (!errors.isEmpty()) {
    StringBuilder b = new StringBuilder("Failed to delete ")
      .append(errors.size()).append(" (out of ").append(invalidBlks.length)
      .append(") replica(s):");
    for(int i = 0; i < errors.size(); i++) {
      b.append("\n").append(i).append(") ").append(errors.get(i));
    }
    throw new IOException(b.toString());
  }
}
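In this deletion path getVolume() is load-bearing: the block file f and volume v are captured together under the dataset lock, and the asynchronous delete later calls v.obtainReference() so the volume stays open while the unlink runs. A null volume is recorded as an error for that block and the loop moves on, so one bad replica does not abort the whole batch; the accumulated errors surface as a single IOException at the end.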
 
Example 4
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
/**
 * Asynchronously attempts to cache a single block via {@link FsDatasetCache}.
 */
private void cacheBlock(String bpid, long blockId) {
  FsVolumeImpl volume;
  String blockFileName;
  long length, genstamp;
  Executor volumeExecutor;

  synchronized (this) {
    ReplicaInfo info = volumeMap.get(bpid, blockId);
    boolean success = false;
    try {
      if (info == null) {
        LOG.warn("Failed to cache block with id " + blockId + ", pool " +
            bpid + ": ReplicaInfo not found.");
        return;
      }
      if (info.getState() != ReplicaState.FINALIZED) {
        LOG.warn("Failed to cache block with id " + blockId + ", pool " +
            bpid + ": replica is not finalized; it is in state " +
            info.getState());
        return;
      }
      try {
        volume = (FsVolumeImpl)info.getVolume();
        if (volume == null) {
          LOG.warn("Failed to cache block with id " + blockId + ", pool " +
              bpid + ": volume not found.");
          return;
        }
      } catch (ClassCastException e) {
        LOG.warn("Failed to cache block with id " + blockId +
            ": volume was not an instance of FsVolumeImpl.");
        return;
      }
      if (volume.isTransientStorage()) {
        LOG.warn("Caching not supported on block with id " + blockId +
            " since the volume is backed by RAM.");
        return;
      }
      success = true;
    } finally {
      if (!success) {
        cacheManager.numBlocksFailedToCache.incrementAndGet();
      }
    }
    blockFileName = info.getBlockFile().getAbsolutePath();
    length = info.getVisibleLength();
    genstamp = info.getGenerationStamp();
    volumeExecutor = volume.getCacheExecutor();
  }
  cacheManager.cacheBlock(blockId, bpid, 
      blockFileName, length, genstamp, volumeExecutor);
}
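This is the most defensive call site on the page: beyond the null check, the cast of getVolume()'s result to FsVolumeImpl sits inside a catch for ClassCastException, so a dataset wired to some other FsVolumeSpi implementation degrades to a warning plus a numBlocksFailedToCache increment instead of letting an unchecked exception escape the caching path. Everything that needs the lock (file name, length, genstamp, executor) is copied out before the actual cache request is issued.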
 
Example 5
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
@Override // FsDatasetSpi
public HdfsBlocksMetadata getHdfsBlocksMetadata(String poolId,
    long[] blockIds) throws IOException {
  List<FsVolumeImpl> curVolumes = getVolumes();
  // List of VolumeIds, one per volume on the datanode
  List<byte[]> blocksVolumeIds = new ArrayList<>(curVolumes.size());
  // List of indexes into the list of VolumeIds, pointing at the VolumeId of
  // the volume that the block is on
  List<Integer> blocksVolumeIndexes = new ArrayList<Integer>(blockIds.length);
  // Initialize the list of VolumeIds simply by enumerating the volumes
  for (int i = 0; i < curVolumes.size(); i++) {
    blocksVolumeIds.add(ByteBuffer.allocate(4).putInt(i).array());
  }
  // Determine the index of the VolumeId of each block's volume, by comparing 
  // the block's volume against the enumerated volumes
  for (int i = 0; i < blockIds.length; i++) {
    long blockId = blockIds[i];
    boolean isValid = false;

    ReplicaInfo info = volumeMap.get(poolId, blockId);
    int volumeIndex = 0;
    if (info != null) {
      FsVolumeSpi blockVolume = info.getVolume();
      for (FsVolumeImpl volume : curVolumes) {
        // This comparison of references should be safe
        if (blockVolume == volume) {
          isValid = true;
          break;
        }
        volumeIndex++;
      }
    }
    // Indicates that the block is not present, or not found in a data dir
    if (!isValid) {
      volumeIndex = Integer.MAX_VALUE;
    }
    blocksVolumeIndexes.add(volumeIndex);
  }
  return new HdfsBlocksMetadata(poolId, blockIds,
      blocksVolumeIds, blocksVolumeIndexes);
}
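Unlike the other examples, this one never casts: the FsVolumeSpi returned by getVolume() is only compared by reference against the dataset's current volume list, which works because both sides come from the same bookkeeping (the in-code comment makes the same point). A replica whose volume is no longer in the list, or a block with no replica at all, gets the sentinel index Integer.MAX_VALUE.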
 