Java Code Examples for org.apache.hadoop.hdfs.protocol.BlockStoragePolicy#chooseStorageTypes()

The following examples show how to use org.apache.hadoop.hdfs.protocol.BlockStoragePolicy#chooseStorageTypes(). Each example is taken from the project and source file named in its header.
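Before the project examples, here is a minimal, self-contained sketch of the call itself: it builds the default policy suite, looks up a policy by name, and asks it which storage type each replica should use. The suite lookup (BlockStoragePolicySuite.createDefaultSuite(), getPolicy("HOT")) and the StorageType package reflect the Hadoop 2.x layout used by the examples below and may differ in other versions.

import java.util.List;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;

public class ChooseStorageTypesDemo {
  public static void main(String[] args) {
    // Build the default suite of block storage policies (HOT, WARM, COLD, ...).
    BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
    // Look up a policy by name; HOT is the default policy in the suite.
    BlockStoragePolicy policy = suite.getPolicy("HOT");
    // Ask the policy which storage type each of the 3 replicas should use.
    List<StorageType> types = policy.chooseStorageTypes((short) 3);
    System.out.println(policy.getName() + " -> " + types); // e.g. [DISK, DISK, DISK]
  }
}
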
Example 1
Source File: FSDirectory.java    From hadoop with Apache License 2.0
private void verifyQuotaForTruncate(INodesInPath iip, INodeFile file,
    long newLength, QuotaCounts delta) throws QuotaExceededException {
  if (!getFSNamesystem().isImageLoaded() || shouldSkipQuotaChecks()) {
    // Do not check quota if edit log is still being processed
    return;
  }
  final long diff = file.computeQuotaDeltaForTruncate(newLength);
  final short repl = file.getBlockReplication();
  delta.addStorageSpace(diff * repl);
  final BlockStoragePolicy policy = getBlockStoragePolicySuite()
      .getPolicy(file.getStoragePolicyID());
  List<StorageType> types = policy.chooseStorageTypes(repl);
  for (StorageType t : types) {
    if (t.supportTypeQuota()) {
      delta.addTypeSpace(t, diff);
    }
  }
  if (diff > 0) {
    readLock();
    try {
      verifyQuota(iip, iip.length() - 1, delta, null);
    } finally {
      readUnlock();
    }
  }
}
 
Example 2
Source File: TestStorageMover.java    From hadoop with Apache License 2.0
private void verifyFile(final Path parent, final HdfsFileStatus status,
    final Byte expectedPolicyId) throws Exception {
  HdfsLocatedFileStatus fileStatus = (HdfsLocatedFileStatus) status;
  byte policyId = fileStatus.getStoragePolicy();
  BlockStoragePolicy policy = policies.getPolicy(policyId);
  if (expectedPolicyId != null) {
    Assert.assertEquals((byte)expectedPolicyId, policy.getId());
  }
  final List<StorageType> types = policy.chooseStorageTypes(
      status.getReplication());
  for(LocatedBlock lb : fileStatus.getBlockLocations().getLocatedBlocks()) {
    final Mover.StorageTypeDiff diff = new Mover.StorageTypeDiff(types,
        lb.getStorageTypes());
    Assert.assertTrue(fileStatus.getFullName(parent.toString())
        + " with policy " + policy + " has non-empty overlap: " + diff
        + ", the corresponding block is " + lb.getBlock().getLocalBlock(),
        diff.removeOverlap(true));
  }
}
 
Example 3
Source File: INodeFile.java    From hadoop with Apache License 2.0
@Override
public final ContentSummaryComputationContext computeContentSummary(
    final ContentSummaryComputationContext summary) {
  final ContentCounts counts = summary.getCounts();
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  long fileLen = 0;
  if (sf == null) {
    fileLen = computeFileSize();
    counts.addContent(Content.FILE, 1);
  } else {
    final FileDiffList diffs = sf.getDiffs();
    final int n = diffs.asList().size();
    counts.addContent(Content.FILE, n);
    if (n > 0 && sf.isCurrentFileDeleted()) {
      fileLen = diffs.getLast().getFileSize();
    } else {
      fileLen = computeFileSize();
    }
  }
  counts.addContent(Content.LENGTH, fileLen);
  counts.addContent(Content.DISKSPACE, storagespaceConsumed());

  if (getStoragePolicyID() != ID_UNSPECIFIED){
    BlockStoragePolicy bsp = summary.getBlockStoragePolicySuite().
        getPolicy(getStoragePolicyID());
    List<StorageType> storageTypes = bsp.chooseStorageTypes(getFileReplication());
    for (StorageType t : storageTypes) {
      if (!t.supportTypeQuota()) {
        continue;
      }
      counts.addTypeSpace(t, fileLen);
    }
  }
  return summary;
}
 
Example 4
Source File: INodeFile.java    From hadoop with Apache License 2.0
@Override
public final QuotaCounts computeQuotaUsage(
    BlockStoragePolicySuite bsps, byte blockStoragePolicyId,
    QuotaCounts counts, boolean useCache,
    int lastSnapshotId) {
  long nsDelta = 1;
  final long ssDeltaNoReplication;
  short replication;
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    FileDiffList fileDiffList = sf.getDiffs();
    int last = fileDiffList.getLastSnapshotId();

    if (lastSnapshotId == Snapshot.CURRENT_STATE_ID
        || last == Snapshot.CURRENT_STATE_ID) {
      ssDeltaNoReplication = storagespaceConsumedNoReplication();
      replication = getBlockReplication();
    } else if (last < lastSnapshotId) {
      ssDeltaNoReplication = computeFileSize(true, false);
      replication = getFileReplication();
    } else {
      int sid = fileDiffList.getSnapshotById(lastSnapshotId);
      ssDeltaNoReplication = storagespaceConsumedNoReplication(sid);
      replication = getReplication(sid);
    }
  } else {
    ssDeltaNoReplication = storagespaceConsumedNoReplication();
    replication = getBlockReplication();
  }
  counts.addNameSpace(nsDelta);
  counts.addStorageSpace(ssDeltaNoReplication * replication);

  if (blockStoragePolicyId != ID_UNSPECIFIED){
    BlockStoragePolicy bsp = bsps.getPolicy(blockStoragePolicyId);
    List<StorageType> storageTypes = bsp.chooseStorageTypes(replication);
    for (StorageType t : storageTypes) {
      if (!t.supportTypeQuota()) {
        continue;
      }
      counts.addTypeSpace(t, ssDeltaNoReplication);
    }
  }
  return counts;
}
 
Example 5
Source File: BlockPlacementPolicyDefault.java    From hadoop with Apache License 2.0
@Override
DatanodeStorageInfo[] chooseTarget(String src,
    int numOfReplicas,
    Node writer,
    Set<Node> excludedNodes,
    long blocksize,
    List<DatanodeDescriptor> favoredNodes,
    BlockStoragePolicy storagePolicy) {
  try {
    if (favoredNodes == null || favoredNodes.size() == 0) {
      // Favored nodes not specified, fall back to regular block placement.
      return chooseTarget(src, numOfReplicas, writer,
          new ArrayList<DatanodeStorageInfo>(numOfReplicas), false, 
          excludedNodes, blocksize, storagePolicy);
    }

    Set<Node> favoriteAndExcludedNodes = excludedNodes == null ?
        new HashSet<Node>() : new HashSet<Node>(excludedNodes);
    final List<StorageType> requiredStorageTypes = storagePolicy
        .chooseStorageTypes((short)numOfReplicas);
    final EnumMap<StorageType, Integer> storageTypes =
        getRequiredStorageTypes(requiredStorageTypes);

    // Choose favored nodes
    List<DatanodeStorageInfo> results = new ArrayList<DatanodeStorageInfo>();
    boolean avoidStaleNodes = stats != null
        && stats.isAvoidingStaleDataNodesForWrite();

    int maxNodesAndReplicas[] = getMaxNodesPerRack(0, numOfReplicas);
    numOfReplicas = maxNodesAndReplicas[0];
    int maxNodesPerRack = maxNodesAndReplicas[1];

    for (int i = 0; i < favoredNodes.size() && results.size() < numOfReplicas; i++) {
      DatanodeDescriptor favoredNode = favoredNodes.get(i);
      // Choose a single node which is local to favoredNode.
      // 'results' is updated within chooseLocalNode
      final DatanodeStorageInfo target = chooseLocalStorage(favoredNode,
          favoriteAndExcludedNodes, blocksize, maxNodesPerRack,
          results, avoidStaleNodes, storageTypes, false);
      if (target == null) {
        LOG.warn("Could not find a target for file " + src
            + " with favored node " + favoredNode); 
        continue;
      }
      favoriteAndExcludedNodes.add(target.getDatanodeDescriptor());
    }

    if (results.size() < numOfReplicas) {
      // Not enough favored nodes, choose other nodes.
      numOfReplicas -= results.size();
      DatanodeStorageInfo[] remainingTargets = 
          chooseTarget(src, numOfReplicas, writer, results,
              false, favoriteAndExcludedNodes, blocksize, storagePolicy);
      for (int i = 0; i < remainingTargets.length; i++) {
        results.add(remainingTargets[i]);
      }
    }
    return getPipeline(writer,
        results.toArray(new DatanodeStorageInfo[results.size()]));
  } catch (NotEnoughReplicasException nr) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Failed to choose with favored nodes (=" + favoredNodes
          + "), disregard favored nodes hint and retry.", nr);
    }
    // Fall back to regular block placement disregarding favored nodes hint
    return chooseTarget(src, numOfReplicas, writer, 
        new ArrayList<DatanodeStorageInfo>(numOfReplicas), false, 
        excludedNodes, blocksize, storagePolicy);
  }
}
 
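The BlockPlacementPolicyDefault example above folds the list returned by chooseStorageTypes() into an EnumMap<StorageType, Integer> via getRequiredStorageTypes(), whose body is not shown here. A minimal, hypothetical sketch of that counting step (illustrative only, not the Hadoop implementation) could look like this:

import java.util.EnumMap;
import java.util.List;

import org.apache.hadoop.fs.StorageType;

final class StorageTypeCounts {
  // Count how many replicas each required storage type must receive,
  // e.g. [SSD, DISK, DISK] -> {DISK=2, SSD=1}. Hypothetical helper for illustration.
  static EnumMap<StorageType, Integer> countRequiredStorageTypes(
      List<StorageType> requiredTypes) {
    EnumMap<StorageType, Integer> counts =
        new EnumMap<StorageType, Integer>(StorageType.class);
    for (StorageType t : requiredTypes) {
      Integer current = counts.get(t);
      counts.put(t, current == null ? 1 : current + 1);
    }
    return counts;
  }
}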