org.apache.hadoop.hdfs.protocol.BlockStoragePolicy Java Examples

The following examples show how to use org.apache.hadoop.hdfs.protocol.BlockStoragePolicy, the class that describes how HDFS chooses storage types for the replicas of a block and which fallback types to use when the preferred ones are unavailable. Each example is taken from an open-source project; the source file and license are noted above the code.
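For orientation before the excerpts, here is a minimal, self-contained sketch of typical client-side usage. It assumes a reachable HDFS cluster configured via fs.defaultFS; the ListPolicies class name and the /data/reports path are illustrative only, not part of any example below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;

public class ListPolicies {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at an HDFS namenode, so the cast is safe.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    // Ask the namenode for its policy suite (HOT, WARM, COLD, ...).
    for (BlockStoragePolicy policy : dfs.getStoragePolicies()) {
      System.out.println(policy.getId() + "\t" + policy.getName());
    }
    // Apply a policy by name; /data/reports is a hypothetical path.
    dfs.setStoragePolicy(new Path("/data/reports"), "COLD");
  }
}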
Example #1
Source File: PBHelper.java    From hadoop with Apache License 2.0
public static BlockStoragePolicyProto convert(BlockStoragePolicy policy) {
  BlockStoragePolicyProto.Builder builder = BlockStoragePolicyProto
      .newBuilder().setPolicyId(policy.getId()).setName(policy.getName());
  // creation storage types
  StorageTypesProto creationProto = convert(policy.getStorageTypes());
  Preconditions.checkArgument(creationProto != null);
  builder.setCreationPolicy(creationProto);
  // creation fallback
  StorageTypesProto creationFallbackProto = convert(
      policy.getCreationFallbacks());
  if (creationFallbackProto != null) {
    builder.setCreationFallbackPolicy(creationFallbackProto);
  }
  // replication fallback
  StorageTypesProto replicationFallbackProto = convert(
      policy.getReplicationFallbacks());
  if (replicationFallbackProto != null) {
    builder.setReplicationFallbackPolicy(replicationFallbackProto);
  }
  return builder.build();
}
 
Example #2
Source File: BlockManager.java    From hadoop with Apache License 2.0
/**
 * Choose target datanodes for creating a new block.
 * 
 * @throws IOException
 *           if the number of targets < minimum replication.
 * @see BlockPlacementPolicy#chooseTarget(String, int, Node,
 *      Set, long, List, BlockStoragePolicy)
 */
public DatanodeStorageInfo[] chooseTarget4NewBlock(final String src,
    final int numOfReplicas, final Node client,
    final Set<Node> excludedNodes,
    final long blocksize,
    final List<String> favoredNodes,
    final byte storagePolicyID) throws IOException {
  List<DatanodeDescriptor> favoredDatanodeDescriptors = 
      getDatanodeDescriptors(favoredNodes);
  final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(storagePolicyID);
  final DatanodeStorageInfo[] targets = blockplacement.chooseTarget(src,
      numOfReplicas, client, excludedNodes, blocksize, 
      favoredDatanodeDescriptors, storagePolicy);
  if (targets.length < minReplication) {
    throw new IOException("File " + src + " could only be replicated to "
        + targets.length + " nodes instead of minReplication (="
        + minReplication + ").  There are "
        + getDatanodeManager().getNetworkTopology().getNumOfLeaves()
        + " datanode(s) running and "
        + (excludedNodes == null? "no": excludedNodes.size())
        + " node(s) are excluded in this operation.");
  }
  return targets;
}
 
Example #3
Source File: TestDeleteRace.java    From hadoop with Apache License 2.0
@Override
public DatanodeStorageInfo[] chooseTarget(String srcPath,
                                  int numOfReplicas,
                                  Node writer,
                                  List<DatanodeStorageInfo> chosenNodes,
                                  boolean returnChosenNodes,
                                  Set<Node> excludedNodes,
                                  long blocksize,
                                  final BlockStoragePolicy storagePolicy) {
  DatanodeStorageInfo[] results = super.chooseTarget(srcPath,
      numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes,
      blocksize, storagePolicy);
  try {
    // Sleep inside chooseTarget so TestDeleteRace can provoke the race window.
    Thread.sleep(3000);
  } catch (InterruptedException e) {
    // Best-effort delay; interruption is simply ignored.
  }
  return results;
}
 
Example #4
Source File: TestStorageMover.java    From big-c with Apache License 2.0
private void verifyFile(final Path parent, final HdfsFileStatus status,
    final Byte expectedPolicyId) throws Exception {
  HdfsLocatedFileStatus fileStatus = (HdfsLocatedFileStatus) status;
  byte policyId = fileStatus.getStoragePolicy();
  BlockStoragePolicy policy = policies.getPolicy(policyId);
  if (expectedPolicyId != null) {
    Assert.assertEquals((byte)expectedPolicyId, policy.getId());
  }
  final List<StorageType> types = policy.chooseStorageTypes(
      status.getReplication());
  for(LocatedBlock lb : fileStatus.getBlockLocations().getLocatedBlocks()) {
    final Mover.StorageTypeDiff diff = new Mover.StorageTypeDiff(types,
        lb.getStorageTypes());
    Assert.assertTrue(fileStatus.getFullName(parent.toString())
        + " with policy " + policy + " has non-empty overlap: " + diff
        + ", the corresponding block is " + lb.getBlock().getLocalBlock(),
        diff.removeOverlap(true));
  }
}
 
Example #5
Source File: TestHFileOutputFormat2.java    From hbase with Apache License 2.0
private String getStoragePolicyNameForOldHDFSVersion(FileSystem fs, Path path) {
  try {
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      HdfsFileStatus status = dfs.getClient().getFileInfo(path.toUri().getPath());
      if (null != status) {
        byte storagePolicyId = status.getStoragePolicy();
        // Older HDFS versions expose the unset-policy marker as a public
        // ID_UNSPECIFIED field, so it is read reflectively for compatibility.
        Field idUnspecified = BlockStoragePolicySuite.class.getField("ID_UNSPECIFIED");
        if (storagePolicyId != idUnspecified.getByte(BlockStoragePolicySuite.class)) {
          BlockStoragePolicy[] policies = dfs.getStoragePolicies();
          for (BlockStoragePolicy policy : policies) {
            if (policy.getId() == storagePolicyId) {
              return policy.getName();
            }
          }
        }
      }
    }
  } catch (Throwable e) {
    LOG.warn("failed to get block storage policy of [" + path + "]", e);
  }

  return null;
}
 
Example #6
Source File: TestStoragePolicySummary.java    From hadoop with Apache License 2.0
@Test
public void testMultipleHotsWithDifferentCounts() {
  BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
  StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
  BlockStoragePolicy hot = bsps.getPolicy("HOT");
  sts.add(new StorageType[]{StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK,StorageType.DISK,StorageType.DISK},hot);
  Map<String, Long> actualOutput = convertToStringMap(sts);
  Assert.assertEquals(4,actualOutput.size());
  Map<String, Long> expectedOutput = new HashMap<>();
  expectedOutput.put("HOT|DISK:1(HOT)", 1l);
  expectedOutput.put("HOT|DISK:2(HOT)", 2l);
  expectedOutput.put("HOT|DISK:3(HOT)", 2l);
  expectedOutput.put("HOT|DISK:4(HOT)", 1l);
  Assert.assertEquals(expectedOutput,actualOutput);
}
 
Example #7
Source File: BlockManager.java    From big-c with Apache License 2.0
/**
 * Choose target datanodes for creating a new block.
 * 
 * @throws IOException
 *           if the number of targets < minimum replication.
 * @see BlockPlacementPolicy#chooseTarget(String, int, Node,
 *      Set, long, List, BlockStoragePolicy)
 */
public DatanodeStorageInfo[] chooseTarget4NewBlock(final String src,
    final int numOfReplicas, final Node client,
    final Set<Node> excludedNodes,
    final long blocksize,
    final List<String> favoredNodes,
    final byte storagePolicyID) throws IOException {
  List<DatanodeDescriptor> favoredDatanodeDescriptors = 
      getDatanodeDescriptors(favoredNodes);
  final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(storagePolicyID);
  final DatanodeStorageInfo[] targets = blockplacement.chooseTarget(src,
      numOfReplicas, client, excludedNodes, blocksize, 
      favoredDatanodeDescriptors, storagePolicy);
  if (targets.length < minReplication) {
    throw new IOException("File " + src + " could only be replicated to "
        + targets.length + " nodes instead of minReplication (="
        + minReplication + ").  There are "
        + getDatanodeManager().getNetworkTopology().getNumOfLeaves()
        + " datanode(s) running and "
        + (excludedNodes == null? "no": excludedNodes.size())
        + " node(s) are excluded in this operation.");
  }
  return targets;
}
 
Example #8
Source File: TestDeleteRace.java    From big-c with Apache License 2.0
@Override
public DatanodeStorageInfo[] chooseTarget(String srcPath,
                                  int numOfReplicas,
                                  Node writer,
                                  List<DatanodeStorageInfo> chosenNodes,
                                  boolean returnChosenNodes,
                                  Set<Node> excludedNodes,
                                  long blocksize,
                                  final BlockStoragePolicy storagePolicy) {
  DatanodeStorageInfo[] results = super.chooseTarget(srcPath,
      numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes,
      blocksize, storagePolicy);
  try {
    // Sleep inside chooseTarget so TestDeleteRace can provoke the race window.
    Thread.sleep(3000);
  } catch (InterruptedException e) {
    // Best-effort delay; interruption is simply ignored.
  }
  return results;
}
 
Example #9
Source File: TestStorageMover.java    From hadoop with Apache License 2.0
private void verifyFile(final Path parent, final HdfsFileStatus status,
    final Byte expectedPolicyId) throws Exception {
  HdfsLocatedFileStatus fileStatus = (HdfsLocatedFileStatus) status;
  byte policyId = fileStatus.getStoragePolicy();
  BlockStoragePolicy policy = policies.getPolicy(policyId);
  if (expectedPolicyId != null) {
    Assert.assertEquals((byte)expectedPolicyId, policy.getId());
  }
  final List<StorageType> types = policy.chooseStorageTypes(
      status.getReplication());
  for(LocatedBlock lb : fileStatus.getBlockLocations().getLocatedBlocks()) {
    final Mover.StorageTypeDiff diff = new Mover.StorageTypeDiff(types,
        lb.getStorageTypes());
    Assert.assertTrue(fileStatus.getFullName(parent.toString())
        + " with policy " + policy + " has non-empty overlap: " + diff
        + ", the corresponding block is " + lb.getBlock().getLocalBlock(),
        diff.removeOverlap(true));
  }
}
 
Example #10
Source File: StoragePolicyAdmin.java    From big-c with Apache License 2.0
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
  try {
    BlockStoragePolicy[] policies = dfs.getStoragePolicies();
    System.out.println("Block Storage Policies:");
    for (BlockStoragePolicy policy : policies) {
      if (policy != null) {
        System.out.println("\t" + policy);
      }
    }
  } catch (IOException e) {
    System.err.println(AdminHelper.prettifyException(e));
    return 2;
  }
  return 0;
}
 
Example #11
Source File: TestStoragePolicySummary.java    From big-c with Apache License 2.0
@Test
public void testMultipleHots() {
  BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
  StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
  BlockStoragePolicy hot = bsps.getPolicy("HOT");
  sts.add(new StorageType[]{StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK,StorageType.DISK,StorageType.DISK},hot);
  Map<String, Long> actualOutput = convertToStringMap(sts);
  Assert.assertEquals(4,actualOutput.size());
  Map<String, Long> expectedOutput = new HashMap<>();
  expectedOutput.put("HOT|DISK:1(HOT)", 1L);
  expectedOutput.put("HOT|DISK:2(HOT)", 1L);
  expectedOutput.put("HOT|DISK:3(HOT)", 1L);
  expectedOutput.put("HOT|DISK:4(HOT)", 1L);
  Assert.assertEquals(expectedOutput,actualOutput);
}
 
Example #12
Source File: FSDirectory.java    From big-c with Apache License 2.0
private void verifyQuotaForTruncate(INodesInPath iip, INodeFile file,
    long newLength, QuotaCounts delta) throws QuotaExceededException {
  if (!getFSNamesystem().isImageLoaded() || shouldSkipQuotaChecks()) {
    // Do not check quota if edit log is still being processed
    return;
  }
  final long diff = file.computeQuotaDeltaForTruncate(newLength);
  final short repl = file.getBlockReplication();
  delta.addStorageSpace(diff * repl);
  final BlockStoragePolicy policy = getBlockStoragePolicySuite()
      .getPolicy(file.getStoragePolicyID());
  List<StorageType> types = policy.chooseStorageTypes(repl);
  for (StorageType t : types) {
    if (t.supportTypeQuota()) {
      delta.addTypeSpace(t, diff);
    }
  }
  if (diff > 0) {
    readLock();
    try {
      verifyQuota(iip, iip.length() - 1, delta, null);
    } finally {
      readUnlock();
    }
  }
}
 
Example #13
Source File: ClientNamenodeProtocolServerSideTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public GetStoragePoliciesResponseProto getStoragePolicies(
    RpcController controller, GetStoragePoliciesRequestProto request)
    throws ServiceException {
  try {
    BlockStoragePolicy[] policies = server.getStoragePolicies();
    GetStoragePoliciesResponseProto.Builder builder =
        GetStoragePoliciesResponseProto.newBuilder();
    if (policies == null) {
      return builder.build();
    }
    for (BlockStoragePolicy policy : policies) {
      builder.addPolicies(PBHelper.convert(policy));
    }
    return builder.build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
 
Example #14
Source File: FSDirectory.java    From hadoop with Apache License 2.0
private void verifyQuotaForTruncate(INodesInPath iip, INodeFile file,
    long newLength, QuotaCounts delta) throws QuotaExceededException {
  if (!getFSNamesystem().isImageLoaded() || shouldSkipQuotaChecks()) {
    // Do not check quota if edit log is still being processed
    return;
  }
  final long diff = file.computeQuotaDeltaForTruncate(newLength);
  final short repl = file.getBlockReplication();
  delta.addStorageSpace(diff * repl);
  final BlockStoragePolicy policy = getBlockStoragePolicySuite()
      .getPolicy(file.getStoragePolicyID());
  List<StorageType> types = policy.chooseStorageTypes(repl);
  for (StorageType t : types) {
    if (t.supportTypeQuota()) {
      delta.addTypeSpace(t, diff);
    }
  }
  if (diff > 0) {
    readLock();
    try {
      verifyQuota(iip, iip.length() - 1, delta, null);
    } finally {
      readUnlock();
    }
  }
}
 
Example #15
Source File: PBHelper.java    From big-c with Apache License 2.0
public static BlockStoragePolicyProto convert(BlockStoragePolicy policy) {
  BlockStoragePolicyProto.Builder builder = BlockStoragePolicyProto
      .newBuilder().setPolicyId(policy.getId()).setName(policy.getName());
  // creation storage types
  StorageTypesProto creationProto = convert(policy.getStorageTypes());
  Preconditions.checkArgument(creationProto != null);
  builder.setCreationPolicy(creationProto);
  // creation fallback
  StorageTypesProto creationFallbackProto = convert(
      policy.getCreationFallbacks());
  if (creationFallbackProto != null) {
    builder.setCreationFallbackPolicy(creationFallbackProto);
  }
  // replication fallback
  StorageTypesProto replicationFallbackProto = convert(
      policy.getReplicationFallbacks());
  if (replicationFallbackProto != null) {
    builder.setReplicationFallbackPolicy(replicationFallbackProto);
  }
  return builder.build();
}
 
Example #16
Source File: TestStoragePolicySummary.java    From big-c with Apache License 2.0
@Test
public void testMultipleHotsWithDifferentCounts() {
  BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
  StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
  BlockStoragePolicy hot = bsps.getPolicy("HOT");
  sts.add(new StorageType[]{StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK,StorageType.DISK},hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK,StorageType.DISK,StorageType.DISK},hot);
  Map<String, Long> actualOutput = convertToStringMap(sts);
  Assert.assertEquals(4,actualOutput.size());
  Map<String, Long> expectedOutput = new HashMap<>();
  expectedOutput.put("HOT|DISK:1(HOT)", 1l);
  expectedOutput.put("HOT|DISK:2(HOT)", 2l);
  expectedOutput.put("HOT|DISK:3(HOT)", 2l);
  expectedOutput.put("HOT|DISK:4(HOT)", 1l);
  Assert.assertEquals(expectedOutput,actualOutput);
}
 
Example #17
Source File: TestStoragePolicySummary.java    From big-c with Apache License 2.0
@Test
public void testMultipleWarmsInDifferentOrder() {
  BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
  StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
  BlockStoragePolicy warm = bsps.getPolicy("WARM");
  //DISK:1,ARCHIVE:1
  sts.add(new StorageType[]{StorageType.DISK,StorageType.ARCHIVE},warm);
  sts.add(new StorageType[]{StorageType.ARCHIVE,StorageType.DISK},warm);
  //DISK:2,ARCHIVE:1
  sts.add(new StorageType[]{StorageType.ARCHIVE,
      StorageType.DISK,StorageType.DISK},warm);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.ARCHIVE,StorageType.DISK},warm);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK,StorageType.ARCHIVE},warm);
  //DISK:1,ARCHIVE:2
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.ARCHIVE,StorageType.ARCHIVE},warm);
  sts.add(new StorageType[]{StorageType.ARCHIVE,
      StorageType.DISK,StorageType.ARCHIVE},warm);
  sts.add(new StorageType[]{StorageType.ARCHIVE,
      StorageType.ARCHIVE,StorageType.DISK},warm);
  //DISK:2,ARCHIVE:2
  sts.add(new StorageType[]{StorageType.ARCHIVE,
      StorageType.ARCHIVE,StorageType.DISK,StorageType.DISK},warm);
  Map<String, Long> actualOutput = convertToStringMap(sts);
  Assert.assertEquals(4,actualOutput.size());
  Map<String, Long> expectedOutput = new HashMap<>();
  expectedOutput.put("WARM|DISK:1,ARCHIVE:1(WARM)", 2L);
  expectedOutput.put("WARM|DISK:2,ARCHIVE:1", 3L);
  expectedOutput.put("WARM|DISK:1,ARCHIVE:2(WARM)", 3L);
  expectedOutput.put("WARM|DISK:2,ARCHIVE:2", 1L);
  Assert.assertEquals(expectedOutput,actualOutput);
}
 
Example #18
Source File: StoragePolicySummary.java    From big-c with Apache License 2.0
/**
 * @param storageTypes sorted array of storage types for a block's replicas
 * @return the storage policy that matches this storage-type combination,
 *         or null if no policy matches
 */
private BlockStoragePolicy getStoragePolicy(StorageType[] storageTypes) {
  for (BlockStoragePolicy storagePolicy : storagePolicies) {
    StorageType[] policyStorageTypes = storagePolicy.getStorageTypes();
    // Sort a copy of the policy's types so both arrays compare in the same order.
    policyStorageTypes = Arrays.copyOf(policyStorageTypes, policyStorageTypes.length);
    Arrays.sort(policyStorageTypes);
    if (policyStorageTypes.length <= storageTypes.length) {
      // The policy's sorted types must be a prefix of the block's sorted types.
      int i = 0;
      for (; i < policyStorageTypes.length; i++) {
        if (policyStorageTypes[i] != storageTypes[i]) {
          break;
        }
      }
      if (i < policyStorageTypes.length) {
        continue;
      }
      // Any remaining replicas must all use the policy's last (sorted) type.
      int j = policyStorageTypes.length;
      for (; j < storageTypes.length; j++) {
        if (policyStorageTypes[i - 1] != storageTypes[j]) {
          break;
        }
      }
      if (j == storageTypes.length) {
        return storagePolicy;
      }
    }
  }
  return null;
}
 
Example #19
Source File: TestStoragePolicySummary.java    From hadoop with Apache License 2.0
@Test
public void testMultipleWarmsInDifferentOrder() {
  BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
  StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
  BlockStoragePolicy warm = bsps.getPolicy("WARM");
  //DISK:1,ARCHIVE:1
  sts.add(new StorageType[]{StorageType.DISK,StorageType.ARCHIVE},warm);
  sts.add(new StorageType[]{StorageType.ARCHIVE,StorageType.DISK},warm);
  //DISK:2,ARCHIVE:1
  sts.add(new StorageType[]{StorageType.ARCHIVE,
      StorageType.DISK,StorageType.DISK},warm);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.ARCHIVE,StorageType.DISK},warm);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK,StorageType.ARCHIVE},warm);
  //DISK:1,ARCHIVE:2
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.ARCHIVE,StorageType.ARCHIVE},warm);
  sts.add(new StorageType[]{StorageType.ARCHIVE,
      StorageType.DISK,StorageType.ARCHIVE},warm);
  sts.add(new StorageType[]{StorageType.ARCHIVE,
      StorageType.ARCHIVE,StorageType.DISK},warm);
  //DISK:2,ARCHIVE:2
  sts.add(new StorageType[]{StorageType.ARCHIVE,
      StorageType.ARCHIVE,StorageType.DISK,StorageType.DISK},warm);
  Map<String, Long> actualOutput = convertToStringMap(sts);
  Assert.assertEquals(4,actualOutput.size());
  Map<String, Long> expectedOutput = new HashMap<>();
  expectedOutput.put("WARM|DISK:1,ARCHIVE:1(WARM)", 2L);
  expectedOutput.put("WARM|DISK:2,ARCHIVE:1", 3L);
  expectedOutput.put("WARM|DISK:1,ARCHIVE:2(WARM)", 3L);
  expectedOutput.put("WARM|DISK:2,ARCHIVE:2", 1L);
  Assert.assertEquals(expectedOutput,actualOutput);
}
 
Example #20
Source File: BlockStoragePolicySuite.java    From big-c with Apache License 2.0
public BlockStoragePolicy getPolicy(String policyName) {
  Preconditions.checkNotNull(policyName);

  if (policies != null) {
    for (BlockStoragePolicy policy : policies) {
      if (policy != null && policy.getName().equalsIgnoreCase(policyName)) {
        return policy;
      }
    }
  }
  return null;
}
 
Example #21
Source File: DFSClient.java    From big-c with Apache License 2.0
/**
 * @return All the existing storage policies
 */
public BlockStoragePolicy[] getStoragePolicies() throws IOException {
  TraceScope scope = Trace.startSpan("getStoragePolicies", traceSampler);
  try {
    return namenode.getStoragePolicies();
  } finally {
    scope.close();
  }
}
 
Example #22
Source File: StoragePolicyAdmin.java    From big-c with Apache License 2.0
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  final String path = StringUtils.popOptionWithArgument("-path", args);
  if (path == null) {
    System.err.println("Please specify the path with -path.\nUsage:" +
        getLongUsage());
    return 1;
  }

  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
  try {
    HdfsFileStatus status = dfs.getClient().getFileInfo(path);
    if (status == null) {
      System.err.println("File/Directory does not exist: " + path);
      return 2;
    }
    byte storagePolicyId = status.getStoragePolicy();
    if (storagePolicyId == BlockStoragePolicySuite.ID_UNSPECIFIED) {
      System.out.println("The storage policy of " + path + " is unspecified");
      return 0;
    }
    BlockStoragePolicy[] policies = dfs.getStoragePolicies();
    for (BlockStoragePolicy p : policies) {
      if (p.getId() == storagePolicyId) {
        System.out.println("The storage policy of " + path + ":\n" + p);
        return 0;
      }
    }
  } catch (Exception e) {
    System.err.println(AdminHelper.prettifyException(e));
    return 2;
  }
  System.err.println("Cannot identify the storage policy for " + path);
  return 2;
}
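This run method implements the getStoragePolicy subcommand of the storage-policy admin tool. Example #25 below drives the same StoragePolicyAdmin end-to-end with -getStoragePolicy -path and -setStoragePolicy -path ... -policy arguments and asserts on the exact messages printed here.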
 
Example #23
Source File: INodeFile.java    From big-c with Apache License 2.0
@Override
public final ContentSummaryComputationContext computeContentSummary(
    final ContentSummaryComputationContext summary) {
  final ContentCounts counts = summary.getCounts();
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  long fileLen = 0;
  if (sf == null) {
    fileLen = computeFileSize();
    counts.addContent(Content.FILE, 1);
  } else {
    final FileDiffList diffs = sf.getDiffs();
    final int n = diffs.asList().size();
    counts.addContent(Content.FILE, n);
    if (n > 0 && sf.isCurrentFileDeleted()) {
      fileLen = diffs.getLast().getFileSize();
    } else {
      fileLen = computeFileSize();
    }
  }
  counts.addContent(Content.LENGTH, fileLen);
  counts.addContent(Content.DISKSPACE, storagespaceConsumed());

  if (getStoragePolicyID() != ID_UNSPECIFIED){
    BlockStoragePolicy bsp = summary.getBlockStoragePolicySuite().
        getPolicy(getStoragePolicyID());
    List<StorageType> storageTypes = bsp.chooseStorageTypes(getFileReplication());
    for (StorageType t : storageTypes) {
      if (!t.supportTypeQuota()) {
        continue;
      }
      counts.addTypeSpace(t, fileLen);
    }
  }
  return summary;
}
 
Example #24
Source File: FSDirAttrOp.java    From big-c with Apache License 2.0
static HdfsFileStatus setStoragePolicy(
    FSDirectory fsd, BlockManager bm, String src, final String policyName)
    throws IOException {
  if (!fsd.isStoragePolicyEnabled()) {
    throw new IOException(
        "Failed to set storage policy since "
            + DFS_STORAGE_POLICY_ENABLED_KEY + " is set to false.");
  }
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  INodesInPath iip;
  fsd.writeLock();
  try {
    src = FSDirectory.resolvePath(src, pathComponents, fsd);
    iip = fsd.getINodesInPath4Write(src);

    if (fsd.isPermissionEnabled()) {
      fsd.checkPathAccess(pc, iip, FsAction.WRITE);
    }

    // get the corresponding policy and make sure the policy name is valid
    BlockStoragePolicy policy = bm.getStoragePolicy(policyName);
    if (policy == null) {
      throw new HadoopIllegalArgumentException(
          "Cannot find a block policy with the name " + policyName);
    }
    unprotectedSetStoragePolicy(fsd, bm, iip, policy.getId());
    fsd.getEditLog().logSetStoragePolicy(src, policy.getId());
  } finally {
    fsd.writeUnlock();
  }
  return fsd.getAuditFileInfo(iip);
}
 
Example #25
Source File: TestStoragePolicyCommands.java    From hadoop with Apache License 2.0
@Test
public void testSetAndGetStoragePolicy() throws Exception {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(fs, bar, SIZE, REPL, 0);

  final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
  DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo", 0,
      "The storage policy of " + foo.toString() + " is unspecified");
  DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo/bar", 0,
      "The storage policy of " + bar.toString() + " is unspecified");

  DFSTestUtil.toolRun(admin, "-setStoragePolicy -path /foo -policy WARM", 0,
      "Set storage policy WARM on " + foo.toString());
  DFSTestUtil.toolRun(admin, "-setStoragePolicy -path /foo/bar -policy COLD",
      0, "Set storage policy COLD on " + bar.toString());
  DFSTestUtil.toolRun(admin, "-setStoragePolicy -path /fooz -policy WARM",
      2, "File/Directory does not exist: /fooz");

  final BlockStoragePolicySuite suite = BlockStoragePolicySuite
      .createDefaultSuite();
  final BlockStoragePolicy warm = suite.getPolicy("WARM");
  final BlockStoragePolicy cold = suite.getPolicy("COLD");
  DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo", 0,
      "The storage policy of " + foo.toString() + ":\n" + warm);
  DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo/bar", 0,
      "The storage policy of " + bar.toString() + ":\n" + cold);
  DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /fooz", 2,
      "File/Directory does not exist: /fooz");
}
 
Example #26
Source File: TestStorageMover.java    From hadoop with Apache License 2.0
/**
 * A normal case for Mover: move a file into archival storage
 */
@Test
public void testMigrateFileToArchival() throws Exception {
  LOG.info("testMigrateFileToArchival");
  final Path foo = new Path("/foo");
  Map<Path, BlockStoragePolicy> policyMap = Maps.newHashMap();
  policyMap.put(foo, COLD);
  NamespaceScheme nsScheme = new NamespaceScheme(null, Arrays.asList(foo),
      2*BLOCK_SIZE, null, policyMap);
  ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF,
      NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
  new MigrationTest(clusterScheme, nsScheme).runBasicTest(true);
}
 
Example #27
Source File: ClientNamenodeProtocolTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public BlockStoragePolicy[] getStoragePolicies() throws IOException {
  try {
    GetStoragePoliciesResponseProto response = rpcProxy
        .getStoragePolicies(null, VOID_GET_STORAGE_POLICIES_REQUEST);
    return PBHelper.convertStoragePolicies(response.getPoliciesList());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example #28
Source File: PBHelper.java    From big-c with Apache License 2.0
public static BlockStoragePolicy[] convertStoragePolicies(
    List<BlockStoragePolicyProto> policyProtos) {
  if (policyProtos == null || policyProtos.size() == 0) {
    return new BlockStoragePolicy[0];
  }
  BlockStoragePolicy[] policies = new BlockStoragePolicy[policyProtos.size()];
  int i = 0;
  for (BlockStoragePolicyProto proto : policyProtos) {
    policies[i++] = convert(proto);
  }
  return policies;
}
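This is the client-side inverse of the convert method in Examples #1 and #15: the protocol translator in Example #27 passes response.getPoliciesList() through it to rebuild BlockStoragePolicy objects from their protobuf form.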
 
Example #29
Source File: TestStorageMover.java    From hadoop with Apache License 2.0
NamespaceScheme(List<Path> dirs, List<Path> files, long fileSize, 
                Map<Path,List<String>> snapshotMap,
                Map<Path, BlockStoragePolicy> policyMap) {
  this.dirs = dirs == null? Collections.<Path>emptyList(): dirs;
  this.files = files == null? Collections.<Path>emptyList(): files;
  this.fileSize = fileSize;
  this.snapshotMap = snapshotMap == null ?
      Collections.<Path, List<String>>emptyMap() : snapshotMap;
  this.policyMap = policyMap;
}
 
Example #30
Source File: StoragePolicyAdmin.java    From hadoop with Apache License 2.0
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  final String path = StringUtils.popOptionWithArgument("-path", args);
  if (path == null) {
    System.err.println("Please specify the path with -path.\nUsage:" +
        getLongUsage());
    return 1;
  }

  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
  try {
    HdfsFileStatus status = dfs.getClient().getFileInfo(path);
    if (status == null) {
      System.err.println("File/Directory does not exist: " + path);
      return 2;
    }
    byte storagePolicyId = status.getStoragePolicy();
    if (storagePolicyId == BlockStoragePolicySuite.ID_UNSPECIFIED) {
      System.out.println("The storage policy of " + path + " is unspecified");
      return 0;
    }
    BlockStoragePolicy[] policies = dfs.getStoragePolicies();
    for (BlockStoragePolicy p : policies) {
      if (p.getId() == storagePolicyId) {
        System.out.println("The storage policy of " + path + ":\n" + p);
        return 0;
      }
    }
  } catch (Exception e) {
    System.err.println(AdminHelper.prettifyException(e));
    return 2;
  }
  System.err.println("Cannot identify the storage policy for " + path);
  return 2;
}