Java Code Examples for org.apache.hadoop.hdfs.DFSUtil#getPercentUsed()

The following examples show how to use org.apache.hadoop.hdfs.DFSUtil#getPercentUsed(). You can vote up the examples you like or vote down those you don't, and you can go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example 1
Source File: DatanodeInfo.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Returns the space consumed by DFS on this datanode, expressed as a
 * percentage of the node's present capacity.
 */
public float getDfsUsedPercent() {
  final float usedPercent = DFSUtil.getPercentUsed(dfsUsed, capacity);
  return usedPercent;
}
 
Example 2
Source File: DatanodeInfo.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Returns the space consumed by the block pool on this datanode,
 * expressed as a percentage of the node's present capacity.
 */
public float getBlockPoolUsedPercent() {
  final float bpUsedPercent = DFSUtil.getPercentUsed(blockPoolUsed, capacity);
  return bpUsedPercent;
}
 
Example 3
Source File: DatanodeInfo.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Returns the cache in use on this datanode, expressed as a percentage
 * of the node's total cache capacity.
 *
 * @return cache used as a percentage of total cache capacity
 */
public float getCacheUsedPercent() {
  final float cacheUsedPercent =
      DFSUtil.getPercentUsed(cacheUsed, cacheCapacity);
  return cacheUsedPercent;
}
 
Example 4
Source File: ClusterJspHelper.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Writes the aggregated cluster status as an XML document to the given
 * writer.
 * <p>
 * If a general error was recorded for the cluster, only the stringified
 * exception is emitted and the method returns early. Otherwise a
 * {@code <cluster>} element is produced containing cluster-wide storage
 * figures (capacity values averaged over the reporting namenodes),
 * followed by a per-namenode {@code <node>} section and any recorded
 * per-namenode exception messages.
 *
 * @param doc XML writer the report is emitted to; its underlying writer
 *            is flushed before returning
 * @throws IOException if writing to the underlying writer fails
 */
public void toXML(XMLOutputter doc) throws IOException {
  if (error != null) {
    // general exception, only print exception message onto web page.
    createGeneralException(doc, clusterid,
        StringUtils.stringifyException(error));
    doc.getWriter().flush();
    return;
  }
  
  int size = nnList.size();
  // Use the uppercase 'L' suffix consistently; a lowercase 'l' (as in
  // the original "0l") is easily misread as the digit '1'.
  long total = 0L, free = 0L, nonDfsUsed = 0L;
  float dfsUsedPercent = 0.0f, dfsRemainingPercent = 0.0f;
  if (size > 0) {
    // Capacity figures are averaged across namenodes; the size guard
    // avoids division by zero when no namenode has reported.
    total = total_sum / size;
    free = free_sum / size;
    nonDfsUsed = nonDfsUsed_sum / size;
    dfsUsedPercent = DFSUtil.getPercentUsed(clusterDfsUsed, total);
    dfsRemainingPercent = DFSUtil.getPercentRemaining(free, total);
  }

  doc.startTag("cluster");
  doc.attribute("clusterId", clusterid);

  // Cluster-wide storage summary.
  doc.startTag("storage");

  toXmlItemBlock(doc, "Total Files And Directories",
      Long.toString(totalFilesAndDirectories));

  toXmlItemBlock(doc, "Configured Capacity", StringUtils.byteDesc(total));

  toXmlItemBlock(doc, "DFS Used", StringUtils.byteDesc(clusterDfsUsed));

  toXmlItemBlock(doc, "Non DFS Used", StringUtils.byteDesc(nonDfsUsed));

  toXmlItemBlock(doc, "DFS Remaining", StringUtils.byteDesc(free));

  // dfsUsedPercent
  toXmlItemBlock(doc, "DFS Used%", DFSUtil.percent2String(dfsUsedPercent));

  // dfsRemainingPercent
  toXmlItemBlock(doc, "DFS Remaining%", DFSUtil.percent2String(dfsRemainingPercent));

  doc.endTag(); // storage

  // Per-namenode details.
  doc.startTag("namenodes");
  // number of namenodes
  toXmlItemBlock(doc, "NamenodesCount", Integer.toString(size));

  for (NamenodeStatus nn : nnList) {
    doc.startTag("node");
    toXmlItemBlockWithLink(doc, nn.host, nn.httpAddress, "NameNode");
    toXmlItemBlock(doc, "Blockpool Used",
        StringUtils.byteDesc(nn.bpUsed));
    // NOTE(review): block-pool usage is reported against the averaged
    // cluster capacity ('total'), not this namenode's own capacity.
    toXmlItemBlock(doc, "Blockpool Used%",
        DFSUtil.percent2String(DFSUtil.getPercentUsed(nn.bpUsed, total)));
    toXmlItemBlock(doc, "Files And Directories",
        Long.toString(nn.filesAndDirectories));
    toXmlItemBlock(doc, "Blocks", Long.toString(nn.blocksCount));
    toXmlItemBlock(doc, "Missing Blocks",
        Long.toString(nn.missingBlocksCount));
    // Datanode counts link back to the node-list JSP, with the
    // decommissioned count shown in parentheses.
    toXmlItemBlockWithLink(doc, nn.liveDatanodeCount + " ("
        + nn.liveDecomCount + ")", new URL(nn.httpAddress,
        "/dfsnodelist.jsp?whatNodes=LIVE"),
        "Live Datanode (Decommissioned)");
    toXmlItemBlockWithLink(doc, nn.deadDatanodeCount + " ("
        + nn.deadDecomCount + ")", new URL(nn.httpAddress,
        "/dfsnodelist.jsp?whatNodes=DEAD"),
        "Dead Datanode (Decommissioned)");
    toXmlItemBlock(doc, "Software Version", nn.softwareVersion);
    doc.endTag(); // node
  }
  doc.endTag(); // namenodes

  createNamenodeExceptionMsg(doc, nnExceptions);
  doc.endTag(); // cluster
  doc.getWriter().flush();
}
 
Example 5
Source File: HeartbeatManager.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/** Percentage of the cluster's total capacity currently in use. */
@Override
public synchronized float getCapacityUsedPercent() {
  final float percent =
      DFSUtil.getPercentUsed(stats.capacityUsed, stats.capacityTotal);
  return percent;
}
 
Example 6
Source File: HeartbeatManager.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/** Percentage of the cluster's total capacity used by the block pool. */
@Override
public synchronized float getPercentBlockPoolUsed() {
  final float percent =
      DFSUtil.getPercentUsed(stats.blockPoolUsed, stats.capacityTotal);
  return percent;
}
 
Example 7
Source File: DatanodeInfo.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * Returns the space consumed by DFS on this datanode, expressed as a
 * percentage of the node's present capacity.
 */
public float getDfsUsedPercent() {
  final float usedPercent = DFSUtil.getPercentUsed(dfsUsed, capacity);
  return usedPercent;
}
 
Example 8
Source File: DatanodeInfo.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * Returns the space consumed by the block pool on this datanode,
 * expressed as a percentage of the node's present capacity.
 */
public float getBlockPoolUsedPercent() {
  final float bpUsedPercent = DFSUtil.getPercentUsed(blockPoolUsed, capacity);
  return bpUsedPercent;
}
 
Example 9
Source File: DatanodeInfo.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * Returns the cache in use on this datanode, expressed as a percentage
 * of the node's total cache capacity.
 *
 * @return cache used as a percentage of total cache capacity
 */
public float getCacheUsedPercent() {
  final float cacheUsedPercent =
      DFSUtil.getPercentUsed(cacheUsed, cacheCapacity);
  return cacheUsedPercent;
}
 
Example 10
Source File: ClusterJspHelper.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * Writes the aggregated cluster status as an XML document to the given
 * writer.
 * <p>
 * If a general error was recorded for the cluster, only the stringified
 * exception is emitted and the method returns early. Otherwise a
 * {@code <cluster>} element is produced containing cluster-wide storage
 * figures (capacity values averaged over the reporting namenodes),
 * followed by a per-namenode {@code <node>} section and any recorded
 * per-namenode exception messages.
 *
 * @param doc XML writer the report is emitted to; its underlying writer
 *            is flushed before returning
 * @throws IOException if writing to the underlying writer fails
 */
public void toXML(XMLOutputter doc) throws IOException {
  if (error != null) {
    // general exception, only print exception message onto web page.
    createGeneralException(doc, clusterid,
        StringUtils.stringifyException(error));
    doc.getWriter().flush();
    return;
  }
  
  int size = nnList.size();
  // Use the uppercase 'L' suffix consistently; a lowercase 'l' (as in
  // the original "0l") is easily misread as the digit '1'.
  long total = 0L, free = 0L, nonDfsUsed = 0L;
  float dfsUsedPercent = 0.0f, dfsRemainingPercent = 0.0f;
  if (size > 0) {
    // Capacity figures are averaged across namenodes; the size guard
    // avoids division by zero when no namenode has reported.
    total = total_sum / size;
    free = free_sum / size;
    nonDfsUsed = nonDfsUsed_sum / size;
    dfsUsedPercent = DFSUtil.getPercentUsed(clusterDfsUsed, total);
    dfsRemainingPercent = DFSUtil.getPercentRemaining(free, total);
  }

  doc.startTag("cluster");
  doc.attribute("clusterId", clusterid);

  // Cluster-wide storage summary.
  doc.startTag("storage");

  toXmlItemBlock(doc, "Total Files And Directories",
      Long.toString(totalFilesAndDirectories));

  toXmlItemBlock(doc, "Configured Capacity", StringUtils.byteDesc(total));

  toXmlItemBlock(doc, "DFS Used", StringUtils.byteDesc(clusterDfsUsed));

  toXmlItemBlock(doc, "Non DFS Used", StringUtils.byteDesc(nonDfsUsed));

  toXmlItemBlock(doc, "DFS Remaining", StringUtils.byteDesc(free));

  // dfsUsedPercent
  toXmlItemBlock(doc, "DFS Used%", DFSUtil.percent2String(dfsUsedPercent));

  // dfsRemainingPercent
  toXmlItemBlock(doc, "DFS Remaining%", DFSUtil.percent2String(dfsRemainingPercent));

  doc.endTag(); // storage

  // Per-namenode details.
  doc.startTag("namenodes");
  // number of namenodes
  toXmlItemBlock(doc, "NamenodesCount", Integer.toString(size));

  for (NamenodeStatus nn : nnList) {
    doc.startTag("node");
    toXmlItemBlockWithLink(doc, nn.host, nn.httpAddress, "NameNode");
    toXmlItemBlock(doc, "Blockpool Used",
        StringUtils.byteDesc(nn.bpUsed));
    // NOTE(review): block-pool usage is reported against the averaged
    // cluster capacity ('total'), not this namenode's own capacity.
    toXmlItemBlock(doc, "Blockpool Used%",
        DFSUtil.percent2String(DFSUtil.getPercentUsed(nn.bpUsed, total)));
    toXmlItemBlock(doc, "Files And Directories",
        Long.toString(nn.filesAndDirectories));
    toXmlItemBlock(doc, "Blocks", Long.toString(nn.blocksCount));
    toXmlItemBlock(doc, "Missing Blocks",
        Long.toString(nn.missingBlocksCount));
    // Datanode counts link back to the node-list JSP, with the
    // decommissioned count shown in parentheses.
    toXmlItemBlockWithLink(doc, nn.liveDatanodeCount + " ("
        + nn.liveDecomCount + ")", new URL(nn.httpAddress,
        "/dfsnodelist.jsp?whatNodes=LIVE"),
        "Live Datanode (Decommissioned)");
    toXmlItemBlockWithLink(doc, nn.deadDatanodeCount + " ("
        + nn.deadDecomCount + ")", new URL(nn.httpAddress,
        "/dfsnodelist.jsp?whatNodes=DEAD"),
        "Dead Datanode (Decommissioned)");
    toXmlItemBlock(doc, "Software Version", nn.softwareVersion);
    doc.endTag(); // node
  }
  doc.endTag(); // namenodes

  createNamenodeExceptionMsg(doc, nnExceptions);
  doc.endTag(); // cluster
  doc.getWriter().flush();
}
 
Example 11
Source File: HeartbeatManager.java    From big-c with Apache License 2.0 4 votes vote down vote up
/** Percentage of the cluster's total capacity currently in use. */
@Override
public synchronized float getCapacityUsedPercent() {
  final float percent =
      DFSUtil.getPercentUsed(stats.capacityUsed, stats.capacityTotal);
  return percent;
}
 
Example 12
Source File: HeartbeatManager.java    From big-c with Apache License 2.0 4 votes vote down vote up
/** Percentage of the cluster's total capacity used by the block pool. */
@Override
public synchronized float getPercentBlockPoolUsed() {
  final float percent =
      DFSUtil.getPercentUsed(stats.blockPoolUsed, stats.capacityTotal);
  return percent;
}