Java Code Examples for org.apache.hadoop.hdfs.DFSUtil#getPercentRemaining()

The following examples show how to use org.apache.hadoop.hdfs.DFSUtil#getPercentRemaining(). They are taken from open-source projects; the source file, originating project, and license are noted above each example.
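Before the examples, a minimal sketch of what the method computes may help: getPercentRemaining(remaining, capacity) returns the remaining space as a percentage of the given capacity. The snippet below reimplements that semantics for illustration only; the guard for a non-positive capacity is an assumption of this sketch, not a quote of the Hadoop source.

// Illustrative reimplementation of the semantics of
// DFSUtil.getPercentRemaining(long, long). The non-positive-capacity
// guard is an assumption for this sketch, not copied from Hadoop.
public final class PercentRemainingSketch {
  public static float getPercentRemaining(long remaining, long capacity) {
    // With no configured capacity there is nothing to take a percentage of.
    return capacity <= 0 ? 0.0f : (remaining * 100.0f) / capacity;
  }

  public static void main(String[] args) {
    // 300 GiB free out of 1 TiB configured capacity -> ~29.3
    System.out.println(getPercentRemaining(300L << 30, 1L << 40));
  }
}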
Example 1
Source File: DatanodeInfo.java    From hadoop with Apache License 2.0
/** The remaining space as percentage of configured capacity. */
public float getRemainingPercent() { 
  return DFSUtil.getPercentRemaining(remaining, capacity);
}
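Example 1 is just a convenience getter, so a short driver may clarify where it is typically called from. The class below is a hypothetical sketch: it assumes a client Configuration that points at a running HDFS cluster and uses DistributedFileSystem#getDataNodeStats() to obtain DatanodeInfo objects.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

public class RemainingPercentReport {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // One DatanodeInfo per reported datanode.
      for (DatanodeInfo dn : dfs.getDataNodeStats()) {
        System.out.println(dn.getHostName() + " remaining: "
            + DFSUtil.percent2String(dn.getRemainingPercent()));
      }
    }
    fs.close();
  }
}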
 
Example 2
Source File: ClusterJspHelper.java    From hadoop with Apache License 2.0
public void toXML(XMLOutputter doc) throws IOException {
  if (error != null) {
    // general exception, only print exception message onto web page.
    createGeneralException(doc, clusterid,
        StringUtils.stringifyException(error));
    doc.getWriter().flush();
    return;
  }
  
  int size = nnList.size();
  long total = 0L, free = 0L, nonDfsUsed = 0L;
  float dfsUsedPercent = 0.0f, dfsRemainingPercent = 0.0f;
  if (size > 0) {
    total = total_sum / size;
    free = free_sum / size;
    nonDfsUsed = nonDfsUsed_sum / size;
    dfsUsedPercent = DFSUtil.getPercentUsed(clusterDfsUsed, total);
    dfsRemainingPercent = DFSUtil.getPercentRemaining(free, total);
  }

  doc.startTag("cluster");
  doc.attribute("clusterId", clusterid);

  doc.startTag("storage");

  toXmlItemBlock(doc, "Total Files And Directories",
      Long.toString(totalFilesAndDirectories));

  toXmlItemBlock(doc, "Configured Capacity", StringUtils.byteDesc(total));

  toXmlItemBlock(doc, "DFS Used", StringUtils.byteDesc(clusterDfsUsed));

  toXmlItemBlock(doc, "Non DFS Used", StringUtils.byteDesc(nonDfsUsed));

  toXmlItemBlock(doc, "DFS Remaining", StringUtils.byteDesc(free));

  // dfsUsedPercent
  toXmlItemBlock(doc, "DFS Used%", DFSUtil.percent2String(dfsUsedPercent));

  // dfsRemainingPercent
  toXmlItemBlock(doc, "DFS Remaining%", DFSUtil.percent2String(dfsRemainingPercent));

  doc.endTag(); // storage

  doc.startTag("namenodes");
  // number of namenodes
  toXmlItemBlock(doc, "NamenodesCount", Integer.toString(size));

  for (NamenodeStatus nn : nnList) {
    doc.startTag("node");
    toXmlItemBlockWithLink(doc, nn.host, nn.httpAddress, "NameNode");
    toXmlItemBlock(doc, "Blockpool Used",
        StringUtils.byteDesc(nn.bpUsed));
    toXmlItemBlock(doc, "Blockpool Used%",
        DFSUtil.percent2String(DFSUtil.getPercentUsed(nn.bpUsed, total)));
    toXmlItemBlock(doc, "Files And Directories",
        Long.toString(nn.filesAndDirectories));
    toXmlItemBlock(doc, "Blocks", Long.toString(nn.blocksCount));
    toXmlItemBlock(doc, "Missing Blocks",
        Long.toString(nn.missingBlocksCount));
    toXmlItemBlockWithLink(doc, nn.liveDatanodeCount + " ("
        + nn.liveDecomCount + ")", new URL(nn.httpAddress,
        "/dfsnodelist.jsp?whatNodes=LIVE"),
        "Live Datanode (Decommissioned)");
    toXmlItemBlockWithLink(doc, nn.deadDatanodeCount + " ("
        + nn.deadDecomCount + ")", new URL(nn.httpAddress,
        "/dfsnodelist.jsp?whatNodes=DEAD"),
        "Dead Datanode (Decommissioned)");
    toXmlItemBlock(doc, "Software Version", nn.softwareVersion);
    doc.endTag(); // node
  }
  doc.endTag(); // namenodes

  createNamenodeExceptionMsg(doc, nnExceptions);
  doc.endTag(); // cluster
  doc.getWriter().flush();
}
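The percentage lines in the storage block above come straight from DFSUtil; the standalone snippet below replays that arithmetic on hypothetical cluster-wide figures so the "DFS Used%" and "DFS Remaining%" values can be checked in isolation.

import org.apache.hadoop.hdfs.DFSUtil;

public class ClusterStorageSummary {
  public static void main(String[] args) {
    // Hypothetical cluster-wide figures, in bytes.
    long configuredCapacity = 4L * 1024 * 1024 * 1024 * 1024; // 4 TiB
    long dfsUsed = 1L * 1024 * 1024 * 1024 * 1024;            // 1 TiB
    long dfsRemaining = 2L * 1024 * 1024 * 1024 * 1024;       // 2 TiB (rest is non-DFS)

    float usedPercent = DFSUtil.getPercentUsed(dfsUsed, configuredCapacity);
    float remainingPercent = DFSUtil.getPercentRemaining(dfsRemaining, configuredCapacity);

    System.out.println("DFS Used%:      " + DFSUtil.percent2String(usedPercent));      // 25%
    System.out.println("DFS Remaining%: " + DFSUtil.percent2String(remainingPercent)); // 50%
  }
}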
 
Example 3
Source File: HeartbeatManager.java    From hadoop with Apache License 2.0
@Override
public synchronized float getCapacityRemainingPercent() {
  return DFSUtil.getPercentRemaining(
      stats.capacityRemaining, stats.capacityTotal);
}
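HeartbeatManager computes the percentage once over the cluster-wide totals in its Stats object rather than averaging per-datanode percentages. The sketch below, using made-up per-node figures, shows why the two approaches differ when node capacities are unequal.

import org.apache.hadoop.hdfs.DFSUtil;

public class AggregateRemainingPercent {
  public static void main(String[] args) {
    // Hypothetical per-node figures: {remaining, capacity} in bytes.
    long[][] nodes = {
        {800L << 30, 1024L << 30},  // small node, ~78% free
        {10L << 30, 4096L << 30},   // large node, nearly full
    };

    long remainingTotal = 0, capacityTotal = 0;
    float percentSum = 0;
    for (long[] node : nodes) {
      remainingTotal += node[0];
      capacityTotal += node[1];
      percentSum += DFSUtil.getPercentRemaining(node[0], node[1]);
    }

    // As in HeartbeatManager: one percentage over the summed totals (~15.8%).
    System.out.println(DFSUtil.getPercentRemaining(remainingTotal, capacityTotal));
    // A naive mean of per-node percentages would report ~39.2% instead.
    System.out.println(percentSum / nodes.length);
  }
}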
 
Example 4
Source File: DatanodeInfo.java    From big-c with Apache License 2.0
/** The remaining space as percentage of configured capacity. */
public float getRemainingPercent() { 
  return DFSUtil.getPercentRemaining(remaining, capacity);
}
 
Example 5
Source File: ClusterJspHelper.java    From big-c with Apache License 2.0
public void toXML(XMLOutputter doc) throws IOException {
  if (error != null) {
    // general exception, only print exception message onto web page.
    createGeneralException(doc, clusterid,
        StringUtils.stringifyException(error));
    doc.getWriter().flush();
    return;
  }
  
  int size = nnList.size();
  long total = 0L, free = 0L, nonDfsUsed = 0L;
  float dfsUsedPercent = 0.0f, dfsRemainingPercent = 0.0f;
  if (size > 0) {
    total = total_sum / size;
    free = free_sum / size;
    nonDfsUsed = nonDfsUsed_sum / size;
    dfsUsedPercent = DFSUtil.getPercentUsed(clusterDfsUsed, total);
    dfsRemainingPercent = DFSUtil.getPercentRemaining(free, total);
  }

  doc.startTag("cluster");
  doc.attribute("clusterId", clusterid);

  doc.startTag("storage");

  toXmlItemBlock(doc, "Total Files And Directories",
      Long.toString(totalFilesAndDirectories));

  toXmlItemBlock(doc, "Configured Capacity", StringUtils.byteDesc(total));

  toXmlItemBlock(doc, "DFS Used", StringUtils.byteDesc(clusterDfsUsed));

  toXmlItemBlock(doc, "Non DFS Used", StringUtils.byteDesc(nonDfsUsed));

  toXmlItemBlock(doc, "DFS Remaining", StringUtils.byteDesc(free));

  // dfsUsedPercent
  toXmlItemBlock(doc, "DFS Used%", DFSUtil.percent2String(dfsUsedPercent));

  // dfsRemainingPercent
  toXmlItemBlock(doc, "DFS Remaining%", DFSUtil.percent2String(dfsRemainingPercent));

  doc.endTag(); // storage

  doc.startTag("namenodes");
  // number of namenodes
  toXmlItemBlock(doc, "NamenodesCount", Integer.toString(size));

  for (NamenodeStatus nn : nnList) {
    doc.startTag("node");
    toXmlItemBlockWithLink(doc, nn.host, nn.httpAddress, "NameNode");
    toXmlItemBlock(doc, "Blockpool Used",
        StringUtils.byteDesc(nn.bpUsed));
    toXmlItemBlock(doc, "Blockpool Used%",
        DFSUtil.percent2String(DFSUtil.getPercentUsed(nn.bpUsed, total)));
    toXmlItemBlock(doc, "Files And Directories",
        Long.toString(nn.filesAndDirectories));
    toXmlItemBlock(doc, "Blocks", Long.toString(nn.blocksCount));
    toXmlItemBlock(doc, "Missing Blocks",
        Long.toString(nn.missingBlocksCount));
    toXmlItemBlockWithLink(doc, nn.liveDatanodeCount + " ("
        + nn.liveDecomCount + ")", new URL(nn.httpAddress,
        "/dfsnodelist.jsp?whatNodes=LIVE"),
        "Live Datanode (Decommissioned)");
    toXmlItemBlockWithLink(doc, nn.deadDatanodeCount + " ("
        + nn.deadDecomCount + ")", new URL(nn.httpAddress,
        "/dfsnodelist.jsp?whatNodes=DEAD"),
        "Dead Datanode (Decommissioned)");
    toXmlItemBlock(doc, "Software Version", nn.softwareVersion);
    doc.endTag(); // node
  }
  doc.endTag(); // namenodes

  createNamenodeExceptionMsg(doc, nnExceptions);
  doc.endTag(); // cluster
  doc.getWriter().flush();
}
 
Example 6
Source File: HeartbeatManager.java    From big-c with Apache License 2.0
@Override
public synchronized float getCapacityRemainingPercent() {
  return DFSUtil.getPercentRemaining(
      stats.capacityRemaining, stats.capacityTotal);
}
 
Example 7
Source File: DatanodeInfo.java    From hadoop with Apache License 2.0
/**
 * @return Cache remaining as a percentage of the datanode's total cache
 * capacity
 */
public float getCacheRemainingPercent() {
  return DFSUtil.getPercentRemaining(getCacheRemaining(), cacheCapacity);
}
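The cache variant works from the datanode's cache figures instead of its disk capacity. The helper below is a hypothetical sketch showing how the same percentage can be derived from the raw getters on DatanodeInfo, for instance to flag nodes whose cache is nearly exhausted.

import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

public class CacheRemainingCheck {
  /**
   * Hypothetical helper: returns true if the datanode has less than the given
   * percentage of its cache capacity still free. Mirrors
   * getCacheRemainingPercent(), but works from the raw cache figures.
   */
  static boolean cacheNearlyFull(DatanodeInfo dn, float thresholdPercent) {
    float remainingPercent =
        DFSUtil.getPercentRemaining(dn.getCacheRemaining(), dn.getCacheCapacity());
    return remainingPercent < thresholdPercent;
  }
}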
 
Example 8
Source File: DatanodeInfo.java    From big-c with Apache License 2.0
/**
 * @return Cache remaining as a percentage of the datanode's total cache
 * capacity
 */
public float getCacheRemainingPercent() {
  return DFSUtil.getPercentRemaining(getCacheRemaining(), cacheCapacity);
}