Java Code Examples for org.apache.hadoop.net.NetUtils#getHostNameOfIP()

The following examples show how to use org.apache.hadoop.net.NetUtils#getHostNameOfIP(). These examples are extracted from open-source projects. You can vote up the examples you find helpful or vote down the ones you don't, and you can go to the original project or source file by following the link above each example. You may also want to check out the right sidebar, which shows related API usage.
Example 1
Source Project: hadoop   File: DFSAdmin.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Display each rack and the nodes assigned to that rack, as determined
 * by the NameNode, in a hierarchical manner.  The nodes and racks are
 * sorted alphabetically.
 *
 * @return 0 on success
 * @throws IOException if an error occurs while getting the datanode report
 */
public int printTopology() throws IOException {
    DistributedFileSystem dfs = getDFS();
    final DatanodeInfo[] report = dfs.getDataNodeStats();

    // Build a map of rack -> nodes from the datanode report.
    // A single get() plus null check avoids the containsKey()/get()
    // double hash lookup of the naive form.
    HashMap<String, TreeSet<String>> tree = new HashMap<String, TreeSet<String>>();
    for (DatanodeInfo dni : report) {
      String location = dni.getNetworkLocation();
      TreeSet<String> nodeSet = tree.get(location);
      if (nodeSet == null) {
        nodeSet = new TreeSet<String>();
        tree.put(location, nodeSet);
      }
      nodeSet.add(dni.getName());
    }

    // Sort the racks alphabetically; the TreeSet already keeps the
    // nodes within each rack sorted.
    ArrayList<String> racks = new ArrayList<String>(tree.keySet());
    Collections.sort(racks);

    for (String r : racks) {
      System.out.println("Rack: " + r);

      for (String n : tree.get(r)) {
        System.out.print("   " + n);
        // Best-effort reverse lookup; null when the name cannot be resolved.
        String hostname = NetUtils.getHostNameOfIP(n);
        if (hostname != null) {
          System.out.print(" (" + hostname + ")");
        }
        System.out.println();
      }

      System.out.println();
    }
  return 0;
}
 
Example 2
Source Project: big-c   File: DFSAdmin.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Display each rack and the nodes assigned to that rack, as determined
 * by the NameNode, in a hierarchical manner.  The nodes and racks are
 * sorted alphabetically.
 *
 * @return 0 on success
 * @throws IOException if an error occurs while getting the datanode report
 */
public int printTopology() throws IOException {
    DistributedFileSystem dfs = getDFS();
    final DatanodeInfo[] report = dfs.getDataNodeStats();

    // Build a map of rack -> nodes from the datanode report.
    // A single get() plus null check avoids the containsKey()/get()
    // double hash lookup of the naive form.
    HashMap<String, TreeSet<String>> tree = new HashMap<String, TreeSet<String>>();
    for (DatanodeInfo dni : report) {
      String location = dni.getNetworkLocation();
      TreeSet<String> nodeSet = tree.get(location);
      if (nodeSet == null) {
        nodeSet = new TreeSet<String>();
        tree.put(location, nodeSet);
      }
      nodeSet.add(dni.getName());
    }

    // Sort the racks alphabetically; the TreeSet already keeps the
    // nodes within each rack sorted.
    ArrayList<String> racks = new ArrayList<String>(tree.keySet());
    Collections.sort(racks);

    for (String r : racks) {
      System.out.println("Rack: " + r);

      for (String n : tree.get(r)) {
        System.out.print("   " + n);
        // Best-effort reverse lookup; null when the name cannot be resolved.
        String hostname = NetUtils.getHostNameOfIP(n);
        if (hostname != null) {
          System.out.print(" (" + hostname + ")");
        }
        System.out.println();
      }

      System.out.println();
    }
  return 0;
}
 
Example 3
Source Project: hadoop   File: DatanodeInfo.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * A formatted string for reporting the status of the DataNode: name,
 * hostname, rack, decommission status, capacity/cache usage figures,
 * xceiver count, and last-contact time — one field per line.
 *
 * @return the multi-line, human-readable datanode status report
 */
public String getDatanodeReport() {
  StringBuilder buffer = new StringBuilder();
  long capacity = getCapacity();
  long remaining = getRemaining();
  long dfsUsed = getDfsUsed();
  long nonDfsUsed = getNonDfsUsed();
  float dfsUsedPct = getDfsUsedPercent();
  float remainingPct = getRemainingPercent();
  long cacheCapacity = getCacheCapacity();
  long cacheRemaining = getCacheRemaining();
  long cacheUsed = getCacheUsed();
  float cacheUsedPct = getCacheUsedPercent();
  float cacheRemainingPct = getCacheRemainingPercent();
  // Best-effort reverse DNS lookup; null when the address can't be resolved.
  String lookupName = NetUtils.getHostNameOfIP(getName());

  // Chained appends avoid the intermediate String allocations that
  // "literal" + value concatenation inside append(...) would create.
  buffer.append("Name: ").append(getName());
  if (lookupName != null) {
    buffer.append(" (").append(lookupName).append(")");
  }
  buffer.append("\n");
  buffer.append("Hostname: ").append(getHostName()).append("\n");

  // Only show the rack when the node isn't on the default rack.
  if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
    buffer.append("Rack: ").append(location).append("\n");
  }
  buffer.append("Decommission Status : ");
  if (isDecommissioned()) {
    buffer.append("Decommissioned\n");
  } else if (isDecommissionInProgress()) {
    buffer.append("Decommission in progress\n");
  } else {
    buffer.append("Normal\n");
  }
  buffer.append("Configured Capacity: ").append(capacity)
      .append(" (").append(StringUtils.byteDesc(capacity)).append(")\n");
  buffer.append("DFS Used: ").append(dfsUsed)
      .append(" (").append(StringUtils.byteDesc(dfsUsed)).append(")\n");
  buffer.append("Non DFS Used: ").append(nonDfsUsed)
      .append(" (").append(StringUtils.byteDesc(nonDfsUsed)).append(")\n");
  buffer.append("DFS Remaining: ").append(remaining)
      .append(" (").append(StringUtils.byteDesc(remaining)).append(")\n");
  buffer.append("DFS Used%: ").append(percent2String(dfsUsedPct)).append("\n");
  buffer.append("DFS Remaining%: ").append(percent2String(remainingPct)).append("\n");
  buffer.append("Configured Cache Capacity: ").append(cacheCapacity)
      .append(" (").append(StringUtils.byteDesc(cacheCapacity)).append(")\n");
  buffer.append("Cache Used: ").append(cacheUsed)
      .append(" (").append(StringUtils.byteDesc(cacheUsed)).append(")\n");
  buffer.append("Cache Remaining: ").append(cacheRemaining)
      .append(" (").append(StringUtils.byteDesc(cacheRemaining)).append(")\n");
  buffer.append("Cache Used%: ").append(percent2String(cacheUsedPct)).append("\n");
  buffer.append("Cache Remaining%: ").append(percent2String(cacheRemainingPct)).append("\n");
  buffer.append("Xceivers: ").append(getXceiverCount()).append("\n");
  buffer.append("Last contact: ").append(new Date(lastUpdate)).append("\n");
  return buffer.toString();
}
 
Example 4
Source Project: big-c   File: DatanodeInfo.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * A formatted string for reporting the status of the DataNode: name,
 * hostname, rack, decommission status, capacity/cache usage figures,
 * xceiver count, and last-contact time — one field per line.
 *
 * @return the multi-line, human-readable datanode status report
 */
public String getDatanodeReport() {
  StringBuilder buffer = new StringBuilder();
  long capacity = getCapacity();
  long remaining = getRemaining();
  long dfsUsed = getDfsUsed();
  long nonDfsUsed = getNonDfsUsed();
  float dfsUsedPct = getDfsUsedPercent();
  float remainingPct = getRemainingPercent();
  long cacheCapacity = getCacheCapacity();
  long cacheRemaining = getCacheRemaining();
  long cacheUsed = getCacheUsed();
  float cacheUsedPct = getCacheUsedPercent();
  float cacheRemainingPct = getCacheRemainingPercent();
  // Best-effort reverse DNS lookup; null when the address can't be resolved.
  String lookupName = NetUtils.getHostNameOfIP(getName());

  // Chained appends avoid the intermediate String allocations that
  // "literal" + value concatenation inside append(...) would create.
  buffer.append("Name: ").append(getName());
  if (lookupName != null) {
    buffer.append(" (").append(lookupName).append(")");
  }
  buffer.append("\n");
  buffer.append("Hostname: ").append(getHostName()).append("\n");

  // Only show the rack when the node isn't on the default rack.
  if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
    buffer.append("Rack: ").append(location).append("\n");
  }
  buffer.append("Decommission Status : ");
  if (isDecommissioned()) {
    buffer.append("Decommissioned\n");
  } else if (isDecommissionInProgress()) {
    buffer.append("Decommission in progress\n");
  } else {
    buffer.append("Normal\n");
  }
  buffer.append("Configured Capacity: ").append(capacity)
      .append(" (").append(StringUtils.byteDesc(capacity)).append(")\n");
  buffer.append("DFS Used: ").append(dfsUsed)
      .append(" (").append(StringUtils.byteDesc(dfsUsed)).append(")\n");
  buffer.append("Non DFS Used: ").append(nonDfsUsed)
      .append(" (").append(StringUtils.byteDesc(nonDfsUsed)).append(")\n");
  buffer.append("DFS Remaining: ").append(remaining)
      .append(" (").append(StringUtils.byteDesc(remaining)).append(")\n");
  buffer.append("DFS Used%: ").append(percent2String(dfsUsedPct)).append("\n");
  buffer.append("DFS Remaining%: ").append(percent2String(remainingPct)).append("\n");
  buffer.append("Configured Cache Capacity: ").append(cacheCapacity)
      .append(" (").append(StringUtils.byteDesc(cacheCapacity)).append(")\n");
  buffer.append("Cache Used: ").append(cacheUsed)
      .append(" (").append(StringUtils.byteDesc(cacheUsed)).append(")\n");
  buffer.append("Cache Remaining: ").append(cacheRemaining)
      .append(" (").append(StringUtils.byteDesc(cacheRemaining)).append(")\n");
  buffer.append("Cache Used%: ").append(percent2String(cacheUsedPct)).append("\n");
  buffer.append("Cache Remaining%: ").append(percent2String(cacheRemainingPct)).append("\n");
  buffer.append("Xceivers: ").append(getXceiverCount()).append("\n");
  buffer.append("Last contact: ").append(new Date(lastUpdate)).append("\n");
  return buffer.toString();
}