Java Code Examples for org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType

The following examples show how to use org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType. These examples are extracted from open source projects.
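Before the project examples, here is a minimal, self-contained sketch of the typical call pattern. It is an illustration only: the NameNode host and port are placeholder assumptions rather than values taken from any of the projects below.

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

public class DatanodeReportDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder address; a real client would derive this from fs.defaultFS.
    DFSClient client = new DFSClient(new InetSocketAddress("localhost", 8020), conf);
    try {
      // The enum offers ALL, LIVE, DEAD and DECOMMISSIONING report types.
      DatanodeInfo[] live = client.datanodeReport(DatanodeReportType.LIVE);
      for (DatanodeInfo dn : live) {
        System.out.println(dn.getHostName() + " dfsUsed=" + dn.getDfsUsed());
      }
    } finally {
      client.close();
    }
  }
}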
Example 1
Source Project: hadoop   Source File: TestDatanodeReport.java    License: Apache License 2.0
static void assertReports(int numDatanodes, DatanodeReportType type,
    DFSClient client, List<DataNode> datanodes, String bpid) throws IOException {
  final DatanodeInfo[] infos = client.datanodeReport(type);
  assertEquals(numDatanodes, infos.length);
  final DatanodeStorageReport[] reports = client.getDatanodeStorageReport(type);
  assertEquals(numDatanodes, reports.length);
  
  for(int i = 0; i < infos.length; i++) {
    assertEquals(infos[i], reports[i].getDatanodeInfo());
    
    final DataNode d = findDatanode(infos[i].getDatanodeUuid(), datanodes);
    if (bpid != null) {
      //check storage
      final StorageReport[] computed = reports[i].getStorageReports();
      Arrays.sort(computed, CMP);
      final StorageReport[] expected = d.getFSDataset().getStorageReports(bpid);
      Arrays.sort(expected, CMP);

      assertEquals(expected.length, computed.length);
      for(int j = 0; j < expected.length; j++) {
        assertEquals(expected[j].getStorage().getStorageID(),
                     computed[j].getStorage().getStorageID());
      }
    }
  }
}
 
Example 2
public HashMap<String, Integer> getNumberOfDataDirsPerHost(){
	HashMap<String, Integer> disksPerHost = new HashMap<>();
	
	try {
		@SuppressWarnings("resource")
		DFSClient dfsClient = new DFSClient(NameNode.getAddress(getConf()), getConf());
		
		DatanodeStorageReport[] datanodeStorageReports = dfsClient.getDatanodeStorageReport(DatanodeReportType.ALL);
		
		for (DatanodeStorageReport datanodeStorageReport : datanodeStorageReports) {
			disksPerHost.put(
					datanodeStorageReport.getDatanodeInfo().getHostName(),
					datanodeStorageReport.getStorageReports().length);
			
		}
	} catch (IOException e) {
		LOG.warn("Number of data directories (disks) per node could not be collected (requires elevated privileges).");
	}
	
	return disksPerHost;
}
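Since DatanodeStorageReport.getStorageReports() returns one StorageReport per storage location on the datanode, the length of that array approximates the number of configured data directories (disks) on each host, which is exactly what the map records.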
 
Example 3
Source Project: hadoop   Source File: DFSClient.java    License: Apache License 2.0
public DatanodeInfo[] datanodeReport(DatanodeReportType type)
    throws IOException {
  checkOpen();
  TraceScope scope = Trace.startSpan("datanodeReport", traceSampler);
  try {
    return namenode.getDatanodeReport(type);
  } finally {
    scope.close();
  }
}
 
Example 4
Source Project: hadoop   Source File: DFSClient.java    License: Apache License 2.0
public DatanodeStorageReport[] getDatanodeStorageReport(
    DatanodeReportType type) throws IOException {
  checkOpen();
  TraceScope scope =
      Trace.startSpan("datanodeStorageReport", traceSampler);
  try {
    return namenode.getDatanodeStorageReport(type);
  } finally {
    scope.close();
  }
}
 
Example 5
Source Project: hadoop   Source File: FsckServlet.java    License: Apache License 2.0
/** Handle fsck request */
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response
    ) throws IOException {
  @SuppressWarnings("unchecked")
  final Map<String,String[]> pmap = request.getParameterMap();
  final PrintWriter out = response.getWriter();
  final InetAddress remoteAddress = 
    InetAddress.getByName(request.getRemoteAddr());
  final ServletContext context = getServletContext();    
  final Configuration conf = NameNodeHttpServer.getConfFromContext(context);

  final UserGroupInformation ugi = getUGI(request, conf);
  try {
    ugi.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
        
        final FSNamesystem namesystem = nn.getNamesystem();
        final BlockManager bm = namesystem.getBlockManager();
        final int totalDatanodes = 
            namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE); 
        new NamenodeFsck(conf, nn,
            bm.getDatanodeManager().getNetworkTopology(), pmap, out,
            totalDatanodes, remoteAddress).fsck();
        
        return null;
      }
    });
  } catch (InterruptedException e) {
    response.sendError(400, e.getMessage());
  }
}
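Note that the report type here only determines the totalDatanodes count handed to NamenodeFsck; the check itself runs inside ugi.doAs(...), i.e. with the credentials of the requesting user.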
 
Example 6
Source Project: hadoop   Source File: NameNodeRpcServer.java    License: Apache License 2.0
@Override // ClientProtocol
public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
throws IOException {
  checkNNStartup();
  DatanodeInfo results[] = namesystem.datanodeReport(type);
  return results;
}
 
Example 7
Source Project: hadoop   Source File: NameNodeRpcServer.java    License: Apache License 2.0
@Override // ClientProtocol
public DatanodeStorageReport[] getDatanodeStorageReport(
    DatanodeReportType type) throws IOException {
  checkNNStartup();
  final DatanodeStorageReport[] reports = namesystem.getDatanodeStorageReport(type);
  return reports;
}
 
Example 8
Source Project: hadoop   Source File: DatanodeManager.java    License: Apache License 2.0
/** @return list of datanodes where decommissioning is in progress. */
public List<DatanodeDescriptor> getDecommissioningNodes() {
  // There is no need to take namesystem reader lock as
  // getDatanodeListForReport will synchronize on datanodeMap
  // A decommissioning DN may be "alive" or "dead".
  return getDatanodeListForReport(DatanodeReportType.DECOMMISSIONING);
}
 
Example 9
Source Project: hadoop   Source File: DatanodeManager.java    License: Apache License 2.0
/** Fetch live and dead datanodes. */
public void fetchDatanodes(final List<DatanodeDescriptor> live, 
    final List<DatanodeDescriptor> dead, final boolean removeDecommissionNode) {
  if (live == null && dead == null) {
    throw new HadoopIllegalArgumentException("Both live and dead lists are null");
  }

  // There is no need to take namesystem reader lock as
  // getDatanodeListForReport will synchronize on datanodeMap
  final List<DatanodeDescriptor> results =
      getDatanodeListForReport(DatanodeReportType.ALL);
  for(DatanodeDescriptor node : results) {
    if (isDatanodeDead(node)) {
      if (dead != null) {
        dead.add(node);
      }
    } else {
      if (live != null) {
        live.add(node);
      }
    }
  }
  
  if (removeDecommissionNode) {
    if (live != null) {
      removeDecomNodeFromList(live);
    }
    if (dead != null) {
      removeDecomNodeFromList(dead);
    }
  }
}
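A short usage sketch for fetchDatanodes (the dm handle and its surrounding setup are assumptions for illustration):

List<DatanodeDescriptor> live = new ArrayList<>();
List<DatanodeDescriptor> dead = new ArrayList<>();
// true prunes already-decommissioned nodes from both result lists
dm.fetchDatanodes(live, dead, true);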
 
Example 10
Source Project: hadoop   Source File: PBHelper.java    License: Apache License 2.0
public static DatanodeReportTypeProto
  convert(DatanodeReportType t) {
  switch (t) {
  case ALL: return DatanodeReportTypeProto.ALL;
  case LIVE: return DatanodeReportTypeProto.LIVE;
  case DEAD: return DatanodeReportTypeProto.DEAD;
  case DECOMMISSIONING: return DatanodeReportTypeProto.DECOMMISSIONING;
  default: 
    throw new IllegalArgumentException("Unexpected data type report:" + t);
  }
}
 
Example 11
Source Project: hadoop   Source File: PBHelper.java    License: Apache License 2.0
public static DatanodeReportType 
  convert(DatanodeReportTypeProto t) {
  switch (t) {
  case ALL: return DatanodeReportType.ALL;
  case LIVE: return DatanodeReportType.LIVE;
  case DEAD: return DatanodeReportType.DEAD;
  case DECOMMISSIONING: return DatanodeReportType.DECOMMISSIONING;
  default: 
    throw new IllegalArgumentException("Unexpected data type report:" + t);
  }
}
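Examples 10 and 11 are the two directions of the same protobuf mapping: one converts the client-side enum to the wire type DatanodeReportTypeProto, the other converts it back, keeping the client-facing enum decoupled from the serialized representation used by the RPC translator in the next two examples.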
 
Example 12
Source Project: hadoop   Source File: ClientNamenodeProtocolTranslatorPB.java    License: Apache License 2.0
@Override
public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
    throws IOException {
  GetDatanodeReportRequestProto req = GetDatanodeReportRequestProto
      .newBuilder()
      .setType(PBHelper.convert(type)).build();
  try {
    return PBHelper.convert(
        rpcProxy.getDatanodeReport(null, req).getDiList());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example 13
Source Project: hadoop   Source File: ClientNamenodeProtocolTranslatorPB.java    License: Apache License 2.0
@Override
public DatanodeStorageReport[] getDatanodeStorageReport(DatanodeReportType type)
    throws IOException {
  final GetDatanodeStorageReportRequestProto req
      = GetDatanodeStorageReportRequestProto.newBuilder()
          .setType(PBHelper.convert(type)).build();
  try {
    return PBHelper.convertDatanodeStorageReports(
        rpcProxy.getDatanodeStorageReport(null, req).getDatanodeStorageReportsList());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example 14
Source Project: hadoop   Source File: TestPendingCorruptDnMessages.java    License: Apache License 2.0
private static String getRegisteredDatanodeUid(
    MiniDFSCluster cluster, int nnIndex) {
  List<DatanodeDescriptor> registeredDatanodes = cluster.getNamesystem(nnIndex)
      .getBlockManager().getDatanodeManager()
      .getDatanodeListForReport(DatanodeReportType.ALL);
  assertEquals(1, registeredDatanodes.size());
  return registeredDatanodes.get(0).getDatanodeUuid();
}
 
Example 15
Source Project: hadoop   Source File: TestDecommissioningStatus.java    License: Apache License 2.0
private String decommissionNode(FSNamesystem namesystem, DFSClient client,
    FileSystem localFileSys, int nodeIndex) throws IOException {
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);

  String nodename = info[nodeIndex].getXferAddr();
  decommissionNode(namesystem, localFileSys, nodename);
  return nodename;
}
 
Example 16
Source Project: hadoop   Source File: TestBalancerWithNodeGroup.java    License: Apache License 2.0
/**
 * Wait until balanced: each datanode gives utilization within 
 * BALANCE_ALLOWED_VARIANCE of average
 * @throws IOException
 * @throws TimeoutException
 */
private void waitForBalancer(long totalUsedSpace, long totalCapacity) 
throws IOException, TimeoutException {
  long timeout = TIMEOUT;
  long failtime = (timeout <= 0L) ? Long.MAX_VALUE
      : System.currentTimeMillis() + timeout;
  final double avgUtilization = ((double)totalUsedSpace) / totalCapacity;
  boolean balanced;
  do {
    DatanodeInfo[] datanodeReport = 
        client.getDatanodeReport(DatanodeReportType.ALL);
    assertEquals(datanodeReport.length, cluster.getDataNodes().size());
    balanced = true;
    for (DatanodeInfo datanode : datanodeReport) {
      double nodeUtilization = ((double)datanode.getDfsUsed())
          / datanode.getCapacity();
      if (Math.abs(avgUtilization - nodeUtilization) >
          BALANCE_ALLOWED_VARIANCE) {
        balanced = false;
        if (System.currentTimeMillis() > failtime) {
          throw new TimeoutException(
              "Rebalancing expected avg utilization to become "
              + avgUtilization + ", but on datanode " + datanode
              + " it remains at " + nodeUtilization
              + " after more than " + TIMEOUT + " msec.");
        }
        try {
          Thread.sleep(100);
        } catch (InterruptedException ignored) {
        }
        break;
      }
    }
  } while (!balanced);
}
 
Example 17
Source Project: hadoop   Source File: TestDecommission.java    License: Apache License 2.0
public void testHostsFile(int numNameNodes) throws IOException,
    InterruptedException {
  int numDatanodes = 1;
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(numNameNodes))
      .numDataNodes(numDatanodes).setupHostsFile(true).build();
  cluster.waitActive();
  
  // Now empty the hosts file and ensure the datanode is disallowed
  // from talking to the namenode, resulting in its shutdown.
  ArrayList<String> list = new ArrayList<String>();
  final String bogusIp = "127.0.30.1";
  list.add(bogusIp);
  writeConfigFile(hostsFile, list);
  
  for (int j = 0; j < numNameNodes; j++) {
    refreshNodes(cluster.getNamesystem(j), conf);
    
    DFSClient client = getDfsClient(cluster.getNameNode(j), conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    for (int i = 0 ; i < 5 && info.length != 0; i++) {
      LOG.info("Waiting for datanode to be marked dead");
      Thread.sleep(HEARTBEAT_INTERVAL * 1000);
      info = client.datanodeReport(DatanodeReportType.LIVE);
    }
    assertEquals("Number of live nodes should be 0", 0, info.length);
    
    // Test that non-live and bogus hostnames are considered "dead".
    // The dead report should have an entry for (1) the DN  that is
    // now considered dead because it is no longer allowed to connect
    // and (2) the bogus entry in the hosts file (these entries are
    // always added last)
    info = client.datanodeReport(DatanodeReportType.DEAD);
    assertEquals("There should be 2 dead nodes", 2, info.length);
    DatanodeID id = cluster.getDataNodes().get(0).getDatanodeId();
    assertEquals(id.getHostName(), info[0].getHostName());
    assertEquals(bogusIp, info[1].getHostName());
  }
}
 