org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport Java Examples

The following examples show how to use org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example #1
Source File: TestDatanodeReport.java    From big-c with Apache License 2.0 6 votes vote down vote up
static void assertReports(int numDatanodes, DatanodeReportType type,
    DFSClient client, List<DataNode> datanodes, String bpid) throws IOException {
  // Both report RPCs must agree on the number of datanodes.
  final DatanodeInfo[] infos = client.datanodeReport(type);
  assertEquals(numDatanodes, infos.length);
  final DatanodeStorageReport[] reports = client.getDatanodeStorageReport(type);
  assertEquals(numDatanodes, reports.length);

  for (int idx = 0; idx < infos.length; idx++) {
    final DatanodeInfo info = infos[idx];
    // The storage report at each index must describe the same datanode.
    assertEquals(info, reports[idx].getDatanodeInfo());

    final DataNode dn = findDatanode(info.getDatanodeUuid(), datanodes);
    if (bpid != null) {
      // Compare storages pairwise after sorting both sides with the same comparator.
      final StorageReport[] actual = reports[idx].getStorageReports();
      final StorageReport[] wanted = dn.getFSDataset().getStorageReports(bpid);
      Arrays.sort(actual, CMP);
      Arrays.sort(wanted, CMP);

      assertEquals(wanted.length, actual.length);
      for (int k = 0; k < wanted.length; k++) {
        assertEquals(wanted[k].getStorage().getStorageID(),
                     actual[k].getStorage().getStorageID());
      }
    }
  }
}
 
Example #2
Source File: TestDatanodeReport.java    From hadoop with Apache License 2.0 6 votes vote down vote up
static void assertReports(int numDatanodes, DatanodeReportType type,
    DFSClient client, List<DataNode> datanodes, String bpid) throws IOException {
  // Both report RPCs must agree on the number of datanodes.
  final DatanodeInfo[] infos = client.datanodeReport(type);
  assertEquals(numDatanodes, infos.length);
  final DatanodeStorageReport[] reports = client.getDatanodeStorageReport(type);
  assertEquals(numDatanodes, reports.length);

  for (int idx = 0; idx < infos.length; idx++) {
    final DatanodeInfo info = infos[idx];
    // The storage report at each index must describe the same datanode.
    assertEquals(info, reports[idx].getDatanodeInfo());

    final DataNode dn = findDatanode(info.getDatanodeUuid(), datanodes);
    if (bpid != null) {
      // Compare storages pairwise after sorting both sides with the same comparator.
      final StorageReport[] actual = reports[idx].getStorageReports();
      final StorageReport[] wanted = dn.getFSDataset().getStorageReports(bpid);
      Arrays.sort(actual, CMP);
      Arrays.sort(wanted, CMP);

      assertEquals(wanted.length, actual.length);
      for (int k = 0; k < wanted.length; k++) {
        assertEquals(wanted[k].getStorage().getStorageID(),
                     actual[k].getStorage().getStorageID());
      }
    }
  }
}
 
Example #3
Source File: DistributedFileSystemMetadata.java    From hdfs-metadata with GNU General Public License v3.0 6 votes vote down vote up
/**
 * Returns a map from datanode host name to its number of storage
 * directories (disks), as reported by the NameNode.
 *
 * <p>Requires privileges to call {@code getDatanodeStorageReport}; on
 * failure the problem is logged and an empty map is returned.
 */
public HashMap<String, Integer> getNumberOfDataDirsPerHost(){
	HashMap<String, Integer> disksPerHost = new HashMap<>();
	
	try {
		@SuppressWarnings("resource")
		DFSClient dfsClient = new DFSClient(NameNode.getAddress(getConf()), getConf());
		
		DatanodeStorageReport[] datanodeStorageReports = dfsClient.getDatanodeStorageReport(DatanodeReportType.ALL);
		
		for (DatanodeStorageReport datanodeStorageReport : datanodeStorageReports) {
			// One StorageReport per configured data directory on the node.
			disksPerHost.put(
					datanodeStorageReport.getDatanodeInfo().getHostName(),
					datanodeStorageReport.getStorageReports().length);
			
		}
	} catch (IOException e) {
		// Fixed message typos and attach the exception so the cause is not lost.
		LOG.warn("number of data directories (disks) per node could not be collected (requires higher privileges).", e);
	}
	
	return disksPerHost;
}
 
Example #4
Source File: Dispatcher.java    From big-c with Apache License 2.0 6 votes vote down vote up
/** Get live datanode storage reports and then build the network topology. */
public List<DatanodeStorageReport> init() throws IOException {
  final DatanodeStorageReport[] live = nnc.getLiveDatanodeStorageReport();
  final List<DatanodeStorageReport> kept = new ArrayList<DatanodeStorageReport>();
  // Shuffle to avoid ordering bias, skip ignored nodes, and register the rest
  // in the topology (over/above/below/under-utilized classification follows).
  for (DatanodeStorageReport report : DFSUtil.shuffle(live)) {
    final DatanodeInfo dn = report.getDatanodeInfo();
    if (!shouldIgnore(dn)) {
      kept.add(report);
      cluster.add(dn);
    }
  }
  return kept;
}
 
Example #5
Source File: Dispatcher.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/** Get live datanode storage reports and then build the network topology. */
public List<DatanodeStorageReport> init() throws IOException {
  final DatanodeStorageReport[] live = nnc.getLiveDatanodeStorageReport();
  final List<DatanodeStorageReport> kept = new ArrayList<DatanodeStorageReport>();
  // Shuffle to avoid ordering bias, skip ignored nodes, and register the rest
  // in the topology (over/above/below/under-utilized classification follows).
  for (DatanodeStorageReport report : DFSUtil.shuffle(live)) {
    final DatanodeInfo dn = report.getDatanodeInfo();
    if (!shouldIgnore(dn)) {
      kept.add(report);
      cluster.add(dn);
    }
  }
  return kept;
}
 
Example #6
Source File: PBHelper.java    From big-c with Apache License 2.0 5 votes vote down vote up
/** Converts a single {@link DatanodeStorageReport} into its protobuf form. */
public static DatanodeStorageReportProto convertDatanodeStorageReport(
    DatanodeStorageReport report) {
  final DatanodeStorageReportProto.Builder builder =
      DatanodeStorageReportProto.newBuilder();
  builder.setDatanodeInfo(convert(report.getDatanodeInfo()));
  builder.addAllStorageReports(convertStorageReports(report.getStorageReports()));
  return builder.build();
}
 
Example #7
Source File: BalancingPolicy.java    From big-c with Apache License 2.0 5 votes vote down vote up
@Override
Double getUtilization(DatanodeStorageReport r, final StorageType t) {
  // Percentage of block-pool-used bytes over capacity for storages of type t;
  // null when the datanode has no capacity of that type.
  long totalCapacity = 0L;
  long totalBpUsed = 0L;
  for (StorageReport report : r.getStorageReports()) {
    if (report.getStorage().getStorageType() != t) {
      continue;
    }
    totalCapacity += report.getCapacity();
    totalBpUsed += report.getBlockPoolUsed();
  }
  if (totalCapacity == 0L) {
    return null;
  }
  return totalBpUsed * 100.0 / totalCapacity;
}
 
Example #8
Source File: BalancingPolicy.java    From big-c with Apache License 2.0 5 votes vote down vote up
@Override
void accumulateSpaces(DatanodeStorageReport r) {
  // Fold each storage's capacity and block-pool usage into the per-type totals.
  for (StorageReport report : r.getStorageReports()) {
    final StorageType type = report.getStorage().getStorageType();
    totalCapacities.add(type, report.getCapacity());
    totalUsedSpaces.add(type, report.getBlockPoolUsed());
  }
}
 
Example #9
Source File: PBHelper.java    From big-c with Apache License 2.0 5 votes vote down vote up
/** Converts an array of storage reports into a list of protobuf messages. */
public static List<DatanodeStorageReportProto> convertDatanodeStorageReports(
    DatanodeStorageReport[] reports) {
  // Pre-size the list so no resizing occurs while converting.
  final List<DatanodeStorageReportProto> protos =
      new ArrayList<DatanodeStorageReportProto>(reports.length);
  for (DatanodeStorageReport report : reports) {
    protos.add(convertDatanodeStorageReport(report));
  }
  return protos;
}
 
Example #10
Source File: BalancingPolicy.java    From big-c with Apache License 2.0 5 votes vote down vote up
@Override
Double getUtilization(DatanodeStorageReport r, final StorageType t) {
  // Percentage of dfs-used bytes over capacity for storages of type t;
  // null when the datanode has no capacity of that type.
  long totalCapacity = 0L;
  long totalDfsUsed = 0L;
  for (StorageReport report : r.getStorageReports()) {
    if (report.getStorage().getStorageType() != t) {
      continue;
    }
    totalCapacity += report.getCapacity();
    totalDfsUsed += report.getDfsUsed();
  }
  if (totalCapacity == 0L) {
    return null;
  }
  return totalDfsUsed * 100.0 / totalCapacity;
}
 
Example #11
Source File: BalancingPolicy.java    From big-c with Apache License 2.0 5 votes vote down vote up
@Override
void accumulateSpaces(DatanodeStorageReport r) {
  // Fold each storage's capacity and dfs usage into the per-type totals.
  for (StorageReport report : r.getStorageReports()) {
    final StorageType type = report.getStorage().getStorageType();
    totalCapacities.add(type, report.getCapacity());
    totalUsedSpaces.add(type, report.getDfsUsed());
  }
}
 
Example #12
Source File: PBHelper.java    From big-c with Apache License 2.0 5 votes vote down vote up
/** Converts a list of protobuf messages back into an array of storage reports. */
public static DatanodeStorageReport[] convertDatanodeStorageReports(
    List<DatanodeStorageReportProto> protos) {
  final int count = protos.size();
  final DatanodeStorageReport[] reports = new DatanodeStorageReport[count];
  for (int i = 0; i < count; i++) {
    reports[i] = convertDatanodeStorageReport(protos.get(i));
  }
  return reports;
}
 
Example #13
Source File: Balancer.java    From big-c with Apache License 2.0 5 votes vote down vote up
/** Sums the remaining bytes over all storages of the requested type. */
private static long getRemaining(DatanodeStorageReport report, StorageType t) {
  long total = 0L;
  for (StorageReport s : report.getStorageReports()) {
    if (s.getStorage().getStorageType() == t) {
      total += s.getRemaining();
    }
  }
  return total;
}
 
Example #14
Source File: Balancer.java    From big-c with Apache License 2.0 5 votes vote down vote up
/** Sums the capacity over all storages of the requested type. */
private static long getCapacity(DatanodeStorageReport report, StorageType t) {
  long total = 0L;
  for (StorageReport s : report.getStorageReports()) {
    if (s.getStorage().getStorageType() == t) {
      total += s.getCapacity();
    }
  }
  return total;
}
 
Example #15
Source File: NameNodeRpcServer.java    From big-c with Apache License 2.0 5 votes vote down vote up
@Override // ClientProtocol
public DatanodeStorageReport[] getDatanodeStorageReport(
    DatanodeReportType type) throws IOException {
  // Reject the call until the NameNode has finished starting up,
  // then delegate directly to the namesystem.
  checkNNStartup();
  return namesystem.getDatanodeStorageReport(type);
}
 
Example #16
Source File: Mover.java    From big-c with Apache License 2.0 5 votes vote down vote up
/** Largest remaining space among storages of the requested type (0 if none). */
private static long getMaxRemaining(DatanodeStorageReport report, StorageType t) {
  long best = 0L;
  for (StorageReport s : report.getStorageReports()) {
    if (s.getStorage().getStorageType() == t) {
      best = Math.max(best, s.getRemaining());
    }
  }
  return best;
}
 
Example #17
Source File: Mover.java    From big-c with Apache License 2.0 5 votes vote down vote up
void init() throws IOException {
  initStoragePolicies();
  // For every live datanode, register one source per movable storage type,
  // and a target only when that type still has free space on the node.
  for (DatanodeStorageReport report : dispatcher.init()) {
    final DDatanode dn = dispatcher.newDatanode(report.getDatanodeInfo());
    for (StorageType type : StorageType.getMovableTypes()) {
      final Source src = dn.addSource(type, Long.MAX_VALUE, dispatcher);
      final long free = getMaxRemaining(report, type);
      final StorageGroup tgt = (free > 0L) ? dn.addTarget(type, free) : null;
      storages.add(src, tgt);
    }
  }
}
 
Example #18
Source File: DFSClient.java    From big-c with Apache License 2.0 5 votes vote down vote up
/** Fetches per-datanode storage reports from the NameNode, traced. */
public DatanodeStorageReport[] getDatanodeStorageReport(
    DatanodeReportType type) throws IOException {
  checkOpen();
  // Wrap the RPC in a trace span; the scope must be closed on every path.
  final TraceScope scope =
      Trace.startSpan("datanodeStorageReport", traceSampler);
  try {
    return namenode.getDatanodeStorageReport(type);
  } finally {
    scope.close();
  }
}
 
Example #19
Source File: ClientNamenodeProtocolTranslatorPB.java    From big-c with Apache License 2.0 5 votes vote down vote up
@Override
public DatanodeStorageReport[] getDatanodeStorageReport(DatanodeReportType type)
    throws IOException {
  // Build the protobuf request, issue the RPC, and unwrap the response.
  final GetDatanodeStorageReportRequestProto request =
      GetDatanodeStorageReportRequestProto.newBuilder()
          .setType(PBHelper.convert(type))
          .build();
  try {
    final List<DatanodeStorageReportProto> protos =
        rpcProxy.getDatanodeStorageReport(null, request)
            .getDatanodeStorageReportsList();
    return PBHelper.convertDatanodeStorageReports(protos);
  } catch (ServiceException e) {
    // Translate the transport-level wrapper back into the remote IOException.
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example #20
Source File: DFSClient.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/** Fetches per-datanode storage reports from the NameNode, traced. */
public DatanodeStorageReport[] getDatanodeStorageReport(
    DatanodeReportType type) throws IOException {
  checkOpen();
  // Wrap the RPC in a trace span; the scope must be closed on every path.
  final TraceScope scope =
      Trace.startSpan("datanodeStorageReport", traceSampler);
  try {
    return namenode.getDatanodeStorageReport(type);
  } finally {
    scope.close();
  }
}
 
Example #21
Source File: PBHelper.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/** Converts a list of protobuf messages back into an array of storage reports. */
public static DatanodeStorageReport[] convertDatanodeStorageReports(
    List<DatanodeStorageReportProto> protos) {
  final int count = protos.size();
  final DatanodeStorageReport[] reports = new DatanodeStorageReport[count];
  for (int i = 0; i < count; i++) {
    reports[i] = convertDatanodeStorageReport(protos.get(i));
  }
  return reports;
}
 
Example #22
Source File: ClientNamenodeProtocolTranslatorPB.java    From hadoop with Apache License 2.0 5 votes vote down vote up
@Override
public DatanodeStorageReport[] getDatanodeStorageReport(DatanodeReportType type)
    throws IOException {
  // Build the protobuf request, issue the RPC, and unwrap the response.
  final GetDatanodeStorageReportRequestProto request =
      GetDatanodeStorageReportRequestProto.newBuilder()
          .setType(PBHelper.convert(type))
          .build();
  try {
    final List<DatanodeStorageReportProto> protos =
        rpcProxy.getDatanodeStorageReport(null, request)
            .getDatanodeStorageReportsList();
    return PBHelper.convertDatanodeStorageReports(protos);
  } catch (ServiceException e) {
    // Translate the transport-level wrapper back into the remote IOException.
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example #23
Source File: Mover.java    From hadoop with Apache License 2.0 5 votes vote down vote up
void init() throws IOException {
  initStoragePolicies();
  // For every live datanode, register one source per movable storage type,
  // and a target only when that type still has free space on the node.
  for (DatanodeStorageReport report : dispatcher.init()) {
    final DDatanode dn = dispatcher.newDatanode(report.getDatanodeInfo());
    for (StorageType type : StorageType.getMovableTypes()) {
      final Source src = dn.addSource(type, Long.MAX_VALUE, dispatcher);
      final long free = getMaxRemaining(report, type);
      final StorageGroup tgt = (free > 0L) ? dn.addTarget(type, free) : null;
      storages.add(src, tgt);
    }
  }
}
 
Example #24
Source File: Mover.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/** Largest remaining space among storages of the requested type (0 if none). */
private static long getMaxRemaining(DatanodeStorageReport report, StorageType t) {
  long best = 0L;
  for (StorageReport s : report.getStorageReports()) {
    if (s.getStorage().getStorageType() == t) {
      best = Math.max(best, s.getRemaining());
    }
  }
  return best;
}
 
Example #25
Source File: PBHelper.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/** Converts an array of storage reports into a list of protobuf messages. */
public static List<DatanodeStorageReportProto> convertDatanodeStorageReports(
    DatanodeStorageReport[] reports) {
  // Pre-size the list so no resizing occurs while converting.
  final List<DatanodeStorageReportProto> protos =
      new ArrayList<DatanodeStorageReportProto>(reports.length);
  for (DatanodeStorageReport report : reports) {
    protos.add(convertDatanodeStorageReport(report));
  }
  return protos;
}
 
Example #26
Source File: PBHelper.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/** Converts a single {@link DatanodeStorageReport} into its protobuf form. */
public static DatanodeStorageReportProto convertDatanodeStorageReport(
    DatanodeStorageReport report) {
  final DatanodeStorageReportProto.Builder builder =
      DatanodeStorageReportProto.newBuilder();
  builder.setDatanodeInfo(convert(report.getDatanodeInfo()));
  builder.addAllStorageReports(convertStorageReports(report.getStorageReports()));
  return builder.build();
}
 
Example #27
Source File: BalancingPolicy.java    From hadoop with Apache License 2.0 5 votes vote down vote up
@Override
Double getUtilization(DatanodeStorageReport r, final StorageType t) {
  // Percentage of block-pool-used bytes over capacity for storages of type t;
  // null when the datanode has no capacity of that type.
  long totalCapacity = 0L;
  long totalBpUsed = 0L;
  for (StorageReport report : r.getStorageReports()) {
    if (report.getStorage().getStorageType() != t) {
      continue;
    }
    totalCapacity += report.getCapacity();
    totalBpUsed += report.getBlockPoolUsed();
  }
  if (totalCapacity == 0L) {
    return null;
  }
  return totalBpUsed * 100.0 / totalCapacity;
}
 
Example #28
Source File: BalancingPolicy.java    From hadoop with Apache License 2.0 5 votes vote down vote up
@Override
void accumulateSpaces(DatanodeStorageReport r) {
  // Fold each storage's capacity and block-pool usage into the per-type totals.
  for (StorageReport report : r.getStorageReports()) {
    final StorageType type = report.getStorage().getStorageType();
    totalCapacities.add(type, report.getCapacity());
    totalUsedSpaces.add(type, report.getBlockPoolUsed());
  }
}
 
Example #29
Source File: BalancingPolicy.java    From hadoop with Apache License 2.0 5 votes vote down vote up
@Override
Double getUtilization(DatanodeStorageReport r, final StorageType t) {
  // Percentage of dfs-used bytes over capacity for storages of type t;
  // null when the datanode has no capacity of that type.
  long totalCapacity = 0L;
  long totalDfsUsed = 0L;
  for (StorageReport report : r.getStorageReports()) {
    if (report.getStorage().getStorageType() != t) {
      continue;
    }
    totalCapacity += report.getCapacity();
    totalDfsUsed += report.getDfsUsed();
  }
  if (totalCapacity == 0L) {
    return null;
  }
  return totalDfsUsed * 100.0 / totalCapacity;
}
 
Example #30
Source File: BalancingPolicy.java    From hadoop with Apache License 2.0 5 votes vote down vote up
@Override
void accumulateSpaces(DatanodeStorageReport r) {
  // Fold each storage's capacity and dfs usage into the per-type totals.
  for (StorageReport report : r.getStorageReports()) {
    final StorageType type = report.getStorage().getStorageType();
    totalCapacities.add(type, report.getCapacity());
    totalUsedSpaces.add(type, report.getDfsUsed());
  }
}