Java Code Examples for org.apache.hadoop.yarn.api.records.NodeReport#getUsed()

The following examples show how to use org.apache.hadoop.yarn.api.records.NodeReport#getUsed(). They are drawn from several open source projects; the originating project and source file are noted above each example.
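Before the project-specific examples, here is a minimal, self-contained sketch of the typical pattern: ask a YarnClient for the NodeReport of each running node, then compare getUsed() against getCapability(). This sketch is illustrative only and is not taken from any of the projects below; it assumes a YARN configuration (yarn-site.xml) is on the classpath, and the NodeUsageSample class name is made up for this page. Note that getUsed() can return null for a node with no usage information, which is why every example below guards against it.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.YarnClient;

public class NodeUsageSample {

	public static void main(String[] args) throws Exception {
		// Assumes yarn-site.xml / core-site.xml are on the classpath.
		YarnClient yarnClient = YarnClient.createYarnClient();
		yarnClient.init(new Configuration());
		yarnClient.start();
		try {
			// Ask the ResourceManager for all RUNNING NodeManagers.
			List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);
			for (NodeReport report : nodes) {
				Resource capability = report.getCapability();
				// getUsed() may be null when the node has no allocated containers yet.
				Resource used = report.getUsed();
				int usedMemory = used != null ? used.getMemory() : 0;
				int usedVCores = used != null ? used.getVirtualCores() : 0;
				System.out.printf("%s: memory %d/%d MB, vcores %d/%d%n",
						report.getNodeId(), usedMemory, capability.getMemory(),
						usedVCores, capability.getVirtualCores());
			}
		} finally {
			yarnClient.stop();
		}
	}
}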
Example 1
Source File: AbstractYarnClusterDescriptor.java    From Flink-CEPplus with Apache License 2.0
private ClusterResourceDescription getCurrentFreeClusterResources(YarnClient yarnClient) throws YarnException, IOException {
	List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);

	int totalFreeMemory = 0;
	int containerLimit = 0;
	int[] nodeManagersFree = new int[nodes.size()];

	for (int i = 0; i < nodes.size(); i++) {
		NodeReport rep = nodes.get(i);
		// Free memory on this node; getUsed() may be null when the node has no allocated containers.
		int free = rep.getCapability().getMemory() - (rep.getUsed() != null ? rep.getUsed().getMemory() : 0);
		nodeManagersFree[i] = free;
		totalFreeMemory += free;
		if (free > containerLimit) {
			containerLimit = free;
		}
	}
	return new ClusterResourceDescription(totalFreeMemory, containerLimit, nodeManagersFree);
}
 
Example 2
Source File: AbstractYarnClusterDescriptor.java    From flink with Apache License 2.0
private ClusterResourceDescription getCurrentFreeClusterResources(YarnClient yarnClient) throws YarnException, IOException {
	List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);

	int totalFreeMemory = 0;
	int containerLimit = 0;
	int[] nodeManagersFree = new int[nodes.size()];

	for (int i = 0; i < nodes.size(); i++) {
		NodeReport rep = nodes.get(i);
		int free = rep.getCapability().getMemory() - (rep.getUsed() != null ? rep.getUsed().getMemory() : 0);
		nodeManagersFree[i] = free;
		totalFreeMemory += free;
		if (free > containerLimit) {
			containerLimit = free;
		}
	}
	return new ClusterResourceDescription(totalFreeMemory, containerLimit, nodeManagersFree);
}
 
Example 3
Source File: YarnClusterDescriptor.java    From flink with Apache License 2.0
private ClusterResourceDescription getCurrentFreeClusterResources(YarnClient yarnClient) throws YarnException, IOException {
	List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);

	int totalFreeMemory = 0;
	int containerLimit = 0;
	int[] nodeManagersFree = new int[nodes.size()];

	for (int i = 0; i < nodes.size(); i++) {
		NodeReport rep = nodes.get(i);
		int free = rep.getCapability().getMemory() - (rep.getUsed() != null ? rep.getUsed().getMemory() : 0);
		nodeManagersFree[i] = free;
		totalFreeMemory += free;
		if (free > containerLimit) {
			containerLimit = free;
		}
	}
	return new ClusterResourceDescription(totalFreeMemory, containerLimit, nodeManagersFree);
}
 
Example 4
Source File: ClusterProfilingHelper.java    From jumbune with GNU Lesser General Public License v3.0
/**
 * Gets the number of vCores currently available in the cluster.
 *
 * @param rmCommunicator the ResourceManager communicator used to fetch node reports
 * @return the available vCores in the cluster
 * @throws IOException Signals that an I/O exception has occurred.
 */
private int getAvailableVCoresInCluster(RMCommunicator rmCommunicator) throws IOException {
	List<NodeReport> nodeReports = null;
	try {
		nodeReports = rmCommunicator.getNodeReports();
	} catch (YarnException e) {
		LOGGER.error(JumbuneRuntimeException.throwYarnException(e.getStackTrace()));
	}
	if (nodeReports == null) {
		// Fetching node reports failed; report no available vCores instead of dereferencing null.
		return 0;
	}

	Set<String> hostname = new HashSet<String>();

	int totalVCores = 0;
	int usedVCores = 0;
	for (NodeReport report : nodeReports) {
		// Count each RUNNING node only once, keyed by its HTTP address.
		if (!hostname.contains(report.getHttpAddress()) && report.getNodeState().equals(NodeState.RUNNING)) {
			hostname.add(report.getHttpAddress());
			totalVCores += report.getCapability().getVirtualCores();
			// getUsed() may be null when the node has no allocated containers.
			if (report.getUsed() != null) {
				usedVCores += report.getUsed().getVirtualCores();
			}
		}
	}
	int availableVCores = totalVCores - usedVCores;
	return availableVCores;
}
 
Example 5
Source File: ClusterProfilingHelper.java    From jumbune with GNU Lesser General Public License v3.0
/**
 * Gets the total memory (in MB) currently available in the cluster.
 *
 * @param rmCommunicator the ResourceManager communicator used to fetch node reports
 * @return the total available memory in the cluster
 * @throws YarnException the yarn exception
 * @throws IOException Signals that an I/O exception has occurred.
 */
private int getTotalMemoryAvailableInCluster(RMCommunicator rmCommunicator) throws YarnException, IOException {
	List<NodeReport> reports = rmCommunicator.getNodeReports();
	int availableMemory = 0;

	Set<String> hostname = new HashSet<String>();

	for (NodeReport nodeReport : reports) {
		// Count each RUNNING node only once, keyed by its HTTP address.
		if (!hostname.contains(nodeReport.getHttpAddress()) && nodeReport.getNodeState().equals(NodeState.RUNNING)) {
			hostname.add(nodeReport.getHttpAddress());
			// getUsed() may be null when the node has no allocated containers.
			availableMemory += nodeReport.getCapability().getMemory()
					- (nodeReport.getUsed() == null ? 0 : nodeReport.getUsed().getMemory());
		}
	}
	return availableMemory;
}
 
Example 6
Source File: PlacementPolicyTestRun.java    From twill with Apache License 2.0
/**
 * Helper function to verify DISTRIBUTED placement policies.
 * Returns the number of NodeManagers on which runnables got provisioned.
 * @return number of NodeManagers on which runnables got provisioned.
 */
private int getProvisionedNodeManagerCount() throws Exception {
  int provisionedNodeManagerCount = 0;
  for (NodeReport nodeReport : getNodeReports()) {
    Resource used = nodeReport.getUsed();
    if (used != null && used.getMemory() > 0) {
      provisionedNodeManagerCount++;
    }
  }
  return provisionedNodeManagerCount;
}
 
Example 7
Source File: PlacementPolicyTestRun.java    From twill with Apache License 2.0
/**
 * Verify the cluster configuration (number and capability of node managers) required for the tests.
 */
@BeforeClass
public static void verifyClusterCapability() throws InterruptedException {
  // Ignore verifications if running against older Hadoop versions that do not support blacklists.
  Assume.assumeTrue(YarnUtils.getHadoopVersion().equals(YarnUtils.HadoopVersions.HADOOP_22));

  // All runnables in this test class use the same resource specification for the sake of convenience.
  resource = ResourceSpecification.Builder.with()
    .setVirtualCores(RUNNABLE_CORES)
    .setMemory(RUNNABLE_MEMORY, ResourceSpecification.SizeUnit.MEGA)
    .build();
  twoInstancesResource = ResourceSpecification.Builder.with()
    .setVirtualCores(RUNNABLE_CORES)
    .setMemory(RUNNABLE_MEMORY, ResourceSpecification.SizeUnit.MEGA)
    .setInstances(2)
    .build();

  // The tests need exactly three NodeManagers in the cluster.
  int trials = 0;
  while (trials++ < 20) {
    try {
      nodeReports = TWILL_TESTER.getNodeReports();
      if (nodeReports != null && nodeReports.size() == 3) {
        break;
      }
    } catch (Exception e) {
      LOG.error("Failed to get node reports", e);
    }
    LOG.warn("NodeManagers != 3. {}", nodeReports);
    TimeUnit.SECONDS.sleep(1);
  }

  // All NodeManagers should have enough capacity available to accommodate at least two runnables.
  for (NodeReport nodeReport : nodeReports) {
    Resource capability = nodeReport.getCapability();
    Resource used = nodeReport.getUsed();
    Assert.assertNotNull(capability);
    if (used != null) {
      Assert.assertTrue(2 * resource.getMemorySize() < capability.getMemory() - used.getMemory());
    } else {
      Assert.assertTrue(2 * resource.getMemorySize() < capability.getMemory());
    }
  }
}