Java Code Examples for org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse#getAllocatedContainers()

The following examples show how to use org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse#getAllocatedContainers(). The examples are taken from open source projects; the source file, project, and license are noted above each one.
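Before the project-specific examples, a minimal sketch of the common pattern may help: an ApplicationMaster heartbeats the ResourceManager through AMRMClient#allocate() and drains the newly granted containers from each AllocateResponse via getAllocatedContainers(). This sketch is an assumption-laden illustration, not taken from any example below; it compiles against the YARN client API but only does useful work when run inside a launched AM container, and the host, port, tracking URL, resource size, and sleep interval are placeholders.

import java.util.List;

import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class AllocateLoopSketch {
  public static void main(String[] args) throws Exception {
    YarnConfiguration conf = new YarnConfiguration();
    AMRMClient<ContainerRequest> amrmClient = AMRMClient.createAMRMClient();
    amrmClient.init(conf);
    amrmClient.start();

    // Register the AM with the RM; host, port, and tracking URL are placeholders.
    amrmClient.registerApplicationMaster("localhost", 0, "");

    // Ask for one 1 GB / 1 vcore container anywhere in the cluster.
    Resource capability = Resource.newInstance(1024, 1);
    amrmClient.addContainerRequest(
        new ContainerRequest(capability, null, null, Priority.newInstance(0)));

    int received = 0;
    while (received < 1) {
      // Each allocate() call doubles as a heartbeat; the response only lists
      // containers granted since the previous call.
      AllocateResponse response = amrmClient.allocate(0.0f);
      List<Container> allocated = response.getAllocatedContainers();
      received += allocated.size();
      for (Container container : allocated) {
        System.out.println("Allocated " + container.getId()
            + " on " + container.getNodeId());
      }
      Thread.sleep(1000);
    }

    amrmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, "", "");
    amrmClient.stop();
  }
}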
Example 1
Source File: Hadoop21YarnAMClient.java    From twill with Apache License 2.0
@Override
protected AllocateResult doAllocate(float progress) throws Exception {
  AllocateResponse allocateResponse = amrmClient.allocate(progress);
  List<RunnableProcessLauncher> launchers
    = Lists.newArrayListWithCapacity(allocateResponse.getAllocatedContainers().size());

  for (Container container : allocateResponse.getAllocatedContainers()) {
    launchers.add(new RunnableProcessLauncher(new Hadoop21YarnContainerInfo(container), nmClient));
  }

  List<YarnContainerStatus> completed = ImmutableList.copyOf(
    Iterables.transform(allocateResponse.getCompletedContainersStatuses(), STATUS_TRANSFORM));

  return new AllocateResult(launchers, completed);
}
 
Example 2
Source File: TestFifoScheduler.java    From hadoop with Apache License 2.0
@Test
public void test() throws Exception {
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  MockRM rm = new MockRM(conf);
  rm.start();
  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
  MockNM nm2 = rm.registerNode("127.0.0.2:5678", 4 * GB);

  RMApp app1 = rm.submitApp(2048);
  // kick the scheduling, 2 GB given to AM1, remaining 4GB on nm1
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
  am1.registerAppAttempt();
  SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(
      nm1.getNodeId());
  Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());

  RMApp app2 = rm.submitApp(2048);
  // kick the scheduling, 2GB given to AM, remaining 2 GB on nm2
  nm2.nodeHeartbeat(true);
  RMAppAttempt attempt2 = app2.getCurrentAppAttempt();
  MockAM am2 = rm.sendAMLaunched(attempt2.getAppAttemptId());
  am2.registerAppAttempt();
  SchedulerNodeReport report_nm2 = rm.getResourceScheduler().getNodeReport(
      nm2.getNodeId());
  Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemory());

  // add request for containers
  am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, GB, 1, 1);
  AllocateResponse alloc1Response = am1.schedule(); // send the request
  // add request for containers
  am2.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 3 * GB, 0, 1);
  AllocateResponse alloc2Response = am2.schedule(); // send the request

  // kick the scheduler, 1 GB and 3 GB given to AM1 and AM2, remaining 0
  nm1.nodeHeartbeat(true);
  while (alloc1Response.getAllocatedContainers().size() < 1) {
    LOG.info("Waiting for containers to be created for app 1...");
    Thread.sleep(1000);
    alloc1Response = am1.schedule();
  }
  while (alloc2Response.getAllocatedContainers().size() < 1) {
    LOG.info("Waiting for containers to be created for app 2...");
    Thread.sleep(1000);
    alloc2Response = am2.schedule();
  }
  // kick the scheduler, nothing given remaining 2 GB.
  nm2.nodeHeartbeat(true);

  List<Container> allocated1 = alloc1Response.getAllocatedContainers();
  Assert.assertEquals(1, allocated1.size());
  Assert.assertEquals(1 * GB, allocated1.get(0).getResource().getMemory());
  Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());

  List<Container> allocated2 = alloc2Response.getAllocatedContainers();
  Assert.assertEquals(1, allocated2.size());
  Assert.assertEquals(3 * GB, allocated2.get(0).getResource().getMemory());
  Assert.assertEquals(nm1.getNodeId(), allocated2.get(0).getNodeId());
  
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  report_nm2 = rm.getResourceScheduler().getNodeReport(nm2.getNodeId());
  Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory());
  Assert.assertEquals(2 * GB, report_nm2.getAvailableResource().getMemory());

  Assert.assertEquals(6 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemory());

  Container c1 = allocated1.get(0);
  Assert.assertEquals(GB, c1.getResource().getMemory());
  ContainerStatus containerStatus = BuilderUtils.newContainerStatus(
      c1.getId(), ContainerState.COMPLETE, "", 0);
  nm1.containerStatus(containerStatus);
  int waitCount = 0;
  while (attempt1.getJustFinishedContainers().size() < 1
      && waitCount++ != 20) {
    LOG.info("Waiting for containers to be finished for app 1... Tried "
        + waitCount + " times already..");
    Thread.sleep(1000);
  }
  Assert.assertEquals(1, attempt1.getJustFinishedContainers().size());
  Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses().size());
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  Assert.assertEquals(5 * GB, report_nm1.getUsedResource().getMemory());

  rm.stop();
}
 
Example 3
Source File: TestFifoScheduler.java    From hadoop with Apache License 2.0
@Test
public void testResourceOverCommit() throws Exception {
  MockRM rm = new MockRM(conf);
  rm.start();
  
  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 4 * GB);
  
  RMApp app1 = rm.submitApp(2048);
  // kick the scheduling, 2 GB given to AM1, remaining 2GB on nm1
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
  am1.registerAppAttempt();
  SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(
      nm1.getNodeId());
  // check node report, 2 GB used and 2 GB available
  Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemory());

  // add request for containers
  am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 2 * GB, 1, 1);
  AllocateResponse alloc1Response = am1.schedule(); // send the request

  // kick the scheduler, 2 GB given to AM1, resource remaining 0
  nm1.nodeHeartbeat(true);
  while (alloc1Response.getAllocatedContainers().size() < 1) {
    LOG.info("Waiting for containers to be created for app 1...");
    Thread.sleep(1000);
    alloc1Response = am1.schedule();
  }

  List<Container> allocated1 = alloc1Response.getAllocatedContainers();
  Assert.assertEquals(1, allocated1.size());
  Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemory());
  Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());
  
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  // check node report, 4 GB used and 0 GB available
  Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory());
  Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());

  // check container is assigned with 2 GB.
  Container c1 = allocated1.get(0);
  Assert.assertEquals(2 * GB, c1.getResource().getMemory());
  
  // update node resource to 2 GB, so resource is over-consumed.
  Map<NodeId, ResourceOption> nodeResourceMap = 
      new HashMap<NodeId, ResourceOption>();
  nodeResourceMap.put(nm1.getNodeId(), 
      ResourceOption.newInstance(Resource.newInstance(2 * GB, 1, 1), -1));
  UpdateNodeResourceRequest request = 
      UpdateNodeResourceRequest.newInstance(nodeResourceMap);
  AdminService as = rm.adminService;
  as.updateNodeResource(request);
  
  // Now, the used resource is still 4 GB, and available resource is minus value.
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemory());
  
  // Check container can complete successfully in case of resource over-commitment.
  ContainerStatus containerStatus = BuilderUtils.newContainerStatus(
      c1.getId(), ContainerState.COMPLETE, "", 0);
  nm1.containerStatus(containerStatus);
  int waitCount = 0;
  while (attempt1.getJustFinishedContainers().size() < 1
      && waitCount++ != 20) {
    LOG.info("Waiting for containers to be finished for app 1... Tried "
        + waitCount + " times already..");
    Thread.sleep(100);
  }
  Assert.assertEquals(1, attempt1.getJustFinishedContainers().size());
  Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses().size());
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
  // As the container returned 2 GB, the available resource becomes 0 again.
  Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory());
  rm.stop();
}
 
Example 4
Source File: TestApplicationCleanup.java    From hadoop with Apache License 2.0
@SuppressWarnings("resource")
@Test(timeout = 60000)
public void testAppCleanupWhenRMRestartedBeforeAppFinished() throws Exception {
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);

  // start RM
  MockRM rm1 = new MockRM(conf, memStore);
  rm1.start();
  MockNM nm1 =
      new MockNM("127.0.0.1:1234", 1024, rm1.getResourceTrackerService());
  nm1.registerNode();
  MockNM nm2 =
      new MockNM("127.0.0.1:5678", 1024, rm1.getResourceTrackerService());
  nm2.registerNode();

  // create app and launch the AM
  RMApp app0 = rm1.submitApp(200);
  MockAM am0 = launchAM(app0, rm1, nm1);

  // alloc another container on nm2
  AllocateResponse allocResponse =
      am0.allocate(Arrays.asList(ResourceRequest.newInstance(
          Priority.newInstance(1), "*", Resource.newInstance(1024, 0, 0), 1)),
          null);
  while (null == allocResponse.getAllocatedContainers()
      || allocResponse.getAllocatedContainers().isEmpty()) {
    nm2.nodeHeartbeat(true);
    allocResponse = am0.allocate(null, null);
    Thread.sleep(1000);
  }

  // start new RM
  MockRM rm2 = new MockRM(conf, memStore);
  rm2.start();

  // nm1/nm2 register to rm2, and do a heartbeat
  nm1.setResourceTrackerService(rm2.getResourceTrackerService());
  nm1.registerNode(Arrays.asList(NMContainerStatus.newInstance(
    ContainerId.newContainerId(am0.getApplicationAttemptId(), 1),
    ContainerState.COMPLETE, Resource.newInstance(1024, 1, 1), "", 0,
    Priority.newInstance(0), 1234)), Arrays.asList(app0.getApplicationId()));
  nm2.setResourceTrackerService(rm2.getResourceTrackerService());
  nm2.registerNode(Arrays.asList(app0.getApplicationId()));

  // assert app state has been saved.
  rm2.waitForState(app0.getApplicationId(), RMAppState.FAILED);

  // wait for application cleanup message received on NM1
  waitForAppCleanupMessageRecved(nm1, app0.getApplicationId());

  // wait for application cleanup message received on NM2
  waitForAppCleanupMessageRecved(nm2, app0.getApplicationId());

  rm1.stop();
  rm2.stop();
}
 
Example 5
Source File: TestCapacityScheduler.java    From hadoop with Apache License 2.0
@Test
public void testResourceOverCommit() throws Exception {
  Configuration conf = new Configuration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
      ResourceScheduler.class);
  MockRM rm = new MockRM(conf);
  rm.start();
  
  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 4 * GB);
  RMApp app1 = rm.submitApp(2048);
  // kick the scheduling, 2 GB given to AM1, remaining 2GB on nm1
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
  am1.registerAppAttempt();
  SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(
      nm1.getNodeId());
  // check node report, 2 GB used and 2 GB available
  Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemory());

  // add request for containers
  am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 2 * GB, 1, 1);
  AllocateResponse alloc1Response = am1.schedule(); // send the request

  // kick the scheduler, 2 GB given to AM1, resource remaining 0
  nm1.nodeHeartbeat(true);
  while (alloc1Response.getAllocatedContainers().size() < 1) {
    LOG.info("Waiting for containers to be created for app 1...");
    Thread.sleep(100);
    alloc1Response = am1.schedule();
  }

  List<Container> allocated1 = alloc1Response.getAllocatedContainers();
  Assert.assertEquals(1, allocated1.size());
  Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemory());
  Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());
  
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  // check node report, 4 GB used and 0 GB available
  Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory());
  Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());

  // check container is assigned with 2 GB.
  Container c1 = allocated1.get(0);
  Assert.assertEquals(2 * GB, c1.getResource().getMemory());
  
  // update node resource to 2 GB, so resource is over-consumed.
  Map<NodeId, ResourceOption> nodeResourceMap = 
      new HashMap<NodeId, ResourceOption>();
  nodeResourceMap.put(nm1.getNodeId(), 
      ResourceOption.newInstance(Resource.newInstance(2 * GB, 1), -1));
  UpdateNodeResourceRequest request = 
      UpdateNodeResourceRequest.newInstance(nodeResourceMap);
  AdminService as = ((MockRM)rm).getAdminService();
  as.updateNodeResource(request);
  
  // Now, the used resource is still 4 GB, and available resource is minus value.
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemory());
  
  // Check container can complete successfully in case of resource over-commitment.
  ContainerStatus containerStatus = BuilderUtils.newContainerStatus(
      c1.getId(), ContainerState.COMPLETE, "", 0);
  nm1.containerStatus(containerStatus);
  int waitCount = 0;
  while (attempt1.getJustFinishedContainers().size() < 1
      && waitCount++ != 20) {
    LOG.info("Waiting for containers to be finished for app 1... Tried "
        + waitCount + " times already..");
    Thread.sleep(100);
  }
  Assert.assertEquals(1, attempt1.getJustFinishedContainers().size());
  Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses().size());
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
  // As the container returned 2 GB, the available resource becomes 0 again.
  Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory());
  
  // Verify no NPE is triggered in schedule after resource is updated.
  am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 3 * GB, 1, 1);
  alloc1Response = am1.schedule();
  Assert.assertEquals("Shouldn't have enough resource to allocate containers",
      0, alloc1Response.getAllocatedContainers().size());
  int times = 0;
  // try 10 times as scheduling is async process.
  while (alloc1Response.getAllocatedContainers().size() < 1
      && times++ < 10) {
    LOG.info("Waiting for containers to be allocated for app 1... Tried "
        + times + " times already..");
    Thread.sleep(100);
  }
  Assert.assertEquals("Shouldn't have enough resource to allocate containers",
      0, alloc1Response.getAllocatedContainers().size());
  rm.stop();
}
 
Example 6
Source File: TestNMClient.java    From hadoop with Apache License 2.0
private Set<Container> allocateContainers(
    AMRMClientImpl<ContainerRequest> rmClient, int num)
    throws YarnException, IOException {
  // setup container request
  Resource capability = Resource.newInstance(1024, 0);
  Priority priority = Priority.newInstance(0);
  String node = nodeReports.get(0).getNodeId().getHost();
  String rack = nodeReports.get(0).getRackName();
  String[] nodes = new String[] {node};
  String[] racks = new String[] {rack};

  for (int i = 0; i < num; ++i) {
    rmClient.addContainerRequest(new ContainerRequest(capability, nodes,
        racks, priority));
  }

  int containersRequestedAny = rmClient.remoteRequestsTable.get(priority)
      .get(ResourceRequest.ANY).get(capability).remoteRequest
      .getNumContainers();

  // RM should allocate container within 2 calls to allocate()
  int allocatedContainerCount = 0;
  int iterationsLeft = 2;
  Set<Container> containers = new TreeSet<Container>();
  while (allocatedContainerCount < containersRequestedAny
      && iterationsLeft > 0) {
    AllocateResponse allocResponse = rmClient.allocate(0.1f);

    allocatedContainerCount += allocResponse.getAllocatedContainers().size();
    for(Container container : allocResponse.getAllocatedContainers()) {
      containers.add(container);
    }
    if (!allocResponse.getNMTokens().isEmpty()) {
      for (NMToken token : allocResponse.getNMTokens()) {
        rmClient.getNMTokenCache().setToken(token.getNodeId().toString(),
            token.getToken());
      }
    }
    if(allocatedContainerCount < containersRequestedAny) {
      // sleep to let NM's heartbeat to RM and trigger allocations
      sleep(1000);
    }

    --iterationsLeft;
  }
  return containers;
}
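The NMTokens cached in the loop above are what an NMClient later needs in order to start the allocated containers on their NodeManagers. The following is a hedged sketch, not part of TestNMClient, of launching one container returned by getAllocatedContainers(); the shell command is a placeholder, and a real ApplicationMaster would also set up local resources, environment, and credentials in the launch context.

import java.util.Collections;

import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.client.api.NMClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class StartContainerSketch {

  // Create an NMClient; by default NMClient and AMRMClient share the
  // singleton NMTokenCache, so tokens taken from the AllocateResponse
  // are visible here without extra wiring.
  static NMClient newNMClient(YarnConfiguration conf) {
    NMClient nmClient = NMClient.createNMClient();
    nmClient.init(conf);
    nmClient.start();
    return nmClient;
  }

  // Launch a placeholder shell command in a container obtained from
  // AllocateResponse#getAllocatedContainers().
  static void startContainer(NMClient nmClient, Container container) throws Exception {
    ContainerLaunchContext ctx = ContainerLaunchContext.newInstance(
        Collections.<String, LocalResource>emptyMap(),   // no local resources
        Collections.<String, String>emptyMap(),          // no extra environment
        Collections.singletonList("sleep 60"),           // placeholder command
        null, null, null);
    nmClient.startContainer(container, ctx);
  }
}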
 
Example 7
Source File: TestFifoScheduler.java    From big-c with Apache License 2.0
@Test
public void test() throws Exception {
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  MockRM rm = new MockRM(conf);
  rm.start();
  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
  MockNM nm2 = rm.registerNode("127.0.0.2:5678", 4 * GB);

  RMApp app1 = rm.submitApp(2048);
  // kick the scheduling, 2 GB given to AM1, remaining 4GB on nm1
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
  am1.registerAppAttempt();
  SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(
      nm1.getNodeId());
  Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());

  RMApp app2 = rm.submitApp(2048);
  // kick the scheduling, 2GB given to AM, remaining 2 GB on nm2
  nm2.nodeHeartbeat(true);
  RMAppAttempt attempt2 = app2.getCurrentAppAttempt();
  MockAM am2 = rm.sendAMLaunched(attempt2.getAppAttemptId());
  am2.registerAppAttempt();
  SchedulerNodeReport report_nm2 = rm.getResourceScheduler().getNodeReport(
      nm2.getNodeId());
  Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemory());

  // add request for containers
  am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, GB, 1, 1);
  AllocateResponse alloc1Response = am1.schedule(); // send the request
  // add request for containers
  am2.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 3 * GB, 0, 1);
  AllocateResponse alloc2Response = am2.schedule(); // send the request

  // kick the scheduler, 1 GB and 3 GB given to AM1 and AM2, remaining 0
  nm1.nodeHeartbeat(true);
  while (alloc1Response.getAllocatedContainers().size() < 1) {
    LOG.info("Waiting for containers to be created for app 1...");
    Thread.sleep(1000);
    alloc1Response = am1.schedule();
  }
  while (alloc2Response.getAllocatedContainers().size() < 1) {
    LOG.info("Waiting for containers to be created for app 2...");
    Thread.sleep(1000);
    alloc2Response = am2.schedule();
  }
  // kick the scheduler, nothing given remaining 2 GB.
  nm2.nodeHeartbeat(true);

  List<Container> allocated1 = alloc1Response.getAllocatedContainers();
  Assert.assertEquals(1, allocated1.size());
  Assert.assertEquals(1 * GB, allocated1.get(0).getResource().getMemory());
  Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());

  List<Container> allocated2 = alloc2Response.getAllocatedContainers();
  Assert.assertEquals(1, allocated2.size());
  Assert.assertEquals(3 * GB, allocated2.get(0).getResource().getMemory());
  Assert.assertEquals(nm1.getNodeId(), allocated2.get(0).getNodeId());
  
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  report_nm2 = rm.getResourceScheduler().getNodeReport(nm2.getNodeId());
  Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory());
  Assert.assertEquals(2 * GB, report_nm2.getAvailableResource().getMemory());

  Assert.assertEquals(6 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemory());

  Container c1 = allocated1.get(0);
  Assert.assertEquals(GB, c1.getResource().getMemory());
  ContainerStatus containerStatus = BuilderUtils.newContainerStatus(
      c1.getId(), ContainerState.COMPLETE, "", 0);
  nm1.containerStatus(containerStatus);
  int waitCount = 0;
  while (attempt1.getJustFinishedContainers().size() < 1
      && waitCount++ != 20) {
    LOG.info("Waiting for containers to be finished for app 1... Tried "
        + waitCount + " times already..");
    Thread.sleep(1000);
  }
  Assert.assertEquals(1, attempt1.getJustFinishedContainers().size());
  Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses().size());
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  Assert.assertEquals(5 * GB, report_nm1.getUsedResource().getMemory());

  rm.stop();
}
 
Example 8
Source File: TestFifoScheduler.java    From big-c with Apache License 2.0
@Test
public void testResourceOverCommit() throws Exception {
  MockRM rm = new MockRM(conf);
  rm.start();
  
  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 4 * GB);
  
  RMApp app1 = rm.submitApp(2048);
  // kick the scheduling, 2 GB given to AM1, remaining 2GB on nm1
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
  am1.registerAppAttempt();
  SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(
      nm1.getNodeId());
  // check node report, 2 GB used and 2 GB available
  Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemory());

  // add request for containers
  am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 2 * GB, 1, 1);
  AllocateResponse alloc1Response = am1.schedule(); // send the request

  // kick the scheduler, 2 GB given to AM1, resource remaining 0
  nm1.nodeHeartbeat(true);
  while (alloc1Response.getAllocatedContainers().size() < 1) {
    LOG.info("Waiting for containers to be created for app 1...");
    Thread.sleep(1000);
    alloc1Response = am1.schedule();
  }

  List<Container> allocated1 = alloc1Response.getAllocatedContainers();
  Assert.assertEquals(1, allocated1.size());
  Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemory());
  Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());
  
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  // check node report, 4 GB used and 0 GB available
  Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory());
  Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());

  // check container is assigned with 2 GB.
  Container c1 = allocated1.get(0);
  Assert.assertEquals(2 * GB, c1.getResource().getMemory());
  
  // update node resource to 2 GB, so resource is over-consumed.
  Map<NodeId, ResourceOption> nodeResourceMap = 
      new HashMap<NodeId, ResourceOption>();
  nodeResourceMap.put(nm1.getNodeId(), 
      ResourceOption.newInstance(Resource.newInstance(2 * GB, 1), -1));
  UpdateNodeResourceRequest request = 
      UpdateNodeResourceRequest.newInstance(nodeResourceMap);
  AdminService as = rm.adminService;
  as.updateNodeResource(request);
  
  // Now, the used resource is still 4 GB, and available resource is minus value.
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemory());
  
  // Check container can complete successfully in case of resource over-commitment.
  ContainerStatus containerStatus = BuilderUtils.newContainerStatus(
      c1.getId(), ContainerState.COMPLETE, "", 0);
  nm1.containerStatus(containerStatus);
  int waitCount = 0;
  while (attempt1.getJustFinishedContainers().size() < 1
      && waitCount++ != 20) {
    LOG.info("Waiting for containers to be finished for app 1... Tried "
        + waitCount + " times already..");
    Thread.sleep(100);
  }
  Assert.assertEquals(1, attempt1.getJustFinishedContainers().size());
  Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses().size());
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
  // As the container returned 2 GB, the available resource becomes 0 again.
  Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory());
  rm.stop();
}
 
Example 9
Source File: TestApplicationCleanup.java    From big-c with Apache License 2.0
@SuppressWarnings("resource")
@Test(timeout = 60000)
public void testAppCleanupWhenRMRestartedBeforeAppFinished() throws Exception {
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);

  // start RM
  MockRM rm1 = new MockRM(conf, memStore);
  rm1.start();
  MockNM nm1 =
      new MockNM("127.0.0.1:1234", 1024, rm1.getResourceTrackerService());
  nm1.registerNode();
  MockNM nm2 =
      new MockNM("127.0.0.1:5678", 1024, rm1.getResourceTrackerService());
  nm2.registerNode();

  // create app and launch the AM
  RMApp app0 = rm1.submitApp(200);
  MockAM am0 = launchAM(app0, rm1, nm1);

  // alloc another container on nm2
  AllocateResponse allocResponse =
      am0.allocate(Arrays.asList(ResourceRequest.newInstance(
          Priority.newInstance(1), "*", Resource.newInstance(1024, 0), 1)),
          null);
  while (null == allocResponse.getAllocatedContainers()
      || allocResponse.getAllocatedContainers().isEmpty()) {
    nm2.nodeHeartbeat(true);
    allocResponse = am0.allocate(null, null);
    Thread.sleep(1000);
  }

  // start new RM
  MockRM rm2 = new MockRM(conf, memStore);
  rm2.start();

  // nm1/nm2 register to rm2, and do a heartbeat
  nm1.setResourceTrackerService(rm2.getResourceTrackerService());
  nm1.registerNode(Arrays.asList(NMContainerStatus.newInstance(
    ContainerId.newContainerId(am0.getApplicationAttemptId(), 1),
    ContainerState.COMPLETE, Resource.newInstance(1024, 1), "", 0,
    Priority.newInstance(0), 1234)), Arrays.asList(app0.getApplicationId()));
  nm2.setResourceTrackerService(rm2.getResourceTrackerService());
  nm2.registerNode(Arrays.asList(app0.getApplicationId()));

  // assert app state has been saved.
  rm2.waitForState(app0.getApplicationId(), RMAppState.FAILED);

  // wait for application cleanup message received on NM1
  waitForAppCleanupMessageRecved(nm1, app0.getApplicationId());

  // wait for application cleanup message received on NM2
  waitForAppCleanupMessageRecved(nm2, app0.getApplicationId());

  rm1.stop();
  rm2.stop();
}
 
Example 10
Source File: TestCapacityScheduler.java    From big-c with Apache License 2.0
@Test
public void testResourceOverCommit() throws Exception {
  Configuration conf = new Configuration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
      ResourceScheduler.class);
  MockRM rm = new MockRM(conf);
  rm.start();
  
  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 4 * GB);
  RMApp app1 = rm.submitApp(2048);
  // kick the scheduling, 2 GB given to AM1, remaining 2GB on nm1
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
  am1.registerAppAttempt();
  SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(
      nm1.getNodeId());
  // check node report, 2 GB used and 2 GB available
  Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemory());

  // add request for containers
  am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 2 * GB, 1, 1);
  AllocateResponse alloc1Response = am1.schedule(); // send the request

  // kick the scheduler, 2 GB given to AM1, resource remaining 0
  nm1.nodeHeartbeat(true);
  while (alloc1Response.getAllocatedContainers().size() < 1) {
    LOG.info("Waiting for containers to be created for app 1...");
    Thread.sleep(100);
    alloc1Response = am1.schedule();
  }

  List<Container> allocated1 = alloc1Response.getAllocatedContainers();
  Assert.assertEquals(1, allocated1.size());
  Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemory());
  Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());
  
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  // check node report, 4 GB used and 0 GB available
  Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory());
  Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());

  // check container is assigned with 2 GB.
  Container c1 = allocated1.get(0);
  Assert.assertEquals(2 * GB, c1.getResource().getMemory());
  
  // update node resource to 2 GB, so resource is over-consumed.
  Map<NodeId, ResourceOption> nodeResourceMap = 
      new HashMap<NodeId, ResourceOption>();
  nodeResourceMap.put(nm1.getNodeId(), 
      ResourceOption.newInstance(Resource.newInstance(2 * GB, 1), -1));
  UpdateNodeResourceRequest request = 
      UpdateNodeResourceRequest.newInstance(nodeResourceMap);
  AdminService as = ((MockRM)rm).getAdminService();
  as.updateNodeResource(request);
  
  // Now, the used resource is still 4 GB, and available resource is minus value.
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());
  Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemory());
  
  // Check container can complete successfully in case of resource over-commitment.
  ContainerStatus containerStatus = BuilderUtils.newContainerStatus(
      c1.getId(), ContainerState.COMPLETE, "", 0);
  nm1.containerStatus(containerStatus);
  int waitCount = 0;
  while (attempt1.getJustFinishedContainers().size() < 1
      && waitCount++ != 20) {
    LOG.info("Waiting for containers to be finished for app 1... Tried "
        + waitCount + " times already..");
    Thread.sleep(100);
  }
  Assert.assertEquals(1, attempt1.getJustFinishedContainers().size());
  Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses().size());
  report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
  Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
  // As the container returned 2 GB, the available resource becomes 0 again.
  Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory());
  
  // Verify no NPE is triggered in schedule after resource is updated.
  am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 3 * GB, 1, 1);
  alloc1Response = am1.schedule();
  Assert.assertEquals("Shouldn't have enough resource to allocate containers",
      0, alloc1Response.getAllocatedContainers().size());
  int times = 0;
  // try 10 times as scheduling is async process.
  while (alloc1Response.getAllocatedContainers().size() < 1
      && times++ < 10) {
    LOG.info("Waiting for containers to be allocated for app 1... Tried "
        + times + " times already..");
    Thread.sleep(100);
  }
  Assert.assertEquals("Shouldn't have enough resource to allocate containers",
      0, alloc1Response.getAllocatedContainers().size());
  rm.stop();
}
 
Example 11
Source File: TestNMClient.java    From big-c with Apache License 2.0
private Set<Container> allocateContainers(
    AMRMClientImpl<ContainerRequest> rmClient, int num)
    throws YarnException, IOException {
  // setup container request
  Resource capability = Resource.newInstance(1024, 0);
  Priority priority = Priority.newInstance(0);
  String node = nodeReports.get(0).getNodeId().getHost();
  String rack = nodeReports.get(0).getRackName();
  String[] nodes = new String[] {node};
  String[] racks = new String[] {rack};

  for (int i = 0; i < num; ++i) {
    rmClient.addContainerRequest(new ContainerRequest(capability, nodes,
        racks, priority));
  }

  int containersRequestedAny = rmClient.remoteRequestsTable.get(priority)
      .get(ResourceRequest.ANY).get(capability).remoteRequest
      .getNumContainers();

  // RM should allocate container within 2 calls to allocate()
  int allocatedContainerCount = 0;
  int iterationsLeft = 2;
  Set<Container> containers = new TreeSet<Container>();
  while (allocatedContainerCount < containersRequestedAny
      && iterationsLeft > 0) {
    AllocateResponse allocResponse = rmClient.allocate(0.1f);

    allocatedContainerCount += allocResponse.getAllocatedContainers().size();
    for(Container container : allocResponse.getAllocatedContainers()) {
      containers.add(container);
    }
    if (!allocResponse.getNMTokens().isEmpty()) {
      for (NMToken token : allocResponse.getNMTokens()) {
        rmClient.getNMTokenCache().setToken(token.getNodeId().toString(),
            token.getToken());
      }
    }
    if(allocatedContainerCount < containersRequestedAny) {
      // sleep to let NM's heartbeat to RM and trigger allocations
      sleep(1000);
    }

    --iterationsLeft;
  }
  return containers;
}
 
Example 12
Source File: YarnManager.java    From Scribengin with GNU Affero General Public License v3.0
public List<Container> getAllocatedContainers() throws YarnException, IOException {
  // allocate() doubles as the AM heartbeat; each AllocateResponse only lists
  // containers granted since the previous call, so callers that need the full
  // set must accumulate results across calls.
  AllocateResponse response = amrmClient.allocate(0);
  return response.getAllocatedContainers();
}