Java Code Examples for org.apache.hadoop.yarn.util.resource.Resources#lessThanOrEqual()

The following examples show how to use org.apache.hadoop.yarn.util.resource.Resources#lessThanOrEqual(). You can go to the original project or source file by following the links above each example, and check out the related API usage on the sidebar.
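Before the examples, here is a minimal, hypothetical sketch of a typical call (the class name and resource sizes are illustrative assumptions, not taken from any example below). The method takes a ResourceCalculator, the cluster resource, and the two Resource values to compare; with a memory-only DefaultResourceCalculator the cluster resource is not consulted, which is likely why several examples below pass null for it.

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;

public class LessThanOrEqualSketch {
  public static void main(String[] args) {
    // Illustrative sizes: a 2 GB / 1 vcore request against an 8 GB / 4 vcore node.
    Resource request = Resource.newInstance(2048, 1);
    Resource nodeCapacity = Resource.newInstance(8192, 4);

    // DefaultResourceCalculator compares memory only, so the cluster resource
    // argument can be null here, mirroring Examples 1 and 2.
    ResourceCalculator rc = new DefaultResourceCalculator();
    boolean fits = Resources.lessThanOrEqual(rc, null, request, nodeCapacity);
    System.out.println("request fits on node: " + fits); // prints true
  }
}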
Example 1
Source File: FSAppAttempt.java    From hadoop with Apache License 2.0 6 votes
/**
 * Whether this app has container requests that could be satisfied on the
 * given node, if the node had full space.
 */
public boolean hasContainerForNode(Priority prio, FSSchedulerNode node) {
  ResourceRequest anyRequest = getResourceRequest(prio, ResourceRequest.ANY);
  ResourceRequest rackRequest = getResourceRequest(prio, node.getRackName());
  ResourceRequest nodeRequest = getResourceRequest(prio, node.getNodeName());

  return
      // There must be outstanding requests at the given priority:
      anyRequest != null && anyRequest.getNumContainers() > 0 &&
          // If locality relaxation is turned off at *-level, there must be a
          // non-zero request for the node's rack:
          (anyRequest.getRelaxLocality() ||
              (rackRequest != null && rackRequest.getNumContainers() > 0)) &&
          // If locality relaxation is turned off at rack-level, there must be a
          // non-zero request at the node:
          (rackRequest == null || rackRequest.getRelaxLocality() ||
              (nodeRequest != null && nodeRequest.getNumContainers() > 0)) &&
          // The requested container must be able to fit on the node:
          Resources.lessThanOrEqual(RESOURCE_CALCULATOR, null,
              anyRequest.getCapability(), node.getRMNode().getTotalCapability());
}
 
Example 2
Source File: FSAppAttempt.java    From big-c with Apache License 2.0 6 votes
/**
 * Whether this app has container requests that could be satisfied on the
 * given node, if the node had full space.
 */
public boolean hasContainerForNode(Priority prio, FSSchedulerNode node) {
  ResourceRequest anyRequest = getResourceRequest(prio, ResourceRequest.ANY);
  ResourceRequest rackRequest = getResourceRequest(prio, node.getRackName());
  ResourceRequest nodeRequest = getResourceRequest(prio, node.getNodeName());

  return
      // There must be outstanding requests at the given priority:
      anyRequest != null && anyRequest.getNumContainers() > 0 &&
          // If locality relaxation is turned off at *-level, there must be a
          // non-zero request for the node's rack:
          (anyRequest.getRelaxLocality() ||
              (rackRequest != null && rackRequest.getNumContainers() > 0)) &&
          // If locality relaxation is turned off at rack-level, there must be a
          // non-zero request at the node:
          (rackRequest == null || rackRequest.getRelaxLocality() ||
              (nodeRequest != null && nodeRequest.getNumContainers() > 0)) &&
          // The requested container must be able to fit on the node:
          Resources.lessThanOrEqual(RESOURCE_CALCULATOR, null,
              anyRequest.getCapability(), node.getRMNode().getTotalCapability());
}
 
Example 3
Source File: FiCaSchedulerApp.java    From hadoop with Apache License 2.0 5 votes
synchronized public NodeId getNodeIdToUnreserve(Priority priority,
    Resource resourceNeedUnreserve, ResourceCalculator rc,
    Resource clusterResource) {

  // For the first go-around, keep this algorithm simple and just grab the
  // first reservation that has enough resources
  Map<NodeId, RMContainer> reservedContainers = this.reservedContainers
      .get(priority);

  if ((reservedContainers != null) && (!reservedContainers.isEmpty())) {
    for (Map.Entry<NodeId, RMContainer> entry : reservedContainers.entrySet()) {
      NodeId nodeId = entry.getKey();
      Resource containerResource = entry.getValue().getContainer().getResource();
      
      // make sure we unreserve one with at least the same amount of
      // resources, otherwise could affect capacity limits
      if (Resources.lessThanOrEqual(rc, clusterResource,
          resourceNeedUnreserve, containerResource)) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("unreserving node with reservation size: "
              + containerResource
              + " in order to allocate container with size: " + resourceNeedUnreserve);
        }
        return nodeId;
      }
    }
  }
  return null;
}
 
Example 4
Source File: ProportionalCapacityPreemptionPolicy.java    From hadoop with Apache License 2.0 5 votes
/**
 * As more resources are needed for preemption, the saved AMContainers have to
 * be rescanned. Such AMContainers can be preempted based on resToObtain, but
 * maxAMCapacityForThisQueue resources will still be retained.
 *  
 * @param clusterResource
 * @param preemptMap
 * @param skippedAMContainerlist
 * @param resToObtain
 * @param skippedAMSize
 * @param maxAMCapacityForThisQueue
 */
private void preemptAMContainers(Resource clusterResource,
    Map<ApplicationAttemptId, Set<RMContainer>> preemptMap,
    List<RMContainer> skippedAMContainerlist, Resource resToObtain,
    Resource skippedAMSize, Resource maxAMCapacityForThisQueue) {
  for (RMContainer c : skippedAMContainerlist) {
    // Got required amount of resources for preemption, can stop now
    if (Resources.lessThanOrEqual(rc, clusterResource, resToObtain,
        Resources.none())) {
      break;
    }
    // Once skippedAMSize drops down to maxAMCapacityForThisQueue, stop
    // iterating over containers for preemption.
    if (Resources.lessThanOrEqual(rc, clusterResource, skippedAMSize,
        maxAMCapacityForThisQueue)) {
      break;
    }
    Set<RMContainer> contToPrempt = preemptMap.get(c
        .getApplicationAttemptId());
    if (null == contToPrempt) {
      contToPrempt = new HashSet<RMContainer>();
      preemptMap.put(c.getApplicationAttemptId(), contToPrempt);
    }
    contToPrempt.add(c);
    
    Resources.subtractFrom(resToObtain, c.getContainer().getResource());
    Resources.subtractFrom(skippedAMSize, c.getContainer()
        .getResource());
  }
  skippedAMContainerlist.clear();
}
 
Example 5
Source File: FiCaSchedulerApp.java    From big-c with Apache License 2.0 5 votes
synchronized public NodeId getNodeIdToUnreserve(Priority priority,
    Resource resourceNeedUnreserve, ResourceCalculator rc,
    Resource clusterResource) {

  // For the first go-around, keep this algorithm simple and just grab the
  // first reservation that has enough resources
  Map<NodeId, RMContainer> reservedContainers = this.reservedContainers
      .get(priority);

  if ((reservedContainers != null) && (!reservedContainers.isEmpty())) {
    for (Map.Entry<NodeId, RMContainer> entry : reservedContainers.entrySet()) {
      NodeId nodeId = entry.getKey();
      Resource containerResource = entry.getValue().getContainer().getResource();
      
      LOG.info("find unreserved node:"+entry.getKey()+"container:"+entry.getValue().getContainerId());
      // make sure we unreserve one with at least the same amount of
      // resources, otherwise could affect capacity limits
      if (Resources.lessThanOrEqual(rc, clusterResource,
          resourceNeedUnreserve, containerResource)) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("unreserving node with reservation size: "
              + containerResource
              + " in order to allocate container with size: " + resourceNeedUnreserve);
        }
        return nodeId;
      }
    }
  }
  return null;
}
 
Example 6
Source File: ProportionalCapacityPreemptionPolicy.java    From big-c with Apache License 2.0 5 votes
/**
 * As more resources are needed for preemption, the saved AMContainers have to
 * be rescanned. Such AMContainers can be preempted based on resToObtain, but
 * maxAMCapacityForThisQueue resources will still be retained.
 *  
 * @param clusterResource
 * @param preemptMap
 * @param skippedAMContainerlist
 * @param resToObtain
 * @param skippedAMSize
 * @param maxAMCapacityForThisQueue
 */
private void preemptAMContainers(Resource clusterResource,
    Map<ApplicationAttemptId, Map<RMContainer,Resource>> preemptMap,
    List<RMContainer> skippedAMContainerlist, Resource resToObtain,
    Resource skippedAMSize, Resource maxAMCapacityForThisQueue) {
  for (RMContainer c : skippedAMContainerlist) {
    // Got required amount of resources for preemption, can stop now
    if (Resources.lessThanOrEqual(rc, clusterResource, resToObtain,
        Resources.none())) {
      break;
    }
    // Once skippedAMSize drops down to maxAMCapacityForThisQueue, stop
    // iterating over containers for preemption.
    if (Resources.lessThanOrEqual(rc, clusterResource, skippedAMSize,
        maxAMCapacityForThisQueue)) {
      break;
    }
    Map<RMContainer,Resource> contToPrempt = preemptMap.get(c
        .getApplicationAttemptId());
    if (null == contToPrempt) {
      contToPrempt = new HashMap<RMContainer,Resource>();
      preemptMap.put(c.getApplicationAttemptId(), contToPrempt);
    }
    
    LOG.info("preempt am container "+c.getContainerId());
    contToPrempt.put(c,c.getContainer().getResource());
    
    Resources.subtractFrom(resToObtain, c.getContainer().getResource());
    Resources.subtractFrom(skippedAMSize, c.getContainer()
        .getResource());
  }
  skippedAMContainerlist.clear();
}
 
Example 7
Source File: LeafQueue.java    From hadoop with Apache License 2.0 4 votes
private synchronized void activateApplications() {
  // limit of allowed resource usage for application masters
  Map<String, Resource> amPartitionLimit = new HashMap<String, Resource>();
  Map<String, Resource> userAmPartitionLimit =
      new HashMap<String, Resource>();
      
  for (Iterator<FiCaSchedulerApp> i=pendingApplications.iterator(); 
       i.hasNext(); ) {
    FiCaSchedulerApp application = i.next();

    // Get the am-node-partition associated with each application
    // and calculate max-am resource limit for this partition.
    String partitionName = application.getAppAMNodePartitionName();

    Resource amLimit = amPartitionLimit.get(partitionName);
    // Verify whether we already calculated am-limit for this label.
    if (amLimit == null) {
      amLimit = getAMResourceLimitPerPartition(partitionName);
      amPartitionLimit.put(partitionName, amLimit);
    }
    // Check am resource limit.
    Resource amIfStarted = Resources.add(
        application.getAMResource(partitionName),
        queueUsage.getAMUsed(partitionName));
    
    if (LOG.isDebugEnabled()) {
      LOG.debug("application AMResource "
          + application.getAMResource(partitionName)
          + " maxAMResourcePerQueuePercent " + maxAMResourcePerQueuePercent
          + " amLimit " + amLimit + " lastClusterResource "
          + lastClusterResource + " amIfStarted " + amIfStarted
          + " AM node-partition name " + partitionName);
    }
    
    if (!Resources.lessThanOrEqual(resourceCalculator, lastClusterResource,
        amIfStarted, amLimit)) {
      if (getNumActiveApplications() < 1
          || (Resources.lessThanOrEqual(resourceCalculator,
              lastClusterResource, queueUsage.getAMUsed(partitionName),
              Resources.none()))) {
        LOG.warn("maximum-am-resource-percent is insufficient to start a"
            + " single application in queue, it is likely set too low."
            + " skipping enforcement to allow at least one application"
            + " to start");
      } else {
        LOG.info("not starting application as amIfStarted exceeds amLimit");
        continue;
      }
    }

    // Check user am resource limit
    User user = getUser(application.getUser());
    Resource userAMLimit = userAmPartitionLimit.get(partitionName);

    // Verify whether we already calculated user-am-limit for this label.
    if (userAMLimit == null) {
      userAMLimit = getUserAMResourceLimitPerPartition(partitionName);
      userAmPartitionLimit.put(partitionName, userAMLimit);
    }

    Resource userAmIfStarted = Resources.add(
        application.getAMResource(partitionName),
        user.getConsumedAMResources(partitionName));

    if (!Resources.lessThanOrEqual(resourceCalculator, lastClusterResource,
        userAmIfStarted, userAMLimit)) {
      if (getNumActiveApplications() < 1
          || (Resources.lessThanOrEqual(resourceCalculator,
              lastClusterResource, queueUsage.getAMUsed(partitionName),
              Resources.none()))) {
        LOG.warn("maximum-am-resource-percent is insufficient to start a"
            + " single application in queue for user, it is likely set too"
            + " low. skipping enforcement to allow at least one application"
            + " to start");
      } else {
        LOG.info("not starting application as amIfStarted exceeds " +
          "userAmLimit");
        continue;
      }
    }
    user.activateApplication();
    activeApplications.add(application);
    queueUsage.incAMUsed(partitionName,
        application.getAMResource(partitionName));
    user.getResourceUsage().incAMUsed(partitionName,
        application.getAMResource(partitionName));
    i.remove();
    LOG.info("Application " + application.getApplicationId() +
        " from user: " + application.getUser() + 
        " activated in queue: " + getQueueName());
  }
}
 
Example 8
Source File: LeafQueue.java    From hadoop with Apache License 2.0 4 votes
@Private
protected synchronized boolean canAssignToUser(Resource clusterResource,
    String userName, Resource limit, FiCaSchedulerApp application,
    Set<String> requestLabels, ResourceLimits currentResoureLimits) {
  User user = getUser(userName);
  
  String label = CommonNodeLabelsManager.NO_LABEL;
  if (requestLabels != null && !requestLabels.isEmpty()) {
    label = requestLabels.iterator().next();
  }

  // Note: We aren't considering the current request since there is a fixed
  // overhead of the AM, but it's a > check, not a >= check, so...
  if (Resources
      .greaterThan(resourceCalculator, clusterResource,
          user.getUsed(label),
          limit)) {
    // if enabled, check to see if could we potentially use this node instead
    // of a reserved node if the application has reserved containers
    if (this.reservationsContinueLooking
        && label.equals(CommonNodeLabelsManager.NO_LABEL)) {
      if (Resources.lessThanOrEqual(
          resourceCalculator,
          clusterResource,
          Resources.subtract(user.getUsed(), application.getCurrentReservation()),
          limit)) {

        if (LOG.isDebugEnabled()) {
          LOG.debug("User " + userName + " in queue " + getQueueName()
              + " will exceed limit based on reservations - " + " consumed: "
              + user.getUsed() + " reserved: "
              + application.getCurrentReservation() + " limit: " + limit);
        }
        Resource amountNeededToUnreserve = Resources.subtract(user.getUsed(label), limit);
        // we can only acquire a new container if we unreserve first since we ignored the
        // user limit. Choose the max of user limit or what was previously set by max
        // capacity.
        currentResoureLimits.setAmountNeededUnreserve(Resources.max(resourceCalculator,
            clusterResource, currentResoureLimits.getAmountNeededUnreserve(),
            amountNeededToUnreserve));
        return true;
      }
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("User " + userName + " in queue " + getQueueName()
          + " will exceed limit - " + " consumed: "
          + user.getUsed() + " limit: " + limit);
    }
    return false;
  }
  return true;
}
 
Example 9
Source File: ProportionalCapacityPreemptionPolicy.java    From hadoop with Apache License 2.0 4 votes
/**
 * Based on a resource preemption target, drop reservations of containers and
 * if necessary select containers for preemption from applications in each
 * over-capacity queue. It uses {@link #NATURAL_TERMINATION_FACTOR} to
 * account for containers that will naturally complete.
 *
 * @param queues set of leaf queues to preempt from
 * @param clusterResource total amount of cluster resources
 * @return a map of applicationID to set of containers to preempt
 */
private Map<ApplicationAttemptId,Set<RMContainer>> getContainersToPreempt(
    List<TempQueue> queues, Resource clusterResource) {

  Map<ApplicationAttemptId,Set<RMContainer>> preemptMap =
      new HashMap<ApplicationAttemptId,Set<RMContainer>>();
  List<RMContainer> skippedAMContainerlist = new ArrayList<RMContainer>();

  for (TempQueue qT : queues) {
    if (qT.preemptionDisabled && qT.leafQueue != null) {
      if (LOG.isDebugEnabled()) {
        if (Resources.greaterThan(rc, clusterResource,
            qT.toBePreempted, Resource.newInstance(0, 0, 0))) {
          LOG.debug("Tried to preempt the following "
                    + "resources from non-preemptable queue: "
                    + qT.queueName + " - Resources: " + qT.toBePreempted);
        }
      }
      continue;
    }
    // we act only if we are violating balance by more than
    // maxIgnoredOverCapacity
    if (Resources.greaterThan(rc, clusterResource, qT.current,
        Resources.multiply(qT.guaranteed, 1.0 + maxIgnoredOverCapacity))) {
      // we introduce a dampening factor naturalTerminationFactor that
      // accounts for natural termination of containers
      Resource resToObtain =
        Resources.multiply(qT.toBePreempted, naturalTerminationFactor);
      Resource skippedAMSize = Resource.newInstance(0, 0, 0);

      // lock the leafqueue while we scan applications and unreserve
      synchronized (qT.leafQueue) {
        NavigableSet<FiCaSchedulerApp> ns = 
            (NavigableSet<FiCaSchedulerApp>) qT.leafQueue.getApplications();
        Iterator<FiCaSchedulerApp> desc = ns.descendingIterator();
        qT.actuallyPreempted = Resources.clone(resToObtain);
        while (desc.hasNext()) {
          FiCaSchedulerApp fc = desc.next();
          if (Resources.lessThanOrEqual(rc, clusterResource, resToObtain,
              Resources.none())) {
            break;
          }
          preemptMap.put(
              fc.getApplicationAttemptId(),
              preemptFrom(fc, clusterResource, resToObtain,
                  skippedAMContainerlist, skippedAMSize));
        }
        Resource maxAMCapacityForThisQueue = Resources.multiply(
            Resources.multiply(clusterResource,
                qT.leafQueue.getAbsoluteCapacity()),
            qT.leafQueue.getMaxAMResourcePerQueuePercent());

        // Can try preempting AMContainers (still saving at most
        // maxAMCapacityForThisQueue AMResource's) if more resources are
        // required to be preempted from this Queue.
        preemptAMContainers(clusterResource, preemptMap,
            skippedAMContainerlist, resToObtain, skippedAMSize,
            maxAMCapacityForThisQueue);
      }
    }
  }
  return preemptMap;
}
 
Example 10
Source File: LeafQueue.java    From big-c with Apache License 2.0 4 votes
private synchronized void activateApplications() {
  //limit of allowed resource usage for application masters
  Resource amLimit = getAMResourceLimit();
  Resource userAMLimit = getUserAMResourceLimit();
      
  for (Iterator<FiCaSchedulerApp> i=pendingApplications.iterator(); 
       i.hasNext(); ) {
    FiCaSchedulerApp application = i.next();
    
    // Check am resource limit
    Resource amIfStarted = 
      Resources.add(application.getAMResource(), queueUsage.getAMUsed());
    
    if (LOG.isDebugEnabled()) {
      LOG.debug("application AMResource " + application.getAMResource() +
        " maxAMResourcePerQueuePercent " + maxAMResourcePerQueuePercent +
        " amLimit " + amLimit +
        " lastClusterResource " + lastClusterResource +
        " amIfStarted " + amIfStarted);
    }
    
    if (!Resources.lessThanOrEqual(
      resourceCalculator, lastClusterResource, amIfStarted, amLimit)) {
      if (getNumActiveApplications() < 1) {
        LOG.warn("maximum-am-resource-percent is insufficient to start a" +
          " single application in queue, it is likely set too low." +
          " skipping enforcement to allow at least one application to start"); 
      } else {
        LOG.info("not starting application as amIfStarted exceeds amLimit");
        continue;
      }
    }
    
    // Check user am resource limit
    
    User user = getUser(application.getUser());
    
    Resource userAmIfStarted = 
      Resources.add(application.getAMResource(),
        user.getConsumedAMResources());
      
    if (!Resources.lessThanOrEqual(
        resourceCalculator, lastClusterResource, userAmIfStarted, 
        userAMLimit)) {
      if (getNumActiveApplications() < 1) {
        LOG.warn("maximum-am-resource-percent is insufficient to start a" +
          " single application in queue for user, it is likely set too low." +
          " skipping enforcement to allow at least one application to start"); 
      } else {
        LOG.info("not starting application as amIfStarted exceeds " +
          "userAmLimit");
        continue;
      }
    }
    user.activateApplication();
    activeApplications.add(application);
    queueUsage.incAMUsed(application.getAMResource());
    user.getResourceUsage().incAMUsed(application.getAMResource());
    i.remove();
    LOG.info("Application " + application.getApplicationId() +
        " from user: " + application.getUser() + 
        " activated in queue: " + getQueueName());
  }
}
 
Example 11
Source File: LeafQueue.java    From big-c with Apache License 2.0 4 votes
@Private
protected synchronized boolean assignToUser(Resource clusterResource,
    String userName, Resource limit, FiCaSchedulerApp application,
    Set<String> requestLabels, ResourceLimits currentResoureLimits) {
  User user = getUser(userName);
  
  String label = CommonNodeLabelsManager.NO_LABEL;
  if (requestLabels != null && !requestLabels.isEmpty()) {
    label = requestLabels.iterator().next();
  }

  // Note: We aren't considering the current request since there is a fixed
  // overhead of the AM, but it's a > check, not a >= check, so...
  if (Resources
      .greaterThan(resourceCalculator, clusterResource,
          user.getUsed(label),
          limit)) {
    // if enabled, check to see if could we potentially use this node instead
    // of a reserved node if the application has reserved containers
    if (this.reservationsContinueLooking) {
      if (Resources.lessThanOrEqual(
          resourceCalculator,
          clusterResource,
          Resources.subtract(user.getUsed(), application.getCurrentReservation()),
          limit)) {

        if (LOG.isDebugEnabled()) {
          LOG.debug("User " + userName + " in queue " + getQueueName()
              + " will exceed limit based on reservations - " + " consumed: "
              + user.getUsed() + " reserved: "
              + application.getCurrentReservation() + " limit: " + limit);
        }
        Resource amountNeededToUnreserve = Resources.subtract(user.getUsed(label), limit);
        // we can only acquire a new container if we unreserve first since we ignored the
        // user limit. Choose the max of user limit or what was previously set by max
        // capacity.
        currentResoureLimits.setAmountNeededUnreserve(Resources.max(resourceCalculator,
            clusterResource, currentResoureLimits.getAmountNeededUnreserve(),
            amountNeededToUnreserve));
        return true;
      }
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("User " + userName + " in queue " + getQueueName()
          + " will exceed limit - " + " consumed: "
          + user.getUsed() + " limit: " + limit);
    }
    return false;
  }
  return true;
}
 
Example 12
Source File: ProportionalCapacityPreemptionPolicy.java    From big-c with Apache License 2.0 4 votes
/**
 * Based on a resource preemption target, drop reservations of containers and
 * if necessary select containers for preemption from applications in each
 * over-capacity queue. It uses {@link #NATURAL_TERMINATION_FACTOR} to
 * account for containers that will naturally complete.
 *
 * @param queues set of leaf queues to preempt from
 * @param clusterResource total amount of cluster resources
 * @return a map of applicationID to set of containers to preempt
 */
private Map<ApplicationAttemptId,Map<RMContainer,Resource>> getContainersToPreempt(
    List<TempQueue> queues, Resource clusterResource) {

  Map<ApplicationAttemptId, Map<RMContainer,Resource>> preemptMap =
      new HashMap<ApplicationAttemptId, Map<RMContainer,Resource>>();
  
  List<RMContainer> skippedAMContainerlist = new ArrayList<RMContainer>();
  
  // for test only
  if (isTest) {
    getContainersToPreemptForTest(preemptMap, queues, clusterResource);
  }
  

  for (TempQueue qT : queues) {
    if (qT.preemptionDisabled && qT.leafQueue != null) {
      if (LOG.isDebugEnabled()) {
        if (Resources.greaterThan(rc, clusterResource,
            qT.toBePreempted, Resource.newInstance(0, 0))) {
          LOG.info("Tried to preempt the following "
                    + "resources from non-preemptable queue: "
                    + qT.queueName + " - Resources: " + qT.toBePreempted);
        }
      }
      continue;
    }
    // we act only if we are violating balance by more than
    // maxIgnoredOverCapacity
    if (Resources.greaterThan(rc, clusterResource, qT.current,
        Resources.multiply(qT.guaranteed, 1.0 + maxIgnoredOverCapacity))) {
      // we introduce a dampening factor naturalTerminationFactor that
      // accounts for natural termination of containers
      Resource resToObtain =
        Resources.multiply(qT.toBePreempted, naturalTerminationFactor);
      Resource skippedAMSize = Resource.newInstance(0, 0);
      
      LOG.info("try to preempt: "+resToObtain+" from queue: "+qT.queueName);
      if(resToObtain.getMemory() > 0){
      	LOG.info("resToObtain memory: "+resToObtain.getMemory());
      }
      // lock the leafqueue while we scan applications and unreserve
      synchronized (qT.leafQueue) {
        // what is the descending order
        NavigableSet<FiCaSchedulerApp> ns =
            (NavigableSet<FiCaSchedulerApp>) qT.leafQueue.getApplications();
        Iterator<FiCaSchedulerApp> desc = ns.descendingIterator();
        qT.actuallyPreempted = Resources.clone(resToObtain);
        while (desc.hasNext()) {
          FiCaSchedulerApp fc = desc.next();
          if (Resources.lessThanOrEqual(rc, clusterResource, resToObtain,
              Resources.none())) {
            break;
          }
          LOG.info("now try to preempt applicatin:"+fc.getApplicationId());
          preemptMap.put(
              fc.getApplicationAttemptId(),
              preemptFrom(fc, clusterResource, resToObtain,
                  skippedAMContainerlist, skippedAMSize));
        }
        
        // we allow preempting the AM for the kill-based approach
        if (false) {
          // we will never preempt am resource
          Resource maxAMCapacityForThisQueue = Resources.multiply(
              Resources.multiply(clusterResource,
                  qT.leafQueue.getAbsoluteCapacity()),
              qT.leafQueue.getMaxAMResourcePerQueuePercent());

          // Can try preempting AMContainers (still saving at most
          // maxAMCapacityForThisQueue AMResource's) if more resources are
          // required to be preempted from this Queue.
          preemptAMContainers(clusterResource, preemptMap,
              skippedAMContainerlist, resToObtain, skippedAMSize,
              maxAMCapacityForThisQueue);
        }
      }
    }
  }
  return preemptMap;
}