Java Code Examples for org.apache.hadoop.yarn.util.resource.Resources#clone()

The following examples show how to use org.apache.hadoop.yarn.util.resource.Resources#clone(). The source file and originating project are listed above each example.
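All of the examples share one pattern: Resources.clone(Resource) returns an independent copy of a Resource, so mutable scheduler state can be snapshotted or handed to callers without exposing the original object. The sketch below illustrates that copy semantics in isolation; the class name and values are invented for illustration, and it assumes the Hadoop 2.x Resource API (int-valued getMemory()/setMemory()).

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public class ResourcesCloneDemo {
  public static void main(String[] args) {
    // A Resource describing 4096 MB of memory and 4 virtual cores.
    Resource original = Resources.createResource(4096, 4);

    // clone() builds a new Resource carrying the same values; it does not
    // alias the original, so mutating the copy leaves the original intact.
    Resource copy = Resources.clone(original);
    copy.setMemory(copy.getMemory() * 2);

    System.out.println("original memory: " + original.getMemory()); // 4096
    System.out.println("copy memory:     " + copy.getMemory());     // 8192
  }
}

This defensive-copy behavior is why the schedulers below clone under a lock before storing or returning a Resource: callers get a stable snapshot, and the scheduler's internal instances are never handed out for external mutation.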
Example 1
Source File: AbstractYarnScheduler.java    From big-c with Apache License 2.0
@Override
public Resource getMaximumResourceCapability() {
  Resource maxResource;
  maxAllocReadLock.lock();
  try {
    if (useConfiguredMaximumAllocationOnly) {
      if (System.currentTimeMillis() - ResourceManager.getClusterTimeStamp()
          > configuredMaximumAllocationWaitTime) {
        useConfiguredMaximumAllocationOnly = false;
      }
      maxResource = Resources.clone(configuredMaximumAllocation);
    } else {
      maxResource = Resources.clone(maximumAllocation);
    }
  } finally {
    maxAllocReadLock.unlock();
  }
  return maxResource;
}
 
Example 2
Source File: AbstractYarnScheduler.java    From hadoop with Apache License 2.0
protected void refreshMaximumAllocation(Resource newMaxAlloc) {
  maxAllocWriteLock.lock();
  try {
    configuredMaximumAllocation = Resources.clone(newMaxAlloc);
    int maxMemory = newMaxAlloc.getMemory();
    if (maxNodeMemory != -1) {
      maxMemory = Math.min(maxMemory, maxNodeMemory);
    }
    int maxVcores = newMaxAlloc.getVirtualCores();
    if (maxNodeVCores != -1) {
      maxVcores = Math.min(maxVcores, maxNodeVCores);
    }
    int maxGcores = newMaxAlloc.getGpuCores();
    if (maxNodeGCores != -1) {
      maxGcores = Math.min(maxGcores, maxNodeGCores);
    }
    maximumAllocation = Resources.createResource(maxMemory, maxVcores, maxGcores);
  } finally {
    maxAllocWriteLock.unlock();
  }
}
 
Example 3
Source File: FairScheduler.java    From big-c with Apache License 2.0
/**
 * Check for queues that need tasks preempted, either because they have been
 * below their guaranteed share for minSharePreemptionTimeout or they have
 * been below their fair share threshold for the fairSharePreemptionTimeout. If
 * such queues exist, compute how many tasks of each type need to be preempted
 * and then select the right ones using preemptTasks.
 */
protected synchronized void preemptTasksIfNecessary() {
  if (!shouldAttemptPreemption()) {
    return;
  }

  long curTime = getClock().getTime();
  if (curTime - lastPreemptCheckTime < preemptionInterval) {
    return;
  }
  lastPreemptCheckTime = curTime;

  Resource resToPreempt = Resources.clone(Resources.none());
  for (FSLeafQueue sched : queueMgr.getLeafQueues()) {
    Resources.addTo(resToPreempt, resToPreempt(sched, curTime));
  }
  if (Resources.greaterThan(RESOURCE_CALCULATOR, clusterResource, resToPreempt,
      Resources.none())) {
    preemptResources(resToPreempt);
  }
}
 
Example 4
Source File: YarnTaskSchedulerService.java    From incubator-tez with Apache License 2.0
@Override
public float getProgress() {
  if (isStopped.get()) {
    return 1;
  }

  if(totalResources.getMemory() == 0) {
    // assume this is the first allocate callback. nothing is allocated.
    // available resource = totalResource
    // TODO this will not handle dynamic changes in resources
    totalResources = Resources.clone(getAvailableResources());
    LOG.info("App total resource memory: " + totalResources.getMemory() +
             " cpu: " + totalResources.getVirtualCores() +
             " taskAllocations: " + taskAllocations.size());
  }

  preemptIfNeeded();

  return appClientDelegate.getProgress();
}
 
Example 5
Source File: AbstractYarnScheduler.java    From hadoop with Apache License 2.0
@Override
public Resource getMaximumResourceCapability() {
  Resource maxResource;
  maxAllocReadLock.lock();
  try {
    if (useConfiguredMaximumAllocationOnly) {
      if (System.currentTimeMillis() - ResourceManager.getClusterTimeStamp()
          > configuredMaximumAllocationWaitTime) {
        useConfiguredMaximumAllocationOnly = false;
      }
      maxResource = Resources.clone(configuredMaximumAllocation);
    } else {
      maxResource = Resources.clone(maximumAllocation);
    }
  } finally {
    maxAllocReadLock.unlock();
  }
  return maxResource;
}
 
Example 6
Source File: SchedulerNode.java    From hadoop with Apache License 2.0
public SchedulerNode(RMNode node, boolean usePortForNodeName,
    Set<String> labels) {
  this.rmNode = node;
  this.availableResource = Resources.clone(node.getTotalCapability());
  this.totalResourceCapability = Resources.clone(node.getTotalCapability());
  if (usePortForNodeName) {
    nodeName = rmNode.getHostName() + ":" + node.getNodeID().getPort();
  } else {
    nodeName = rmNode.getHostName();
  }
  this.labels = ImmutableSet.copyOf(labels);
}
 
Example 7
Source File: InMemoryPlan.java    From big-c with Apache License 2.0
@Override
public Resource getConsumptionForUser(String user, long t) {
  readLock.lock();
  try {
    RLESparseResourceAllocation userResAlloc = userResourceAlloc.get(user);
    if (userResAlloc != null) {
      return userResAlloc.getCapacityAtTime(t);
    } else {
      return Resources.clone(ZERO_RESOURCE);
    }
  } finally {
    readLock.unlock();
  }
}
 
Example 8
Source File: RMContainerImpl.java    From big-c with Apache License 2.0
@Override
public void addPreemptedResource(Resource resource) {
  readLock.lock();
  try {
    this.lastPreempted = Resources.clone(resource);
    Resources.addTo(preempted, resource);
  } finally {
    readLock.unlock();
  }
}
 
Example 9
Source File: CommonNodeLabelsManager.java    From big-c with Apache License 2.0
public Node copy() {
  Node c = new Node(nodeId);
  if (labels != null) {
    c.labels =
        Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
    c.labels.addAll(labels);
  } else {
    c.labels = null;
  }
  c.resource = Resources.clone(resource);
  c.running = running;
  return c;
}
 
Example 10
Source File: InMemoryPlan.java    From big-c with Apache License 2.0
@Override
public Resource getTotalCapacity() {
  readLock.lock();
  try {
    return Resources.clone(totalCapacity);
  } finally {
    readLock.unlock();
  }
}
 
Example 11
Source File: CommonNodeLabelsManager.java    From hadoop with Apache License 2.0
public Node copy() {
  Node c = new Node(nodeId);
  if (labels != null) {
    c.labels =
        Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
    c.labels.addAll(labels);
  } else {
    c.labels = null;
  }
  c.resource = Resources.clone(resource);
  c.running = running;
  return c;
}
 
Example 12
Source File: InMemoryPlan.java    From big-c with Apache License 2.0
@Override
public void setTotalCapacity(Resource cap) {
  writeLock.lock();
  try {
    totalCapacity = Resources.clone(cap);
  } finally {
    writeLock.unlock();
  }
}
 
Example 13
Source File: SchedulerApplicationAttempt.java    From hadoop with Apache License 2.0
public synchronized ApplicationResourceUsageReport getResourceUsageReport() {
  AggregateAppResourceUsage runningResourceUsage =
      getRunningAggregateAppResourceUsage();
  Resource usedResourceClone =
      Resources.clone(attemptResourceUsage.getUsed());
  Resource reservedResourceClone =
      Resources.clone(attemptResourceUsage.getReserved());
  return ApplicationResourceUsageReport.newInstance(liveContainers.size(),
      reservedContainers.size(), usedResourceClone, reservedResourceClone,
      Resources.add(usedResourceClone, reservedResourceClone),
      runningResourceUsage.getMemorySeconds(),
      runningResourceUsage.getVcoreSeconds(),
      runningResourceUsage.getGcoreSeconds());
}
 
Example 14
Source File: AbstractYarnScheduler.java    From big-c with Apache License 2.0
protected void initMaximumResourceCapability(Resource maximumAllocation) {
  maxAllocWriteLock.lock();
  try {
    if (this.configuredMaximumAllocation == null) {
      this.configuredMaximumAllocation = Resources.clone(maximumAllocation);
      this.maximumAllocation = Resources.clone(maximumAllocation);
    }
  } finally {
    maxAllocWriteLock.unlock();
  }
}
 
Example 15
Source File: RLESparseResourceAllocation.java    From big-c with Apache License 2.0
/**
 * Returns the capacity, i.e. the total resources allocated at the specified
 * point in time.
 * 
 * @param tick the time (UTC in ms) at which the capacity is requested
 * @return the resources allocated at the specified time
 */
public Resource getCapacityAtTime(long tick) {
  readLock.lock();
  try {
    Entry<Long, Resource> closestStep = cumulativeCapacity.floorEntry(tick);
    if (closestStep != null) {
      return Resources.clone(closestStep.getValue());
    }
    return Resources.clone(ZERO_RESOURCE);
  } finally {
    readLock.unlock();
  }
}
 
Example 16
Source File: InMemoryPlan.java    From hadoop with Apache License 2.0
@Override
public Resource getTotalCapacity() {
  readLock.lock();
  try {
    return Resources.clone(totalCapacity);
  } finally {
    readLock.unlock();
  }
}
 
Example 17
Source File: InMemoryPlan.java    From hadoop with Apache License 2.0
@Override
public Resource getConsumptionForUser(String user, long t) {
  readLock.lock();
  try {
    RLESparseResourceAllocation userResAlloc = userResourceAlloc.get(user);
    if (userResAlloc != null) {
      return userResAlloc.getCapacityAtTime(t);
    } else {
      return Resources.clone(ZERO_RESOURCE);
    }
  } finally {
    readLock.unlock();
  }
}
 
Example 18
Source File: ProportionalCapacityPreemptionPolicy.java    From hadoop with Apache License 2.0
/**
 * Given a set of queues, compute the fix-point distribution of unassigned
 * resources among them. As the pending requests of a queue are exhausted, the
 * queue is removed from the set and the remaining capacity is redistributed
 * among the remaining queues. The distribution is weighted based on guaranteed
 * capacity, unless asked to ignoreGuarantee, in which case resources are
 * distributed uniformly.
 */
private void computeFixpointAllocation(ResourceCalculator rc,
    Resource tot_guarant, Collection<TempQueue> qAlloc, Resource unassigned, 
    boolean ignoreGuarantee) {
  // Prior to assigning the unused resources, process each queue as follows:
  // If current > guaranteed, idealAssigned = guaranteed + untouchable extra
  // Else idealAssigned = current;
  // Subtract idealAssigned resources from unassigned.
  // If the queue has all of its needs met (that is, if 
  // idealAssigned >= current + pending), remove it from consideration.
  // Sort queues from most under-guaranteed to most over-guaranteed.
  TQComparator tqComparator = new TQComparator(rc, tot_guarant);
  PriorityQueue<TempQueue> orderedByNeed =
                               new PriorityQueue<TempQueue>(10,tqComparator);
  for (Iterator<TempQueue> i = qAlloc.iterator(); i.hasNext();) {
    TempQueue q = i.next();
    if (Resources.greaterThan(rc, tot_guarant, q.current, q.guaranteed)) {
      q.idealAssigned = Resources.add(q.guaranteed, q.untouchableExtra);
    } else {
      q.idealAssigned = Resources.clone(q.current);
    }
    Resources.subtractFrom(unassigned, q.idealAssigned);
    // If idealAssigned < (current + pending), q needs more resources, so
    // add it to the list of underserved queues, ordered by need.
    Resource curPlusPend = Resources.add(q.current, q.pending);
    if (Resources.lessThan(rc, tot_guarant, q.idealAssigned, curPlusPend)) {
      orderedByNeed.add(q);
    }
  }

  // assign all cluster resources until no more demand, or no resources are left
  while (!orderedByNeed.isEmpty()
     && Resources.greaterThan(rc,tot_guarant, unassigned,Resources.none())) {
    Resource wQassigned = Resource.newInstance(0, 0, 0);
    // we compute normalizedGuarantees capacity based on currently active
    // queues
    resetCapacity(rc, unassigned, orderedByNeed, ignoreGuarantee);

    // For each underserved queue (or set of queues if multiple are equally
    // underserved), offer its share of the unassigned resources based on its
    // normalized guarantee. After the offer, if the queue is not satisfied,
    // place it back in the ordered list of queues, recalculating its place
    // in the order of most under-guaranteed to most over-guaranteed. In this
    // way, the most underserved queue(s) are always given resources first.
    Collection<TempQueue> underserved =
        getMostUnderservedQueues(orderedByNeed, tqComparator);
    for (Iterator<TempQueue> i = underserved.iterator(); i.hasNext();) {
      TempQueue sub = i.next();
      Resource wQavail = Resources.multiplyAndNormalizeUp(rc,
          unassigned, sub.normalizedGuarantee, Resource.newInstance(1, 1, 0));
      Resource wQidle = sub.offer(wQavail, rc, tot_guarant);
      Resource wQdone = Resources.subtract(wQavail, wQidle);

      if (Resources.greaterThan(rc, tot_guarant,
            wQdone, Resources.none())) {
        // The queue is still asking for more. Put it back in the priority
        // queue, recalculating its order based on need.
        orderedByNeed.add(sub);
      }
      Resources.addTo(wQassigned, wQdone);
    }
    Resources.subtractFrom(unassigned, wQassigned);
  }
}
 
Example 19
Source File: ProportionalCapacityPreemptionPolicy.java    From hadoop with Apache License 2.0
/**
 * Based on a resource preemption target, drop reservations of containers and,
 * if necessary, select containers for preemption from applications in each
 * over-capacity queue. It uses {@link #NATURAL_TERMINATION_FACTOR} to
 * account for containers that will naturally complete.
 *
 * @param queues set of leaf queues to preempt from
 * @param clusterResource total amount of cluster resources
 * @return a map of applicationId to set of containers to preempt
 */
private Map<ApplicationAttemptId,Set<RMContainer>> getContainersToPreempt(
    List<TempQueue> queues, Resource clusterResource) {

  Map<ApplicationAttemptId,Set<RMContainer>> preemptMap =
      new HashMap<ApplicationAttemptId,Set<RMContainer>>();
  List<RMContainer> skippedAMContainerlist = new ArrayList<RMContainer>();

  for (TempQueue qT : queues) {
    if (qT.preemptionDisabled && qT.leafQueue != null) {
      if (LOG.isDebugEnabled()) {
        if (Resources.greaterThan(rc, clusterResource,
            qT.toBePreempted, Resource.newInstance(0, 0, 0))) {
          LOG.debug("Tried to preempt the following "
                    + "resources from non-preemptable queue: "
                    + qT.queueName + " - Resources: " + qT.toBePreempted);
        }
      }
      continue;
    }
    // we act only if we are violating balance by more than
    // maxIgnoredOverCapacity
    if (Resources.greaterThan(rc, clusterResource, qT.current,
        Resources.multiply(qT.guaranteed, 1.0 + maxIgnoredOverCapacity))) {
      // we introduce a dampening factor naturalTerminationFactor that
      // accounts for natural termination of containers
      Resource resToObtain =
        Resources.multiply(qT.toBePreempted, naturalTerminationFactor);
      Resource skippedAMSize = Resource.newInstance(0, 0, 0);

      // lock the leafqueue while we scan applications and unreserve
      synchronized (qT.leafQueue) {
        NavigableSet<FiCaSchedulerApp> ns = 
            (NavigableSet<FiCaSchedulerApp>) qT.leafQueue.getApplications();
        Iterator<FiCaSchedulerApp> desc = ns.descendingIterator();
        qT.actuallyPreempted = Resources.clone(resToObtain);
        while (desc.hasNext()) {
          FiCaSchedulerApp fc = desc.next();
          if (Resources.lessThanOrEqual(rc, clusterResource, resToObtain,
              Resources.none())) {
            break;
          }
          preemptMap.put(
              fc.getApplicationAttemptId(),
              preemptFrom(fc, clusterResource, resToObtain,
                  skippedAMContainerlist, skippedAMSize));
        }
        Resource maxAMCapacityForThisQueue = Resources.multiply(
            Resources.multiply(clusterResource,
                qT.leafQueue.getAbsoluteCapacity()),
            qT.leafQueue.getMaxAMResourcePerQueuePercent());

        // Can try preempting AMContainers (still saving at most
        // maxAMCapacityForThisQueue worth of AM resources) if more resources
        // need to be preempted from this queue.
        preemptAMContainers(clusterResource, preemptMap,
            skippedAMContainerlist, resToObtain, skippedAMSize,
            maxAMCapacityForThisQueue);
      }
    }
  }
  return preemptMap;
}
 
Example 20
Source File: InMemoryPlan.java    From hadoop with Apache License 2.0
@Override
public Resource getMinimumAllocation() {
  return Resources.clone(minAlloc);
}