Java Code Examples for org.apache.hadoop.yarn.util.resource.Resources#subtractFrom()
The following examples, drawn from open-source projects, show how to use org.apache.hadoop.yarn.util.resource.Resources#subtractFrom(). The source file, project, and license are noted above each example.
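Before the examples, here is a minimal sketch of what subtractFrom() does: it subtracts the second Resource from the first in place and returns the mutated first argument, which is why the callers below pass a running total (cluster capacity, used resources, headroom) as the first parameter. The class and variable names are illustrative, and the sketch assumes the Hadoop 2.x Resource.newInstance(memory, vCores) factory and getMemory()/getVirtualCores() accessors.

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public class SubtractFromDemo {
  public static void main(String[] args) {
    // Running total, e.g. the resources currently held by an application.
    Resource used = Resource.newInstance(8192, 8);      // 8192 MB, 8 vcores
    // Resources released by a finished container.
    Resource released = Resource.newInstance(2048, 2);

    // subtractFrom mutates its first argument in place and returns it;
    // no new Resource object is allocated.
    Resources.subtractFrom(used, released);

    System.out.println("memory=" + used.getMemory()
        + " vcores=" + used.getVirtualCores());          // memory=6144 vcores=6
  }
}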
Example 1
Source File: YarnTaskSchedulerService.java From tez with Apache License 2.0
private void releaseContainer(ContainerId containerId) {
  Object assignedTask = containerAssignments.remove(containerId);
  if (assignedTask != null) {
    // A task was assigned to this container at some point. Inform the app.
    getContext().containerBeingReleased(containerId);
  }
  HeldContainer delayedContainer = heldContainers.remove(containerId);
  if (delayedContainer != null) {
    Resources.subtractFrom(allocatedResources,
        delayedContainer.getContainer().getResource());
  }
  if (delayedContainer != null || !shouldReuseContainers) {
    amRmClient.releaseAssignedContainer(containerId);
  }
  if (assignedTask != null) {
    // A task was assigned at some point. Add to release list since we are
    // releasing the container.
    releasedContainers.put(containerId, assignedTask);
  }
}
Example 2
Source File: YarnTaskSchedulerService.java From incubator-tez with Apache License 2.0
private void releaseContainer(ContainerId containerId) {
  Object assignedTask = containerAssignments.remove(containerId);
  if (assignedTask != null) {
    // A task was assigned to this container at some point. Inform the app.
    appClientDelegate.containerBeingReleased(containerId);
  }
  HeldContainer delayedContainer = heldContainers.remove(containerId);
  if (delayedContainer != null) {
    Resources.subtractFrom(allocatedResources,
        delayedContainer.getContainer().getResource());
  }
  if (delayedContainer != null || !shouldReuseContainers) {
    amRmClient.releaseAssignedContainer(containerId);
  }
  if (assignedTask != null) {
    // A task was assigned at some point. Add to release list since we are
    // releasing the container.
    releasedContainers.put(containerId, assignedTask);
  }
}
Example 3
Source File: FifoScheduler.java From big-c with Apache License 2.0
private synchronized void removeNode(RMNode nodeInfo) {
  FiCaSchedulerNode node = getNode(nodeInfo.getNodeID());
  if (node == null) {
    return;
  }
  // Kill running containers
  for (RMContainer container : node.getRunningContainers()) {
    completedContainer(container,
        SchedulerUtils.createAbnormalContainerStatus(
            container.getContainerId(),
            SchedulerUtils.LOST_CONTAINER),
        RMContainerEventType.KILL);
  }

  // Remove the node
  this.nodes.remove(nodeInfo.getNodeID());
  updateMaximumAllocation(node, false);

  // Update cluster metrics
  Resources.subtractFrom(clusterResource, node.getRMNode().getTotalCapability());
}
Example 4
Source File: Application.java From big-c with Apache License 2.0
public synchronized void finishTask(Task task) throws IOException, YarnException {
  Set<Task> tasks = this.tasks.get(task.getPriority());
  if (!tasks.remove(task)) {
    throw new IllegalStateException(
        "Finishing unknown task " + task.getTaskId() +
        " from application " + applicationId);
  }

  NodeManager nodeManager = task.getNodeManager();
  ContainerId containerId = task.getContainerId();
  task.stop();
  List<ContainerId> containerIds = new ArrayList<ContainerId>();
  containerIds.add(containerId);
  StopContainersRequest stopRequest =
      StopContainersRequest.newInstance(containerIds);
  nodeManager.stopContainers(stopRequest);

  Resources.subtractFrom(used, requestSpec.get(task.getPriority()));

  LOG.info("Finished task " + task.getTaskId() +
      " of application " + applicationId +
      " on node " + nodeManager.getHostName() +
      ", currently using " + used + " resources");
}
Example 5
Source File: Application.java From hadoop with Apache License 2.0
public synchronized void finishTask(Task task) throws IOException, YarnException {
  Set<Task> tasks = this.tasks.get(task.getPriority());
  if (!tasks.remove(task)) {
    throw new IllegalStateException(
        "Finishing unknown task " + task.getTaskId() +
        " from application " + applicationId);
  }

  NodeManager nodeManager = task.getNodeManager();
  ContainerId containerId = task.getContainerId();
  task.stop();
  List<ContainerId> containerIds = new ArrayList<ContainerId>();
  containerIds.add(containerId);
  StopContainersRequest stopRequest =
      StopContainersRequest.newInstance(containerIds);
  nodeManager.stopContainers(stopRequest);

  Resources.subtractFrom(used, requestSpec.get(task.getPriority()));

  LOG.info("Finished task " + task.getTaskId() +
      " of application " + applicationId +
      " on node " + nodeManager.getHostName() +
      ", currently using " + used + " resources");
}
Example 6
Source File: LeafQueue.java From big-c with Apache License 2.0
synchronized void allocateResource(Resource clusterResource,
    SchedulerApplicationAttempt application, Resource resource,
    Set<String> nodeLabels, boolean isResume) {
  super.allocateResource(clusterResource, resource, nodeLabels, isResume);

  // Update user metrics
  String userName = application.getUser();
  User user = getUser(userName);
  user.assignContainer(resource, nodeLabels);
  // Note this is a bit unconventional since it gets the object and modifies
  // it here, rather then using set routine
  if (!isResume) {
    Resources.subtractFrom(application.getHeadroom(), resource); // headroom
    metrics.setAvailableResourcesToUser(userName, application.getHeadroom());
  }

  // if (LOG.isDebugEnabled()) {
  LOG.info(getQueueName() +
      " used=" + queueUsage.getUsed() +
      " numContainers=" + numContainers +
      " headroom = " + application.getHeadroom() +
      " user-resources=" + user.getUsed() +
      "allocate resource:" + resource +
      " absUsed= " + getAbsoluteUsedCapacity());
  // }
}
Example 7
Source File: FSLeafQueue.java From hadoop with Apache License 2.0
/**
 * Removes the given app from this queue.
 * @return whether or not the app was runnable
 */
public boolean removeApp(FSAppAttempt app) {
  boolean runnable = false;

  // Remove app from runnable/nonRunnable list while holding the write lock
  writeLock.lock();
  try {
    runnable = runnableApps.remove(app);
    if (!runnable) {
      // removeNonRunnableApp acquires the write lock again, which is fine
      if (!removeNonRunnableApp(app)) {
        throw new IllegalStateException("Given app to remove " + app +
            " does not exist in queue " + this);
      }
    }
  } finally {
    writeLock.unlock();
  }

  // Update AM resource usage if needed
  if (runnable && app.isAmRunning() && app.getAMResource() != null) {
    Resources.subtractFrom(amResourceUsage, app.getAMResource());
  }

  return runnable;
}
Example 8
Source File: FifoScheduler.java From hadoop with Apache License 2.0
private synchronized void removeNode(RMNode nodeInfo) {
  FiCaSchedulerNode node = getNode(nodeInfo.getNodeID());
  if (node == null) {
    return;
  }
  // Kill running containers
  for (RMContainer container : node.getRunningContainers()) {
    completedContainer(container,
        SchedulerUtils.createAbnormalContainerStatus(
            container.getContainerId(),
            SchedulerUtils.LOST_CONTAINER),
        RMContainerEventType.KILL);
  }

  // Remove the node
  this.nodes.remove(nodeInfo.getNodeID());
  updateMaximumAllocation(node, false);

  // Update cluster metrics
  Resources.subtractFrom(clusterResource, node.getRMNode().getTotalCapability());
}
Example 9
Source File: SchedulerNode.java From hadoop with Apache License 2.0
private synchronized void addAvailableResource(Resource resource) {
  if (resource == null) {
    LOG.error("Invalid resource addition of null resource for "
        + rmNode.getNodeAddress());
    return;
  }
  Resources.addTo(availableResource, resource);
  Resources.subtractFrom(usedResource, resource);
}
Example 10
Source File: ResourceUsage.java From hadoop with Apache License 2.0
private void _dec(String label, ResourceType type, Resource res) {
  try {
    writeLock.lock();
    UsageByLabel usage = getAndAddIfMissing(label);
    Resources.subtractFrom(usage.resArr[type.idx], res);
  } finally {
    writeLock.unlock();
  }
}
Example 11
Source File: FairScheduler.java From big-c with Apache License 2.0
private synchronized void removeNode(RMNode rmNode) {
  FSSchedulerNode node = getFSSchedulerNode(rmNode.getNodeID());
  // This can occur when an UNHEALTHY node reconnects
  if (node == null) {
    return;
  }
  Resources.subtractFrom(clusterResource, rmNode.getTotalCapability());
  updateRootQueueMetrics();

  // Remove running containers
  List<RMContainer> runningContainers = node.getRunningContainers();
  for (RMContainer container : runningContainers) {
    completedContainer(container,
        SchedulerUtils.createAbnormalContainerStatus(
            container.getContainerId(),
            SchedulerUtils.LOST_CONTAINER),
        RMContainerEventType.KILL);
  }

  // Remove reservations, if any
  RMContainer reservedContainer = node.getReservedContainer();
  if (reservedContainer != null) {
    completedContainer(reservedContainer,
        SchedulerUtils.createAbnormalContainerStatus(
            reservedContainer.getContainerId(),
            SchedulerUtils.LOST_CONTAINER),
        RMContainerEventType.KILL);
  }

  nodes.remove(rmNode.getNodeID());
  queueMgr.getRootQueue().setSteadyFairShare(clusterResource);
  queueMgr.getRootQueue().recomputeSteadyShares();
  updateMaximumAllocation(node, false);
  LOG.info("Removed node " + rmNode.getNodeAddress() +
      " cluster capacity: " + clusterResource);
}
Example 12
Source File: ResourceUsage.java From big-c with Apache License 2.0
private void _dec(String label, ResourceType type, Resource res) {
  try {
    writeLock.lock();
    UsageByLabel usage = getAndAddIfMissing(label);
    Resources.subtractFrom(usage.resArr[type.idx], res);
  } finally {
    writeLock.unlock();
  }
}
Example 13
Source File: ProportionalCapacityPreemptionPolicy.java From big-c with Apache License 2.0
/**
 * As more resources are needed for preemption, saved AMContainers has to be
 * rescanned. Such AMContainers can be preempted based on resToObtain, but
 * maxAMCapacityForThisQueue resources will be still retained.
 *
 * @param clusterResource
 * @param preemptMap
 * @param skippedAMContainerlist
 * @param resToObtain
 * @param skippedAMSize
 * @param maxAMCapacityForThisQueue
 */
private void preemptAMContainers(Resource clusterResource,
    Map<ApplicationAttemptId, Map<RMContainer, Resource>> preemptMap,
    List<RMContainer> skippedAMContainerlist, Resource resToObtain,
    Resource skippedAMSize, Resource maxAMCapacityForThisQueue) {
  for (RMContainer c : skippedAMContainerlist) {
    // Got required amount of resources for preemption, can stop now
    if (Resources.lessThanOrEqual(rc, clusterResource, resToObtain,
        Resources.none())) {
      break;
    }
    // Once skippedAMSize reaches down to maxAMCapacityForThisQueue,
    // container selection iteration for preemption will be stopped.
    if (Resources.lessThanOrEqual(rc, clusterResource, skippedAMSize,
        maxAMCapacityForThisQueue)) {
      break;
    }
    Map<RMContainer, Resource> contToPrempt =
        preemptMap.get(c.getApplicationAttemptId());
    if (null == contToPrempt) {
      contToPrempt = new HashMap<RMContainer, Resource>();
      preemptMap.put(c.getApplicationAttemptId(), contToPrempt);
    }
    LOG.info("preempt am container " + c.getContainerId());
    contToPrempt.put(c, c.getContainer().getResource());

    Resources.subtractFrom(resToObtain, c.getContainer().getResource());
    Resources.subtractFrom(skippedAMSize, c.getContainer().getResource());
  }
  skippedAMContainerlist.clear();
}
Example 14
Source File: ProportionalCapacityPreemptionPolicy.java From hadoop with Apache License 2.0
/**
 * As more resources are needed for preemption, saved AMContainers has to be
 * rescanned. Such AMContainers can be preempted based on resToObtain, but
 * maxAMCapacityForThisQueue resources will be still retained.
 *
 * @param clusterResource
 * @param preemptMap
 * @param skippedAMContainerlist
 * @param resToObtain
 * @param skippedAMSize
 * @param maxAMCapacityForThisQueue
 */
private void preemptAMContainers(Resource clusterResource,
    Map<ApplicationAttemptId, Set<RMContainer>> preemptMap,
    List<RMContainer> skippedAMContainerlist, Resource resToObtain,
    Resource skippedAMSize, Resource maxAMCapacityForThisQueue) {
  for (RMContainer c : skippedAMContainerlist) {
    // Got required amount of resources for preemption, can stop now
    if (Resources.lessThanOrEqual(rc, clusterResource, resToObtain,
        Resources.none())) {
      break;
    }
    // Once skippedAMSize reaches down to maxAMCapacityForThisQueue,
    // container selection iteration for preemption will be stopped.
    if (Resources.lessThanOrEqual(rc, clusterResource, skippedAMSize,
        maxAMCapacityForThisQueue)) {
      break;
    }
    Set<RMContainer> contToPrempt =
        preemptMap.get(c.getApplicationAttemptId());
    if (null == contToPrempt) {
      contToPrempt = new HashSet<RMContainer>();
      preemptMap.put(c.getApplicationAttemptId(), contToPrempt);
    }
    contToPrempt.add(c);

    Resources.subtractFrom(resToObtain, c.getContainer().getResource());
    Resources.subtractFrom(skippedAMSize, c.getContainer().getResource());
  }
  skippedAMContainerlist.clear();
}
Example 15
Source File: FairScheduler.java From hadoop with Apache License 2.0
private synchronized void removeNode(RMNode rmNode) {
  FSSchedulerNode node = getFSSchedulerNode(rmNode.getNodeID());
  // This can occur when an UNHEALTHY node reconnects
  if (node == null) {
    return;
  }
  Resources.subtractFrom(clusterResource, rmNode.getTotalCapability());
  updateRootQueueMetrics();

  // Remove running containers
  List<RMContainer> runningContainers = node.getRunningContainers();
  for (RMContainer container : runningContainers) {
    completedContainer(container,
        SchedulerUtils.createAbnormalContainerStatus(
            container.getContainerId(),
            SchedulerUtils.LOST_CONTAINER),
        RMContainerEventType.KILL);
  }

  // Remove reservations, if any
  RMContainer reservedContainer = node.getReservedContainer();
  if (reservedContainer != null) {
    completedContainer(reservedContainer,
        SchedulerUtils.createAbnormalContainerStatus(
            reservedContainer.getContainerId(),
            SchedulerUtils.LOST_CONTAINER),
        RMContainerEventType.KILL);
  }

  nodes.remove(rmNode.getNodeID());
  queueMgr.getRootQueue().setSteadyFairShare(clusterResource);
  queueMgr.getRootQueue().recomputeSteadyShares();
  updateMaximumAllocation(node, false);
  LOG.info("Removed node " + rmNode.getNodeAddress() +
      " cluster capacity: " + clusterResource);
}
Example 16
Source File: FSAppAttempt.java From big-c with Apache License 2.0
synchronized public void containerCompleted(RMContainer rmContainer,
    ContainerStatus containerStatus, RMContainerEventType event) {

  Container container = rmContainer.getContainer();
  ContainerId containerId = container.getId();

  // Remove from the list of newly allocated containers if found
  newlyAllocatedContainers.remove(rmContainer);

  // Inform the container
  rmContainer.handle(
      new RMContainerFinishedEvent(containerId, containerStatus, event));
  LOG.info("Completed container: " + rmContainer.getContainerId() +
      " in state: " + rmContainer.getState() + " event:" + event);

  // Remove from the list of containers
  liveContainers.remove(rmContainer.getContainerId());

  RMAuditLogger.logSuccess(getUser(), AuditConstants.RELEASE_CONTAINER,
      "SchedulerApp", getApplicationId(), containerId);

  // Update usage metrics
  Resource containerResource = rmContainer.getContainer().getResource();
  queue.getMetrics().releaseResources(getUser(), 1, containerResource);
  Resources.subtractFrom(currentConsumption, containerResource);

  // remove from preemption map if it is completed
  preemptionMap.remove(rmContainer);

  // Clear resource utilization metrics cache.
  lastMemoryAggregateAllocationUpdateTime = -1;
}
Example 17
Source File: ProportionalCapacityPreemptionPolicy.java From hadoop with Apache License 2.0
/**
 * Given a set of queues compute the fix-point distribution of unassigned
 * resources among them. As pending request of a queue are exhausted, the
 * queue is removed from the set and remaining capacity redistributed among
 * remaining queues. The distribution is weighted based on guaranteed
 * capacity, unless asked to ignoreGuarantee, in which case resources are
 * distributed uniformly.
 */
private void computeFixpointAllocation(ResourceCalculator rc,
    Resource tot_guarant, Collection<TempQueue> qAlloc, Resource unassigned,
    boolean ignoreGuarantee) {
  // Prior to assigning the unused resources, process each queue as follows:
  // If current > guaranteed, idealAssigned = guaranteed + untouchable extra
  // Else idealAssigned = current;
  // Subtract idealAssigned resources from unassigned.
  // If the queue has all of its needs met (that is, if
  // idealAssigned >= current + pending), remove it from consideration.
  // Sort queues from most under-guaranteed to most over-guaranteed.
  TQComparator tqComparator = new TQComparator(rc, tot_guarant);
  PriorityQueue<TempQueue> orderedByNeed =
      new PriorityQueue<TempQueue>(10, tqComparator);
  for (Iterator<TempQueue> i = qAlloc.iterator(); i.hasNext();) {
    TempQueue q = i.next();
    if (Resources.greaterThan(rc, tot_guarant, q.current, q.guaranteed)) {
      q.idealAssigned = Resources.add(q.guaranteed, q.untouchableExtra);
    } else {
      q.idealAssigned = Resources.clone(q.current);
    }
    Resources.subtractFrom(unassigned, q.idealAssigned);
    // If idealAssigned < (current + pending), q needs more resources, so
    // add it to the list of underserved queues, ordered by need.
    Resource curPlusPend = Resources.add(q.current, q.pending);
    if (Resources.lessThan(rc, tot_guarant, q.idealAssigned, curPlusPend)) {
      orderedByNeed.add(q);
    }
  }

  // assign all cluster resources until no more demand, or no resources are left
  while (!orderedByNeed.isEmpty()
      && Resources.greaterThan(rc, tot_guarant, unassigned, Resources.none())) {
    Resource wQassigned = Resource.newInstance(0, 0, 0);
    // we compute normalizedGuarantees capacity based on currently active
    // queues
    resetCapacity(rc, unassigned, orderedByNeed, ignoreGuarantee);

    // For each underserved queue (or set of queues if multiple are equally
    // underserved), offer its share of the unassigned resources based on its
    // normalized guarantee. After the offer, if the queue is not satisfied,
    // place it back in the ordered list of queues, recalculating its place
    // in the order of most under-guaranteed to most over-guaranteed. In this
    // way, the most underserved queue(s) are always given resources first.
    Collection<TempQueue> underserved =
        getMostUnderservedQueues(orderedByNeed, tqComparator);
    for (Iterator<TempQueue> i = underserved.iterator(); i.hasNext();) {
      TempQueue sub = i.next();
      Resource wQavail = Resources.multiplyAndNormalizeUp(rc,
          unassigned, sub.normalizedGuarantee, Resource.newInstance(1, 1, 0));
      Resource wQidle = sub.offer(wQavail, rc, tot_guarant);
      Resource wQdone = Resources.subtract(wQavail, wQidle);

      if (Resources.greaterThan(rc, tot_guarant, wQdone, Resources.none())) {
        // The queue is still asking for more. Put it back in the priority
        // queue, recalculating its order based on need.
        orderedByNeed.add(sub);
      }
      Resources.addTo(wQassigned, wQdone);
    }
    Resources.subtractFrom(unassigned, wQassigned);
  }
}
Example 18
Source File: CapacityScheduler.java From big-c with Apache License 2.0
private synchronized void removeNode(RMNode nodeInfo) {
  // update this node to node label manager
  if (labelManager != null) {
    labelManager.deactivateNode(nodeInfo.getNodeID());
  }

  FiCaSchedulerNode node = nodes.get(nodeInfo.getNodeID());
  if (node == null) {
    return;
  }
  Resources.subtractFrom(clusterResource, node.getRMNode().getTotalCapability());
  root.updateClusterResource(clusterResource, new ResourceLimits(
      clusterResource));
  int numNodes = numNodeManagers.decrementAndGet();

  if (scheduleAsynchronously && numNodes == 0) {
    asyncSchedulerThread.suspendSchedule();
  }

  // Remove running containers
  List<RMContainer> runningContainers = node.getRunningContainers();
  for (RMContainer container : runningContainers) {
    completedContainer(container,
        SchedulerUtils.createAbnormalContainerStatus(
            container.getContainerId(),
            SchedulerUtils.LOST_CONTAINER),
        RMContainerEventType.KILL);
  }

  // Remove reservations, if any
  RMContainer reservedContainer = node.getReservedContainer();
  if (reservedContainer != null) {
    completedContainer(reservedContainer,
        SchedulerUtils.createAbnormalContainerStatus(
            reservedContainer.getContainerId(),
            SchedulerUtils.LOST_CONTAINER),
        RMContainerEventType.KILL);
  }

  this.nodes.remove(nodeInfo.getNodeID());
  updateMaximumAllocation(node, false);

  LOG.info("Removed node " + nodeInfo.getNodeAddress() +
      " clusterResource: " + clusterResource);
}
Example 19
Source File: FifoScheduler.java From big-c with Apache License 2.0
@Lock(FifoScheduler.class)
@Override
protected synchronized void completedContainer(RMContainer rmContainer,
    ContainerStatus containerStatus, RMContainerEventType event) {
  if (rmContainer == null) {
    LOG.info("Null container completed...");
    return;
  }

  // Get the application for the finished container
  Container container = rmContainer.getContainer();
  FiCaSchedulerApp application =
      getCurrentAttemptForContainer(container.getId());
  ApplicationId appId =
      container.getId().getApplicationAttemptId().getApplicationId();

  // Get the node on which the container was allocated
  FiCaSchedulerNode node = getNode(container.getNodeId());

  if (application == null) {
    LOG.info("Unknown application: " + appId +
        " released container " + container.getId() +
        " on node: " + node +
        " with event: " + event);
    return;
  }

  // Inform the application
  application.containerCompleted(rmContainer, containerStatus, event);

  // Inform the node
  node.releaseContainer(container, container.getResource());

  // Update total usage
  Resources.subtractFrom(usedResource, container.getResource());

  LOG.info("Application attempt " + application.getApplicationAttemptId() +
      " released container " + container.getId() +
      " on node: " + node +
      " with event: " + event);
}
Example 20
Source File: NodeLabel.java From hadoop with Apache License 2.0
public void removeNode(Resource nodeRes) {
  Resources.subtractFrom(resource, nodeRes);
  numActiveNMs--;
}