Java Code Examples for org.apache.hadoop.yarn.util.resource.Resources

The following examples show how to use org.apache.hadoop.yarn.util.resource.Resources. They are extracted from open source projects; each example lists its source project, source file, and license.
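
Before the project examples, here is a minimal, self-contained sketch of the most common Resources helpers (createResource, clone, subtractFrom, add). The class name ResourcesQuickStart and the concrete memory/vcore numbers are illustrative assumptions, not taken from any of the examples below.

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public class ResourcesQuickStart {
  public static void main(String[] args) {
    // Create a resource of 4096 MB memory and 2 virtual cores.
    Resource nodeCapacity = Resources.createResource(4096, 2);

    // Clone before mutating so the original value is left untouched.
    Resource available = Resources.clone(nodeCapacity);

    // In-place arithmetic: subtract an allocation from the available resource.
    Resources.subtractFrom(available, Resources.createResource(1024, 1));

    // Non-mutating arithmetic: add returns a new Resource instance.
    Resource total = Resources.add(available, Resources.createResource(512, 0));

    System.out.println("capacity=" + nodeCapacity
        + " available=" + available + " total=" + total);
  }
}
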
Example 1
Source Project: big-c   Source File: FifoPolicy.java    License: Apache License 2.0
@Override
public void computeShares(Collection<? extends Schedulable> schedulables,
    Resource totalResources) {
  if (schedulables.isEmpty()) {
    return;
  }

  Schedulable earliest = null;
  for (Schedulable schedulable : schedulables) {
    if (earliest == null ||
        schedulable.getStartTime() < earliest.getStartTime()) {
      earliest = schedulable;
    }
  }
  earliest.setFairShare(Resources.clone(totalResources));
}
 
Example 2
Source Project: big-c   Source File: FiCaSchedulerApp.java    License: Apache License 2.0
/**
 * This method produces an Allocation that includes the current view
 * of the resources that will be allocated to and preempted from this
 * application.
 *
 * @param rc the resource calculator used for resource arithmetic
 * @param clusterResource the total resources available in the cluster
 * @param minimumAllocation the minimum allocation per container
 * @return an allocation
 */
public synchronized Allocation getAllocation(ResourceCalculator rc,
    Resource clusterResource, Resource minimumAllocation) {

  Set<ContainerId> currentContPreemption = Collections.unmodifiableSet(
      new HashSet<ContainerId>(containersToPreempt));
  containersToPreempt.clear();
  Resource tot = Resource.newInstance(0, 0);
  for(ContainerId c : currentContPreemption){
    Resources.addTo(tot,
        liveContainers.get(c).getContainer().getResource());
  }
  int numCont = (int) Math.ceil(
      Resources.divide(rc, clusterResource, tot, minimumAllocation));
  ResourceRequest rr = ResourceRequest.newInstance(
      Priority.UNDEFINED, ResourceRequest.ANY,
      minimumAllocation, numCont);
  ContainersAndNMTokensAllocation allocation =
      pullNewlyAllocatedContainersAndNMTokens();
  Resource headroom = getHeadroom();
  setApplicationHeadroomForMetrics(headroom);
  return new Allocation(allocation.getContainerList(), headroom, null,
    currentContPreemption, Collections.singletonList(rr),
    allocation.getNMTokenList());
}
 
Example 3
Source Project: hadoop   Source File: TestUtils.java    License: Apache License 2.0
public static FiCaSchedulerNode getMockNode(
    String host, String rack, int port, int capability) {
  NodeId nodeId = mock(NodeId.class);
  when(nodeId.getHost()).thenReturn(host);
  when(nodeId.getPort()).thenReturn(port);
  RMNode rmNode = mock(RMNode.class);
  when(rmNode.getNodeID()).thenReturn(nodeId);
  when(rmNode.getTotalCapability()).thenReturn(
      Resources.createResource(capability, 1));
  when(rmNode.getNodeAddress()).thenReturn(host+":"+port);
  when(rmNode.getHostName()).thenReturn(host);
  when(rmNode.getRackName()).thenReturn(rack);
  
  FiCaSchedulerNode node = spy(new FiCaSchedulerNode(rmNode, false));
  LOG.info("node = " + host + " avail=" + node.getAvailableResource());
  return node;
}
 
Example 4
Source Project: big-c   Source File: FSParentQueue.java    License: Apache License 2.0
@Override
public void updateDemand() {
  // Compute demand by iterating through apps in the queue
  // Limit demand to maxResources
  Resource maxRes = scheduler.getAllocationConfiguration()
      .getMaxResources(getName());
  demand = Resources.createResource(0);
  for (FSQueue childQueue : childQueues) {
    childQueue.updateDemand();
    Resource toAdd = childQueue.getDemand();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Counting resource from " + childQueue.getName() + " " + 
          toAdd + "; Total resource consumption for " + getName() +
          " now " + demand);
    }
    demand = Resources.add(demand, toAdd);
    demand = Resources.componentwiseMin(demand, maxRes);
    if (Resources.equals(demand, maxRes)) {
      break;
    }
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("The updated demand for " + getName() + " is " + demand +
        "; the max is " + maxRes);
  }    
}
 
Example 5
Source Project: big-c   Source File: TestFairScheduler.java    License: Apache License 2.0
@Test
public void testAggregateCapacityTracking() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  // Add a node
  RMNode node1 =
      MockNodes
          .newNodeInfo(1, Resources.createResource(1024), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);
  assertEquals(1024, scheduler.getClusterResource().getMemory());

  // Add another node
  RMNode node2 =
      MockNodes.newNodeInfo(1, Resources.createResource(512), 2, "127.0.0.2");
  NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2);
  scheduler.handle(nodeEvent2);
  assertEquals(1536, scheduler.getClusterResource().getMemory());

  // Remove the first node
  NodeRemovedSchedulerEvent nodeEvent3 = new NodeRemovedSchedulerEvent(node1);
  scheduler.handle(nodeEvent3);
  assertEquals(512, scheduler.getClusterResource().getMemory());
}
 
Example 6
Source Project: hadoop   Source File: FiCaSchedulerApp.java    License: Apache License 2.0
/**
 * This method produces an Allocation that includes the current view
 * of the resources that will be allocated to and preempted from this
 * application.
 *
 * @param rc the resource calculator used for resource arithmetic
 * @param clusterResource the total resources available in the cluster
 * @param minimumAllocation the minimum allocation per container
 * @return an allocation
 */
public synchronized Allocation getAllocation(ResourceCalculator rc,
    Resource clusterResource, Resource minimumAllocation) {

  Set<ContainerId> currentContPreemption = Collections.unmodifiableSet(
      new HashSet<ContainerId>(containersToPreempt));
  containersToPreempt.clear();
  Resource tot = Resource.newInstance(0, 0, 0);
  for(ContainerId c : currentContPreemption){
    Resources.addTo(tot,
        liveContainers.get(c).getContainer().getResource());
  }
  int numCont = (int) Math.ceil(
      Resources.divide(rc, clusterResource, tot, minimumAllocation));
  ResourceRequest rr = ResourceRequest.newInstance(
      Priority.UNDEFINED, ResourceRequest.ANY,
      minimumAllocation, numCont);
  ContainersAndNMTokensAllocation allocation =
      pullNewlyAllocatedContainersAndNMTokens();
  Resource headroom = getHeadroom();
  setApplicationHeadroomForMetrics(headroom);
  return new Allocation(allocation.getContainerList(), headroom, null,
    currentContPreemption, Collections.singletonList(rr),
    allocation.getNMTokenList());
}
 
Example 7
Source Project: hadoop   Source File: TestReservationQueue.java    License: Apache License 2.0
@Before
public void setup() throws IOException {
  // setup a context / conf
  csConf = new CapacitySchedulerConfiguration();
  YarnConfiguration conf = new YarnConfiguration();
  csContext = mock(CapacitySchedulerContext.class);
  when(csContext.getConfiguration()).thenReturn(csConf);
  when(csContext.getConf()).thenReturn(conf);
  when(csContext.getMinimumResourceCapability()).thenReturn(
      Resources.createResource(GB, 1, 1));
  when(csContext.getMaximumResourceCapability()).thenReturn(
      Resources.createResource(16 * GB, 32, 32));
  when(csContext.getClusterResource()).thenReturn(
      Resources.createResource(100 * 16 * GB, 100 * 32, 100 * 32));
  when(csContext.getResourceCalculator()).thenReturn(resourceCalculator);
  
  RMContext mockRMContext = TestUtils.getMockRMContext();
  when(csContext.getRMContext()).thenReturn(mockRMContext);

  // create a queue
  PlanQueue pq = new PlanQueue(csContext, "root", null, null);
  reservationQueue = new ReservationQueue(csContext, "a", pq);

}
 
Example 8
Source Project: hadoop   Source File: TestClientRMService.java    License: Apache License 2.0
private static YarnScheduler mockYarnScheduler() {
  YarnScheduler yarnScheduler = mock(YarnScheduler.class);
  when(yarnScheduler.getMinimumResourceCapability()).thenReturn(
      Resources.createResource(
          YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB));
  when(yarnScheduler.getMaximumResourceCapability()).thenReturn(
      Resources.createResource(
          YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB));
  when(yarnScheduler.getAppsInQueue(QUEUE_1)).thenReturn(
      Arrays.asList(getApplicationAttemptId(101), getApplicationAttemptId(102)));
  when(yarnScheduler.getAppsInQueue(QUEUE_2)).thenReturn(
      Arrays.asList(getApplicationAttemptId(103)));
  ApplicationAttemptId attemptId = getApplicationAttemptId(1);
  when(yarnScheduler.getAppResourceUsageReport(attemptId)).thenReturn(null);
  ResourceCalculator rc = new DefaultResourceCalculator();
  when(yarnScheduler.getResourceCalculator()).thenReturn(rc);
  return yarnScheduler;
}
 
Example 9
Source Project: tez   Source File: YarnTaskSchedulerService.java    License: Apache License 2.0
private void releaseContainer(ContainerId containerId) {
  Object assignedTask = containerAssignments.remove(containerId);
  if (assignedTask != null) {
    // A task was assigned to this container at some point. Inform the app.
    getContext().containerBeingReleased(containerId);
  }
  HeldContainer delayedContainer = heldContainers.remove(containerId);
  if (delayedContainer != null) {
    Resources.subtractFrom(allocatedResources,
        delayedContainer.getContainer().getResource());
  }
  if (delayedContainer != null || !shouldReuseContainers) {
    amRmClient.releaseAssignedContainer(containerId);
  }
  if (assignedTask != null) {
    // A task was assigned at some point. Add to release list since we are
    // releasing the container.
    releasedContainers.put(containerId, assignedTask);
  }
}
 
Example 10
Source Project: hadoop   Source File: FifoScheduler.java    License: Apache License 2.0
private synchronized void initScheduler(Configuration conf) {
  validateConf(conf);
  //Use ConcurrentSkipListMap because applications need to be ordered
  this.applications =
      new ConcurrentSkipListMap<ApplicationId, SchedulerApplication<FiCaSchedulerApp>>();
  this.minimumAllocation =
      Resources.createResource(conf.getInt(
          YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
          YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB));
  initMaximumResourceCapability(
      Resources.createResource(conf.getInt(
          YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
          YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB),
        conf.getInt(
          YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
          YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES),
        conf.getInt(
          YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_GCORES,
          YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_GCORES)));
  this.usePortForNodeName = conf.getBoolean(
      YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME,
      YarnConfiguration.DEFAULT_RM_SCHEDULER_USE_PORT_FOR_NODE_NAME);
  this.metrics = QueueMetrics.forQueue(DEFAULT_QUEUE_NAME, null, false,
      conf);
  this.activeUsersManager = new ActiveUsersManager(metrics);
}
 
Example 11
Source Project: big-c   Source File: CapacityScheduler.java    License: Apache License 2.0
private synchronized void addNode(RMNode nodeManager) {
  FiCaSchedulerNode schedulerNode = new FiCaSchedulerNode(nodeManager,
      usePortForNodeName, nodeManager.getNodeLabels());
  this.nodes.put(nodeManager.getNodeID(), schedulerNode);
  Resources.addTo(clusterResource, nodeManager.getTotalCapability());

  // update this node to node label manager
  if (labelManager != null) {
    labelManager.activateNode(nodeManager.getNodeID(),
        nodeManager.getTotalCapability());
  }
  
  root.updateClusterResource(clusterResource, new ResourceLimits(
      clusterResource));
  int numNodes = numNodeManagers.incrementAndGet();
  updateMaximumAllocation(schedulerNode, true);
  
  LOG.info("Added node " + nodeManager.getNodeAddress() + 
      " clusterResource: " + clusterResource);

  if (scheduleAsynchronously && numNodes == 1) {
    asyncSchedulerThread.beginSchedule();
  }
}
 
Example 12
Source Project: hadoop   Source File: LeafQueue.java    License: Apache License 2.0
public synchronized Resource getUserAMResourceLimitPerPartition(
    String nodePartition) {
  /*
   * The user am resource limit is based on the same approach as the user
   * limit (as it should represent a subset of that). This means that it uses
   * the absolute queue capacity (per partition) instead of the max and is
   * modified by the userlimit and the userlimit factor as is the userlimit
   */
  float effectiveUserLimit = Math.max(userLimit / 100.0f,
      1.0f / Math.max(getActiveUsersManager().getNumActiveUsers(), 1));

  Resource queuePartitionResource = Resources.multiplyAndNormalizeUp(
      resourceCalculator,
      labelManager.getResourceByLabel(nodePartition, lastClusterResource),
      queueCapacities.getAbsoluteCapacity(nodePartition), minimumAllocation);

  return Resources.multiplyAndNormalizeUp(resourceCalculator,
      queuePartitionResource,
      queueCapacities.getMaxAMResourcePercentage(nodePartition)
          * effectiveUserLimit * userLimitFactor, minimumAllocation);
}
 
Example 13
Source Project: big-c   Source File: FifoScheduler.java    License: Apache License 2.0
private synchronized void initScheduler(Configuration conf) {
  validateConf(conf);
  //Use ConcurrentSkipListMap because applications need to be ordered
  this.applications =
      new ConcurrentSkipListMap<ApplicationId, SchedulerApplication<FiCaSchedulerApp>>();
  this.minimumAllocation =
      Resources.createResource(conf.getInt(
          YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
          YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB));
  initMaximumResourceCapability(
      Resources.createResource(conf.getInt(
          YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
          YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB),
        conf.getInt(
          YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
          YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES)));
  this.usePortForNodeName = conf.getBoolean(
      YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME,
      YarnConfiguration.DEFAULT_RM_SCHEDULER_USE_PORT_FOR_NODE_NAME);
  this.metrics = QueueMetrics.forQueue(DEFAULT_QUEUE_NAME, null, false,
      conf);
  this.activeUsersManager = new ActiveUsersManager(metrics);
}
 
Example 14
Source Project: hadoop   Source File: FairScheduler.java    License: Apache License 2.0
/**
 * Check for queues that need tasks preempted, either because they have been
 * below their guaranteed share for minSharePreemptionTimeout or they have
 * been below their fair share threshold for the fairSharePreemptionTimeout. If
 * such queues exist, compute how many tasks of each type need to be preempted
 * and then select the right ones using preemptTasks.
 */
protected synchronized void preemptTasksIfNecessary() {
  if (!shouldAttemptPreemption()) {
    return;
  }

  long curTime = getClock().getTime();
  if (curTime - lastPreemptCheckTime < preemptionInterval) {
    return;
  }
  lastPreemptCheckTime = curTime;

  Resource resToPreempt = Resources.clone(Resources.none());
  for (FSLeafQueue sched : queueMgr.getLeafQueues()) {
    Resources.addTo(resToPreempt, resToPreempt(sched, curTime));
  }
  if (Resources.greaterThan(RESOURCE_CALCULATOR, clusterResource, resToPreempt,
      Resources.none())) {
    preemptResources(resToPreempt);
  }
}
 
Example 15
Source Project: hadoop   Source File: CapacityScheduler.java    License: Apache License 2.0
private synchronized void addNode(RMNode nodeManager) {
  FiCaSchedulerNode schedulerNode = new FiCaSchedulerNode(nodeManager,
      usePortForNodeName, nodeManager.getNodeLabels());
  this.nodes.put(nodeManager.getNodeID(), schedulerNode);
  Resources.addTo(clusterResource, nodeManager.getTotalCapability());

  // update this node to node label manager
  if (labelManager != null) {
    labelManager.activateNode(nodeManager.getNodeID(),
        nodeManager.getTotalCapability());
  }
  
  root.updateClusterResource(clusterResource, new ResourceLimits(
      clusterResource));
  int numNodes = numNodeManagers.incrementAndGet();
  updateMaximumAllocation(schedulerNode, true);
  
  LOG.info("Added node " + nodeManager.getNodeAddress() + 
      " clusterResource: " + clusterResource);

  if (scheduleAsynchronously && numNodes == 1) {
    asyncSchedulerThread.beginSchedule();
  }
}
 
Example 16
private double getIdealPctOfGuaranteed(TempQueue q) {
  double pctOver = Integer.MAX_VALUE;
  if (q != null && Resources.greaterThan(
      rc, clusterRes, q.guaranteed, Resources.none())) {
    pctOver =
        Resources.divide(rc, clusterRes, q.idealAssigned, q.guaranteed);
  }
  return (pctOver);
}
 
Example 17
Source Project: big-c   Source File: TestFairScheduler.java    License: Apache License 2.0
@Test(timeout = 3000)
public void testMaxAssignWithZeroMemoryContainers() throws Exception {
  conf.setBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE, true);
  conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0);
  
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  RMNode node =
      MockNodes.newNodeInfo(1, Resources.createResource(16384, 16), 0,
          "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node);
  scheduler.handle(nodeEvent);

  ApplicationAttemptId attId =
      createSchedulingRequest(0, 1, "root.default", "user", 8);
  FSAppAttempt app = scheduler.getSchedulerApp(attId);

  // set maxAssign to 2: only 2 containers should be allocated
  scheduler.maxAssign = 2;
  scheduler.update();
  scheduler.handle(updateEvent);
  assertEquals("Incorrect number of containers allocated", 2, app
      .getLiveContainers().size());

  // set maxAssign to -1: all remaining containers should be allocated
  scheduler.maxAssign = -1;
  scheduler.update();
  scheduler.handle(updateEvent);
  assertEquals("Incorrect number of containers allocated", 8, app
      .getLiveContainers().size());
}
 
Example 18
public void assignPreemption(float scalingFactor,
    ResourceCalculator rc, Resource clusterResource) {
  if (Resources.greaterThan(rc, clusterResource, current, idealAssigned)) {
      toBePreempted = Resources.multiply(
          Resources.subtract(current, idealAssigned), scalingFactor);
  } else {
    toBePreempted = Resource.newInstance(0, 0, 0);
  }
}
 
Example 19
Source Project: hadoop   Source File: TestWorkPreservingRMRestart.java    License: Apache License 2.0
private void checkCSQueue(MockRM rm,
    SchedulerApplication<SchedulerApplicationAttempt> app,
    Resource clusterResource, Resource queueResource, Resource usedResource,
    int numContainers)
    throws Exception {
  checkCSLeafQueue(rm, app, clusterResource, queueResource, usedResource,
      numContainers);

  LeafQueue queue = (LeafQueue) app.getQueue();
  Resource availableResources =
      Resources.subtract(queueResource, usedResource);
  // ************ check app headroom ****************
  SchedulerApplicationAttempt schedulerAttempt = app.getCurrentAppAttempt();
  assertEquals(availableResources, schedulerAttempt.getHeadroom());

  // ************* check Queue metrics ************
  QueueMetrics queueMetrics = queue.getMetrics();
  assertMetrics(queueMetrics, 1, 0, 1, 0, 2, availableResources.getMemory(),
      availableResources.getVirtualCores(), usedResource.getMemory(),
      usedResource.getVirtualCores());

  // ************ check user metrics ***********
  QueueMetrics userMetrics =
      queueMetrics.getUserMetrics(app.getUser());
  assertMetrics(userMetrics, 1, 0, 1, 0, 2, availableResources.getMemory(),
      availableResources.getVirtualCores(), usedResource.getMemory(),
      usedResource.getVirtualCores());
}
 
Example 20
Source Project: submarine   Source File: RunJobCliParsingMXNetYamlTest.java    License: Apache License 2.0
private void verifyPsValues(RunJobParameters jobRunParameters,
    String prefix) {
  assertTrue(RunJobParameters.class + " must be an instance of " +
          MXNetRunJobParameters.class,
      jobRunParameters instanceof MXNetRunJobParameters);
  MXNetRunJobParameters mxNetParams =
      (MXNetRunJobParameters) jobRunParameters;

  assertEquals(4, mxNetParams.getNumPS());
  assertEquals(prefix + "testLaunchCmdPs", mxNetParams.getPSLaunchCmd());
  assertEquals(prefix + "testDockerImagePs",
      mxNetParams.getPsDockerImage());
  assertEquals(Resources.createResource(20500, 34),
      mxNetParams.getPsResource());
}
 
Example 21
Source Project: submarine   Source File: RunJobCliParsingMXNetYamlTest.java    License: Apache License 2.0
private void verifySchedulerValues(RunJobParameters jobRunParameters,
    String prefix) {
  assertTrue(RunJobParameters.class + " must be an instance of " +
      MXNetRunJobParameters.class, jobRunParameters instanceof MXNetRunJobParameters);
  MXNetRunJobParameters mxNetParams = (MXNetRunJobParameters) jobRunParameters;
  assertEquals(1, mxNetParams.getNumSchedulers());
  assertEquals(prefix + "testLaunchCmdScheduler",
      mxNetParams.getSchedulerLaunchCmd());
  assertEquals(prefix + "testDockerImageScheduler", mxNetParams.getSchedulerDockerImage());
  assertEquals(Resources.createResource(10240, 16),
      mxNetParams.getSchedulerResource());
}
 
Example 22
Source Project: hadoop   Source File: TestFairScheduler.java    License: Apache License 2.0
@Test
public void testSimpleFairShareCalculation() throws IOException {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  // Add one big node (only care about aggregate capacity)
  RMNode node1 =
      MockNodes.newNodeInfo(1, Resources.createResource(10 * 1024), 1,
          "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);

  // Have two queues which want entire cluster capacity
  createSchedulingRequest(10 * 1024, "queue1", "user1");
  createSchedulingRequest(10 * 1024, "queue2", "user1");
  createSchedulingRequest(10 * 1024, "root.default", "user1");

  scheduler.update();
  scheduler.getQueueManager().getRootQueue()
      .setSteadyFairShare(scheduler.getClusterResource());
  scheduler.getQueueManager().getRootQueue().recomputeSteadyShares();

  Collection<FSLeafQueue> queues = scheduler.getQueueManager().getLeafQueues();
  assertEquals(3, queues.size());
  
  // Divided three ways - between the two queues and the default queue
  for (FSLeafQueue p : queues) {
    assertEquals(3414, p.getFairShare().getMemory());
    assertEquals(3414, p.getMetrics().getFairShareMB());
    assertEquals(3414, p.getSteadyFairShare().getMemory());
    assertEquals(3414, p.getMetrics().getSteadyFairShareMB());
  }
}
 
Example 23
Source Project: hadoop   Source File: TestSchedulerUtils.java    License: Apache License 2.0
@Test (timeout = 30000)
public void testNormalizeRequestWithDominantResourceCalculator() {
  ResourceCalculator resourceCalculator = new DominantResourceCalculator();
  
  Resource minResource = Resources.createResource(1024, 1, 0);
  Resource maxResource = Resources.createResource(10240, 10, 10);
  Resource clusterResource = Resources.createResource(10 * 1024, 10, 10);
  
  ResourceRequest ask = new ResourceRequestPBImpl();

  // case negative memory/vcores/gcores
  ask.setCapability(Resources.createResource(-1024, -1, -1));
  SchedulerUtils.normalizeRequest(
      ask, resourceCalculator, clusterResource, minResource, maxResource);
  assertEquals(minResource, ask.getCapability());

  // case zero memory/vcores/gcores
  ask.setCapability(Resources.createResource(0, 0, 0));
  SchedulerUtils.normalizeRequest(
      ask, resourceCalculator, clusterResource, minResource, maxResource);
  assertEquals(minResource, ask.getCapability());
  assertEquals(1, ask.getCapability().getVirtualCores());
  assertEquals(1024, ask.getCapability().getMemory());
  assertEquals(0, ask.getCapability().getGpuCores());

  // case non-zero memory & zero cores
  ask.setCapability(Resources.createResource(1536, 0, 0));
  SchedulerUtils.normalizeRequest(
      ask, resourceCalculator, clusterResource, minResource, maxResource);
  assertEquals(Resources.createResource(2048, 1, 0), ask.getCapability());
  assertEquals(1, ask.getCapability().getVirtualCores());
  assertEquals(2048, ask.getCapability().getMemory());
  assertEquals(0, ask.getCapability().getGpuCores());
}
 
Example 24
Source Project: big-c   Source File: RMContainerImpl.java    License: Apache License 2.0
@Override
public void addResumedResource(Resource resource) {
  try {
    readLock.lock();
    this.lastResumed = Resources.clone(resource);
    Resources.subtractFrom(preempted, resource);
  } finally {
    readLock.unlock();
  }
}
 
Example 25
Source Project: hadoop   Source File: SchedulerNode.java    License: Apache License 2.0
public SchedulerNode(RMNode node, boolean usePortForNodeName,
    Set<String> labels) {
  this.rmNode = node;
  this.availableResource = Resources.clone(node.getTotalCapability());
  this.totalResourceCapability = Resources.clone(node.getTotalCapability());
  if (usePortForNodeName) {
    nodeName = rmNode.getHostName() + ":" + node.getNodeID().getPort();
  } else {
    nodeName = rmNode.getHostName();
  }
  this.labels = ImmutableSet.copyOf(labels);
}
 
Example 26
Source Project: big-c   Source File: TestComputeFairShares.java    License: Apache License 2.0
/**
 * Basic test for weighted shares with no minimum shares and no low demands.
 * Each pool should get slots in proportion to its weight.
 */
@Test
public void testWeightedSharing() {
  scheds.add(new FakeSchedulable(0, 2.0));
  scheds.add(new FakeSchedulable(0, 1.0));
  scheds.add(new FakeSchedulable(0, 1.0));
  scheds.add(new FakeSchedulable(0, 0.5));
  ComputeFairShares.computeShares(scheds,
      Resources.createResource(45), ResourceType.MEMORY);
  verifyMemoryShares(20, 10, 10, 5);
}
 
Example 27
Source Project: big-c   Source File: CapacitySchedulerConfiguration.java    License: Apache License 2.0
public Resource getMaximumAllocation() {
  int maximumMemory = getInt(
      YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
  int maximumCores = getInt(
      YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
  return Resources.createResource(maximumMemory, maximumCores);
}
 
Example 28
Source Project: big-c   Source File: LeafQueue.java    License: Apache License 2.0
boolean shouldAllocOrReserveNewContainer(FiCaSchedulerApp application,
    Priority priority, Resource required) {
  int requiredContainers = application.getTotalRequiredResources(priority);
  int reservedContainers = application.getNumReservedContainers(priority);
  int starvation = 0;
  if (reservedContainers > 0) {
    float nodeFactor = 
        Resources.ratio(
            resourceCalculator, required, getMaximumAllocation()
            );
    
    // Use percentage of node required to bias against large containers...
    // Protect against corner case where you need the whole node with
    // Math.min(nodeFactor, minimumAllocationFactor)
    // Number of times reserve has been called at this priority.
    starvation = 
        (int)((application.getReReservations(priority) / (float)reservedContainers) * 
              (1.0f - (Math.min(nodeFactor, getMinimumAllocationFactor())))
             );
    
    if (LOG.isDebugEnabled()) {
      LOG.debug("needsContainers:" +
          " app.#re-reserve=" + application.getReReservations(priority) + 
          " reserved=" + reservedContainers + 
          " nodeFactor=" + nodeFactor + 
          " minAllocFactor=" + getMinimumAllocationFactor() +
          " starvation=" + starvation);
    }
  }

  return (((starvation + requiredContainers) - reservedContainers) > 0);
}
 
Example 29
Source Project: hadoop   Source File: FSParentQueue.java    License: Apache License 2.0
@Override
public Resource getResourceUsage() {
  Resource usage = Resources.createResource(0);
  for (FSQueue child : childQueues) {
    Resources.addTo(usage, child.getResourceUsage());
  }
  return usage;
}
 
Example 30
Source Project: big-c   Source File: RMContainerImpl.java    License: Apache License 2.0
public Resource getCurrentUsedResource() {
  if (isSuspending) {
    return Resources.subtract(container.getResource(), preempted);
  } else {
    return container.getResource();
  }
}