Java Code Examples for org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils#newJobId()

The following examples show how to use org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils#newJobId(). They are taken from open-source projects; the source file, originating project, and license are listed above each example.
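As a quick orientation before the examples, here is a minimal, hypothetical sketch (not taken from any of the projects below) of the two newJobId overloads that recur throughout this page: one builds a JobId from an existing ApplicationId plus a job sequence number, the other takes the cluster timestamp, application id, and job id directly. The class name and all numeric values are placeholders.

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class NewJobIdSketch {
  public static void main(String[] args) {
    // Overload 1: derive the JobId from an existing ApplicationId and a job sequence number.
    ApplicationId appId = ApplicationId.newInstance(1329348432655L, 1); // cluster timestamp, app id
    JobId fromAppId = MRBuilderUtils.newJobId(appId, 1);

    // Overload 2: convenience form that builds the ApplicationId internally
    // from a cluster timestamp, an application id, and a job id.
    JobId fromParts = MRBuilderUtils.newJobId(1329348432655L, 1, 1);

    // Both yield ids in the familiar job_<clusterTimestamp>_<sequence> form,
    // e.g. job_1329348432655_0001.
    System.out.println(fromAppId);
    System.out.println(fromParts);
  }
}

Most of the tests below use one of these two forms to fabricate a JobId for mocks, then derive TaskId and TaskAttemptId values from it with MRBuilderUtils.newTaskId() and MRBuilderUtils.newTaskAttemptId().
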
Example 1
Source File: TestJobListCache.java    From hadoop with Apache License 2.0
@Test (timeout = 1000)
public void testEviction() throws InterruptedException {
  int maxSize = 2;
  JobListCache cache = new JobListCache(maxSize, 1000);

  JobId jobId1 = MRBuilderUtils.newJobId(1, 1, 1);
  HistoryFileInfo fileInfo1 = Mockito.mock(HistoryFileInfo.class);
  Mockito.when(fileInfo1.getJobId()).thenReturn(jobId1);

  JobId jobId2 = MRBuilderUtils.newJobId(2, 2, 2);
  HistoryFileInfo fileInfo2 = Mockito.mock(HistoryFileInfo.class);
  Mockito.when(fileInfo2.getJobId()).thenReturn(jobId2);

  JobId jobId3 = MRBuilderUtils.newJobId(3, 3, 3);
  HistoryFileInfo fileInfo3 = Mockito.mock(HistoryFileInfo.class);
  Mockito.when(fileInfo3.getJobId()).thenReturn(jobId3);

  cache.addIfAbsent(fileInfo1);
  cache.addIfAbsent(fileInfo2);
  cache.addIfAbsent(fileInfo3);

  Collection<HistoryFileInfo> values;
  for (int i = 0; i < 9; i++) {
    values = cache.values();
    if (values.size() > maxSize) {
      Thread.sleep(100);
    } else {
      assertFalse("fileInfo1 should have been evicted",
        values.contains(fileInfo1));
      return;
    }
  }
  fail("JobListCache didn't delete the extra entry");
}
 
Example 2
Source File: TestJobIdHistoryFileInfoMap.java    From big-c with Apache License 2.0
/**
 * Trivial test case that verifies basic functionality of {@link
 * JobIdHistoryFileInfoMap}
 */
@Test(timeout = 2000)
public void testWithSingleElement() throws InterruptedException {
  JobIdHistoryFileInfoMap mapWithSize = new JobIdHistoryFileInfoMap();

  JobId jobId = MRBuilderUtils.newJobId(1, 1, 1);
  HistoryFileInfo fileInfo1 = Mockito.mock(HistoryFileInfo.class);
  Mockito.when(fileInfo1.getJobId()).thenReturn(jobId);

  // add it twice
  assertEquals("Incorrect return on putIfAbsent()",
      null, mapWithSize.putIfAbsent(jobId, fileInfo1));
  assertEquals("Incorrect return on putIfAbsent()",
      fileInfo1, mapWithSize.putIfAbsent(jobId, fileInfo1));

  // check get()
  assertEquals("Incorrect get()", fileInfo1, mapWithSize.get(jobId));
  assertTrue("Incorrect size()", checkSize(mapWithSize, 1));

  // check navigableKeySet()
  NavigableSet<JobId> set = mapWithSize.navigableKeySet();
  assertEquals("Incorrect navigableKeySet()", 1, set.size());
  assertTrue("Incorrect navigableKeySet()", set.contains(jobId));

  // check values()
  Collection<HistoryFileInfo> values = mapWithSize.values();
  assertEquals("Incorrect values()", 1, values.size());
  assertTrue("Incorrect values()", values.contains(fileInfo1));
}
 
Example 3
Source File: TestJobListCache.java    From big-c with Apache License 2.0
@Test (timeout = 1000)
public void testAddExisting() {
  JobListCache cache = new JobListCache(2, 1000);

  JobId jobId = MRBuilderUtils.newJobId(1, 1, 1);
  HistoryFileInfo fileInfo = Mockito.mock(HistoryFileInfo.class);
  Mockito.when(fileInfo.getJobId()).thenReturn(jobId);

  cache.addIfAbsent(fileInfo);
  cache.addIfAbsent(fileInfo);
  assertEquals("Incorrect number of cache entries", 1,
      cache.values().size());
}
 
Example 4
Source File: TestContainerLauncherImpl.java    From big-c with Apache License 2.0
public static TaskAttemptId makeTaskAttemptId(long ts, int appId, int taskId, 
    TaskType taskType, int id) {
  ApplicationId aID = ApplicationId.newInstance(ts, appId);
  JobId jID = MRBuilderUtils.newJobId(aID, id);
  TaskId tID = MRBuilderUtils.newTaskId(jID, taskId, taskType);
  return MRBuilderUtils.newTaskAttemptId(tID, id);
}
 
Example 5
Source File: TestJobInfo.java    From hadoop with Apache License 2.0
@Test(timeout = 10000)
public void testAverageMergeTime() throws IOException {
  String historyFileName =
      "job_1329348432655_0001-1329348443227-user-Sleep+job-1329348468601-10-1-SUCCEEDED-default.jhist";
  String confFileName =
      "job_1329348432655_0001_conf.xml";
  Configuration conf = new Configuration();
  JobACLsManager jobAclsMgr = new JobACLsManager(conf);
  Path fullHistoryPath =
      new Path(TestJobHistoryEntities.class.getClassLoader()
          .getResource(historyFileName)
          .getFile());
  Path fullConfPath =
      new Path(TestJobHistoryEntities.class.getClassLoader()
          .getResource(confFileName)
          .getFile());

  HistoryFileInfo info = mock(HistoryFileInfo.class);
  when(info.getConfFile()).thenReturn(fullConfPath);

  JobId jobId = MRBuilderUtils.newJobId(1329348432655L, 1, 1);
  CompletedJob completedJob =
      new CompletedJob(conf, jobId, fullHistoryPath, true, "user",
          info, jobAclsMgr);
  JobInfo jobInfo = new JobInfo(completedJob);
  // There are 2 tasks with merge time of 45 and 55 respectively. So average
  // merge time should be 50.
  Assert.assertEquals(50L, jobInfo.getAvgMergeTime().longValue());
}
 
Example 6
Source File: TestTaskAttempt.java    From hadoop with Apache License 2.0
@Test
public void testTooManyFetchFailureAfterKill() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId =
    ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);

  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));

  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");

  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});

  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  Resource resource = mock(Resource.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  when(resource.getMemory()).thenReturn(1024);

  TaskAttemptImpl taImpl =
    new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
      splits, jobConf, taListener,
      mock(Token.class), new Credentials(),
      new SystemClock(), appCtx);

  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");

  taImpl.handle(new TaskAttemptEvent(attemptId,
    TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
    container, mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  taImpl.handle(new TaskAttemptEvent(attemptId,
    TaskAttemptEventType.TA_DONE));
  taImpl.handle(new TaskAttemptEvent(attemptId,
    TaskAttemptEventType.TA_CONTAINER_CLEANED));

  assertEquals("Task attempt is not in succeeded state", taImpl.getState(),
    TaskAttemptState.SUCCEEDED);
  taImpl.handle(new TaskAttemptEvent(attemptId,
    TaskAttemptEventType.TA_KILL));
  assertEquals("Task attempt is not in KILLED state", taImpl.getState(),
    TaskAttemptState.KILLED);
  taImpl.handle(new TaskAttemptEvent(attemptId,
    TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
  assertEquals("Task attempt is not in KILLED state, still", taImpl.getState(),
    TaskAttemptState.KILLED);
  assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
    eventHandler.internalError);
}
 
Example 7
Source File: TestContainerLauncher.java    From hadoop with Apache License 2.0
@Test(timeout = 5000)
public void testPoolLimits() throws InterruptedException {
  ApplicationId appId = ApplicationId.newInstance(12345, 67);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
    appId, 3);
  JobId jobId = MRBuilderUtils.newJobId(appId, 8);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 9, TaskType.MAP);
  TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  ContainerId containerId = ContainerId.newContainerId(appAttemptId, 10);

  AppContext context = mock(AppContext.class);
  CustomContainerLauncher containerLauncher = new CustomContainerLauncher(
    context);
  Configuration conf = new Configuration();
  conf.setInt(MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT, 12);
  containerLauncher.init(conf);
  containerLauncher.start();

  ThreadPoolExecutor threadPool = containerLauncher.getThreadPool();

  // 10 different hosts
  containerLauncher.expectedCorePoolSize = containerLauncher.initialPoolSize;
  for (int i = 0; i < 10; i++) {
    containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
      containerId, "host" + i + ":1234", null,
      ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
  }
  waitForEvents(containerLauncher, 10);
  Assert.assertEquals(10, threadPool.getPoolSize());
  Assert.assertNull(containerLauncher.foundErrors);

  // 4 more different hosts, but thread pool size should be capped at 12
  containerLauncher.expectedCorePoolSize = 12;
  for (int i = 1; i <= 4; i++) {
    containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
      containerId, "host1" + i + ":1234", null,
      ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
  }
  waitForEvents(containerLauncher, 12);
  Assert.assertEquals(12, threadPool.getPoolSize());
  Assert.assertNull(containerLauncher.foundErrors);

  // Make some threads idle so that the remaining events are also processed.
  containerLauncher.finishEventHandling = true;
  waitForEvents(containerLauncher, 14);
  Assert.assertEquals(12, threadPool.getPoolSize());
  Assert.assertNull(containerLauncher.foundErrors);

  containerLauncher.stop();
}
 
Example 8
Source File: TestRMContainerAllocator.java    From big-c with Apache License 2.0
@Test(timeout = 30000)
public void testPreemptReducers() throws Exception {
  LOG.info("Running testPreemptReducers");

  Configuration conf = new Configuration();
  MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
      .getDispatcher();

  // Submit the application
  RMApp app = rm.submitApp(1024);
  dispatcher.await();

  MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
  amNodeManager.nodeHeartbeat(true);
  dispatcher.await();

  ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
      .getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  dispatcher.await();

  JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(
      MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
          0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
  MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
      appAttemptId, mockJob, new SystemClock());
  allocator.setMapResourceRequest(BuilderUtils.newResource(1024, 1));
  allocator.setReduceResourceRequest(BuilderUtils.newResource(1024, 1));
  RMContainerAllocator.AssignedRequests assignedRequests =
      allocator.getAssignedRequests();
  RMContainerAllocator.ScheduledRequests scheduledRequests =
      allocator.getScheduledRequests();
  ContainerRequestEvent event1 =
      createReq(jobId, 1, 2048, new String[] { "h1" }, false, false);
  scheduledRequests.maps.put(mock(TaskAttemptId.class),
      new RMContainerRequestor.ContainerRequest(event1, null));
  assignedRequests.reduces.put(mock(TaskAttemptId.class),
      mock(Container.class));

  allocator.preemptReducesIfNeeded();
  Assert.assertEquals("The reducer is not preempted",
      1, assignedRequests.preemptionWaitingReduces.size());
}
 
Example 9
Source File: TestRMContainerAllocator.java    From big-c with Apache License 2.0
@Test
public void testSimple() throws Exception {

  LOG.info("Running testSimple");

  Configuration conf = new Configuration();
  MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
      .getDispatcher();

  // Submit the application
  RMApp app = rm.submitApp(1024);
  dispatcher.await();

  MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
  amNodeManager.nodeHeartbeat(true);
  dispatcher.await();

  ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
      .getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  dispatcher.await();

  JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(
      MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0, 
          0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
  MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
      appAttemptId, mockJob);

  // add resources to scheduler
  MockNM nodeManager1 = rm.registerNode("h1:1234", 10240);
  MockNM nodeManager2 = rm.registerNode("h2:1234", 10240);
  MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
  dispatcher.await();

  // create the container request
  ContainerRequestEvent event1 = createReq(jobId, 1, 1024,
      new String[] { "h1" });
  allocator.sendRequest(event1);

  // send 1 more request with different resource req
  ContainerRequestEvent event2 = createReq(jobId, 2, 1024,
      new String[] { "h2" });
  allocator.sendRequest(event2);

  // this tells the scheduler about the requests
  // as nodes are not added, no allocations
  List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  Assert.assertEquals(4, rm.getMyFifoScheduler().lastAsk.size());

  // send another request with different resource and priority
  ContainerRequestEvent event3 = createReq(jobId, 3, 1024,
      new String[] { "h3" });
  allocator.sendRequest(event3);

  // this tells the scheduler about the requests
  // as nodes are not added, no allocations
  assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  Assert.assertEquals(3, rm.getMyFifoScheduler().lastAsk.size());
  
  // update resources in scheduler
  nodeManager1.nodeHeartbeat(true); // Node heartbeat
  nodeManager2.nodeHeartbeat(true); // Node heartbeat
  nodeManager3.nodeHeartbeat(true); // Node heartbeat
  dispatcher.await();

  assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals(0, rm.getMyFifoScheduler().lastAsk.size());
  checkAssignments(new ContainerRequestEvent[] { event1, event2, event3 },
      assigned, false);
  
  // check that the assigned container requests are cancelled
  assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals(5, rm.getMyFifoScheduler().lastAsk.size());    
}
 
Example 10
Source File: TestRMContainerAllocator.java    From hadoop with Apache License 2.0
@Test
public void testMapReduceScheduling() throws Exception {

  LOG.info("Running testMapReduceScheduling");

  Configuration conf = new Configuration();
  MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
      .getDispatcher();

  // Submit the application
  RMApp app = rm.submitApp(1024);
  dispatcher.await();

  MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
  amNodeManager.nodeHeartbeat(true);
  dispatcher.await();

  ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
      .getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  dispatcher.await();

  JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(
      MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
          0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
  MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
      appAttemptId, mockJob);

  // add resources to scheduler
  MockNM nodeManager1 = rm.registerNode("h1:1234", 1024);
  MockNM nodeManager2 = rm.registerNode("h2:1234", 10240);
  MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
  dispatcher.await();

  // create the container request
  // send MAP request
  ContainerRequestEvent event1 = createReq(jobId, 1, 2048, new String[] {
      "h1", "h2" }, true, false);
  allocator.sendRequest(event1);

  // send REDUCE request
  ContainerRequestEvent event2 = createReq(jobId, 2, 3000,
      new String[] { "h1" }, false, true);
  allocator.sendRequest(event2);

  // send MAP request
  ContainerRequestEvent event3 = createReq(jobId, 3, 2048,
      new String[] { "h3" }, false, false);
  allocator.sendRequest(event3);

  // this tells the scheduler about the requests
  // as nodes are not added, no allocations
  List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());

  // update resources in scheduler
  nodeManager1.nodeHeartbeat(true); // Node heartbeat
  nodeManager2.nodeHeartbeat(true); // Node heartbeat
  nodeManager3.nodeHeartbeat(true); // Node heartbeat
  dispatcher.await();

  assigned = allocator.schedule();
  dispatcher.await();
  checkAssignments(new ContainerRequestEvent[] { event1, event3 },
      assigned, false);

  // validate that no container is assigned to h1 as it doesn't have 2048 MB available
  for (TaskAttemptContainerAssignedEvent assig : assigned) {
    Assert.assertFalse("Assigned count not correct", "h1".equals(assig
        .getContainer().getNodeId().getHost()));
  }
}
 
Example 11
Source File: TestIds.java    From big-c with Apache License 2.0
private JobId createJobId(long clusterTimestamp, int idInt) {
  return MRBuilderUtils.newJobId(
      ApplicationId.newInstance(clusterTimestamp, idInt), idInt);
}
 
Example 12
Source File: TestRMContainerAllocator.java    From hadoop with Apache License 2.0
@Test
public void testSimple() throws Exception {

  LOG.info("Running testSimple");

  Configuration conf = new Configuration();
  MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
      .getDispatcher();

  // Submit the application
  RMApp app = rm.submitApp(1024);
  dispatcher.await();

  MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
  amNodeManager.nodeHeartbeat(true);
  dispatcher.await();

  ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
      .getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  dispatcher.await();

  JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(
      MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0, 
          0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
  MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
      appAttemptId, mockJob);

  // add resources to scheduler
  MockNM nodeManager1 = rm.registerNode("h1:1234", 10240);
  MockNM nodeManager2 = rm.registerNode("h2:1234", 10240);
  MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
  dispatcher.await();

  // create the container request
  ContainerRequestEvent event1 = createReq(jobId, 1, 1024,
      new String[] { "h1" });
  allocator.sendRequest(event1);

  // send 1 more request with different resource req
  ContainerRequestEvent event2 = createReq(jobId, 2, 1024,
      new String[] { "h2" });
  allocator.sendRequest(event2);

  // this tells the scheduler about the requests
  // as nodes are not added, no allocations
  List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  Assert.assertEquals(4, rm.getMyFifoScheduler().lastAsk.size());

  // send another request with different resource and priority
  ContainerRequestEvent event3 = createReq(jobId, 3, 1024,
      new String[] { "h3" });
  allocator.sendRequest(event3);

  // this tells the scheduler about the requests
  // as nodes are not added, no allocations
  assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  Assert.assertEquals(3, rm.getMyFifoScheduler().lastAsk.size());
  
  // update resources in scheduler
  nodeManager1.nodeHeartbeat(true); // Node heartbeat
  nodeManager2.nodeHeartbeat(true); // Node heartbeat
  nodeManager3.nodeHeartbeat(true); // Node heartbeat
  dispatcher.await();

  assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals(0, rm.getMyFifoScheduler().lastAsk.size());
  checkAssignments(new ContainerRequestEvent[] { event1, event2, event3 },
      assigned, false);
  
  // check that the assigned container requests are cancelled
  assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals(5, rm.getMyFifoScheduler().lastAsk.size());    
}
 
Example 13
Source File: TestTaskAttempt.java    From hadoop with Apache License 2.0
@Test
public void testDoubleTooManyFetchFailure() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId =
    ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);

  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));

  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");

  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});

  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  Resource resource = mock(Resource.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  when(resource.getMemory()).thenReturn(1024);

  TaskAttemptImpl taImpl =
    new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
        splits, jobConf, taListener,
        new Token(), new Credentials(),
        new SystemClock(), appCtx);

  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");

  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
      container, mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_DONE));
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_CONTAINER_CLEANED));

  assertEquals("Task attempt is not in succeeded state", taImpl.getState(),
      TaskAttemptState.SUCCEEDED);
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
  assertEquals("Task attempt is not in FAILED state", taImpl.getState(),
      TaskAttemptState.FAILED);
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
  assertEquals("Task attempt is not in FAILED state, still", taImpl.getState(),
      TaskAttemptState.FAILED);
  assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
      eventHandler.internalError);
}
 
Example 14
Source File: TestTaskAttempt.java    From hadoop with Apache License 2.0
@Test
public void testContainerCleanedWhileCommitting() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId =
    ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);

  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));

  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");

  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] {});

  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  Resource resource = mock(Resource.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  when(resource.getMemory()).thenReturn(1024);

  TaskAttemptImpl taImpl =
    new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
        splits, jobConf, taListener,
        new Token(), new Credentials(),
        new SystemClock(), appCtx);

  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");

  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
      container, mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_COMMIT_PENDING));

  assertEquals("Task attempt is not in commit pending state", taImpl.getState(),
      TaskAttemptState.COMMIT_PENDING);
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_CONTAINER_CLEANED));
  assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
      eventHandler.internalError);
  assertEquals("Task attempt is assigned locally", Locality.OFF_SWITCH,
      taImpl.getLocality());
}
 
Example 15
Source File: TestTaskAttempt.java    From big-c with Apache License 2.0
@Test
public void testLaunchFailedWhileKilling() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId =
    ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);

  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));

  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");

  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});

  TaskAttemptImpl taImpl =
    new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
        splits, jobConf, taListener,
        new Token(), new Credentials(),
        new SystemClock(), null);

  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);

  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
      container, mock(Map.class)));
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_KILL));
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_CONTAINER_CLEANED));
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
  assertFalse(eventHandler.internalError);
  assertEquals("Task attempt is not assigned on the local node", 
      Locality.NODE_LOCAL, taImpl.getLocality());
}
 
Example 16
Source File: TestTaskAttempt.java    From hadoop with Apache License 2.0
@Test
public void testLaunchFailedWhileKilling() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId =
    ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);

  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));

  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");

  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});

  TaskAttemptImpl taImpl =
    new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
        splits, jobConf, taListener,
        new Token(), new Credentials(),
        new SystemClock(), null);

  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);

  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
      container, mock(Map.class)));
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_KILL));
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_CONTAINER_CLEANED));
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
  assertFalse(eventHandler.internalError);
  assertEquals("Task attempt is not assigned on the local node", 
      Locality.NODE_LOCAL, taImpl.getLocality());
}
 
Example 17
Source File: TestTaskAttemptContainerRequest.java    From big-c with Apache License 2.0
@Test
public void testAttemptContainerRequest() throws Exception {
  final Text SECRET_KEY_ALIAS = new Text("secretkeyalias");
  final byte[] SECRET_KEY = ("secretkey").getBytes();
  Map<ApplicationAccessType, String> acls =
      new HashMap<ApplicationAccessType, String>(1);
  acls.put(ApplicationAccessType.VIEW_APP, "otheruser");
  ApplicationId appId = ApplicationId.newInstance(1, 1);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  Path jobFile = mock(Path.class);

  EventHandler eventHandler = mock(EventHandler.class);
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));

  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");

  // setup UGI for security so tokens and keys are preserved
  jobConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(jobConf);

  Credentials credentials = new Credentials();
  credentials.addSecretKey(SECRET_KEY_ALIAS, SECRET_KEY);
  Token<JobTokenIdentifier> jobToken = new Token<JobTokenIdentifier>(
      ("tokenid").getBytes(), ("tokenpw").getBytes(),
      new Text("tokenkind"), new Text("tokenservice"));

  TaskAttemptImpl taImpl =
      new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
          mock(TaskSplitMetaInfo.class), jobConf, taListener,
          jobToken, credentials,
          new SystemClock(), null);

  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, taImpl.getID().toString());

  ContainerLaunchContext launchCtx =
      TaskAttemptImpl.createContainerLaunchContext(acls,
          jobConf, jobToken, taImpl.createRemoteTask(),
          TypeConverter.fromYarn(jobId),
          mock(WrappedJvmID.class), taListener,
          credentials);

  Assert.assertEquals("ACLs mismatch", acls, launchCtx.getApplicationACLs());
  Credentials launchCredentials = new Credentials();

  DataInputByteBuffer dibb = new DataInputByteBuffer();
  dibb.reset(launchCtx.getTokens());
  launchCredentials.readTokenStorageStream(dibb);

  // verify all tokens specified for the task attempt are in the launch context
  for (Token<? extends TokenIdentifier> token : credentials.getAllTokens()) {
    Token<? extends TokenIdentifier> launchToken =
        launchCredentials.getToken(token.getService());
    Assert.assertNotNull("Token " + token.getService() + " is missing",
        launchToken);
    Assert.assertEquals("Token " + token.getService() + " mismatch",
        token, launchToken);
  }

  // verify the secret key is in the launch context
  Assert.assertNotNull("Secret key missing",
      launchCredentials.getSecretKey(SECRET_KEY_ALIAS));
  Assert.assertTrue("Secret key mismatch", Arrays.equals(SECRET_KEY,
      launchCredentials.getSecretKey(SECRET_KEY_ALIAS)));
}
 
Example 18
Source File: TestTaskAttempt.java    From big-c with Apache License 2.0
@Test
public void testContainerCleanedWhileRunning() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId =
    ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);

  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));

  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");

  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});

  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  Resource resource = mock(Resource.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  when(resource.getMemory()).thenReturn(1024);

  TaskAttemptImpl taImpl =
    new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
        splits, jobConf, taListener,
        new Token(), new Credentials(),
        new SystemClock(), appCtx);

  NodeId nid = NodeId.newInstance("127.0.0.2", 0);
  ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");

  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
      container, mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  assertEquals("Task attempt is not in running state", taImpl.getState(),
      TaskAttemptState.RUNNING);
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_CONTAINER_CLEANED));
  assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
      eventHandler.internalError);
  assertEquals("Task attempt is not assigned on the local rack",
      Locality.RACK_LOCAL, taImpl.getLocality());
}
 
Example 19
Source File: TestTaskAttempt.java    From big-c with Apache License 2.0
@Test
public void testAppDiagnosticEventOnUnassignedTask() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
      appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);

  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(
      new InetSocketAddress("localhost", 0));

  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");

  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] { "127.0.0.1" });

  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  Resource resource = mock(Resource.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  when(resource.getMemory()).thenReturn(1024);

  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener,
      new Token(), new Credentials(), new SystemClock(), appCtx);

  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");
  taImpl.handle(new TaskAttemptEvent(attemptId,
      TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptDiagnosticsUpdateEvent(attemptId,
      "Task got killed"));
  assertFalse(
      "InternalError occurred trying to handle TA_DIAGNOSTICS_UPDATE on an unassigned task",
      eventHandler.internalError);
}
 
Example 20
Source File: RMCommunicator.java    From jumbune with GNU Lesser General Public License v3.0
/**
 * Get the Job Id for the given Application Report.
 *
 * @param report the report
 * @return the job id
 */
public JobId getJobId(ApplicationReport report){
	ApplicationId applicationId = report.getApplicationId();
	return MRBuilderUtils.newJobId(applicationId, applicationId.getId());
}