Java Code Examples for org.apache.hadoop.yarn.server.utils.BuilderUtils#newApplicationId()

The following examples show how to use org.apache.hadoop.yarn.server.utils.BuilderUtils#newApplicationId(). They are taken from open-source projects; the source file and license are listed above each example.
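Before the examples, here is a minimal, self-contained sketch of the call itself, assuming the YARN API and server-common jars are on the classpath; the class name NewApplicationIdSketch and the main method are only illustrative, and the output formats in the comments are approximate. newApplicationId(clusterTimestamp, id) builds an ApplicationId record from a cluster timestamp and a sequence number, and the related BuilderUtils helpers derive attempt and container ids from it, just as the tests below do. Note that BuilderUtils lives in the hadoop-yarn-server-common module and is intended for server-side and test code rather than application code.

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;

public class NewApplicationIdSketch {
  public static void main(String[] args) {
    // An ApplicationId is the pair (cluster timestamp, sequence number).
    ApplicationId appId = BuilderUtils.newApplicationId(System.currentTimeMillis(), 1);

    // Attempt and container ids are usually derived from the application id.
    ApplicationAttemptId attemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
    ContainerId containerId = BuilderUtils.newContainerId(attemptId, 1);

    System.out.println(appId);       // e.g. application_<timestamp>_0001
    System.out.println(attemptId);   // e.g. appattempt_<timestamp>_0001_000001
    System.out.println(containerId); // e.g. container_<timestamp>_0001_01_000001
  }
}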
Example 1
Source File: TestClientRMService.java    From big-c with Apache License 2.0
@Test
public void testForceKillNonExistingApplication() throws YarnException {
  RMContext rmContext = mock(RMContext.class);
  when(rmContext.getRMApps()).thenReturn(
      new ConcurrentHashMap<ApplicationId, RMApp>());
  ClientRMService rmService = new ClientRMService(rmContext, null, null,
      null, null, null);
  ApplicationId applicationId =
      BuilderUtils.newApplicationId(System.currentTimeMillis(), 0);
  KillApplicationRequest request =
      KillApplicationRequest.newInstance(applicationId);
  try {
    rmService.forceKillApplication(request);
    Assert.fail();
  } catch (ApplicationNotFoundException ex) {
    Assert.assertEquals(ex.getMessage(),
        "Trying to kill an absent " +
            "application " + request.getApplicationId());
  }
}
 
Example 2
Source File: TestLogAggregationService.java    From hadoop with Apache License 2.0
@Test(timeout=20000)
public void testStopAfterError() throws Exception {
  DeletionService delSrvc = mock(DeletionService.class);

  // get the AppLogAggregationImpl thread to crash
  LocalDirsHandlerService mockedDirSvc = mock(LocalDirsHandlerService.class);
  when(mockedDirSvc.getLogDirs()).thenThrow(new RuntimeException());
  
  LogAggregationService logAggregationService =
      new LogAggregationService(dispatcher, this.context, delSrvc,
                                mockedDirSvc);
  logAggregationService.init(this.conf);
  logAggregationService.start();

  ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);
  logAggregationService.handle(new LogHandlerAppStartedEvent(
          application1, this.user, null,
          ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls));

  logAggregationService.stop();
  assertEquals(0, logAggregationService.getNumAggregators());
  logAggregationService.close();
}
 
Example 3
Source File: TestClientRMService.java    From hadoop with Apache License 2.0
@Test
public void testForceKillNonExistingApplication() throws YarnException {
  RMContext rmContext = mock(RMContext.class);
  when(rmContext.getRMApps()).thenReturn(
      new ConcurrentHashMap<ApplicationId, RMApp>());
  ClientRMService rmService = new ClientRMService(rmContext, null, null,
      null, null, null);
  ApplicationId applicationId =
      BuilderUtils.newApplicationId(System.currentTimeMillis(), 0);
  KillApplicationRequest request =
      KillApplicationRequest.newInstance(applicationId);
  try {
    rmService.forceKillApplication(request);
    Assert.fail();
  } catch (ApplicationNotFoundException ex) {
    Assert.assertEquals(ex.getMessage(),
        "Trying to kill an absent " +
            "application " + request.getApplicationId());
  }
}
 
Example 4
Source File: TestClientRMService.java    From big-c with Apache License 2.0
@Test (expected = ApplicationNotFoundException.class)
public void testMoveAbsentApplication() throws YarnException {
  RMContext rmContext = mock(RMContext.class);
  when(rmContext.getRMApps()).thenReturn(
      new ConcurrentHashMap<ApplicationId, RMApp>());
  ClientRMService rmService = new ClientRMService(rmContext, null, null,
      null, null, null);
  ApplicationId applicationId =
      BuilderUtils.newApplicationId(System.currentTimeMillis(), 0);
  MoveApplicationAcrossQueuesRequest request =
      MoveApplicationAcrossQueuesRequest.newInstance(applicationId, "newqueue");
  rmService.moveApplicationAcrossQueues(request);
}
 
Example 5
Source File: TestDelegationTokenRenewer.java    From hadoop with Apache License 2.0
/**
 * Basic idea of the test:
 * 1. register a token for 2 seconds with no cancel at the end
 * 2. cancel it immediately
 * 3. Sleep and check that the 2 seconds renew didn't happen 
 * (totally 5 renewals)
 * 4. check cancellation
 * @throws IOException
 * @throws URISyntaxException
 */
@Test(timeout=60000)
public void testDTRenewalWithNoCancel () throws Exception {
  MyFS dfs = (MyFS)FileSystem.get(conf);
  LOG.info("dfs="+(Object)dfs.hashCode() + ";conf="+conf.hashCode());

  Credentials ts = new Credentials();
  MyToken token1 = dfs.getDelegationToken("user1");

  //to cause this one to be set for renew in 2 secs
  Renewer.tokenToRenewIn2Sec = token1; 
  LOG.info("token="+token1+" should be renewed for 2 secs");
  
  String nn1 = DelegationTokenRenewer.SCHEME + "://host1:0";
  ts.addToken(new Text(nn1), token1);
  

  ApplicationId applicationId_1 = BuilderUtils.newApplicationId(0, 1);
  delegationTokenRenewer.addApplicationAsync(applicationId_1, ts, false, "user");
  waitForEventsToGetProcessed(delegationTokenRenewer);
  delegationTokenRenewer.applicationFinished(applicationId_1);
  waitForEventsToGetProcessed(delegationTokenRenewer);
  int numberOfExpectedRenewals = Renewer.counter; // number of renewals so far
  try {
    Thread.sleep(6*1000); // sleep 6 seconds, so it has time to renew
  } catch (InterruptedException e) {}
  LOG.info("Counter = " + Renewer.counter + ";t="+ Renewer.lastRenewed);
  
  // counter and the token should still be the old ones
  assertEquals("renew wasn't called as many times as expected",
      numberOfExpectedRenewals, Renewer.counter);
  
  // also renewing of the canceled token should not fail, because it has not
  // been canceled
  token1.renew(conf);
}
 
Example 6
Source File: TestLogAggregationService.java    From hadoop with Apache License 2.0
@Test
public void testLogAggregatorCleanup() throws Exception {
  DeletionService delSrvc = mock(DeletionService.class);

  // get the AppLogAggregationImpl thread to crash
  LocalDirsHandlerService mockedDirSvc = mock(LocalDirsHandlerService.class);

  LogAggregationService logAggregationService =
      new LogAggregationService(dispatcher, this.context, delSrvc,
                                mockedDirSvc);
  logAggregationService.init(this.conf);
  logAggregationService.start();

  ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);
  logAggregationService.handle(new LogHandlerAppStartedEvent(
          application1, this.user, null,
          ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls));

  logAggregationService.handle(new LogHandlerAppFinishedEvent(application1));
  dispatcher.await();
  int timeToWait = 20 * 1000;
  while (timeToWait > 0 && logAggregationService.getNumAggregators() > 0) {
    Thread.sleep(100);
    timeToWait -= 100;
  }
  Assert.assertEquals("Log aggregator failed to cleanup!", 0,
      logAggregationService.getNumAggregators());
  logAggregationService.stop();
  logAggregationService.close();
}
 
Example 7
Source File: TestFifoScheduler.java    From big-c with Apache License 2.0
@Test(timeout=5000)
public void testAppAttemptMetrics() throws Exception {
  AsyncDispatcher dispatcher = new InlineDispatcher();
  
  FifoScheduler scheduler = new FifoScheduler();
  RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
  RMContext rmContext = new RMContextImpl(dispatcher, null,
      null, null, null, null, null, null, null, writer, scheduler);
  ((RMContextImpl) rmContext).setSystemMetricsPublisher(
      mock(SystemMetricsPublisher.class));

  Configuration conf = new Configuration();
  scheduler.setRMContext(rmContext);
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, rmContext);
  QueueMetrics metrics = scheduler.getRootQueueMetrics();
  int beforeAppsSubmitted = metrics.getAppsSubmitted();

  ApplicationId appId = BuilderUtils.newApplicationId(200, 1);
  ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
      appId, 1);

  SchedulerEvent appEvent = new AppAddedSchedulerEvent(appId, "queue", "user");
  scheduler.handle(appEvent);
  SchedulerEvent attemptEvent =
      new AppAttemptAddedSchedulerEvent(appAttemptId, false);
  scheduler.handle(attemptEvent);

  appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 2);
  SchedulerEvent attemptEvent2 =
      new AppAttemptAddedSchedulerEvent(appAttemptId, false);
  scheduler.handle(attemptEvent2);

  int afterAppsSubmitted = metrics.getAppsSubmitted();
  Assert.assertEquals(1, afterAppsSubmitted - beforeAppsSubmitted);
  scheduler.stop();
}
 
Example 8
Source File: TestQueueMetrics.java    From hadoop with Apache License 2.0
private static AppSchedulingInfo mockApp(String user) {
  AppSchedulingInfo app = mock(AppSchedulingInfo.class);
  when(app.getUser()).thenReturn(user);
  ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
  ApplicationAttemptId id = BuilderUtils.newApplicationAttemptId(appId, 1);
  when(app.getApplicationAttemptId()).thenReturn(id);
  return app;
}
 
Example 9
Source File: MockApp.java    From hadoop with Apache License 2.0
public MockApp(String user, long clusterTimeStamp, int uniqId) {
  super();
  this.user = user;
  // Add an application and the corresponding containers
  RecordFactory recordFactory = RecordFactoryProvider
      .getRecordFactory(new Configuration());
  this.appId = BuilderUtils.newApplicationId(recordFactory, clusterTimeStamp,
      uniqId);
  appState = ApplicationState.NEW;
}
 
Example 10
Source File: TestRMNodeTransitions.java    From big-c with Apache License 2.0
@Test(timeout=20000)
public void testUpdateHeartbeatResponseForCleanup() {
  RMNodeImpl node = getRunningNode();
  NodeId nodeId = node.getNodeID();

  // Expire a container
  ContainerId completedContainerId = BuilderUtils.newContainerId(
      BuilderUtils.newApplicationAttemptId(
          BuilderUtils.newApplicationId(0, 0), 0), 0);
  node.handle(new RMNodeCleanContainerEvent(nodeId, completedContainerId));
  Assert.assertEquals(1, node.getContainersToCleanUp().size());

  // Finish an application
  ApplicationId finishedAppId = BuilderUtils.newApplicationId(0, 1);
  node.handle(new RMNodeCleanAppEvent(nodeId, finishedAppId));
  Assert.assertEquals(1, node.getAppsToCleanup().size());

  // Verify status update does not clear containers/apps to cleanup
  // but updating heartbeat response for cleanup does
  RMNodeStatusEvent statusEvent = getMockRMNodeStatusEvent();
  node.handle(statusEvent);
  Assert.assertEquals(1, node.getContainersToCleanUp().size());
  Assert.assertEquals(1, node.getAppsToCleanup().size());
  NodeHeartbeatResponse hbrsp = Records.newRecord(NodeHeartbeatResponse.class);
  node.updateNodeHeartbeatResponseForCleanup(hbrsp);
  Assert.assertEquals(0, node.getContainersToCleanUp().size());
  Assert.assertEquals(0, node.getAppsToCleanup().size());
  Assert.assertEquals(1, hbrsp.getContainersToCleanup().size());
  Assert.assertEquals(completedContainerId, hbrsp.getContainersToCleanup().get(0));
  Assert.assertEquals(1, hbrsp.getApplicationsToCleanup().size());
  Assert.assertEquals(finishedAppId, hbrsp.getApplicationsToCleanup().get(0));
}
 
Example 11
Source File: TestDelegationTokenRenewer.java    From big-c with Apache License 2.0
@Test(timeout=60000)
public void testAppRejectionWithCancelledDelegationToken() throws Exception {
  MyFS dfs = (MyFS)FileSystem.get(conf);
  LOG.info("dfs="+(Object)dfs.hashCode() + ";conf="+conf.hashCode());

  MyToken token = dfs.getDelegationToken("user1");
  token.cancelToken();

  Credentials ts = new Credentials();
  ts.addToken(token.getKind(), token);
  
  // register the tokens for renewal
  ApplicationId appId =  BuilderUtils.newApplicationId(0, 0);
  delegationTokenRenewer.addApplicationAsync(appId, ts, true, "user");
  int waitCnt = 20;
  while (waitCnt-- >0) {
    if (!eventQueue.isEmpty()) {
      Event evt = eventQueue.take();
      if (evt.getType() == RMAppEventType.APP_REJECTED) {
        Assert.assertTrue(
            ((RMAppEvent) evt).getApplicationId().equals(appId));
        return;
      }
    } else {
      Thread.sleep(500);
    }
  }
  fail("App submission with a cancelled token should have failed");
}
 
Example 12
Source File: TestApplication.java    From hadoop with Apache License 2.0
WrappedApplication(int id, long timestamp, String user, int numContainers) {
  Configuration conf = new Configuration();
  
  dispatcher = new DrainDispatcher();
  containerTokenIdentifierMap =
      new HashMap<ContainerId, ContainerTokenIdentifier>();
  dispatcher.init(conf);

  localizerBus = mock(EventHandler.class);
  launcherBus = mock(EventHandler.class);
  monitorBus = mock(EventHandler.class);
  auxBus = mock(EventHandler.class);
  containerBus = mock(EventHandler.class);
  logAggregationBus = mock(EventHandler.class);

  dispatcher.register(LocalizationEventType.class, localizerBus);
  dispatcher.register(ContainersLauncherEventType.class, launcherBus);
  dispatcher.register(ContainersMonitorEventType.class, monitorBus);
  dispatcher.register(AuxServicesEventType.class, auxBus);
  dispatcher.register(ContainerEventType.class, containerBus);
  dispatcher.register(LogHandlerEventType.class, logAggregationBus);

  nmTokenSecretMgr = mock(NMTokenSecretManagerInNM.class);

  context = mock(Context.class);
  
  when(context.getContainerTokenSecretManager()).thenReturn(
    new NMContainerTokenSecretManager(conf));
  when(context.getApplicationACLsManager()).thenReturn(
    new ApplicationACLsManager(conf));
  when(context.getNMTokenSecretManager()).thenReturn(nmTokenSecretMgr);
  
  // Setting master key
  MasterKey masterKey = new MasterKeyPBImpl();
  masterKey.setKeyId(123);
  masterKey.setBytes(ByteBuffer.wrap(new byte[] { (new Integer(123)
    .byteValue()) }));
  context.getContainerTokenSecretManager().setMasterKey(masterKey);
  
  this.user = user;
  this.appId = BuilderUtils.newApplicationId(timestamp, id);

  app = new ApplicationImpl(dispatcher, this.user, appId, null, context);
  containers = new ArrayList<Container>();
  for (int i = 0; i < numContainers; i++) {
    Container container = createMockedContainer(this.appId, i);
    containers.add(container);
    long currentTime = System.currentTimeMillis();
    ContainerTokenIdentifier identifier =
        new ContainerTokenIdentifier(container.getContainerId(), "", "",
          null, currentTime + 2000, masterKey.getKeyId(), currentTime,
          Priority.newInstance(0), 0);
    containerTokenIdentifierMap
      .put(identifier.getContainerID(), identifier);
    context.getContainerTokenSecretManager().startContainerSuccessful(
      identifier);
    Assert.assertFalse(context.getContainerTokenSecretManager()
      .isValidStartContainerRequest(identifier));
  }

  dispatcher.start();
}
 
Example 13
Source File: TestLogAggregationService.java    From hadoop with Apache License 2.0
private void verifyLocalFileDeletion(
    LogAggregationService logAggregationService) throws Exception {
  logAggregationService.init(this.conf);
  logAggregationService.start();

  ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);

  // AppLogDir should be created
  File app1LogDir =
      new File(localLogDir, ConverterUtils.toString(application1));
  app1LogDir.mkdir();
  logAggregationService
      .handle(new LogHandlerAppStartedEvent(
          application1, this.user, null,
          ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls));

  ApplicationAttemptId appAttemptId =
      BuilderUtils.newApplicationAttemptId(application1, 1);
  ContainerId container11 = BuilderUtils.newContainerId(appAttemptId, 1);
  // Simulate log-file creation
  writeContainerLogs(app1LogDir, container11, new String[] { "stdout",
      "stderr", "syslog" });
  logAggregationService.handle(
      new LogHandlerContainerFinishedEvent(container11, 0));

  logAggregationService.handle(new LogHandlerAppFinishedEvent(
      application1));

  logAggregationService.stop();
  assertEquals(0, logAggregationService.getNumAggregators());
  // ensure filesystems were closed
  verify(logAggregationService).closeFileSystems(
      any(UserGroupInformation.class));
  verify(delSrvc).delete(eq(user), eq((Path) null),
    eq(new Path(app1LogDir.getAbsolutePath())));
  delSrvc.stop();
  
  String containerIdStr = ConverterUtils.toString(container11);
  File containerLogDir = new File(app1LogDir, containerIdStr);
  for (String fileType : new String[] { "stdout", "stderr", "syslog" }) {
    File f = new File(containerLogDir, fileType);
    Assert.assertFalse("check "+f, f.exists());
  }

  Assert.assertFalse(app1LogDir.exists());

  Path logFilePath =
      logAggregationService.getRemoteNodeLogFileForApp(application1,
          this.user);

  Assert.assertTrue("Log file [" + logFilePath + "] not found", new File(
      logFilePath.toUri().getPath()).exists());
  
  dispatcher.await();
  
  ApplicationEvent expectedEvents[] = new ApplicationEvent[]{
      new ApplicationEvent(
          appAttemptId.getApplicationId(),
          ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
      new ApplicationEvent(
          appAttemptId.getApplicationId(),
          ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)
  };

  checkEvents(appEventHandler, expectedEvents, true, "getType",
      "getApplicationID");
}
 
Example 14
Source File: TestContainerLogsPage.java    From big-c with Apache License 2.0
@Test(timeout=30000)
public void testContainerLogDirs() throws IOException, YarnException {
  File absLogDir = new File("target",
    TestNMWebServer.class.getSimpleName() + "LogDir").getAbsoluteFile();
  String logdirwithFile = absLogDir.toURI().toString();
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.NM_LOG_DIRS, logdirwithFile);
  NodeHealthCheckerService healthChecker = new NodeHealthCheckerService();
  healthChecker.init(conf);
  LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
  NMContext nmContext = new NodeManager.NMContext(null, null, dirsHandler,
      new ApplicationACLsManager(conf), new NMNullStateStoreService(), null);
  // Add an application and the corresponding containers
  RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(conf);
  String user = "nobody";
  long clusterTimeStamp = 1234;
  ApplicationId appId = BuilderUtils.newApplicationId(recordFactory,
      clusterTimeStamp, 1);
  Application app = mock(Application.class);
  when(app.getUser()).thenReturn(user);
  when(app.getAppId()).thenReturn(appId);
  ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
      appId, 1);
  ContainerId container1 = BuilderUtils.newContainerId(recordFactory, appId,
      appAttemptId, 0);
  nmContext.getApplications().put(appId, app);

  MockContainer container =
      new MockContainer(appAttemptId, new AsyncDispatcher(), conf, user,
          appId, 1);
  container.setState(ContainerState.RUNNING);
  nmContext.getContainers().put(container1, container);   
  List<File> files = null;
  files = ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
  Assert.assertTrue(!(files.get(0).toString().contains("file:")));
  
  // After container is completed, it is removed from nmContext
  nmContext.getContainers().remove(container1);
  Assert.assertNull(nmContext.getContainers().get(container1));
  files = ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
  Assert.assertTrue(!(files.get(0).toString().contains("file:")));

  // Create a new context to check if correct container log dirs are fetched
  // on full disk.
  LocalDirsHandlerService dirsHandlerForFullDisk = spy(dirsHandler);
  // good log dirs are empty and nm log dir is in the full log dir list.
  when(dirsHandlerForFullDisk.getLogDirs()).
      thenReturn(new ArrayList<String>());
  when(dirsHandlerForFullDisk.getLogDirsForRead()).
      thenReturn(Arrays.asList(new String[] {absLogDir.getAbsolutePath()}));
  nmContext = new NodeManager.NMContext(null, null, dirsHandlerForFullDisk,
      new ApplicationACLsManager(conf), new NMNullStateStoreService(), null);
  nmContext.getApplications().put(appId, app);
  container.setState(ContainerState.RUNNING);
  nmContext.getContainers().put(container1, container);
  List<File> dirs =
      ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
  File containerLogDir = new File(absLogDir, appId + "/" + container1);
  Assert.assertTrue(dirs.contains(containerLogDir));
}
 
Example 15
Source File: TestResourceLocalizationService.java    From hadoop with Apache License 2.0
@Test( timeout = 10000)
@SuppressWarnings("unchecked") // mocked generics
public void testLocalizerRunnerException() throws Exception {
  DrainDispatcher dispatcher = new DrainDispatcher();
  dispatcher.init(conf);
  dispatcher.start();
  EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class);
  dispatcher.register(ApplicationEventType.class, applicationBus);
  EventHandler<ContainerEvent> containerBus = mock(EventHandler.class);
  dispatcher.register(ContainerEventType.class, containerBus);

  ContainerExecutor exec = mock(ContainerExecutor.class);
  LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
  LocalDirsHandlerService dirsHandlerSpy = spy(dirsHandler);
  dirsHandlerSpy.init(conf);

  DeletionService delServiceReal = new DeletionService(exec);
  DeletionService delService = spy(delServiceReal);
  delService.init(new Configuration());
  delService.start();

  ResourceLocalizationService rawService =
      new ResourceLocalizationService(dispatcher, exec, delService,
      dirsHandlerSpy, nmContext);
  ResourceLocalizationService spyService = spy(rawService);
  doReturn(mockServer).when(spyService).createServer();
  try {
    spyService.init(conf);
    spyService.start();

    // init application
    final Application app = mock(Application.class);
    final ApplicationId appId =
        BuilderUtils.newApplicationId(314159265358979L, 3);
    when(app.getUser()).thenReturn("user0");
    when(app.getAppId()).thenReturn(appId);
    spyService.handle(new ApplicationLocalizationEvent(
        LocalizationEventType.INIT_APPLICATION_RESOURCES, app));
    dispatcher.await();

    Random r = new Random();
    long seed = r.nextLong();
    System.out.println("SEED: " + seed);
    r.setSeed(seed);
    final Container c = getMockContainer(appId, 42, "user0");
    final LocalResource resource1 = getPrivateMockedResource(r);
    System.out.println("Here 4");
    
    final LocalResourceRequest req1 = new LocalResourceRequest(resource1);
    Map<LocalResourceVisibility, Collection<LocalResourceRequest>> rsrcs =
      new HashMap<LocalResourceVisibility, 
                  Collection<LocalResourceRequest>>();
    List<LocalResourceRequest> privateResourceList =
        new ArrayList<LocalResourceRequest>();
    privateResourceList.add(req1);
    rsrcs.put(LocalResourceVisibility.PRIVATE, privateResourceList);

    final Constructor<?>[] constructors =
        FSError.class.getDeclaredConstructors();
    constructors[0].setAccessible(true);
    FSError fsError =
        (FSError) constructors[0].newInstance(new IOException("Disk Error"));

    Mockito
      .doThrow(fsError)
      .when(dirsHandlerSpy)
      .getLocalPathForWrite(isA(String.class));
    spyService.handle(new ContainerLocalizationRequestEvent(c, rsrcs));
    Thread.sleep(1000);
    dispatcher.await();
    // Verify if ContainerResourceFailedEvent is invoked on FSError
    verify(containerBus).handle(isA(ContainerResourceFailedEvent.class));
  } finally {
    spyService.stop();
    dispatcher.stop();
    delService.stop();
  }
}
 
Example 16
Source File: TestNMWebServer.java    From hadoop with Apache License 2.0
@Test
public void testNMWebApp() throws IOException, YarnException {
    Context nmContext = new NodeManager.NMContext(null, null, null, null,
        null);
    ResourceView resourceView = new ResourceView() {
      @Override
      public long getVmemAllocatedForContainers() {
        return 0;
      }
      @Override
      public long getPmemAllocatedForContainers() {
        return 0;
      }
      @Override
      public long getVCoresAllocatedForContainers() {
        return 0;
      }
      @Override
      public long getGCoresAllocatedForContainers() {
        return 0;
      }
      @Override
      public boolean isVmemCheckEnabled() {
        return true;
      }
      @Override
      public boolean isPmemCheckEnabled() {
        return true;
      }
    };
    Configuration conf = new Configuration();
    conf.set(YarnConfiguration.NM_LOCAL_DIRS, testRootDir.getAbsolutePath());
    conf.set(YarnConfiguration.NM_LOG_DIRS, testLogDir.getAbsolutePath());
    NodeHealthCheckerService healthChecker = createNodeHealthCheckerService(conf);
    healthChecker.init(conf);
    LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();

    WebServer server = new WebServer(nmContext, resourceView,
        new ApplicationACLsManager(conf), dirsHandler);
    server.init(conf);
    server.start();

    // Add an application and the corresponding containers
    RecordFactory recordFactory =
        RecordFactoryProvider.getRecordFactory(conf);
    Dispatcher dispatcher = new AsyncDispatcher();
    String user = "nobody";
    long clusterTimeStamp = 1234;
    ApplicationId appId =
        BuilderUtils.newApplicationId(recordFactory, clusterTimeStamp, 1);
    Application app = mock(Application.class);
    when(app.getUser()).thenReturn(user);
    when(app.getAppId()).thenReturn(appId);
    nmContext.getApplications().put(appId, app);
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
        appId, 1);
    ContainerId container1 =
        BuilderUtils.newContainerId(recordFactory, appId, appAttemptId, 0);
    ContainerId container2 =
        BuilderUtils.newContainerId(recordFactory, appId, appAttemptId, 1);
    NodeManagerMetrics metrics = mock(NodeManagerMetrics.class);
    NMStateStoreService stateStore = new NMNullStateStoreService();
    for (ContainerId containerId : new ContainerId[] { container1,
        container2}) {
      // TODO: Use builder utils
      ContainerLaunchContext launchContext =
          recordFactory.newRecordInstance(ContainerLaunchContext.class);
      long currentTime = System.currentTimeMillis();
      Token containerToken =
          BuilderUtils.newContainerToken(containerId, "127.0.0.1", 1234, user,
            BuilderUtils.newResource(1024, 1), currentTime + 10000L, 123,
            "password".getBytes(), currentTime);
      Container container =
          new ContainerImpl(conf, dispatcher, stateStore, launchContext,
            null, metrics,
            BuilderUtils.newContainerTokenIdentifier(containerToken)) {

            @Override
            public ContainerState getContainerState() {
              return ContainerState.RUNNING;
            };
          };
      nmContext.getContainers().put(containerId, container);
      //TODO: Gross hack. Fix in code.
      ApplicationId applicationId = 
          containerId.getApplicationAttemptId().getApplicationId();
      nmContext.getApplications().get(applicationId).getContainers()
          .put(containerId, container);
      writeContainerLogs(nmContext, containerId, dirsHandler);

    }
    // TODO: Pull logs and test contents.
//    Thread.sleep(1000000);
  }
 
Example 17
Source File: ApplicationIdUtils.java    From tajo with Apache License 2.0
public static ApplicationId queryIdToAppId(QueryId queryId) {
  return BuilderUtils.newApplicationId(Long.parseLong(queryId.getId()), queryId.getSeq());
}
 
Example 18
Source File: TestCapacityScheduler.java    From hadoop with Apache License 2.0
@Test
public void testApplicationHeadRoom() throws Exception {
  Configuration conf = new Configuration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
      ResourceScheduler.class);
  MockRM rm = new MockRM(conf);
  rm.start();
  CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();

  ApplicationId appId = BuilderUtils.newApplicationId(100, 1);
  ApplicationAttemptId appAttemptId =
      BuilderUtils.newApplicationAttemptId(appId, 1);

  RMAppAttemptMetrics attemptMetric =
      new RMAppAttemptMetrics(appAttemptId, rm.getRMContext());
  RMAppImpl app = mock(RMAppImpl.class);
  when(app.getApplicationId()).thenReturn(appId);
  RMAppAttemptImpl attempt = mock(RMAppAttemptImpl.class);
  when(attempt.getAppAttemptId()).thenReturn(appAttemptId);
  when(attempt.getRMAppAttemptMetrics()).thenReturn(attemptMetric);
  when(app.getCurrentAppAttempt()).thenReturn(attempt);

  rm.getRMContext().getRMApps().put(appId, app);

  SchedulerEvent addAppEvent =
      new AppAddedSchedulerEvent(appId, "default", "user");
  cs.handle(addAppEvent);
  SchedulerEvent addAttemptEvent =
      new AppAttemptAddedSchedulerEvent(appAttemptId, false);
  cs.handle(addAttemptEvent);

  Allocation allocate =
      cs.allocate(appAttemptId, Collections.<ResourceRequest> emptyList(),
          Collections.<ContainerId> emptyList(), null, null);

  Assert.assertNotNull(attempt);

  Assert
      .assertEquals(Resource.newInstance(0, 0), allocate.getResourceLimit());
  Assert.assertEquals(Resource.newInstance(0, 0),
      attemptMetric.getApplicationAttemptHeadroom());

  // Add a node to cluster
  Resource newResource = Resource.newInstance(4 * GB, 1);
  RMNode node = MockNodes.newNodeInfo(0, newResource, 1, "127.0.0.1");
  cs.handle(new NodeAddedSchedulerEvent(node));

  allocate =
      cs.allocate(appAttemptId, Collections.<ResourceRequest> emptyList(),
          Collections.<ContainerId> emptyList(), null, null);

  // All resources should be sent as headroom
  Assert.assertEquals(newResource, allocate.getResourceLimit());
  Assert.assertEquals(newResource,
      attemptMetric.getApplicationAttemptHeadroom());

  rm.stop();
}
 
Example 19
Source File: TestLogAggregationService.java    From hadoop with Apache License 2.0
@Test (timeout = 20000)
public void testAddNewTokenSentFromRMForLogAggregation() throws Exception {
  Configuration conf = new YarnConfiguration();
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
    "kerberos");
  UserGroupInformation.setConfiguration(conf);

  ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);
  Application mockApp = mock(Application.class);
  when(mockApp.getContainers()).thenReturn(
    new HashMap<ContainerId, Container>());
  this.context.getApplications().put(application1, mockApp);
  @SuppressWarnings("resource")
  LogAggregationService logAggregationService =
      new LogAggregationService(dispatcher, this.context, this.delSrvc,
        super.dirsHandler);
  logAggregationService.init(this.conf);
  logAggregationService.start();
  logAggregationService.handle(new LogHandlerAppStartedEvent(application1,
    this.user, null, ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls,
    Records.newRecord(LogAggregationContext.class)));

  // Inject new token for log-aggregation after app log-aggregator init
  Text userText1 = new Text("user1");
  RMDelegationTokenIdentifier dtId1 =
      new RMDelegationTokenIdentifier(userText1, new Text("renewer1"),
        userText1);
  final Token<RMDelegationTokenIdentifier> token1 =
      new Token<RMDelegationTokenIdentifier>(dtId1.getBytes(),
        "password1".getBytes(), dtId1.getKind(), new Text("service1"));
  Credentials credentials = new Credentials();
  credentials.addToken(userText1, token1);
  this.context.getSystemCredentialsForApps().put(application1, credentials);

  logAggregationService.handle(new LogHandlerAppFinishedEvent(application1));

  final UserGroupInformation ugi =
      ((AppLogAggregatorImpl) logAggregationService.getAppLogAggregators()
        .get(application1)).getUgi();

  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    public Boolean get() {
      boolean hasNewToken = false;
      for (Token<?> token : ugi.getCredentials().getAllTokens()) {
        if (token.equals(token1)) {
          hasNewToken = true;
        }
      }
      return hasNewToken;
    }
  }, 1000, 20000);
  logAggregationService.stop();
}
 
Example 20
Source File: TestLogAggregationService.java    From big-c with Apache License 2.0
@Test
public void testNoContainerOnNode() throws Exception {
  this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
  this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
      this.remoteRootLogDir.getAbsolutePath());
  
  LogAggregationService logAggregationService =
      new LogAggregationService(dispatcher, this.context, this.delSrvc,
                                super.dirsHandler);
  logAggregationService.init(this.conf);
  logAggregationService.start();

  ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);

  // AppLogDir should be created
  File app1LogDir =
    new File(localLogDir, ConverterUtils.toString(application1));
  app1LogDir.mkdir();
  logAggregationService
      .handle(new LogHandlerAppStartedEvent(
          application1, this.user, null,
          ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls));

  logAggregationService.handle(new LogHandlerAppFinishedEvent(
      application1));

  logAggregationService.stop();
  assertEquals(0, logAggregationService.getNumAggregators());

  Assert.assertFalse(new File(logAggregationService
      .getRemoteNodeLogFileForApp(application1, this.user).toUri().getPath())
      .exists());

  dispatcher.await();
  
  ApplicationEvent expectedEvents[] = new ApplicationEvent[]{
      new ApplicationEvent(
          application1,
          ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
      new ApplicationEvent(
          application1,
          ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)
  };
  checkEvents(appEventHandler, expectedEvents, true, "getType", "getApplicationID");
  logAggregationService.close();
}