Java Code Examples for org.apache.hadoop.yarn.server.utils.BuilderUtils#newApplicationAttemptId()

The following examples show how to use org.apache.hadoop.yarn.server.utils.BuilderUtils#newApplicationAttemptId(). Each example is taken from an open-source project; the source file and originating project are noted above each snippet.
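Before the project examples, here is a minimal, self-contained sketch of how the call is typically combined with BuilderUtils#newApplicationId(). It is not taken from any of the projects below; the class name and the timestamp literal are made up for illustration.

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;

public class NewApplicationAttemptIdSketch {
  public static void main(String[] args) {
    // An ApplicationId is built from a cluster timestamp and a sequence number.
    ApplicationId appId = BuilderUtils.newApplicationId(1234L, 1);
    // The attempt id wraps that ApplicationId plus the attempt number (1 = first attempt).
    ApplicationAttemptId appAttemptId =
        BuilderUtils.newApplicationAttemptId(appId, 1);
    // Prints something like appattempt_1234_0001_000001.
    System.out.println(appAttemptId);
  }
}

As the examples below show, the same pattern appears throughout YARN's own tests, usually to construct ids for mocked applications, attempts, and containers.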
Example 1
Source File: TestNMWebServicesApps.java    From big-c with Apache License 2.0
private HashMap<String, String> addAppContainers(Application app) 
    throws IOException {
  Dispatcher dispatcher = new AsyncDispatcher();
  ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
      app.getAppId(), 1);
  Container container1 = new MockContainer(appAttemptId, dispatcher, conf,
      app.getUser(), app.getAppId(), 1);
  Container container2 = new MockContainer(appAttemptId, dispatcher, conf,
      app.getUser(), app.getAppId(), 2);
  nmContext.getContainers()
      .put(container1.getContainerId(), container1);
  nmContext.getContainers()
      .put(container2.getContainerId(), container2);

  app.getContainers().put(container1.getContainerId(), container1);
  app.getContainers().put(container2.getContainerId(), container2);
  HashMap<String, String> hash = new HashMap<String, String>();
  hash.put(container1.getContainerId().toString(), container1
      .getContainerId().toString());
  hash.put(container2.getContainerId().toString(), container2
      .getContainerId().toString());
  return hash;
}
 
Example 2
Source File: TestQueueMetrics.java    From hadoop with Apache License 2.0
private static AppSchedulingInfo mockApp(String user) {
  AppSchedulingInfo app = mock(AppSchedulingInfo.class);
  when(app.getUser()).thenReturn(user);
  ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
  ApplicationAttemptId id = BuilderUtils.newApplicationAttemptId(appId, 1);
  when(app.getApplicationAttemptId()).thenReturn(id);
  return app;
}
 
Example 3
Source File: TestApplication.java    From hadoop with Apache License 2.0
private Container createMockedContainer(ApplicationId appId, int containerId) {
  ApplicationAttemptId appAttemptId =
      BuilderUtils.newApplicationAttemptId(appId, 1);
  ContainerId cId = BuilderUtils.newContainerId(appAttemptId, containerId);
  Container c = mock(Container.class);
  when(c.getContainerId()).thenReturn(cId);
  ContainerLaunchContext launchContext = mock(ContainerLaunchContext.class);
  when(c.getLaunchContext()).thenReturn(launchContext);
  when(launchContext.getApplicationACLs()).thenReturn(
      new HashMap<ApplicationAccessType, String>());
  return c;
}
 
Example 4
Source File: TestQueueMetrics.java    From big-c with Apache License 2.0
private static AppSchedulingInfo mockApp(String user) {
  AppSchedulingInfo app = mock(AppSchedulingInfo.class);
  when(app.getUser()).thenReturn(user);
  ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
  ApplicationAttemptId id = BuilderUtils.newApplicationAttemptId(appId, 1);
  when(app.getApplicationAttemptId()).thenReturn(id);
  return app;
}
 
Example 5
Source File: TestFifoScheduler.java    From hadoop with Apache License 2.0
@Test(timeout=5000)
public void testAppAttemptMetrics() throws Exception {
  AsyncDispatcher dispatcher = new InlineDispatcher();
  
  FifoScheduler scheduler = new FifoScheduler();
  RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
  RMContext rmContext = new RMContextImpl(dispatcher, null,
      null, null, null, null, null, null, null, writer, scheduler);
  ((RMContextImpl) rmContext).setSystemMetricsPublisher(
      mock(SystemMetricsPublisher.class));

  Configuration conf = new Configuration();
  scheduler.setRMContext(rmContext);
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, rmContext);
  QueueMetrics metrics = scheduler.getRootQueueMetrics();
  int beforeAppsSubmitted = metrics.getAppsSubmitted();

  ApplicationId appId = BuilderUtils.newApplicationId(200, 1);
  ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
      appId, 1);

  SchedulerEvent appEvent = new AppAddedSchedulerEvent(appId, "queue", "user");
  scheduler.handle(appEvent);
  SchedulerEvent attemptEvent =
      new AppAttemptAddedSchedulerEvent(appAttemptId, false);
  scheduler.handle(attemptEvent);

  appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 2);
  SchedulerEvent attemptEvent2 =
      new AppAttemptAddedSchedulerEvent(appAttemptId, false);
  scheduler.handle(attemptEvent2);

  int afterAppsSubmitted = metrics.getAppsSubmitted();
  Assert.assertEquals(1, afterAppsSubmitted - beforeAppsSubmitted);
  scheduler.stop();
}
 
Example 6
Source File: TestApplication.java    From big-c with Apache License 2.0
private Container createMockedContainer(ApplicationId appId, int containerId) {
  ApplicationAttemptId appAttemptId =
      BuilderUtils.newApplicationAttemptId(appId, 1);
  ContainerId cId = BuilderUtils.newContainerId(appAttemptId, containerId);
  Container c = mock(Container.class);
  when(c.getContainerId()).thenReturn(cId);
  ContainerLaunchContext launchContext = mock(ContainerLaunchContext.class);
  when(c.getLaunchContext()).thenReturn(launchContext);
  when(launchContext.getApplicationACLs()).thenReturn(
      new HashMap<ApplicationAccessType, String>());
  return c;
}
 
Example 7
Source File: TestClientToAMTokens.java    From hadoop with Apache License 2.0
private void verifyTokenWithTamperedID(final Configuration conf,
    final CustomAM am, Token<ClientToAMTokenIdentifier> token)
    throws IOException {
  // Malicious user, messes with appId
  UserGroupInformation ugi = UserGroupInformation.createRemoteUser("me");
  ClientToAMTokenIdentifier maliciousID =
      new ClientToAMTokenIdentifier(BuilderUtils.newApplicationAttemptId(
        BuilderUtils.newApplicationId(am.appAttemptId.getApplicationId()
          .getClusterTimestamp(), 42), 43), UserGroupInformation
        .getCurrentUser().getShortUserName());

  verifyTamperedToken(conf, am, token, ugi, maliciousID);
}
 
Example 8
Source File: TestCapacityScheduler.java    From hadoop with Apache License 2.0
@Test
public void testBlackListNodes() throws Exception {
  Configuration conf = new Configuration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
      ResourceScheduler.class);
  MockRM rm = new MockRM(conf);
  rm.start();
  CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();

  String host = "127.0.0.1";
  RMNode node =
      MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1, host);
  cs.handle(new NodeAddedSchedulerEvent(node));

  ApplicationId appId = BuilderUtils.newApplicationId(100, 1);
  ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
      appId, 1);

  RMAppAttemptMetrics attemptMetric =
      new RMAppAttemptMetrics(appAttemptId, rm.getRMContext());
  RMAppImpl app = mock(RMAppImpl.class);
  when(app.getApplicationId()).thenReturn(appId);
  RMAppAttemptImpl attempt = mock(RMAppAttemptImpl.class);
  when(attempt.getAppAttemptId()).thenReturn(appAttemptId);
  when(attempt.getRMAppAttemptMetrics()).thenReturn(attemptMetric);
  when(app.getCurrentAppAttempt()).thenReturn(attempt);

  rm.getRMContext().getRMApps().put(appId, app);

  SchedulerEvent addAppEvent =
      new AppAddedSchedulerEvent(appId, "default", "user");
  cs.handle(addAppEvent);
  SchedulerEvent addAttemptEvent =
      new AppAttemptAddedSchedulerEvent(appAttemptId, false);
  cs.handle(addAttemptEvent);

  // Verify the blacklist can be updated independent of requesting containers
  cs.allocate(appAttemptId, Collections.<ResourceRequest>emptyList(),
      Collections.<ContainerId>emptyList(),
      Collections.singletonList(host), null);
  Assert.assertTrue(cs.getApplicationAttempt(appAttemptId).isBlacklisted(host));
  cs.allocate(appAttemptId, Collections.<ResourceRequest>emptyList(),
      Collections.<ContainerId>emptyList(), null,
      Collections.singletonList(host));
  Assert.assertFalse(cs.getApplicationAttempt(appAttemptId).isBlacklisted(host));
  rm.stop();
}
 
Example 9
Source File: TestFifoScheduler.java    From big-c with Apache License 2.0
@Test (timeout = 50000)
public void testHeadroom() throws Exception {
  
  Configuration conf = new Configuration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
      ResourceScheduler.class);
  MockRM rm = new MockRM(conf);
  rm.start();
  FifoScheduler fs = (FifoScheduler) rm.getResourceScheduler();

  // Add a node
  RMNode n1 =
      MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1, "127.0.0.2");
  fs.handle(new NodeAddedSchedulerEvent(n1));
  
  // Add two applications
  ApplicationId appId1 = BuilderUtils.newApplicationId(100, 1);
  ApplicationAttemptId appAttemptId1 = BuilderUtils.newApplicationAttemptId(
      appId1, 1);
  createMockRMApp(appAttemptId1, rm.getRMContext());
  SchedulerEvent appEvent =
      new AppAddedSchedulerEvent(appId1, "queue", "user");
  fs.handle(appEvent);
  SchedulerEvent attemptEvent =
      new AppAttemptAddedSchedulerEvent(appAttemptId1, false);
  fs.handle(attemptEvent);

  ApplicationId appId2 = BuilderUtils.newApplicationId(200, 2);
  ApplicationAttemptId appAttemptId2 = BuilderUtils.newApplicationAttemptId(
      appId2, 1);
  createMockRMApp(appAttemptId2, rm.getRMContext());
  SchedulerEvent appEvent2 =
      new AppAddedSchedulerEvent(appId2, "queue", "user");
  fs.handle(appEvent2);
  SchedulerEvent attemptEvent2 =
      new AppAttemptAddedSchedulerEvent(appAttemptId2, false);
  fs.handle(attemptEvent2);

  List<ContainerId> emptyId = new ArrayList<ContainerId>();
  List<ResourceRequest> emptyAsk = new ArrayList<ResourceRequest>();

  // Set up resource requests
  
  // Ask for a 1 GB container for app 1
  List<ResourceRequest> ask1 = new ArrayList<ResourceRequest>();
  ask1.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),
      ResourceRequest.ANY, BuilderUtils.newResource(GB, 1), 1));
  fs.allocate(appAttemptId1, ask1, emptyId, null, null);

  // Ask for a 2 GB container for app 2
  List<ResourceRequest> ask2 = new ArrayList<ResourceRequest>();
  ask2.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),
      ResourceRequest.ANY, BuilderUtils.newResource(2 * GB, 1), 1));
  fs.allocate(appAttemptId2, ask2, emptyId, null, null);
  
  // Trigger container assignment
  fs.handle(new NodeUpdateSchedulerEvent(n1));
  
  // Get the allocation for the applications and verify headroom
  Allocation allocation1 = fs.allocate(appAttemptId1, emptyAsk, emptyId, null, null);
  Assert.assertEquals("Allocation headroom", 1 * GB,
      allocation1.getResourceLimit().getMemory());

  Allocation allocation2 = fs.allocate(appAttemptId2, emptyAsk, emptyId, null, null);
  Assert.assertEquals("Allocation headroom", 1 * GB,
      allocation2.getResourceLimit().getMemory());

  rm.stop();
}
 
Example 10
Source File: TestReservations.java    From hadoop with Apache License 2.0
@Test
public void testGetAppToUnreserve() throws Exception {

  CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
  setup(csConf);
  final String user_0 = "user_0";
  final ApplicationAttemptId appAttemptId_0 = TestUtils
      .getMockApplicationAttemptId(0, 0);
  LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
  FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a,
      mock(ActiveUsersManager.class), spyRMContext);

  String host_0 = "host_0";
  FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0,
      8 * GB);
  String host_1 = "host_1";
  FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0,
      8 * GB);
  
  Resource clusterResource = Resources.createResource(2 * 8 * GB);

  // Setup resource-requests
  Priority priorityMap = TestUtils.createMockPriority(5);
  Resource capability = Resources.createResource(2*GB, 0);

  RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
  SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class);
  RMContext rmContext = mock(RMContext.class);
  ContainerAllocationExpirer expirer =
    mock(ContainerAllocationExpirer.class);
  DrainDispatcher drainDispatcher = new DrainDispatcher();
  when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
  when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
  when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
  when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher);
  ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
      app_0.getApplicationId(), 1);
  ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
  Container container = TestUtils.getMockContainer(containerId,
      node_1.getNodeID(), Resources.createResource(2*GB), priorityMap);
  RMContainer rmContainer = new RMContainerImpl(container, appAttemptId,
      node_1.getNodeID(), "user", rmContext);

  Container container_1 = TestUtils.getMockContainer(containerId,
      node_0.getNodeID(), Resources.createResource(1*GB), priorityMap);
  RMContainer rmContainer_1 = new RMContainerImpl(container_1, appAttemptId,
      node_0.getNodeID(), "user", rmContext);

  // no reserved containers
  NodeId unreserveId =
      app_0.getNodeIdToUnreserve(priorityMap, capability,
          cs.getResourceCalculator(), clusterResource);
  assertEquals(null, unreserveId);

  // no reserved containers - reserve then unreserve
  app_0.reserve(node_0, priorityMap, rmContainer_1, container_1);
  app_0.unreserve(node_0, priorityMap);
  unreserveId = app_0.getNodeIdToUnreserve(priorityMap, capability,
      cs.getResourceCalculator(), clusterResource);
  assertEquals(null, unreserveId);

  // no container large enough is reserved
  app_0.reserve(node_0, priorityMap, rmContainer_1, container_1);
  unreserveId = app_0.getNodeIdToUnreserve(priorityMap, capability,
      cs.getResourceCalculator(), clusterResource);
  assertEquals(null, unreserveId);

  // reserve one that is now large enough
  app_0.reserve(node_1, priorityMap, rmContainer, container);
  unreserveId = app_0.getNodeIdToUnreserve(priorityMap, capability,
      cs.getResourceCalculator(), clusterResource);
  assertEquals(node_1.getNodeID(), unreserveId);
}
 
Example 11
Source File: ApplicationIdUtils.java    From tajo with Apache License 2.0
public static ApplicationAttemptId createApplicationAttemptId(QueryId queryId, int attemptId) {
  return BuilderUtils.newApplicationAttemptId(queryIdToAppId(queryId), attemptId);
}
 
Example 12
Source File: TestFifoScheduler.java    From hadoop with Apache License 2.0
@Test (timeout = 50000)
public void testHeadroom() throws Exception {
  
  Configuration conf = new Configuration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
      ResourceScheduler.class);
  MockRM rm = new MockRM(conf);
  rm.start();
  FifoScheduler fs = (FifoScheduler) rm.getResourceScheduler();

  // Add a node
  RMNode n1 =
      MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1, "127.0.0.2");
  fs.handle(new NodeAddedSchedulerEvent(n1));
  
  // Add two applications
  ApplicationId appId1 = BuilderUtils.newApplicationId(100, 1);
  ApplicationAttemptId appAttemptId1 = BuilderUtils.newApplicationAttemptId(
      appId1, 1);
  createMockRMApp(appAttemptId1, rm.getRMContext());
  SchedulerEvent appEvent =
      new AppAddedSchedulerEvent(appId1, "queue", "user");
  fs.handle(appEvent);
  SchedulerEvent attemptEvent =
      new AppAttemptAddedSchedulerEvent(appAttemptId1, false);
  fs.handle(attemptEvent);

  ApplicationId appId2 = BuilderUtils.newApplicationId(200, 2);
  ApplicationAttemptId appAttemptId2 = BuilderUtils.newApplicationAttemptId(
      appId2, 1);
  createMockRMApp(appAttemptId2, rm.getRMContext());
  SchedulerEvent appEvent2 =
      new AppAddedSchedulerEvent(appId2, "queue", "user");
  fs.handle(appEvent2);
  SchedulerEvent attemptEvent2 =
      new AppAttemptAddedSchedulerEvent(appAttemptId2, false);
  fs.handle(attemptEvent2);

  List<ContainerId> emptyId = new ArrayList<ContainerId>();
  List<ResourceRequest> emptyAsk = new ArrayList<ResourceRequest>();

  // Set up resource requests
  
  // Ask for a 1 GB container for app 1
  List<ResourceRequest> ask1 = new ArrayList<ResourceRequest>();
  ask1.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),
      ResourceRequest.ANY, BuilderUtils.newResource(GB, 1), 1));
  fs.allocate(appAttemptId1, ask1, emptyId, null, null);

  // Ask for a 2 GB container for app 2
  List<ResourceRequest> ask2 = new ArrayList<ResourceRequest>();
  ask2.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),
      ResourceRequest.ANY, BuilderUtils.newResource(2 * GB, 1), 1));
  fs.allocate(appAttemptId2, ask2, emptyId, null, null);
  
  // Trigger container assignment
  fs.handle(new NodeUpdateSchedulerEvent(n1));
  
  // Get the allocation for the applications and verify headroom
  Allocation allocation1 = fs.allocate(appAttemptId1, emptyAsk, emptyId, null, null);
  Assert.assertEquals("Allocation headroom", 1 * GB,
      allocation1.getResourceLimit().getMemory());

  Allocation allocation2 = fs.allocate(appAttemptId2, emptyAsk, emptyId, null, null);
  Assert.assertEquals("Allocation headroom", 1 * GB,
      allocation2.getResourceLimit().getMemory());

  rm.stop();
}
 
Example 13
Source File: TestRMContainerImpl.java    From hadoop with Apache License 2.0
@Test
public void testExpireWhileRunning() {

  DrainDispatcher drainDispatcher = new DrainDispatcher();
  EventHandler<RMAppAttemptEvent> appAttemptEventHandler = mock(EventHandler.class);
  EventHandler generic = mock(EventHandler.class);
  drainDispatcher.register(RMAppAttemptEventType.class,
      appAttemptEventHandler);
  drainDispatcher.register(RMNodeEventType.class, generic);
  drainDispatcher.init(new YarnConfiguration());
  drainDispatcher.start();
  NodeId nodeId = BuilderUtils.newNodeId("host", 3425);
  ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
  ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
      appId, 1);
  ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
  ContainerAllocationExpirer expirer = mock(ContainerAllocationExpirer.class);

  Resource resource = BuilderUtils.newResource(512, 1, 1);
  Priority priority = BuilderUtils.newPriority(5);

  Container container = BuilderUtils.newContainer(containerId, nodeId,
      "host:3465", resource, priority, null);

  RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
  SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class);
  RMContext rmContext = mock(RMContext.class);
  when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
  when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
  when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
  when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher);
  when(rmContext.getYarnConfiguration()).thenReturn(new YarnConfiguration());
  RMContainer rmContainer = new RMContainerImpl(container, appAttemptId,
      nodeId, "user", rmContext);

  assertEquals(RMContainerState.NEW, rmContainer.getState());
  assertEquals(resource, rmContainer.getAllocatedResource());
  assertEquals(nodeId, rmContainer.getAllocatedNode());
  assertEquals(priority, rmContainer.getAllocatedPriority());
  verify(writer).containerStarted(any(RMContainer.class));
  verify(publisher).containerCreated(any(RMContainer.class), anyLong());

  rmContainer.handle(new RMContainerEvent(containerId,
      RMContainerEventType.START));
  drainDispatcher.await();
  assertEquals(RMContainerState.ALLOCATED, rmContainer.getState());

  rmContainer.handle(new RMContainerEvent(containerId,
      RMContainerEventType.ACQUIRED));
  drainDispatcher.await();
  assertEquals(RMContainerState.ACQUIRED, rmContainer.getState());

  rmContainer.handle(new RMContainerEvent(containerId,
      RMContainerEventType.LAUNCHED));
  drainDispatcher.await();
  assertEquals(RMContainerState.RUNNING, rmContainer.getState());
  assertEquals("http://host:3465/node/containerlogs/container_1_0001_01_000001/user",
      rmContainer.getLogURL());

  // In RUNNING state. Verify EXPIRE and associated actions.
  reset(appAttemptEventHandler);
  ContainerStatus containerStatus = SchedulerUtils
      .createAbnormalContainerStatus(containerId,
          SchedulerUtils.EXPIRED_CONTAINER);
  rmContainer.handle(new RMContainerFinishedEvent(containerId,
      containerStatus, RMContainerEventType.EXPIRE));
  drainDispatcher.await();
  assertEquals(RMContainerState.RUNNING, rmContainer.getState());
  verify(writer, never()).containerFinished(any(RMContainer.class));
  verify(publisher, never()).containerFinished(any(RMContainer.class),
      anyLong());
}
 
Example 14
Source File: TestContainerLogsPage.java    From hadoop with Apache License 2.0
@Test(timeout=30000)
public void testContainerLogDirs() throws IOException, YarnException {
  File absLogDir = new File("target",
    TestNMWebServer.class.getSimpleName() + "LogDir").getAbsoluteFile();
  String logdirwithFile = absLogDir.toURI().toString();
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.NM_LOG_DIRS, logdirwithFile);
  NodeHealthCheckerService healthChecker = createNodeHealthCheckerService(conf);
  healthChecker.init(conf);
  LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
  NMContext nmContext = new NodeManager.NMContext(null, null, dirsHandler,
      new ApplicationACLsManager(conf), new NMNullStateStoreService());
  // Add an application and the corresponding containers
  RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(conf);
  String user = "nobody";
  long clusterTimeStamp = 1234;
  ApplicationId appId = BuilderUtils.newApplicationId(recordFactory,
      clusterTimeStamp, 1);
  Application app = mock(Application.class);
  when(app.getUser()).thenReturn(user);
  when(app.getAppId()).thenReturn(appId);
  ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
      appId, 1);
  ContainerId container1 = BuilderUtils.newContainerId(recordFactory, appId,
      appAttemptId, 0);
  nmContext.getApplications().put(appId, app);

  MockContainer container =
      new MockContainer(appAttemptId, new AsyncDispatcher(), conf, user,
          appId, 1);
  container.setState(ContainerState.RUNNING);
  nmContext.getContainers().put(container1, container);   
  List<File> files =
      ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
  Assert.assertTrue(!(files.get(0).toString().contains("file:")));
  
  // After container is completed, it is removed from nmContext
  nmContext.getContainers().remove(container1);
  Assert.assertNull(nmContext.getContainers().get(container1));
  files = ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
  Assert.assertTrue(!(files.get(0).toString().contains("file:")));

  // Create a new context to check if correct container log dirs are fetched
  // on full disk.
  LocalDirsHandlerService dirsHandlerForFullDisk = spy(dirsHandler);
  // good log dirs are empty and nm log dir is in the full log dir list.
  when(dirsHandlerForFullDisk.getLogDirs()).
      thenReturn(new ArrayList<String>());
  when(dirsHandlerForFullDisk.getLogDirsForRead()).
      thenReturn(Arrays.asList(new String[] {absLogDir.getAbsolutePath()}));
  nmContext = new NodeManager.NMContext(null, null, dirsHandlerForFullDisk,
      new ApplicationACLsManager(conf), new NMNullStateStoreService());
  nmContext.getApplications().put(appId, app);
  container.setState(ContainerState.RUNNING);
  nmContext.getContainers().put(container1, container);
  List<File> dirs =
      ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
  File containerLogDir = new File(absLogDir, appId + "/" + container1);
  Assert.assertTrue(dirs.contains(containerLogDir));
}
 
Example 15
Source File: TestLogAggregationService.java    From hadoop with Apache License 2.0
@Test
public void testFailedDirsLocalFileDeletionAfterUpload() throws Exception {

  // setup conf and services
  DeletionService mockDelService = mock(DeletionService.class);
  File[] localLogDirs =
      TestNonAggregatingLogHandler.getLocalLogDirFiles(this.getClass()
        .getName(), 7);
  final List<String> localLogDirPaths =
      new ArrayList<String>(localLogDirs.length);
  for (int i = 0; i < localLogDirs.length; i++) {
    localLogDirPaths.add(localLogDirs[i].getAbsolutePath());
  }

  String localLogDirsString = StringUtils.join(localLogDirPaths, ",");

  this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDirsString);
  this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
    this.remoteRootLogDir.getAbsolutePath());
  this.conf.setLong(YarnConfiguration.NM_DISK_HEALTH_CHECK_INTERVAL_MS, 500);

  ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);
  ApplicationAttemptId appAttemptId =
      BuilderUtils.newApplicationAttemptId(application1, 1);

  this.dirsHandler = new LocalDirsHandlerService();
  LocalDirsHandlerService mockDirsHandler = mock(LocalDirsHandlerService.class);

  LogAggregationService logAggregationService =
      spy(new LogAggregationService(dispatcher, this.context, mockDelService,
        mockDirsHandler));
  AbstractFileSystem spylfs =
      spy(FileContext.getLocalFSFileContext().getDefaultFileSystem());
  FileContext lfs = FileContext.getFileContext(spylfs, conf);
  doReturn(lfs).when(logAggregationService).getLocalFileContext(
    isA(Configuration.class));

  logAggregationService.init(this.conf);
  logAggregationService.start();

  TestNonAggregatingLogHandler.runMockedFailedDirs(logAggregationService,
    application1, user, mockDelService, mockDirsHandler, conf, spylfs, lfs,
    localLogDirs);

  logAggregationService.stop();
  assertEquals(0, logAggregationService.getNumAggregators());
  verify(logAggregationService).closeFileSystems(
    any(UserGroupInformation.class));

  ApplicationEvent expectedEvents[] =
      new ApplicationEvent[] {
          new ApplicationEvent(appAttemptId.getApplicationId(),
            ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
          new ApplicationEvent(appAttemptId.getApplicationId(),
            ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED) };

  checkEvents(appEventHandler, expectedEvents, true, "getType",
    "getApplicationID");
}
 
Example 16
Source File: TestLogAggregationService.java    From hadoop with Apache License 2.0
private void verifyLocalFileDeletion(
    LogAggregationService logAggregationService) throws Exception {
  logAggregationService.init(this.conf);
  logAggregationService.start();

  ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);

  // AppLogDir should be created
  File app1LogDir =
      new File(localLogDir, ConverterUtils.toString(application1));
  app1LogDir.mkdir();
  logAggregationService
      .handle(new LogHandlerAppStartedEvent(
          application1, this.user, null,
          ContainerLogsRetentionPolicy.ALL_CONTAINERS, this.acls));

  ApplicationAttemptId appAttemptId =
      BuilderUtils.newApplicationAttemptId(application1, 1);
  ContainerId container11 = BuilderUtils.newContainerId(appAttemptId, 1);
  // Simulate log-file creation
  writeContainerLogs(app1LogDir, container11, new String[] { "stdout",
      "stderr", "syslog" });
  logAggregationService.handle(
      new LogHandlerContainerFinishedEvent(container11, 0));

  logAggregationService.handle(new LogHandlerAppFinishedEvent(
      application1));

  logAggregationService.stop();
  assertEquals(0, logAggregationService.getNumAggregators());
  // ensure filesystems were closed
  verify(logAggregationService).closeFileSystems(
      any(UserGroupInformation.class));
  verify(delSrvc).delete(eq(user), eq((Path) null),
    eq(new Path(app1LogDir.getAbsolutePath())));
  delSrvc.stop();
  
  String containerIdStr = ConverterUtils.toString(container11);
  File containerLogDir = new File(app1LogDir, containerIdStr);
  for (String fileType : new String[] { "stdout", "stderr", "syslog" }) {
    File f = new File(containerLogDir, fileType);
    Assert.assertFalse("check "+f, f.exists());
  }

  Assert.assertFalse(app1LogDir.exists());

  Path logFilePath =
      logAggregationService.getRemoteNodeLogFileForApp(application1,
          this.user);

  Assert.assertTrue("Log file [" + logFilePath + "] not found", new File(
      logFilePath.toUri().getPath()).exists());
  
  dispatcher.await();
  
  ApplicationEvent expectedEvents[] = new ApplicationEvent[]{
      new ApplicationEvent(
          appAttemptId.getApplicationId(),
          ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
      new ApplicationEvent(
          appAttemptId.getApplicationId(),
          ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)
  };

  checkEvents(appEventHandler, expectedEvents, true, "getType",
      "getApplicationID");
}
 
Example 17
Source File: TestContainerLogsPage.java    From big-c with Apache License 2.0
@Test(timeout=30000)
public void testContainerLogDirs() throws IOException, YarnException {
  File absLogDir = new File("target",
    TestNMWebServer.class.getSimpleName() + "LogDir").getAbsoluteFile();
  String logdirwithFile = absLogDir.toURI().toString();
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.NM_LOG_DIRS, logdirwithFile);
  NodeHealthCheckerService healthChecker = new NodeHealthCheckerService();
  healthChecker.init(conf);
  LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
  NMContext nmContext = new NodeManager.NMContext(null, null, dirsHandler,
      new ApplicationACLsManager(conf), new NMNullStateStoreService(), null);
  // Add an application and the corresponding containers
  RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(conf);
  String user = "nobody";
  long clusterTimeStamp = 1234;
  ApplicationId appId = BuilderUtils.newApplicationId(recordFactory,
      clusterTimeStamp, 1);
  Application app = mock(Application.class);
  when(app.getUser()).thenReturn(user);
  when(app.getAppId()).thenReturn(appId);
  ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
      appId, 1);
  ContainerId container1 = BuilderUtils.newContainerId(recordFactory, appId,
      appAttemptId, 0);
  nmContext.getApplications().put(appId, app);

  MockContainer container =
      new MockContainer(appAttemptId, new AsyncDispatcher(), conf, user,
          appId, 1);
  container.setState(ContainerState.RUNNING);
  nmContext.getContainers().put(container1, container);   
  List<File> files =
      ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
  Assert.assertTrue(!(files.get(0).toString().contains("file:")));
  
  // After container is completed, it is removed from nmContext
  nmContext.getContainers().remove(container1);
  Assert.assertNull(nmContext.getContainers().get(container1));
  files = ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
  Assert.assertTrue(!(files.get(0).toString().contains("file:")));

  // Create a new context to check if correct container log dirs are fetched
  // on full disk.
  LocalDirsHandlerService dirsHandlerForFullDisk = spy(dirsHandler);
  // good log dirs are empty and nm log dir is in the full log dir list.
  when(dirsHandlerForFullDisk.getLogDirs()).
      thenReturn(new ArrayList<String>());
  when(dirsHandlerForFullDisk.getLogDirsForRead()).
      thenReturn(Arrays.asList(new String[] {absLogDir.getAbsolutePath()}));
  nmContext = new NodeManager.NMContext(null, null, dirsHandlerForFullDisk,
      new ApplicationACLsManager(conf), new NMNullStateStoreService(), null);
  nmContext.getApplications().put(appId, app);
  container.setState(ContainerState.RUNNING);
  nmContext.getContainers().put(container1, container);
  List<File> dirs =
      ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
  File containerLogDir = new File(absLogDir, appId + "/" + container1);
  Assert.assertTrue(dirs.contains(containerLogDir));
}
 
Example 18
Source File: TestRMContainerImpl.java    From big-c with Apache License 2.0
@Test
public void testReleaseWhileRunning() {

  DrainDispatcher drainDispatcher = new DrainDispatcher();
  EventHandler<RMAppAttemptEvent> appAttemptEventHandler = mock(EventHandler.class);
  EventHandler generic = mock(EventHandler.class);
  drainDispatcher.register(RMAppAttemptEventType.class,
      appAttemptEventHandler);
  drainDispatcher.register(RMNodeEventType.class, generic);
  drainDispatcher.init(new YarnConfiguration());
  drainDispatcher.start();
  NodeId nodeId = BuilderUtils.newNodeId("host", 3425);
  ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
  ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
      appId, 1);
  ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
  ContainerAllocationExpirer expirer = mock(ContainerAllocationExpirer.class);

  Resource resource = BuilderUtils.newResource(512, 1);
  Priority priority = BuilderUtils.newPriority(5);

  Container container = BuilderUtils.newContainer(containerId, nodeId,
      "host:3465", resource, priority, null);
  ConcurrentMap<ApplicationId, RMApp> rmApps =
      spy(new ConcurrentHashMap<ApplicationId, RMApp>());
  RMApp rmApp = mock(RMApp.class);
  when(rmApp.getRMAppAttempt((ApplicationAttemptId)Matchers.any())).thenReturn(null);
  Mockito.doReturn(rmApp).when(rmApps).get((ApplicationId)Matchers.any());

  RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
  SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class);
  RMContext rmContext = mock(RMContext.class);
  when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
  when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
  when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
  when(rmContext.getRMApps()).thenReturn(rmApps);
  when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher);
  when(rmContext.getYarnConfiguration()).thenReturn(new YarnConfiguration());
  RMContainer rmContainer = new RMContainerImpl(container, appAttemptId,
      nodeId, "user", rmContext);

  assertEquals(RMContainerState.NEW, rmContainer.getState());
  assertEquals(resource, rmContainer.getAllocatedResource());
  assertEquals(nodeId, rmContainer.getAllocatedNode());
  assertEquals(priority, rmContainer.getAllocatedPriority());
  verify(writer).containerStarted(any(RMContainer.class));
  verify(publisher).containerCreated(any(RMContainer.class), anyLong());

  rmContainer.handle(new RMContainerEvent(containerId,
      RMContainerEventType.START));
  drainDispatcher.await();
  assertEquals(RMContainerState.ALLOCATED, rmContainer.getState());
  rmContainer.handle(new RMContainerEvent(containerId,
      RMContainerEventType.ACQUIRED));
  drainDispatcher.await();
  assertEquals(RMContainerState.ACQUIRED, rmContainer.getState());

  rmContainer.handle(new RMContainerEvent(containerId,
      RMContainerEventType.LAUNCHED));
  drainDispatcher.await();
  assertEquals(RMContainerState.RUNNING, rmContainer.getState());
  assertEquals("http://host:3465/node/containerlogs/container_1_0001_01_000001/user",
      rmContainer.getLogURL());

  // In RUNNING state. Verify RELEASED and associated actions.
  reset(appAttemptEventHandler);
  ContainerStatus containerStatus = SchedulerUtils
      .createAbnormalContainerStatus(containerId,
          SchedulerUtils.RELEASED_CONTAINER);
  rmContainer.handle(new RMContainerFinishedEvent(containerId,
      containerStatus, RMContainerEventType.RELEASED));
  drainDispatcher.await();
  assertEquals(RMContainerState.RELEASED, rmContainer.getState());
  assertEquals(SchedulerUtils.RELEASED_CONTAINER,
      rmContainer.getDiagnosticsInfo());
  assertEquals(ContainerExitStatus.ABORTED,
      rmContainer.getContainerExitStatus());
  assertEquals(ContainerState.COMPLETE, rmContainer.getContainerState());
  verify(writer).containerFinished(any(RMContainer.class));
  verify(publisher).containerFinished(any(RMContainer.class), anyLong());

  ArgumentCaptor<RMAppAttemptContainerFinishedEvent> captor = ArgumentCaptor
      .forClass(RMAppAttemptContainerFinishedEvent.class);
  verify(appAttemptEventHandler).handle(captor.capture());
  RMAppAttemptContainerFinishedEvent cfEvent = captor.getValue();
  assertEquals(appAttemptId, cfEvent.getApplicationAttemptId());
  assertEquals(containerStatus, cfEvent.getContainerStatus());
  assertEquals(RMAppAttemptEventType.CONTAINER_FINISHED, cfEvent.getType());
  
  // In RELEASED state. A FINISHED event may come in.
  rmContainer.handle(new RMContainerFinishedEvent(containerId, SchedulerUtils
      .createAbnormalContainerStatus(containerId, "FinishedContainer"),
      RMContainerEventType.FINISHED));
  assertEquals(RMContainerState.RELEASED, rmContainer.getState());
}
 
Example 19
Source File: ApplicationIdUtils.java    From incubator-tajo with Apache License 2.0
public static ApplicationAttemptId createApplicationAttemptId(QueryId queryId, int attemptId) {
  return BuilderUtils.newApplicationAttemptId(queryIdToAppId(queryId), attemptId);
}
 
Example 20
Source File: TestCapacityScheduler.java    From big-c with Apache License 2.0
@Test
public void testApplicationHeadRoom() throws Exception {
  Configuration conf = new Configuration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
      ResourceScheduler.class);
  MockRM rm = new MockRM(conf);
  rm.start();
  CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();

  ApplicationId appId = BuilderUtils.newApplicationId(100, 1);
  ApplicationAttemptId appAttemptId =
      BuilderUtils.newApplicationAttemptId(appId, 1);

  RMAppAttemptMetrics attemptMetric =
      new RMAppAttemptMetrics(appAttemptId, rm.getRMContext());
  RMAppImpl app = mock(RMAppImpl.class);
  when(app.getApplicationId()).thenReturn(appId);
  RMAppAttemptImpl attempt = mock(RMAppAttemptImpl.class);
  when(attempt.getAppAttemptId()).thenReturn(appAttemptId);
  when(attempt.getRMAppAttemptMetrics()).thenReturn(attemptMetric);
  when(app.getCurrentAppAttempt()).thenReturn(attempt);

  rm.getRMContext().getRMApps().put(appId, app);

  SchedulerEvent addAppEvent =
      new AppAddedSchedulerEvent(appId, "default", "user");
  cs.handle(addAppEvent);
  SchedulerEvent addAttemptEvent =
      new AppAttemptAddedSchedulerEvent(appAttemptId, false);
  cs.handle(addAttemptEvent);

  Allocation allocate =
      cs.allocate(appAttemptId, Collections.<ResourceRequest> emptyList(),
          Collections.<ContainerId> emptyList(), null, null);

  Assert.assertNotNull(attempt);

  Assert
      .assertEquals(Resource.newInstance(0, 0), allocate.getResourceLimit());
  Assert.assertEquals(Resource.newInstance(0, 0),
      attemptMetric.getApplicationAttemptHeadroom());

  // Add a node to cluster
  Resource newResource = Resource.newInstance(4 * GB, 1);
  RMNode node = MockNodes.newNodeInfo(0, newResource, 1, "127.0.0.1");
  cs.handle(new NodeAddedSchedulerEvent(node));

  allocate =
      cs.allocate(appAttemptId, Collections.<ResourceRequest> emptyList(),
          Collections.<ContainerId> emptyList(), null, null);

  // All resources should be sent as headroom
  Assert.assertEquals(newResource, allocate.getResourceLimit());
  Assert.assertEquals(newResource,
      attemptMetric.getApplicationAttemptHeadroom());

  rm.stop();
}