Java Code Examples for org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest#newInstance()

The following examples show how to use org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest#newInstance(). Each example is taken from an open-source project; the source file, project, and license are noted above the code.
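
Before the examples, a minimal sketch of the pattern they all follow: build a GetApplicationsRequest with one of the newInstance() overloads and pass it to ApplicationClientProtocol#getApplications(). The listApps helper and its rmClient parameter below are illustrative assumptions, not part of the YARN API; the newInstance() overloads themselves are the ones used in the examples.

import java.io.IOException;
import java.util.EnumSet;
import java.util.List;
import java.util.Set;

import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.exceptions.YarnException;

public class GetApplicationsRequestSketch {

  // Hypothetical helper: list applications filtered by type and state.
  // rmClient is assumed to be an already-connected ApplicationClientProtocol
  // proxy to the ResourceManager (e.g. the one YarnClientImpl uses internally).
  static List<ApplicationReport> listApps(ApplicationClientProtocol rmClient,
      Set<String> applicationTypes,
      EnumSet<YarnApplicationState> applicationStates)
      throws YarnException, IOException {
    // Other overloads used in the examples below:
    //   GetApplicationsRequest.newInstance()                  - no filtering
    //   GetApplicationsRequest.newInstance(applicationStates) - filter by state only
    //   GetApplicationsRequest.newInstance(applicationTypes)  - filter by type only
    GetApplicationsRequest request =
        GetApplicationsRequest.newInstance(applicationTypes, applicationStates);
    GetApplicationsResponse response = rmClient.getApplications(request);
    return response.getApplicationList();
  }
}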
Example 1
Source File: SparkIntegrationTest.java    From zeppelin with Apache License 2.0
private void waitForYarnAppCompleted(int timeout) throws YarnException {
  long start = System.currentTimeMillis();
  boolean yarnAppCompleted = false;
  while ((System.currentTimeMillis() - start) < timeout) {
    GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
    GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
    if (response.getApplicationList().isEmpty()) {
      yarnAppCompleted = true;
      break;
    }
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      e.printStackTrace();
    }
  }
  assertTrue("Yarn app is not completed in " + timeout + " milliseconds.", yarnAppCompleted);
}
 
Example 2
Source File: FlinkIntegrationTest.java    From zeppelin with Apache License 2.0
@Test
public void testYarnMode() throws IOException, InterpreterException, YarnException {
  InterpreterSetting flinkInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("flink");
  flinkInterpreterSetting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());
  flinkInterpreterSetting.setProperty("FLINK_HOME", flinkHome);
  flinkInterpreterSetting.setProperty("PATH", hadoopHome + "/bin:" + System.getenv("PATH"));
  flinkInterpreterSetting.setProperty("ZEPPELIN_CONF_DIR", zeppelin.getZeppelinConfDir().getAbsolutePath());
  flinkInterpreterSetting.setProperty("flink.execution.mode", "YARN");
  testInterpreterBasics();

  // 1 yarn application launched
  GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
  GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
  assertEquals(1, response.getApplicationList().size());

  interpreterSettingManager.close();
}
 
Example 3
Source File: YarnInterpreterLauncherIntegrationTest.java    From zeppelin with Apache License 2.0
@Test
public void testLaunchShellInYarn() throws YarnException, InterpreterException, InterruptedException {
  InterpreterSetting shellInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("sh");
  shellInterpreterSetting.setProperty("zeppelin.interpreter.launcher", "yarn");
  shellInterpreterSetting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());

  Interpreter shellInterpreter = interpreterFactory.getInterpreter("sh", new ExecutionContextBuilder().setUser("user1").setNoteId("note1").setDefaultInterpreterGroup("sh").createExecutionContext());

  InterpreterContext context = new InterpreterContext.Builder().setNoteId("note1").setParagraphId("paragraph_1").build();
  InterpreterResult interpreterResult = shellInterpreter.interpret("pwd", context);
  assertEquals(InterpreterResult.Code.SUCCESS, interpreterResult.code());
  assertTrue(interpreterResult.toString(), interpreterResult.message().get(0).getData().contains("/usercache/"));

  Thread.sleep(1000);
  // 1 yarn application launched
  GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
  GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
  assertEquals(1, response.getApplicationList().size());

  interpreterSettingManager.close();
}
 
Example 4
Source File: SparkIntegrationTest.java    From zeppelin with Apache License 2.0
@Test
public void testYarnClusterMode() throws IOException, YarnException, InterruptedException, InterpreterException, XmlPullParserException {
  InterpreterSetting sparkInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("spark");
  sparkInterpreterSetting.setProperty("spark.master", "yarn-cluster");
  sparkInterpreterSetting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());
  sparkInterpreterSetting.setProperty("SPARK_HOME", sparkHome);
  sparkInterpreterSetting.setProperty("ZEPPELIN_CONF_DIR", zeppelin.getZeppelinConfDir().getAbsolutePath());
  sparkInterpreterSetting.setProperty("zeppelin.spark.useHiveContext", "false");
  sparkInterpreterSetting.setProperty("zeppelin.pyspark.useIPython", "false");
  sparkInterpreterSetting.setProperty("PYSPARK_PYTHON", getPythonExec());
  sparkInterpreterSetting.setProperty("spark.driver.memory", "512m");
  sparkInterpreterSetting.setProperty("zeppelin.spark.scala.color", "false");
  sparkInterpreterSetting.setProperty("zeppelin.spark.deprecatedMsg.show", "false");

  try {
    setUpSparkInterpreterSetting(sparkInterpreterSetting);
    testInterpreterBasics();

    // 1 yarn application launched
    GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
    GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
    assertEquals(1, response.getApplicationList().size());

  } finally {
    interpreterSettingManager.close();
    waitForYarnAppCompleted(30 * 1000);
  }
}
 
Example 5
Source File: SparkIntegrationTest.java    From zeppelin with Apache License 2.0
@Test
public void testYarnClientMode() throws IOException, YarnException, InterruptedException, InterpreterException, XmlPullParserException {
  InterpreterSetting sparkInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("spark");
  sparkInterpreterSetting.setProperty("spark.master", "yarn-client");
  sparkInterpreterSetting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());
  sparkInterpreterSetting.setProperty("SPARK_HOME", sparkHome);
  sparkInterpreterSetting.setProperty("ZEPPELIN_CONF_DIR", zeppelin.getZeppelinConfDir().getAbsolutePath());
  sparkInterpreterSetting.setProperty("zeppelin.spark.useHiveContext", "false");
  sparkInterpreterSetting.setProperty("zeppelin.pyspark.useIPython", "false");
  sparkInterpreterSetting.setProperty("PYSPARK_PYTHON", getPythonExec());
  sparkInterpreterSetting.setProperty("spark.driver.memory", "512m");
  sparkInterpreterSetting.setProperty("zeppelin.spark.scala.color", "false");
  sparkInterpreterSetting.setProperty("zeppelin.spark.deprecatedMsg.show", "false");

  try {
    setUpSparkInterpreterSetting(sparkInterpreterSetting);
    testInterpreterBasics();

    // 1 yarn application launched
    GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
    GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
    assertEquals(1, response.getApplicationList().size());

  } finally {
    interpreterSettingManager.close();
    waitForYarnAppCompleted(30 * 1000);
  }
}
 
Example 6
Source File: TestApplicationHistoryClientService.java    From hadoop with Apache License 2.0
@Test
public void testApplications() throws IOException, YarnException {
  ApplicationId appId = ApplicationId.newInstance(0, 1);
  ApplicationId appId1 = ApplicationId.newInstance(0, 2);
  GetApplicationsRequest request = GetApplicationsRequest.newInstance();
  GetApplicationsResponse response =
      clientService.getApplications(request);
  List<ApplicationReport> appReport = response.getApplicationList();
  Assert.assertNotNull(appReport);
  Assert.assertEquals(appId, appReport.get(0).getApplicationId());
  Assert.assertEquals(appId1, appReport.get(1).getApplicationId());
}
 
Example 7
Source File: AHSClientImpl.java    From hadoop with Apache License 2.0
@Override
public List<ApplicationReport> getApplications() throws YarnException,
    IOException {
  GetApplicationsRequest request = GetApplicationsRequest.newInstance(null,
      null);
  GetApplicationsResponse response = ahsClient.getApplications(request);
  return response.getApplicationList();
}
 
Example 8
Source File: YarnClientImpl.java    From hadoop with Apache License 2.0
@Override
public List<ApplicationReport> getApplications(Set<String> applicationTypes,
    EnumSet<YarnApplicationState> applicationStates) throws YarnException,
    IOException {
  GetApplicationsRequest request =
      GetApplicationsRequest.newInstance(applicationTypes, applicationStates);
  GetApplicationsResponse response = rmClient.getApplications(request);
  return response.getApplicationList();
}
 
Example 9
Source File: YarnClientImpl.java    From big-c with Apache License 2.0
@Override
public List<ApplicationReport> getApplications(Set<String> applicationTypes,
    EnumSet<YarnApplicationState> applicationStates) throws YarnException,
    IOException {
  GetApplicationsRequest request =
      GetApplicationsRequest.newInstance(applicationTypes, applicationStates);
  GetApplicationsResponse response = rmClient.getApplications(request);
  return response.getApplicationList();
}
 
Example 10
Source File: TestApplicationHistoryClientService.java    From big-c with Apache License 2.0
@Test
public void testApplications() throws IOException, YarnException {
  ApplicationId appId = ApplicationId.newInstance(0, 1);
  ApplicationId appId1 = ApplicationId.newInstance(0, 2);
  GetApplicationsRequest request = GetApplicationsRequest.newInstance();
  GetApplicationsResponse response =
      clientService.getApplications(request);
  List<ApplicationReport> appReport = response.getApplicationList();
  Assert.assertNotNull(appReport);
  Assert.assertEquals(appId, appReport.get(0).getApplicationId());
  Assert.assertEquals(appId1, appReport.get(1).getApplicationId());
}
 
Example 11
Source File: FlinkIntegrationTest.java    From zeppelin with Apache License 2.0
@Test
public void testLocalMode() throws IOException, YarnException, InterpreterException {
  InterpreterSetting flinkInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("flink");
  flinkInterpreterSetting.setProperty("FLINK_HOME", flinkHome);
  flinkInterpreterSetting.setProperty("ZEPPELIN_CONF_DIR", zeppelin.getZeppelinConfDir().getAbsolutePath());

  testInterpreterBasics();

  // no yarn application launched
  GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
  GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
  assertEquals(0, response.getApplicationList().size());

  interpreterSettingManager.close();
}
 
Example 12
Source File: TestClientRMService.java    From big-c with Apache License 2.0
@Test
public void testForceKillApplication() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  MockRM rm = new MockRM();
  rm.init(conf);
  rm.start();

  ClientRMService rmService = rm.getClientRMService();
  GetApplicationsRequest getRequest = GetApplicationsRequest.newInstance(
      EnumSet.of(YarnApplicationState.KILLED));

  RMApp app1 = rm.submitApp(1024);
  RMApp app2 = rm.submitApp(1024, true);

  assertEquals("Incorrect number of apps in the RM", 0,
      rmService.getApplications(getRequest).getApplicationList().size());

  KillApplicationRequest killRequest1 =
      KillApplicationRequest.newInstance(app1.getApplicationId());
  KillApplicationRequest killRequest2 =
      KillApplicationRequest.newInstance(app2.getApplicationId());

  int killAttemptCount = 0;
  for (int i = 0; i < 100; i++) {
    KillApplicationResponse killResponse1 =
        rmService.forceKillApplication(killRequest1);
    killAttemptCount++;
    if (killResponse1.getIsKillCompleted()) {
      break;
    }
    Thread.sleep(10);
  }
  assertTrue("Kill attempt count should be greater than 1 for managed AMs",
      killAttemptCount > 1);
  assertEquals("Incorrect number of apps in the RM", 1,
      rmService.getApplications(getRequest).getApplicationList().size());

  KillApplicationResponse killResponse2 =
      rmService.forceKillApplication(killRequest2);
  assertTrue("Killing UnmanagedAM should falsely acknowledge true",
      killResponse2.getIsKillCompleted());
  for (int i = 0; i < 100; i++) {
    if (2 ==
        rmService.getApplications(getRequest).getApplicationList().size()) {
      break;
    }
    Thread.sleep(10);
  }
  assertEquals("Incorrect number of apps in the RM", 2,
      rmService.getApplications(getRequest).getApplicationList().size());
}
 
Example 13
Source File: YarnInterpreterLauncherIntegrationTest.java    From zeppelin with Apache License 2.0
@Test
public void testJdbcPython_YarnLauncher() throws InterpreterException, YarnException, InterruptedException {
  InterpreterSetting jdbcInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("jdbc");
  jdbcInterpreterSetting.setProperty("default.driver", "com.mysql.jdbc.Driver");
  jdbcInterpreterSetting.setProperty("default.url", "jdbc:mysql://localhost:3306/");
  jdbcInterpreterSetting.setProperty("default.user", "root");
  jdbcInterpreterSetting.setProperty("zeppelin.interpreter.launcher", "yarn");
  jdbcInterpreterSetting.setProperty("zeppelin.interpreter.yarn.resource.memory", "512");
  jdbcInterpreterSetting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());

  Dependency dependency = new Dependency("mysql:mysql-connector-java:5.1.46");
  jdbcInterpreterSetting.setDependencies(Lists.newArrayList(dependency));
  interpreterSettingManager.restart(jdbcInterpreterSetting.getId());
  jdbcInterpreterSetting.waitForReady(60 * 1000);

  InterpreterSetting pythonInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("python");
  pythonInterpreterSetting.setProperty("zeppelin.interpreter.launcher", "yarn");
  pythonInterpreterSetting.setProperty("zeppelin.interpreter.yarn.resource.memory", "512");
  pythonInterpreterSetting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath());

  Interpreter jdbcInterpreter = interpreterFactory.getInterpreter("jdbc", new ExecutionContextBuilder().setUser("user1").setNoteId("note1").setDefaultInterpreterGroup("test").createExecutionContext());
  assertNotNull("JdbcInterpreter is null", jdbcInterpreter);

  InterpreterContext context = new InterpreterContext.Builder()
          .setNoteId("note1")
          .setParagraphId("paragraph_1")
          .setAuthenticationInfo(AuthenticationInfo.ANONYMOUS)
          .build();
  InterpreterResult interpreterResult = jdbcInterpreter.interpret("show databases;", context);
  assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());

  context.getLocalProperties().put("saveAs", "table_1");
  interpreterResult = jdbcInterpreter.interpret("SELECT 1 as c1, 2 as c2;", context);
  assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());
  assertEquals(1, interpreterResult.message().size());
  assertEquals(InterpreterResult.Type.TABLE, interpreterResult.message().get(0).getType());
  assertEquals("c1\tc2\n1\t2\n", interpreterResult.message().get(0).getData());

  // read table_1 from python interpreter
  Interpreter pythonInterpreter = interpreterFactory.getInterpreter("python", new ExecutionContextBuilder().setUser("user1").setNoteId("note1").setDefaultInterpreterGroup("test").createExecutionContext());
  assertNotNull("PythonInterpreter is null", pythonInterpreter);

  context = new InterpreterContext.Builder()
          .setNoteId("note1")
          .setParagraphId("paragraph_1")
          .setAuthenticationInfo(AuthenticationInfo.ANONYMOUS)
          .build();
  interpreterResult = pythonInterpreter.interpret("df=z.getAsDataFrame('table_1')\nz.show(df)", context);
  assertEquals(interpreterResult.toString(), InterpreterResult.Code.SUCCESS, interpreterResult.code());
  assertEquals(1, interpreterResult.message().size());
  assertEquals(InterpreterResult.Type.TABLE, interpreterResult.message().get(0).getType());
  assertEquals("c1\tc2\n1\t2\n", interpreterResult.message().get(0).getData());

  // 2 yarn application launched
  GetApplicationsRequest request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
  GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
  assertEquals(2, response.getApplicationList().size());

  interpreterSettingManager.close();

  // sleep for 5 seconds to make sure yarn apps are finished
  Thread.sleep(5 * 1000);
  request = GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
  response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService().getApplications(request);
  assertEquals(0, response.getApplicationList().size());
}
 
Example 14
Source File: TestGetApplicationsRequest.java    From hadoop with Apache License 2.0
@Test
public void testGetApplicationsRequest(){
  GetApplicationsRequest request = GetApplicationsRequest.newInstance();
  
  EnumSet<YarnApplicationState> appStates = 
    EnumSet.of(YarnApplicationState.ACCEPTED);
  request.setApplicationStates(appStates);
  
  Set<String> tags = new HashSet<String>();
  tags.add("tag1");
  request.setApplicationTags(tags);
  
  Set<String> types = new HashSet<String>();
  types.add("type1");
  request.setApplicationTypes(types);
  
  long startBegin = System.currentTimeMillis();
  long startEnd = System.currentTimeMillis() + 1;
  request.setStartRange(startBegin, startEnd);
  long finishBegin = System.currentTimeMillis() + 2;
  long finishEnd = System.currentTimeMillis() + 3;
  request.setFinishRange(finishBegin, finishEnd);
  
  long limit = 100L;
  request.setLimit(limit);
  
  Set<String> queues = new HashSet<String>();
  queues.add("queue1");
  request.setQueues(queues);
  
  
  Set<String> users = new HashSet<String>();
  users.add("user1");
  request.setUsers(users);
  
  ApplicationsRequestScope scope = ApplicationsRequestScope.ALL;
  request.setScope(scope);
  
  GetApplicationsRequest requestFromProto = new GetApplicationsRequestPBImpl(
      ((GetApplicationsRequestPBImpl)request).getProto());
  
  // verify the whole record equals with original record
  Assert.assertEquals(requestFromProto, request);

  // verify all properties are the same as original request
  Assert.assertEquals(
      "ApplicationStates from proto is not the same with original request",
      requestFromProto.getApplicationStates(), appStates);
  
  Assert.assertEquals(
      "ApplicationTags from proto is not the same with original request",
      requestFromProto.getApplicationTags(), tags);
  
  Assert.assertEquals(
      "ApplicationTypes from proto is not the same with original request",
      requestFromProto.getApplicationTypes(), types);
  
  Assert.assertEquals(
      "StartRange from proto is not the same with original request",
      requestFromProto.getStartRange(), new LongRange(startBegin, startEnd));
  
  Assert.assertEquals(
      "FinishRange from proto is not the same with original request",
      requestFromProto.getFinishRange(), new LongRange(finishBegin, finishEnd));
  
  Assert.assertEquals(
      "Limit from proto is not the same with original request",
      requestFromProto.getLimit(), limit);
  
  Assert.assertEquals(
      "Queues from proto is not the same with original request",
      requestFromProto.getQueues(), queues);
  
  Assert.assertEquals(
      "Users from proto is not the same with original request",
      requestFromProto.getUsers(), users);
}
 
Example 15
Source File: TestRMRestart.java    From big-c with Apache License 2.0
@Test (timeout = 60000)
public void testRMRestartGetApplicationList() throws Exception {
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);

  // start RM
  MockRM rm1 = createMockRM(conf, memStore);
  rm1.start();
  MockNM nm1 =
      new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
  nm1.registerNode();

  // a succeeded app.
  RMApp app0 = rm1.submitApp(200, "name", "user", null,
    false, "default", 1, null, "myType");
  MockAM am0 = launchAM(app0, rm1, nm1);
  finishApplicationMaster(app0, rm1, nm1, am0);

  // a failed app.
  RMApp app1 = rm1.submitApp(200, "name", "user", null,
    false, "default", 1, null, "myType");
  MockAM am1 = launchAM(app1, rm1, nm1);
  // fail the AM by sending CONTAINER_FINISHED event without registering.
  nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
  am1.waitForState(RMAppAttemptState.FAILED);
  rm1.waitForState(app1.getApplicationId(), RMAppState.FAILED);

  // a killed app.
  RMApp app2 = rm1.submitApp(200, "name", "user", null,
    false, "default", 1, null, "myType");
  MockAM am2 = launchAM(app2, rm1, nm1);
  rm1.killApp(app2.getApplicationId());
  rm1.waitForState(app2.getApplicationId(), RMAppState.KILLED);
  rm1.waitForState(am2.getApplicationAttemptId(), RMAppAttemptState.KILLED);

  // restart rm

  MockRM rm2 = new MockRM(conf, memStore) {
    @Override
    protected RMAppManager createRMAppManager() {
      return spy(super.createRMAppManager());
    }
  };
  rms.add(rm2);
  rm2.start();

  GetApplicationsRequest request1 =
      GetApplicationsRequest.newInstance(EnumSet.of(
        YarnApplicationState.FINISHED, YarnApplicationState.KILLED,
        YarnApplicationState.FAILED));
  GetApplicationsResponse response1 =
      rm2.getClientRMService().getApplications(request1);
  List<ApplicationReport> appList1 = response1.getApplicationList();

  // assert all applications exist according to application state after RM
  // restarts.
  boolean forApp0 = false, forApp1 = false, forApp2 = false;
  for (ApplicationReport report : appList1) {
    if (report.getApplicationId().equals(app0.getApplicationId())) {
      Assert.assertEquals(YarnApplicationState.FINISHED,
        report.getYarnApplicationState());
      forApp0 = true;
    }
    if (report.getApplicationId().equals(app1.getApplicationId())) {
      Assert.assertEquals(YarnApplicationState.FAILED,
        report.getYarnApplicationState());
      forApp1 = true;
    }
    if (report.getApplicationId().equals(app2.getApplicationId())) {
      Assert.assertEquals(YarnApplicationState.KILLED,
        report.getYarnApplicationState());
      forApp2 = true;
    }
  }
  Assert.assertTrue(forApp0 && forApp1 && forApp2);

  // assert all applications exist according to application type after RM
  // restarts.
  Set<String> appTypes = new HashSet<String>();
  appTypes.add("myType");
  GetApplicationsRequest request2 =
      GetApplicationsRequest.newInstance(appTypes);
  GetApplicationsResponse response2 =
      rm2.getClientRMService().getApplications(request2);
  List<ApplicationReport> appList2 = response2.getApplicationList();
  Assert.assertTrue(3 == appList2.size());

  // check application summary is logged for the completed apps after RM restart.
  verify(rm2.getRMAppManager(), times(3)).logApplicationSummary(
    isA(ApplicationId.class));
}
 
Example 16
Source File: TestRM.java    From big-c with Apache License 2.0
@Test (timeout = 80000)
public void testInvalidateAMHostPortWhenAMFailedOrKilled() throws Exception {
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
  MockRM rm1 = new MockRM(conf);
  rm1.start();

  // a succeeded app
  RMApp app1 = rm1.submitApp(200);
  MockNM nm1 =
      new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
  nm1.registerNode();
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
  MockRM.finishAMAndVerifyAppState(app1, rm1, nm1, am1);

  // a failed app
  RMApp app2 = rm1.submitApp(200);
  MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1);
  nm1.nodeHeartbeat(am2.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
  am2.waitForState(RMAppAttemptState.FAILED);
  rm1.waitForState(app2.getApplicationId(), RMAppState.FAILED);

  // a killed app
  RMApp app3 = rm1.submitApp(200);
  MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm1);
  rm1.killApp(app3.getApplicationId());
  rm1.waitForState(app3.getApplicationId(), RMAppState.KILLED);
  rm1.waitForState(am3.getApplicationAttemptId(), RMAppAttemptState.KILLED);

  GetApplicationsRequest request1 =
      GetApplicationsRequest.newInstance(EnumSet.of(
        YarnApplicationState.FINISHED, YarnApplicationState.KILLED,
        YarnApplicationState.FAILED));
  GetApplicationsResponse response1 =
      rm1.getClientRMService().getApplications(request1);
  List<ApplicationReport> appList1 = response1.getApplicationList();

  Assert.assertEquals(3, appList1.size());
  for (ApplicationReport report : appList1) {
    // killed/failed apps host and rpc port are invalidated.
    if (report.getApplicationId().equals(app2.getApplicationId())
        || report.getApplicationId().equals(app3.getApplicationId())) {
      Assert.assertEquals("N/A", report.getHost());
      Assert.assertEquals(-1, report.getRpcPort());
    }
    // succeeded app's host and rpc port is not invalidated
    if (report.getApplicationId().equals(app1.getApplicationId())) {
      Assert.assertFalse(report.getHost().equals("N/A"));
      Assert.assertTrue(report.getRpcPort() != -1);
    }
  }
}
 
Example 17
Source File: TestGetApplicationsRequest.java    From big-c with Apache License 2.0
@Test
public void testGetApplicationsRequest(){
  GetApplicationsRequest request = GetApplicationsRequest.newInstance();
  
  EnumSet<YarnApplicationState> appStates = 
    EnumSet.of(YarnApplicationState.ACCEPTED);
  request.setApplicationStates(appStates);
  
  Set<String> tags = new HashSet<String>();
  tags.add("tag1");
  request.setApplicationTags(tags);
  
  Set<String> types = new HashSet<String>();
  types.add("type1");
  request.setApplicationTypes(types);
  
  long startBegin = System.currentTimeMillis();
  long startEnd = System.currentTimeMillis() + 1;
  request.setStartRange(startBegin, startEnd);
  long finishBegin = System.currentTimeMillis() + 2;
  long finishEnd = System.currentTimeMillis() + 3;
  request.setFinishRange(finishBegin, finishEnd);
  
  long limit = 100L;
  request.setLimit(limit);
  
  Set<String> queues = new HashSet<String>();
  queues.add("queue1");
  request.setQueues(queues);
  
  
  Set<String> users = new HashSet<String>();
  users.add("user1");
  request.setUsers(users);
  
  ApplicationsRequestScope scope = ApplicationsRequestScope.ALL;
  request.setScope(scope);
  
  GetApplicationsRequest requestFromProto = new GetApplicationsRequestPBImpl(
      ((GetApplicationsRequestPBImpl)request).getProto());
  
  // verify the whole record equals with original record
  Assert.assertEquals(requestFromProto, request);

  // verify all properties are the same as original request
  Assert.assertEquals(
      "ApplicationStates from proto is not the same with original request",
      requestFromProto.getApplicationStates(), appStates);
  
  Assert.assertEquals(
      "ApplicationTags from proto is not the same with original request",
      requestFromProto.getApplicationTags(), tags);
  
  Assert.assertEquals(
      "ApplicationTypes from proto is not the same with original request",
      requestFromProto.getApplicationTypes(), types);
  
  Assert.assertEquals(
      "StartRange from proto is not the same with original request",
      requestFromProto.getStartRange(), new LongRange(startBegin, startEnd));
  
  Assert.assertEquals(
      "FinishRange from proto is not the same with original request",
      requestFromProto.getFinishRange(), new LongRange(finishBegin, finishEnd));
  
  Assert.assertEquals(
      "Limit from proto is not the same with original request",
      requestFromProto.getLimit(), limit);
  
  Assert.assertEquals(
      "Queues from proto is not the same with original request",
      requestFromProto.getQueues(), queues);
  
  Assert.assertEquals(
      "Users from proto is not the same with original request",
      requestFromProto.getUsers(), users);
}
 
Example 18
Source File: TestClientRMService.java    From hadoop with Apache License 2.0
@Test
public void testForceKillApplication() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  MockRM rm = new MockRM();
  rm.init(conf);
  rm.start();

  ClientRMService rmService = rm.getClientRMService();
  GetApplicationsRequest getRequest = GetApplicationsRequest.newInstance(
      EnumSet.of(YarnApplicationState.KILLED));

  RMApp app1 = rm.submitApp(1024);
  RMApp app2 = rm.submitApp(1024, true);

  assertEquals("Incorrect number of apps in the RM", 0,
      rmService.getApplications(getRequest).getApplicationList().size());

  KillApplicationRequest killRequest1 =
      KillApplicationRequest.newInstance(app1.getApplicationId());
  KillApplicationRequest killRequest2 =
      KillApplicationRequest.newInstance(app2.getApplicationId());

  int killAttemptCount = 0;
  for (int i = 0; i < 100; i++) {
    KillApplicationResponse killResponse1 =
        rmService.forceKillApplication(killRequest1);
    killAttemptCount++;
    if (killResponse1.getIsKillCompleted()) {
      break;
    }
    Thread.sleep(10);
  }
  assertTrue("Kill attempt count should be greater than 1 for managed AMs",
      killAttemptCount > 1);
  assertEquals("Incorrect number of apps in the RM", 1,
      rmService.getApplications(getRequest).getApplicationList().size());

  KillApplicationResponse killResponse2 =
      rmService.forceKillApplication(killRequest2);
  assertTrue("Killing UnmanagedAM should falsely acknowledge true",
      killResponse2.getIsKillCompleted());
  for (int i = 0; i < 100; i++) {
    if (2 ==
        rmService.getApplications(getRequest).getApplicationList().size()) {
      break;
    }
    Thread.sleep(10);
  }
  assertEquals("Incorrect number of apps in the RM", 2,
      rmService.getApplications(getRequest).getApplicationList().size());
}
 
Example 19
Source File: TestRMRestart.java    From hadoop with Apache License 2.0
@Test (timeout = 60000)
public void testRMRestartGetApplicationList() throws Exception {
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);

  // start RM
  MockRM rm1 = createMockRM(conf, memStore);
  rm1.start();
  MockNM nm1 =
      new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
  nm1.registerNode();

  // a succeeded app.
  RMApp app0 = rm1.submitApp(200, "name", "user", null,
    false, "default", 1, null, "myType");
  MockAM am0 = launchAM(app0, rm1, nm1);
  finishApplicationMaster(app0, rm1, nm1, am0);

  // a failed app.
  RMApp app1 = rm1.submitApp(200, "name", "user", null,
    false, "default", 1, null, "myType");
  MockAM am1 = launchAM(app1, rm1, nm1);
  // fail the AM by sending CONTAINER_FINISHED event without registering.
  nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
  am1.waitForState(RMAppAttemptState.FAILED);
  rm1.waitForState(app1.getApplicationId(), RMAppState.FAILED);

  // a killed app.
  RMApp app2 = rm1.submitApp(200, "name", "user", null,
    false, "default", 1, null, "myType");
  MockAM am2 = launchAM(app2, rm1, nm1);
  rm1.killApp(app2.getApplicationId());
  rm1.waitForState(app2.getApplicationId(), RMAppState.KILLED);
  rm1.waitForState(am2.getApplicationAttemptId(), RMAppAttemptState.KILLED);

  // restart rm

  MockRM rm2 = new MockRM(conf, memStore) {
    @Override
    protected RMAppManager createRMAppManager() {
      return spy(super.createRMAppManager());
    }
  };
  rms.add(rm2);
  rm2.start();

  GetApplicationsRequest request1 =
      GetApplicationsRequest.newInstance(EnumSet.of(
        YarnApplicationState.FINISHED, YarnApplicationState.KILLED,
        YarnApplicationState.FAILED));
  GetApplicationsResponse response1 =
      rm2.getClientRMService().getApplications(request1);
  List<ApplicationReport> appList1 = response1.getApplicationList();

  // assert all applications exist according to application state after RM
  // restarts.
  boolean forApp0 = false, forApp1 = false, forApp2 = false;
  for (ApplicationReport report : appList1) {
    if (report.getApplicationId().equals(app0.getApplicationId())) {
      Assert.assertEquals(YarnApplicationState.FINISHED,
        report.getYarnApplicationState());
      forApp0 = true;
    }
    if (report.getApplicationId().equals(app1.getApplicationId())) {
      Assert.assertEquals(YarnApplicationState.FAILED,
        report.getYarnApplicationState());
      forApp1 = true;
    }
    if (report.getApplicationId().equals(app2.getApplicationId())) {
      Assert.assertEquals(YarnApplicationState.KILLED,
        report.getYarnApplicationState());
      forApp2 = true;
    }
  }
  Assert.assertTrue(forApp0 && forApp1 && forApp2);

  // assert all applications exist according to application type after RM
  // restarts.
  Set<String> appTypes = new HashSet<String>();
  appTypes.add("myType");
  GetApplicationsRequest request2 =
      GetApplicationsRequest.newInstance(appTypes);
  GetApplicationsResponse response2 =
      rm2.getClientRMService().getApplications(request2);
  List<ApplicationReport> appList2 = response2.getApplicationList();
  Assert.assertTrue(3 == appList2.size());

  // check application summary is logged for the completed apps after RM restart.
  verify(rm2.getRMAppManager(), times(3)).logApplicationSummary(
    isA(ApplicationId.class));
}
 
Example 20
Source File: TestRM.java    From hadoop with Apache License 2.0
@Test (timeout = 80000)
public void testInvalidateAMHostPortWhenAMFailedOrKilled() throws Exception {
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
  MockRM rm1 = new MockRM(conf);
  rm1.start();

  // a succeeded app
  RMApp app1 = rm1.submitApp(200);
  MockNM nm1 =
      new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
  nm1.registerNode();
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
  MockRM.finishAMAndVerifyAppState(app1, rm1, nm1, am1);

  // a failed app
  RMApp app2 = rm1.submitApp(200);
  MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1);
  nm1.nodeHeartbeat(am2.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
  am2.waitForState(RMAppAttemptState.FAILED);
  rm1.waitForState(app2.getApplicationId(), RMAppState.FAILED);

  // a killed app
  RMApp app3 = rm1.submitApp(200);
  MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm1);
  rm1.killApp(app3.getApplicationId());
  rm1.waitForState(app3.getApplicationId(), RMAppState.KILLED);
  rm1.waitForState(am3.getApplicationAttemptId(), RMAppAttemptState.KILLED);

  GetApplicationsRequest request1 =
      GetApplicationsRequest.newInstance(EnumSet.of(
        YarnApplicationState.FINISHED, YarnApplicationState.KILLED,
        YarnApplicationState.FAILED));
  GetApplicationsResponse response1 =
      rm1.getClientRMService().getApplications(request1);
  List<ApplicationReport> appList1 = response1.getApplicationList();

  Assert.assertEquals(3, appList1.size());
  for (ApplicationReport report : appList1) {
    // killed/failed apps host and rpc port are invalidated.
    if (report.getApplicationId().equals(app2.getApplicationId())
        || report.getApplicationId().equals(app3.getApplicationId())) {
      Assert.assertEquals("N/A", report.getHost());
      Assert.assertEquals(-1, report.getRpcPort());
    }
    // succeeded app's host and rpc port is not invalidated
    if (report.getApplicationId().equals(app1.getApplicationId())) {
      Assert.assertFalse(report.getHost().equals("N/A"));
      Assert.assertTrue(report.getRpcPort() != -1);
    }
  }
}