Java Code Examples for org.apache.hadoop.yarn.conf.YarnConfiguration#set()

The following examples show how to use org.apache.hadoop.yarn.conf.YarnConfiguration#set(). Each example notes the project it comes from and the license it is distributed under.
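Before the project-specific examples, here is a minimal, self-contained sketch of the call in isolation: YarnConfiguration extends org.apache.hadoop.conf.Configuration, so set(key, value) stores a plain string property that later lookups (get, getInt, getSocketAddr, ...) resolve, overriding defaults loaded from yarn-site.xml. The property values and hostname below are placeholders chosen for illustration, not taken from any of the projects listed.

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class YarnConfigurationSetExample {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();

    // Placeholder values: point the client at a ResourceManager address and
    // select the CapacityScheduler. Programmatic set() calls take precedence
    // over values read from yarn-site.xml.
    conf.set(YarnConfiguration.RM_ADDRESS, "rm.example.com:8032");
    conf.set(YarnConfiguration.RM_SCHEDULER,
        "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler");

    // Read the value back through the generic Configuration API.
    System.out.println(conf.get(YarnConfiguration.RM_ADDRESS));
  }
}
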
Example 1
Source File: TestMaster.java    From hadoop with Apache License 2.0
@Test 
public void testGetMasterUser() {
  YarnConfiguration conf = new YarnConfiguration();
  conf.set(MRConfig.MASTER_USER_NAME, "foo");
  conf.set(YarnConfiguration.RM_PRINCIPAL, "bar");

  // default is yarn framework  
  assertEquals(Master.getMasterUserName(conf), "bar");

  // set framework name to classic
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
  assertEquals(Master.getMasterUserName(conf), "foo");

  // change framework to yarn
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  assertEquals(Master.getMasterUserName(conf), "bar");

}
 
Example 2
Source File: TestLocalCacheDirectoryManager.java    From big-c with Apache License 2.0
@Test(timeout = 1000)
public void testDirectoryStateChangeFromFullToNonFull() {
  YarnConfiguration conf = new YarnConfiguration();
  conf.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY, "40");
  LocalCacheDirectoryManager dir = new LocalCacheDirectoryManager(conf);

  // checking for first four paths
  String rootPath = "";
  String firstSubDir = "0";
  for (int i = 0; i < 4; i++) {
    Assert.assertEquals(rootPath, dir.getRelativePathForLocalization());
  }
  // Releasing two files from the root directory.
  dir.decrementFileCountForPath(rootPath);
  dir.decrementFileCountForPath(rootPath);
  // Space for two files should be available in root directory.
  Assert.assertEquals(rootPath, dir.getRelativePathForLocalization());
  Assert.assertEquals(rootPath, dir.getRelativePathForLocalization());
  // As no space is now available in the root directory, the next path should
  // come from the first sub-directory
  Assert.assertEquals(firstSubDir, dir.getRelativePathForLocalization());
}
 
Example 3
Source File: TestYarnController.java    From dremio-oss with Apache License 2.0
@Test
public void testEmptyYarnClasspath() throws Exception {
  assumeNonMaprProfile();
  DacDaemonYarnApplication.Environment myEnv = new DacDaemonYarnApplication.Environment() {
    @Override
    public String getEnv(String name) {
      return tempDir.getRoot().toString();
    }
  };

  YarnConfiguration yarnConfiguration = createYarnConfig("resource-manager", "hdfs://name-node:8020");
  yarnConfiguration.set(YarnDefaultsConfigurator.CLASSPATH_JARS, YarnDefaultsConfigurator.MapRYarnDefaults.getAppClassPath());
  DacDaemonYarnApplication dacDaemonApp = new DacDaemonYarnApplication(DremioConfig.create(),
    yarnConfiguration, myEnv);

  List<String> names = dacDaemonApp.getJarNames();
  assertFalse(names.isEmpty());
  assertEquals(2, names.size());

  String flatNames = names.toString();
  assertTrue(flatNames.contains(SHIM_LOADER_NAME));
  assertTrue(flatNames.contains("dremio-maprfs-shaded-5.1.0-mapr.jar"));
}
 
Example 4
Source File: TestLocalCacheDirectoryManager.java    From big-c with Apache License 2.0
@Test(timeout = 10000)
public void testMinimumPerDirectoryFileLimit() {
  YarnConfiguration conf = new YarnConfiguration();
  conf.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY, "1");
  Exception e = null;
  NMContext nmContext =
      new NMContext(new NMContainerTokenSecretManager(conf),
        new NMTokenSecretManagerInNM(), null,
        new ApplicationACLsManager(conf), new NMNullStateStoreService(), null);
  ResourceLocalizationService service =
      new ResourceLocalizationService(null, null, null, null, nmContext);
  try {
    service.init(conf);
  } catch (Exception e1) {
    e = e1;
  }
  Assert.assertNotNull(e);
  Assert.assertEquals(YarnRuntimeException.class, e.getClass());
  Assert.assertEquals(e.getMessage(),
    YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY
        + " parameter is configured with a value less than 37.");

}
 
Example 5
Source File: AbstractZKRegistryTest.java    From big-c with Apache License 2.0
public YarnConfiguration createRegistryConfiguration() {
  YarnConfiguration conf = new YarnConfiguration();
  conf.setInt(RegistryConstants.KEY_REGISTRY_ZK_CONNECTION_TIMEOUT, 1000);
  conf.setInt(RegistryConstants.KEY_REGISTRY_ZK_RETRY_INTERVAL, 500);
  conf.setInt(RegistryConstants.KEY_REGISTRY_ZK_RETRY_TIMES, 10);
  conf.setInt(RegistryConstants.KEY_REGISTRY_ZK_RETRY_CEILING, 10);
  conf.set(RegistryConstants.KEY_REGISTRY_ZK_QUORUM,
      zookeeper.getConnectionString());
  return conf;
}
 
Example 6
Source File: ParameterizedSchedulerTestBase.java    From big-c with Apache License 2.0
@Before
public void configureScheduler() throws IOException {
  conf = new YarnConfiguration();
  switch (schedulerType) {
    case CAPACITY:
      conf.set(YarnConfiguration.RM_SCHEDULER,
          CapacityScheduler.class.getName());
      break;
    case FAIR:
      configureFairScheduler(conf);
      break;
  }
}
 
Example 7
Source File: TestMapReduceTrackingUriPlugin.java    From hadoop with Apache License 2.0
@Test
public void testProducesHistoryServerUriForAppId() throws URISyntaxException {
  final String historyAddress = "example.net:424242";
  YarnConfiguration conf = new YarnConfiguration();
  conf.set(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS, historyAddress);
  MapReduceTrackingUriPlugin plugin = new MapReduceTrackingUriPlugin();
  plugin.setConf(conf);
  ApplicationId id = ApplicationId.newInstance(6384623L, 5);
  String jobSuffix = id.toString().replaceFirst("^application_", "job_");
  URI expected =
      new URI("http://" + historyAddress + "/jobhistory/job/" + jobSuffix);
  URI actual = plugin.getTrackingUri(id);
  assertEquals(expected, actual);
}
 
Example 8
Source File: TestPrivilegedOperationExecutor.java    From hadoop with Apache License 2.0
@Before
public void setup() {
  localDataDir = System.getProperty("test.build.data");
  customExecutorPath = localDataDir + "/bin/container-executor";
  emptyConf = new YarnConfiguration();
  confWithExecutorPath = new YarnConfiguration();
  confWithExecutorPath.set(YarnConfiguration
      .NM_LINUX_CONTAINER_EXECUTOR_PATH, customExecutorPath);

  cGroupTasksNone = "none";
  cGroupTasksInvalid = "invalid_string";
  cGroupTasks1 = "cpu/hadoop_yarn/container_01/tasks";
  cGroupTasks2 = "net_cls/hadoop_yarn/container_01/tasks";
  cGroupTasks3 = "blkio/hadoop_yarn/container_01/tasks";
  opDisallowed = new PrivilegedOperation
      (PrivilegedOperation.OperationType.DELETE_AS_USER);
  opTasksNone = new PrivilegedOperation
      (PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP,
          PrivilegedOperation.CGROUP_ARG_PREFIX + cGroupTasksNone);
  opTasksInvalid = new PrivilegedOperation
      (PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP,
          cGroupTasksInvalid);
  opTasks1 = new PrivilegedOperation
      (PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP,
          PrivilegedOperation.CGROUP_ARG_PREFIX + cGroupTasks1);
  opTasks2 = new PrivilegedOperation
      (PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP,
          PrivilegedOperation.CGROUP_ARG_PREFIX + cGroupTasks2);
  opTasks3 = new PrivilegedOperation
      (PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP,
          PrivilegedOperation.CGROUP_ARG_PREFIX + cGroupTasks3);
}
 
Example 9
Source File: TestRMDelegationTokens.java    From hadoop with Apache License 2.0
@Before
public void setup() {
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  ExitUtil.disableSystemExit();
  conf = new YarnConfiguration();
  UserGroupInformation.setConfiguration(conf);
  conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
  conf.set(YarnConfiguration.RM_SCHEDULER, FairScheduler.class.getName());
}
 
Example 10
Source File: TestZKRMStateStoreZKClientConnections.java    From big-c with Apache License 2.0
@Test(timeout = 20000)
public void testSetZKAcl() {
  TestZKClient zkClientTester = new TestZKClient();
  YarnConfiguration conf = new YarnConfiguration();
  conf.set(YarnConfiguration.RM_ZK_ACL, "world:anyone:rwca");
  try {
    zkClientTester.store.zkClient.delete(zkClientTester.store
        .znodeWorkingPath, -1);
    fail("Shouldn't be able to delete path");
  } catch (Exception e) {/* expected behavior */
  }
}
 
Example 11
Source File: TestRMNMSecretKeys.java    From hadoop with Apache License 2.0
@Test(timeout = 1000000)
public void testNMUpdation() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  // validating RM-NM keys for an unsecured environment
  validateRMNMKeyExchange(conf);
  
  // validating RM-NM keys for a secured (Kerberos) environment
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  UserGroupInformation.setConfiguration(conf);
  validateRMNMKeyExchange(conf);
}
 
Example 12
Source File: TestRMAdminService.java    From big-c with Apache License 2.0
@Test
public void testAdminAclsWithFileSystemBasedConfigurationProvider()
    throws IOException, YarnException {
  configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
      "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");

  //upload default configurations
  uploadDefaultConfiguration();

  try {
    rm = new MockRM(configuration);
    rm.init(configuration);
    rm.start();
  } catch(Exception ex) {
    fail("Should not get any exceptions");
  }

  String aclStringBefore =
      rm.adminService.getAccessControlList().getAclString().trim();

  YarnConfiguration yarnConf = new YarnConfiguration();
  yarnConf.set(YarnConfiguration.YARN_ADMIN_ACL, "world:anyone:rwcda");
  uploadConfiguration(yarnConf, "yarn-site.xml");

  rm.adminService.refreshAdminAcls(RefreshAdminAclsRequest.newInstance());

  String aclStringAfter =
      rm.adminService.getAccessControlList().getAclString().trim();

  Assert.assertTrue(!aclStringAfter.equals(aclStringBefore));
  Assert.assertEquals(aclStringAfter, "world:anyone:rwcda,"
      + UserGroupInformation.getCurrentUser().getShortUserName());
}
 
Example 13
Source File: TestLeveldbTimelineStateStore.java    From hadoop with Apache License 2.0
@Before
public void setup() throws Exception {
  fsPath = new File("target", getClass().getSimpleName() +
      "-tmpDir").getAbsoluteFile();
  fsContext = FileContext.getLocalFSFileContext();
  fsContext.delete(new Path(fsPath.getAbsolutePath()), true);
  conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_RECOVERY_ENABLED, true);
  conf.setClass(YarnConfiguration.TIMELINE_SERVICE_STATE_STORE_CLASS,
      LeveldbTimelineStateStore.class,
      TimelineStateStore.class);
  conf.set(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_STATE_STORE_PATH,
      fsPath.getAbsolutePath());
}
 
Example 14
Source File: TestCommitterEventHandler.java    From big-c with Apache License 2.0
@Test
public void testBasic() throws Exception {
  AppContext mockContext = mock(AppContext.class);
  OutputCommitter mockCommitter = mock(OutputCommitter.class);
  Clock mockClock = mock(Clock.class);
  
  CommitterEventHandler handler = new CommitterEventHandler(mockContext, 
      mockCommitter, new TestingRMHeartbeatHandler());
  YarnConfiguration conf = new YarnConfiguration();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  JobContext mockJobContext = mock(JobContext.class);
  ApplicationAttemptId attemptid = 
    ConverterUtils.toApplicationAttemptId("appattempt_1234567890000_0001_0");
  JobId jobId =  TypeConverter.toYarn(
      TypeConverter.fromYarn(attemptid.getApplicationId()));
  
  WaitForItHandler waitForItHandler = new WaitForItHandler();
  
  when(mockContext.getApplicationID()).thenReturn(attemptid.getApplicationId());
  when(mockContext.getApplicationAttemptId()).thenReturn(attemptid);
  when(mockContext.getEventHandler()).thenReturn(waitForItHandler);
  when(mockContext.getClock()).thenReturn(mockClock);
  
  handler.init(conf);
  handler.start();
  try {
    handler.handle(new CommitterJobCommitEvent(jobId, mockJobContext));

    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    Path startCommitFile = MRApps.getStartJobCommitFile(conf, user, jobId);
    Path endCommitSuccessFile = MRApps.getEndJobCommitSuccessFile(conf, user, 
        jobId);
    Path endCommitFailureFile = MRApps.getEndJobCommitFailureFile(conf, user, 
        jobId);

    Event e = waitForItHandler.getAndClearEvent();
    assertNotNull(e);
    assertTrue(e instanceof JobCommitCompletedEvent);
    FileSystem fs = FileSystem.get(conf);
    assertTrue(startCommitFile.toString(), fs.exists(startCommitFile));
    assertTrue(endCommitSuccessFile.toString(), fs.exists(endCommitSuccessFile));
    assertFalse(endCommitFailureFile.toString(), fs.exists(endCommitFailureFile));
    verify(mockCommitter).commitJob(any(JobContext.class));
  } finally {
    handler.stop();
  }
}
 
Example 15
Source File: Hadoop23YarnAppClient.java    From twill with Apache License 2.0
/**
 * Overrides the parent method to add an RM delegation token to the given context. If YARN is running with
 * an HA RM, delegation tokens for each RM service will be added.
 */
protected void addRMToken(ContainerLaunchContext context, YarnClient yarnClient, ApplicationId appId) {
  if (!UserGroupInformation.isSecurityEnabled()) {
    return;
  }

  try {
    Text renewer = new Text(UserGroupInformation.getCurrentUser().getShortUserName());
    org.apache.hadoop.yarn.api.records.Token rmDelegationToken = yarnClient.getRMDelegationToken(renewer);

    // The following logic is copied from ClientRMProxy.getRMDelegationTokenService, which is not available
    // in YARN versions older than 2.4
    List<String> services = new ArrayList<>();
    if (HAUtil.isHAEnabled(configuration)) {
      // If HA is enabled, we need to enumerate all RM hosts
      // and add the corresponding service name to the token service
      // Copy the yarn conf since we need to modify it to get the RM addresses
      YarnConfiguration yarnConf = new YarnConfiguration(configuration);
      for (String rmId : HAUtil.getRMHAIds(configuration)) {
        yarnConf.set(YarnConfiguration.RM_HA_ID, rmId);
        InetSocketAddress address = yarnConf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
                                                           YarnConfiguration.DEFAULT_RM_ADDRESS,
                                                           YarnConfiguration.DEFAULT_RM_PORT);
        services.add(SecurityUtil.buildTokenService(address).toString());
      }
    } else {
      services.add(SecurityUtil.buildTokenService(YarnUtils.getRMAddress(configuration)).toString());
    }

    Credentials credentials = YarnUtils.decodeCredentials(context.getTokens());

    // casting needed for later Hadoop version
    @SuppressWarnings("RedundantCast")
    Token<TokenIdentifier> token = ConverterUtils.convertFromYarn(rmDelegationToken, (InetSocketAddress) null);

    token.setService(new Text(Joiner.on(',').join(services)));
    credentials.addToken(new Text(token.getService()), token);

    LOG.debug("Added RM delegation token {} for application {}", token, appId);
    credentials.addToken(token.getService(), token);

    context.setTokens(YarnUtils.encodeCredentials(credentials));

  } catch (Exception e) {
    throw Throwables.propagate(e);
  }
}
 
Example 16
Source File: TestAMRestart.java    From big-c with Apache License 2.0
@Test(timeout = 50000)
public void testRMRestartOrFailoverNotCountedForAMFailures()
    throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
    ResourceScheduler.class);
  conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
  conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, false);

  conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
  // explicitly set max-am-retry count as 1.
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);

  MockRM rm1 = new MockRM(conf, memStore);
  rm1.start();
  MockNM nm1 =
      new MockNM("127.0.0.1:1234", 8000, rm1.getResourceTrackerService());
  nm1.registerNode();
  RMApp app1 = rm1.submitApp(200);
  // AM should be restarted even though max-am-attempt is 1.
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  Assert.assertTrue(((RMAppAttemptImpl) attempt1).mayBeLastAttempt());

  // Restart rm.
  MockRM rm2 = new MockRM(conf, memStore);
  rm2.start();
  ApplicationStateData appState =
      memStore.getState().getApplicationState().get(app1.getApplicationId());
  // re-register the NM
  nm1.setResourceTrackerService(rm2.getResourceTrackerService());
  NMContainerStatus status = Records.newRecord(NMContainerStatus.class);
  status
    .setContainerExitStatus(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER);
  status.setContainerId(attempt1.getMasterContainer().getId());
  status.setContainerState(ContainerState.COMPLETE);
  status.setDiagnostics("");
  nm1.registerNode(Collections.singletonList(status), null);

  rm2.waitForState(attempt1.getAppAttemptId(), RMAppAttemptState.FAILED);
  Assert.assertEquals(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER,
    appState.getAttempt(am1.getApplicationAttemptId())
      .getAMContainerExitStatus());
  // Will automatically start a new AppAttempt in rm2
  rm2.waitForState(app1.getApplicationId(), RMAppState.ACCEPTED);
  MockAM am2 =
      rm2.waitForNewAMToLaunchAndRegister(app1.getApplicationId(), 2, nm1);
  MockRM.finishAMAndVerifyAppState(app1, rm2, nm1, am2);
  RMAppAttempt attempt3 =
      rm2.getRMContext().getRMApps().get(app1.getApplicationId())
        .getCurrentAppAttempt();
  Assert.assertTrue(attempt3.shouldCountTowardsMaxAttemptRetry());
  Assert.assertEquals(ContainerExitStatus.INVALID,
    appState.getAttempt(am2.getApplicationAttemptId())
      .getAMContainerExitStatus());

  rm1.stop();
  rm2.stop();
}
 
Example 17
Source File: TestNodeStatusUpdater.java    From big-c with Apache License 2.0
@Test
public void testRMVersionLessThanMinimum() throws InterruptedException {
  final AtomicInteger numCleanups = new AtomicInteger(0);
  YarnConfiguration conf = createNMConfig();
  conf.set(YarnConfiguration.NM_RESOURCEMANAGER_MINIMUM_VERSION, "3.0.0");
  nm = new NodeManager() {
    @Override
    protected NodeStatusUpdater createNodeStatusUpdater(Context context,
                                                        Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
      MyNodeStatusUpdater myNodeStatusUpdater = new MyNodeStatusUpdater(
          context, dispatcher, healthChecker, metrics);
      MyResourceTracker2 myResourceTracker2 = new MyResourceTracker2();
      myResourceTracker2.heartBeatNodeAction = NodeAction.NORMAL;
      myResourceTracker2.rmVersion = "3.0.0";
      myNodeStatusUpdater.resourceTracker = myResourceTracker2;
      return myNodeStatusUpdater;
    }

    @Override
    protected ContainerManagerImpl createContainerManager(Context context,
        ContainerExecutor exec, DeletionService del,
        NodeStatusUpdater nodeStatusUpdater,
        ApplicationACLsManager aclsManager,
        LocalDirsHandlerService dirsHandler) {
      return new ContainerManagerImpl(context, exec, del, nodeStatusUpdater,
          metrics, aclsManager, dirsHandler) {

        @Override
        public void cleanUpApplicationsOnNMShutDown() {
          super.cleanUpApplicationsOnNMShutDown();
          numCleanups.incrementAndGet();
        }
      };
    }
  };

  nm.init(conf);
  nm.start();

  // NM takes a while to reach the STARTED state.
  int waitCount = 0;
  while (nm.getServiceState() != STATE.STARTED && waitCount++ != 20) {
    LOG.info("Waiting for NM to stop..");
    Thread.sleep(1000);
  }
  Assert.assertTrue(nm.getServiceState() == STATE.STARTED);
  nm.stop();
}
 
Example 18
Source File: HBaseServerTestInstance.java    From Halyard with Apache License 2.0
public static synchronized Configuration getInstanceConfig() throws Exception {
    if (conf == null) {
        File zooRoot = File.createTempFile("hbase-zookeeper", "");
        zooRoot.delete();
        ZooKeeperServer zookeeper = new ZooKeeperServer(zooRoot, zooRoot, 2000);
        ServerCnxnFactory factory = ServerCnxnFactory.createFactory(new InetSocketAddress("localhost", 0), 5000);
        factory.startup(zookeeper);

        YarnConfiguration yconf = new YarnConfiguration();
        String argLine = System.getProperty("argLine");
        if (argLine != null) {
            yconf.set("yarn.app.mapreduce.am.command-opts", argLine.replace("jacoco.exec", "jacocoMR.exec"));
        }
        yconf.setBoolean(MRConfig.MAPREDUCE_MINICLUSTER_CONTROL_RESOURCE_MONITORING, false);
        yconf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
        MiniMRYarnCluster miniCluster = new MiniMRYarnCluster("testCluster");
        miniCluster.init(yconf);
        String resourceManagerLink = yconf.get(YarnConfiguration.RM_ADDRESS);
        yconf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, true);
        miniCluster.start();
        miniCluster.waitForNodeManagersToConnect(10000);
        // following condition set in MiniYarnCluster:273
        while (resourceManagerLink.endsWith(":0")) {
            Thread.sleep(100);
            resourceManagerLink = yconf.get(YarnConfiguration.RM_ADDRESS);
        }

        File hbaseRoot = File.createTempFile("hbase-root", "");
        hbaseRoot.delete();
        conf = HBaseConfiguration.create(miniCluster.getConfig());
        conf.set(HConstants.HBASE_DIR, hbaseRoot.toURI().toURL().toString());
        conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, factory.getLocalPort());
        conf.set("hbase.master.hostname", "localhost");
        conf.set("hbase.regionserver.hostname", "localhost");
        conf.setInt("hbase.master.info.port", -1);
        conf.set("hbase.fs.tmp.dir", new File(System.getProperty("java.io.tmpdir")).toURI().toURL().toString());
        LocalHBaseCluster cluster = new LocalHBaseCluster(conf);
        cluster.startup();
    }
    return new Configuration(conf);
}
 
Example 19
Source File: TestRMRestart.java    From big-c with Apache License 2.0
@Test (timeout = 60000)
public void testDecomissionedNMsMetricsOnRMRestart() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  conf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH,
    hostFile.getAbsolutePath());
  writeToHostsFile("");
  final DrainDispatcher dispatcher = new DrainDispatcher();
  MockRM rm1 = null, rm2 = null;
  try {
    rm1 = new MockRM(conf) {
      @Override
      protected Dispatcher createDispatcher() {
        return dispatcher;
      }
    };
    rm1.start();
    MockNM nm1 = rm1.registerNode("localhost:1234", 8000);
    MockNM nm2 = rm1.registerNode("host2:1234", 8000);
    Assert.assertEquals(0, ClusterMetrics.getMetrics().getNumDecommisionedNMs());
    String ip = NetUtils.normalizeHostName("localhost");
    // Add 2 hosts to exclude list.
    writeToHostsFile("host2", ip);

    // refresh nodes
    rm1.getNodesListManager().refreshNodes(conf);
    NodeHeartbeatResponse nodeHeartbeat = nm1.nodeHeartbeat(true);
    Assert.assertTrue(NodeAction.SHUTDOWN.equals(nodeHeartbeat.getNodeAction()));
    nodeHeartbeat = nm2.nodeHeartbeat(true);
    Assert.assertTrue("The decommissioned metrics are not updated",
        NodeAction.SHUTDOWN.equals(nodeHeartbeat.getNodeAction()));

    dispatcher.await();
    Assert.assertEquals(2, ClusterMetrics.getMetrics().getNumDecommisionedNMs());
    rm1.stop();
    rm1 = null;
    Assert.assertEquals(0, ClusterMetrics.getMetrics().getNumDecommisionedNMs());

    // restart RM.
    rm2 = new MockRM(conf);
    rm2.start();
    Assert.assertEquals(2, ClusterMetrics.getMetrics().getNumDecommisionedNMs());
  } finally {
    if (rm1 != null) {
      rm1.stop();
    }
    if (rm2 != null) {
      rm2.stop();
    }
  }
}