Java Code Examples for org.apache.hadoop.util.ExitUtil#disableSystemExit()

The following examples show how to use org.apache.hadoop.util.ExitUtil#disableSystemExit(). Each example is drawn from an open-source project; the source file, project, and license are noted above each listing.
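Every example follows the same basic pattern: call ExitUtil.disableSystemExit() before exercising code that would otherwise terminate the JVM, catch the ExitUtil.ExitException that is thrown in place of a real exit, and call ExitUtil.resetFirstExitException() so later tests start from a clean state. The minimal, self-contained sketch below illustrates that pattern in isolation; the ExitUtilPatternSketch class is hypothetical, and the call to ExitUtil.terminate(...) merely stands in for production code that would otherwise exit the JVM.

import org.apache.hadoop.util.ExitUtil;

public class ExitUtilPatternSketch {
  public static void main(String[] args) {
    // After this call, an attempted exit throws ExitUtil.ExitException
    // instead of terminating the JVM.
    ExitUtil.disableSystemExit();
    try {
      // Stand-in for code under test that would normally exit the JVM.
      ExitUtil.terminate(1, "simulated fatal error");
    } catch (ExitUtil.ExitException e) {
      // The status that would have been passed to System.exit().
      System.out.println("intercepted exit status: " + e.status);
      // Clear the recorded first exit so subsequent tests are unaffected.
      ExitUtil.resetFirstExitException();
    }
  }
}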
Example 1
Source File: TestYarnUncaughtExceptionHandler.java    From hadoop with Apache License 2.0
/**
 * <p>
 * Throws an {@code Error} inside a thread and verifies that the
 * {@code YarnUncaughtExceptionHandler} instance handles it.
 * <p>
 * Uses the {@code ExitUtil} class to keep the JVM from exiting
 * via {@code System.exit(-1)}.
 *
 * @throws InterruptedException if the test thread is interrupted
 */
@Test
public void testUncaughtExceptionHandlerWithError()
    throws InterruptedException {
  ExitUtil.disableSystemExit();
  final YarnUncaughtExceptionHandler spyErrorHandler = spy(exHandler);
  final java.lang.Error error = new java.lang.Error("test-error");
  final Thread errorThread = new Thread(new Runnable() {
    @Override
    public void run() {
      throw error;
    }
  });
  errorThread.setUncaughtExceptionHandler(spyErrorHandler);
  assertSame(spyErrorHandler, errorThread.getUncaughtExceptionHandler());
  errorThread.start();
  errorThread.join();
  verify(spyErrorHandler).uncaughtException(errorThread, error);
}
 
Example 2
Source File: TestApplicationHistoryServer.java    From hadoop with Apache License 2.0
@Test(timeout = 60000)
public void testLaunch() throws Exception {
  ExitUtil.disableSystemExit();
  ApplicationHistoryServer historyServer = null;
  try {
    // The config of this test case cannot be modified;
    // the other test cases have been customized to avoid conflicts
    historyServer =
        ApplicationHistoryServer.launchAppHistoryServer(new String[0]);
  } catch (ExitUtil.ExitException e) {
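    // Launching should not exit the JVM; any ExitException here is a failure.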
    assertEquals(0, e.status);
    ExitUtil.resetFirstExitException();
    fail();
  } finally {
    if (historyServer != null) {
      historyServer.stop();
    }
  }
}
 
Example 3
Source File: TestApplicationHistoryServer.java    From hadoop with Apache License 2.0
@Test(timeout = 60000)
public void testLaunchWithArguments() throws Exception {
  ExitUtil.disableSystemExit();
  ApplicationHistoryServer historyServer = null;
  try {
    // The config of this test case cannot be modified;
    // the other test cases have been customized to avoid conflicts
    String[] args = new String[2];
    args[0] = "-D" + YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS + "=4000";
    args[1] = "-D" + YarnConfiguration.TIMELINE_SERVICE_TTL_MS + "=200";
    historyServer =
        ApplicationHistoryServer.launchAppHistoryServer(args);
    Configuration conf = historyServer.getConfig();
    assertEquals("4000", conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS));
    assertEquals("200", conf.get(YarnConfiguration.TIMELINE_SERVICE_TTL_MS));
  } catch (ExitUtil.ExitException e) {
    assertEquals(0, e.status);
    ExitUtil.resetFirstExitException();
    fail();
  } finally {
    if (historyServer != null) {
      historyServer.stop();
    }
  }
}
 
Example 4
Source File: TestClusterId.java    From big-c with Apache License 2.0
@Before
public void setUp() throws IOException {
  ExitUtil.disableSystemExit();

  String baseDir = PathUtils.getTestDirName(getClass());

  hdfsDir = new File(baseDir, "dfs/name");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete test directory '" + hdfsDir + "'");
  }
  LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());

  // some tests may change these values, so reset them to defaults
  // before every test
  StartupOption.FORMAT.setForceFormat(false);
  StartupOption.FORMAT.setInteractiveFormat(true);
  
  config = new Configuration();
  config.set(DFS_NAMENODE_NAME_DIR_KEY, hdfsDir.getPath());
}
 
Example 5
Source File: TestGridmixSubmission.java    From big-c with Apache License 2.0
@Test(timeout = 100000)
public void testMain() throws Exception {

  SecurityManager securityManager = System.getSecurityManager();

  final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  final PrintStream out = new PrintStream(bytes);
  final PrintStream oldErr = System.err;
  System.setErr(out);
  ExitUtil.disableSystemExit();
  try {
    String[] argv = new String[0];
    DebugGridmix.main(argv);

  } catch (ExitUtil.ExitException e) {
    assertEquals("ExitException", e.getMessage());
    ExitUtil.resetFirstExitException();
  } finally {
    System.setErr(oldErr);
    System.setSecurityManager(securityManager);
  }
  String print = bytes.toString();
  // the usage tip should have been printed to the standard error stream
  assertTrue(print
          .contains("Usage: gridmix [-generate <MiB>] [-users URI] [-Dname=value ...] <iopath> <trace>"));
  assertTrue(print.contains("e.g. gridmix -generate 100m foo -"));
}
 
Example 6
Source File: TestJobHistoryServer.java    From big-c with Apache License 2.0
@Test(timeout = 60000)
public void testLaunch() throws Exception {

  ExitUtil.disableSystemExit();
  try {
    historyServer = JobHistoryServer.launchJobHistoryServer(new String[0]);
  } catch (ExitUtil.ExitException e) {
    assertEquals(0, e.status);
    ExitUtil.resetFirstExitException();
    fail();
  }
}
 
Example 7
Source File: TestRMDelegationTokens.java    From big-c with Apache License 2.0
@Before
public void setup() {
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  ExitUtil.disableSystemExit();
  conf = new YarnConfiguration();
  UserGroupInformation.setConfiguration(conf);
  conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
  conf.set(YarnConfiguration.RM_SCHEDULER, FairScheduler.class.getName());
}
 
Example 8
Source File: MiniDFSCluster.java    From hadoop with Apache License 2.0
private void initMiniDFSCluster(
    Configuration conf,
    int numDataNodes, StorageType[][] storageTypes, boolean format, boolean manageNameDfsDirs,
    boolean manageNameDfsSharedDirs, boolean enableManagedDfsDirsRedundancy,
    boolean manageDataDfsDirs, StartupOption startOpt,
    StartupOption dnStartOpt, String[] racks,
    String[] hosts,
    long[][] storageCapacities, long[] simulatedCapacities, String clusterId,
    boolean waitSafeMode, boolean setupHostsFile,
    MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown,
    boolean checkDataNodeAddrConfig,
    boolean checkDataNodeHostConfig,
    Configuration[] dnConfOverlays,
    boolean skipFsyncForTesting)
throws IOException {
  boolean success = false;
  try {
    ExitUtil.disableSystemExit();

    // Re-enable symlinks for tests, see HADOOP-10020 and HADOOP-10052
    FileSystem.enableSymlinks();

    synchronized (MiniDFSCluster.class) {
      instanceId = instanceCount++;
    }

    this.conf = conf;
    base_dir = new File(determineDfsBaseDir());
    data_dir = new File(base_dir, "data");
    this.waitSafeMode = waitSafeMode;
    this.checkExitOnShutdown = checkExitOnShutdown;
  
    int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
    conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
    int safemodeExtension = conf.getInt(
        DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
    conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
    conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 seconds
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, 
                   StaticMapping.class, DNSToSwitchMapping.class);
  
    // In an HA cluster, in order for the StandbyNode to perform checkpoints,
    // it needs to know the HTTP port of the Active. So, if ephemeral ports
    // are chosen, disable checkpoints for the test.
    if (!nnTopology.allHttpPortsSpecified() &&
        nnTopology.isHA()) {
      LOG.info("MiniDFSCluster disabling checkpointing in the Standby node " +
          "since no HTTP ports have been specified.");
      conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
    }
    if (!nnTopology.allIpcPortsSpecified() &&
        nnTopology.isHA()) {
      LOG.info("MiniDFSCluster disabling log-roll triggering in the "
          + "Standby node since no IPC ports have been specified.");
      conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
    }

    EditLogFileOutputStream.setShouldSkipFsyncForTesting(skipFsyncForTesting);
  
    federation = nnTopology.isFederated();
    try {
      createNameNodesAndSetConf(
          nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
          enableManagedDfsDirsRedundancy,
          format, startOpt, clusterId, conf);
    } catch (IOException ioe) {
      LOG.error("IOE creating namenodes. Permissions dump:\n" +
          createPermissionsDiagnosisString(data_dir), ioe);
      throw ioe;
    }
    if (format) {
      if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
        throw new IOException("Cannot remove data directory: " + data_dir +
            createPermissionsDiagnosisString(data_dir));
      }
    }
  
    if (startOpt == StartupOption.RECOVER) {
      return;
    }

    // Start the DataNodes
    startDataNodes(conf, numDataNodes, storageTypes, manageDataDfsDirs,
        dnStartOpt != null ? dnStartOpt : startOpt,
        racks, hosts, storageCapacities, simulatedCapacities, setupHostsFile,
        checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
    waitClusterUp();
    // make sure ProxyUsers uses the latest conf
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
    success = true;
  } finally {
    if (!success) {
      shutdown();
    }
  }
}
 
Example 9
Source File: TestTools.java    From hadoop with Apache License 2.0
@BeforeClass
public static void before() {
  ExitUtil.disableSystemExit();
  OPTIONS[1] = INVALID_OPTION;
}
 