Java Code Examples for org.apache.hadoop.util.ExitUtil#ExitException

The following examples show how to use org.apache.hadoop.util.ExitUtil#ExitException. You can go to the original project or source file by following the links above each example.
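All of the examples below follow the same basic pattern: ExitUtil.disableSystemExit() is called first so that code which would otherwise terminate the JVM through ExitUtil.terminate() throws an ExitUtil.ExitException instead; the test then catches that exception, inspects its status field, and calls ExitUtil.resetFirstExitException() so that later tests start from a clean state. The minimal sketch below illustrates the pattern in isolation; the ExitUtil.terminate() call is only a stand-in for whatever code under test would normally trigger the exit.

import org.apache.hadoop.util.ExitUtil;

public class ExitExceptionPatternSketch {
  public static void main(String[] args) {
    // Make ExitUtil.terminate() throw ExitException instead of calling System.exit().
    ExitUtil.disableSystemExit();
    try {
      // Stand-in for the code under test (e.g. a server launcher that exits on startup).
      ExitUtil.terminate(0, "simulated exit");
    } catch (ExitUtil.ExitException e) {
      // The intercepted exit status is recorded on the exception.
      System.out.println("caught exit with status " + e.status);
    } finally {
      // Clear the recorded exception so subsequent tests are not affected.
      ExitUtil.resetFirstExitException();
    }
  }
}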
Example 1
Source File: TestApplicationHistoryServer.java    From hadoop with Apache License 2.0
@Test(timeout = 60000)
public void testLaunch() throws Exception {
  ExitUtil.disableSystemExit();
  ApplicationHistoryServer historyServer = null;
  try {
    // The config of this test case cannot be modified,
    // but the other test cases have been customized to avoid conflicts
    historyServer =
        ApplicationHistoryServer.launchAppHistoryServer(new String[0]);
  } catch (ExitUtil.ExitException e) {
    assertEquals(0, e.status);
    ExitUtil.resetFirstExitException();
    fail();
  } finally {
    if (historyServer != null) {
      historyServer.stop();
    }
  }
}
 
Example 2
Source File: TestApplicationHistoryServer.java    From hadoop with Apache License 2.0
@Test(timeout = 60000)
public void testLaunchWithArguments() throws Exception {
  ExitUtil.disableSystemExit();
  ApplicationHistoryServer historyServer = null;
  try {
    // The config of this test case cannot be modified,
    // but the other test cases have been customized to avoid conflicts
    String[] args = new String[2];
    args[0]="-D" + YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS + "=4000";
    args[1]="-D" + YarnConfiguration.TIMELINE_SERVICE_TTL_MS + "=200";
    historyServer =
        ApplicationHistoryServer.launchAppHistoryServer(args);
    Configuration conf = historyServer.getConfig();
    assertEquals("4000", conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS));
    assertEquals("200", conf.get(YarnConfiguration.TIMELINE_SERVICE_TTL_MS));
  } catch (ExitUtil.ExitException e) {
    assertEquals(0, e.status);
    ExitUtil.resetFirstExitException();
    fail();
  } finally {
    if (historyServer != null) {
      historyServer.stop();
    }
  }
}
 
Example 3
Source File: TestApplicationHistoryServer.java    From big-c with Apache License 2.0
@Test(timeout = 60000)
public void testLaunch() throws Exception {
  ExitUtil.disableSystemExit();
  ApplicationHistoryServer historyServer = null;
  try {
    // The config of this test case cannot be modified,
    // but the other test cases have been customized to avoid conflicts
    historyServer =
        ApplicationHistoryServer.launchAppHistoryServer(new String[0]);
  } catch (ExitUtil.ExitException e) {
    assertEquals(0, e.status);
    ExitUtil.resetFirstExitException();
    fail();
  } finally {
    if (historyServer != null) {
      historyServer.stop();
    }
  }
}
 
Example 4
Source File: TestApplicationHistoryServer.java    From big-c with Apache License 2.0
@Test(timeout = 60000)
public void testLaunchWithArguments() throws Exception {
  ExitUtil.disableSystemExit();
  ApplicationHistoryServer historyServer = null;
  try {
    // The config of this test case cannot be modified,
    // but the other test cases have been customized to avoid conflicts
    String[] args = new String[2];
    args[0]="-D" + YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS + "=4000";
    args[1]="-D" + YarnConfiguration.TIMELINE_SERVICE_TTL_MS + "=200";
    historyServer =
        ApplicationHistoryServer.launchAppHistoryServer(args);
    Configuration conf = historyServer.getConfig();
    assertEquals("4000", conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS));
    assertEquals("200", conf.get(YarnConfiguration.TIMELINE_SERVICE_TTL_MS));
  } catch (ExitUtil.ExitException e) {
    assertEquals(0, e.status);
    ExitUtil.resetFirstExitException();
    fail();
  } finally {
    if (historyServer != null) {
      historyServer.stop();
    }
  }
}
 
Example 5
Source File: TestJobHistoryServer.java    From hadoop with Apache License 2.0
@Test(timeout = 60000)
public void testLaunch() throws Exception {

  ExitUtil.disableSystemExit();
  try {
    historyServer = JobHistoryServer.launchJobHistoryServer(new String[0]);
  } catch (ExitUtil.ExitException e) {
    assertEquals(0, e.status);
    ExitUtil.resetFirstExitException();
    fail();
  }
}
 
Example 6
Source File: TestGridmixSubmission.java    From hadoop with Apache License 2.0
@Test(timeout = 100000)
public void testMain() throws Exception {

  SecurityManager securityManager = System.getSecurityManager();

  final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  final PrintStream out = new PrintStream(bytes);
  final PrintStream oldErr = System.err;
  System.setErr(out);
  ExitUtil.disableSystemExit();
  try {
    String[] argv = new String[0];
    DebugGridmix.main(argv);

  } catch (ExitUtil.ExitException e) {
    assertEquals("ExitException", e.getMessage());
    ExitUtil.resetFirstExitException();
  } finally {
    System.setErr(oldErr);
    System.setSecurityManager(securityManager);
  }
  String print = bytes.toString();
  // the usage tip should have been printed to the standard error stream
  assertTrue(print
          .contains("Usage: gridmix [-generate <MiB>] [-users URI] [-Dname=value ...] <iopath> <trace>"));
  assertTrue(print.contains("e.g. gridmix -generate 100m foo -"));
}
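
The Gridmix example above additionally redirects System.err into a ByteArrayOutputStream so the test can assert on the usage message that DebugGridmix.main() prints before exiting. The following minimal, self-contained sketch shows that capture-and-restore pattern on its own; the usage line and the ExitUtil.terminate() call are simulated stand-ins for the real DebugGridmix.main().

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

import org.apache.hadoop.util.ExitUtil;

public class StderrCaptureSketch {
  public static void main(String[] args) {
    // Keep a reference to the real stderr so it can be restored afterwards.
    final PrintStream originalErr = System.err;
    final ByteArrayOutputStream captured = new ByteArrayOutputStream();
    System.setErr(new PrintStream(captured));
    ExitUtil.disableSystemExit();
    try {
      // Simulated stand-in for DebugGridmix.main(): print usage, then exit.
      System.err.println("Usage: tool [options]");
      ExitUtil.terminate(1, "usage printed");
    } catch (ExitUtil.ExitException e) {
      // The exit was intercepted; clear it so later tests are unaffected.
      ExitUtil.resetFirstExitException();
    } finally {
      // Always restore the original stream, even if an assertion fails.
      System.setErr(originalErr);
    }
    System.out.println("captured stderr: " + captured.toString());
  }
}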
 
Example 7
Source File: TestJobHistoryServer.java    From big-c with Apache License 2.0
@Test(timeout = 60000)
public void testLaunch() throws Exception {

  ExitUtil.disableSystemExit();
  try {
    historyServer = JobHistoryServer.launchJobHistoryServer(new String[0]);
  } catch (ExitUtil.ExitException e) {
    assertEquals(0, e.status);
    ExitUtil.resetFirstExitException();
    fail();
  }
}
 
Example 8
Source File: TestGridmixSubmission.java    From big-c with Apache License 2.0
@Test(timeout = 100000)
public void testMain() throws Exception {

  SecurityManager securityManager = System.getSecurityManager();

  final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  final PrintStream out = new PrintStream(bytes);
  final PrintStream oldErr = System.err;
  System.setErr(out);
  ExitUtil.disableSystemExit();
  try {
    String[] argv = new String[0];
    DebugGridmix.main(argv);

  } catch (ExitUtil.ExitException e) {
    assertEquals("ExitException", e.getMessage());
    ExitUtil.resetFirstExitException();
  } finally {
    System.setErr(oldErr);
    System.setSecurityManager(securityManager);
  }
  String print = bytes.toString();
  // the usage tip should have been printed to the standard error stream
  assertTrue(print
          .contains("Usage: gridmix [-generate <MiB>] [-users URI] [-Dname=value ...] <iopath> <trace>"));
  assertTrue(print.contains("e.g. gridmix -generate 100m foo -"));
}
 
Example 9
Source File: TestDFSInotifyEventInputStream.java    From hadoop with Apache License 2.0
@Test(timeout = 120000)
public void testTwoActiveNNs() throws IOException, MissingEventsException {
  Configuration conf = new HdfsConfiguration();
  MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).build();

  try {
    cluster.getDfsCluster().waitActive();
    cluster.getDfsCluster().transitionToActive(0);
    DFSClient client0 = new DFSClient(cluster.getDfsCluster().getNameNode(0)
        .getNameNodeAddress(), conf);
    DFSClient client1 = new DFSClient(cluster.getDfsCluster().getNameNode(1)
        .getNameNodeAddress(), conf);
    DFSInotifyEventInputStream eis = client0.getInotifyEventStream();
    for (int i = 0; i < 10; i++) {
      client0.mkdirs("/dir" + i, null, false);
    }

    cluster.getDfsCluster().transitionToActive(1);
    for (int i = 10; i < 20; i++) {
      client1.mkdirs("/dir" + i, null, false);
    }

    // make sure that the old active can't read any further than the edits
    // it logged itself (it has no idea whether the in-progress edits from
    // the other writer have actually been committed)
    EventBatch batch = null;
    for (int i = 0; i < 10; i++) {
      batch = waitForNextEvents(eis);
      Assert.assertEquals(1, batch.getEvents().length);
      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
      Assert.assertTrue(((Event.CreateEvent) batch.getEvents()[0]).getPath().equals("/dir" +
          i));
    }
    Assert.assertTrue(eis.poll() == null);
  } finally {
    try {
      cluster.shutdown();
    } catch (ExitUtil.ExitException e) {
      // expected because the old active will be unable to flush the
      // end-of-segment op since it is fenced
    }
  }
}
 
Example 10
Source File: TestDFSInotifyEventInputStream.java    From big-c with Apache License 2.0
@Test(timeout = 120000)
public void testTwoActiveNNs() throws IOException, MissingEventsException {
  Configuration conf = new HdfsConfiguration();
  MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).build();

  try {
    cluster.getDfsCluster().waitActive();
    cluster.getDfsCluster().transitionToActive(0);
    DFSClient client0 = new DFSClient(cluster.getDfsCluster().getNameNode(0)
        .getNameNodeAddress(), conf);
    DFSClient client1 = new DFSClient(cluster.getDfsCluster().getNameNode(1)
        .getNameNodeAddress(), conf);
    DFSInotifyEventInputStream eis = client0.getInotifyEventStream();
    for (int i = 0; i < 10; i++) {
      client0.mkdirs("/dir" + i, null, false);
    }

    cluster.getDfsCluster().transitionToActive(1);
    for (int i = 10; i < 20; i++) {
      client1.mkdirs("/dir" + i, null, false);
    }

    // make sure that the old active can't read any further than the edits
    // it logged itself (it has no idea whether the in-progress edits from
    // the other writer have actually been committed)
    EventBatch batch = null;
    for (int i = 0; i < 10; i++) {
      batch = waitForNextEvents(eis);
      Assert.assertEquals(1, batch.getEvents().length);
      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
      Assert.assertTrue(((Event.CreateEvent) batch.getEvents()[0]).getPath().equals("/dir" +
          i));
    }
    Assert.assertTrue(eis.poll() == null);
  } finally {
    try {
      cluster.shutdown();
    } catch (ExitUtil.ExitException e) {
      // expected because the old active will be unable to flush the
      // end-of-segment op since it is fenced
    }
  }
}