Java Code Examples for org.apache.hadoop.util.ToolRunner#run()

The following examples show how to use org.apache.hadoop.util.ToolRunner#run(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example 1
Source Project: big-c   File: TestBinaryTokenFile.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Runs a distributed sleep job against the mini cluster and verifies that
 * the TokenCache (delegation tokens supplied via a binary token file) is
 * available to the job.
 * @throws IOException
 */
@Test
public void testBinaryTokenFile() throws IOException {
  final Configuration conf = mrCluster.getConfig();

  // List the namenode URI (twice) so the job fetches delegation tokens for it.
  final String namenodeUri = dfsCluster.getURI(0).toString();
  conf.set(MRJobConfig.JOB_NAMENODES, namenodeUri + "," + namenodeUri);

  // Sleep-job geometry passed as CLI arguments: 1 map, 1 reduce, 1ms tasks.
  final String[] args = { "-m", "1", "-r", "1", "-mt", "1", "-rt", "1" };

  int res = -1;
  try {
    res = ToolRunner.run(conf, new MySleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
 
Example 2
Source Project: hadoop-gpu   File: TestStreamedMerge.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Recursively lists the filesystem root via FsShell's {@code -lsr}.
 * Failures are printed to stderr rather than propagated.
 */
void lsr() {
  final String[] shellArgs = { "-lsr", "/" };
  System.out.println("lsr /");
  try {
    ToolRunner.run(conf_, new FsShell(), shellArgs);
  } catch (Exception e) {
    e.printStackTrace();
  }
}
 
Example 3
Source Project: hbase   File: HBaseFsck.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Command-line entry point: constructs an HBaseFsckTool bound to the
 * filesystem hosting the HBase root directory and exits the JVM with
 * the tool's status code.
 *
 * @param args fsck command-line options
 * @throws Exception if the tool cannot be constructed or run
 */
public static void main(String[] args) throws Exception {
  final Configuration conf = HBaseConfiguration.create();
  // Force fs.defaultFS to the filesystem that actually hosts the HBase root dir.
  final Path rootDir = CommonFSUtils.getRootDir(conf);
  final URI rootFsUri = rootDir.getFileSystem(conf).getUri();
  CommonFSUtils.setFsDefault(conf, new Path(rootFsUri));
  System.exit(ToolRunner.run(new HBaseFsckTool(conf), args));
}
 
Example 4
Source Project: hadoop   File: Grep.java    License: Apache License 2.0 4 votes vote down vote up
/** Entry point: runs the Grep tool via ToolRunner and exits with its status. */
public static void main(String[] args) throws Exception {
  final Configuration conf = new Configuration();
  final int exitCode = ToolRunner.run(conf, new Grep(), args);
  System.exit(exitCode);
}
 
Example 5
/**
 * Launches MRTester through ToolRunner using the shared cached configuration
 * and returns the tool's exit status to the caller.
 *
 * NOTE(review): because this returns {@code int}, it is NOT the JVM entry
 * point (that requires a {@code void} return) — it appears to be a
 * programmatic launcher invoked by other code; confirm against callers.
 */
public static int main(String[] args) throws Exception {
  return ToolRunner.run(CachedConfiguration.getInstance(), new MRTester(), args);
}
 
Example 6
Source Project: gemfirexd-oss   File: BusyLegs.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Entry point: logs the invocation arguments, runs the BusyLegs tool, and
 * exits the JVM with the tool's status code.
 *
 * @param args command-line options forwarded to the tool
 * @throws Exception if the tool fails to run
 */
public static void main(String[] args) throws Exception {
  // Arrays.toString renders the actual argument values; concatenating the
  // bare array would print only its identity hash (e.g. "[Ljava.lang.String;@1a2b").
  System.out.println("SampleApp.main() invoked with " + java.util.Arrays.toString(args));
  int rc = ToolRunner.run(new BusyLegs(), args);
  System.exit(rc);
}
 
Example 7
/**
 * End-to-end check of backup sets: creates a set containing table1, takes a
 * full backup of the set, then restores it into a different table
 * (table1_restore) and verifies the row counts match.
 */
@Test
public void testFullRestoreSetToOtherTable() throws Exception {

  LOG.info("Test full restore set");

  // Create a named backup set containing only table1, then verify it
  // round-trips through describeBackupSet.
  try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
    String name = "name";
    table.addToBackupSet(name, new String[] { table1.getNameAsString() });
    List<TableName> names = table.describeBackupSet(name);

    assertNotNull(names);
    assertTrue(names.size() == 1);
    assertTrue(names.get(0).equals(table1));

    // "-s <name>" selects the backup set rather than explicit table names.
    String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-s", name };
    // Run backup
    int ret = ToolRunner.run(conf1, new BackupDriver(), args);
    assertTrue(ret == 0);
    List<BackupInfo> backups = table.getBackupHistory();
    assertTrue(backups.size() == 1);
    String backupId = backups.get(0).getBackupId();
    assertTrue(checkSucceeded(backupId));

    LOG.info("backup complete");

    // Restore from set into other table:
    // "-m" maps the set's table onto table1_restore, "-o" overwrites if present.
    args =
        new String[] { BACKUP_ROOT_DIR, backupId, "-s", name, "-m",
            table1_restore.getNameAsString(), "-o" };
    // Run restore
    ret = ToolRunner.run(conf1, new RestoreDriver(), args);
    assertTrue(ret == 0);
    Admin hba = TEST_UTIL.getAdmin();
    assertTrue(hba.tableExists(table1_restore));
    // Verify number of rows in both tables
    assertEquals(TEST_UTIL.countRows(table1), TEST_UTIL.countRows(table1_restore));
    TEST_UTIL.deleteTable(table1_restore);
    LOG.info("restore into other table is complete");
    hba.close();
  }
}
 
Example 8
Source Project: hadoop   File: DFSHAAdmin.java    License: Apache License 2.0 4 votes vote down vote up
/** Command-line entry point: delegates to DFSHAAdmin and exits with its status. */
public static void main(String[] argv) throws Exception {
  final DFSHAAdmin tool = new DFSHAAdmin();
  System.exit(ToolRunner.run(tool, argv));
}
 
Example 9
Source Project: hadoop-book   File: MultiFileWordCount.java    License: Apache License 2.0 4 votes vote down vote up
/** Runs the MultiFileWordCount tool and terminates the JVM with its exit code. */
public static void main(String[] args) throws Exception {
    System.exit(ToolRunner.run(new MultiFileWordCount(), args));
}
 
Example 10
Source Project: linden   File: LindenJob.java    License: Apache License 2.0 4 votes vote down vote up
/** Entry point: echoes the argument list, runs LindenJob, exits with its status. */
public static void main(String[] args) throws Exception {
  System.out.println(Arrays.asList(args));
  final LindenJob job = new LindenJob();
  System.exit(ToolRunner.run(job, args));
}
 
Example 11
Source Project: kylin   File: FactDistinctColumnsJob.java    License: Apache License 2.0 4 votes vote down vote up
/** Launches FactDistinctColumnsJob via ToolRunner and exits with its status. */
public static void main(String[] args) throws Exception {
    System.exit(ToolRunner.run(new FactDistinctColumnsJob(), args));
}
 
Example 12
Source Project: tez   File: SortMergeJoinExample.java    License: Apache License 2.0 4 votes vote down vote up
/** Runs SortMergeJoinExample under a fresh Configuration; exits with the tool's status. */
public static void main(String[] args) throws Exception {
  final Configuration conf = new Configuration();
  System.exit(ToolRunner.run(conf, new SortMergeJoinExample(), args));
}
 
Example 13
Source Project: hadoop-gpu   File: InputSampler.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Command-line entry point: runs the InputSampler tool and exits the JVM
 * with its status code.
 *
 * @param args sampler options forwarded to ToolRunner
 * @throws Exception if the tool fails to run
 */
public static void main(String[] args) throws Exception {
  JobConf job = new JobConf(InputSampler.class);
  // Explicit type arguments avoid the raw-type construction (and its
  // unchecked-conversion warning) of `new InputSampler(job)`.
  InputSampler<?,?> sampler = new InputSampler<Object,Object>(job);
  int res = ToolRunner.run(sampler, args);
  System.exit(res);
}
 
Example 14
/** Entry point: runs TotalOrderSortingStage and exits with its status code. */
public static void main(String[] args) throws Exception {
	final Configuration conf = new Configuration();
	System.exit(ToolRunner.run(conf, new TotalOrderSortingStage(), args));
}
 
Example 15
Source Project: ojai   File: TestJsonMapperReducer.java    License: Apache License 2.0 4 votes vote down vote up
/** Launches the TestJsonMapperReducer tool; the JVM exit code mirrors the tool's. */
public static void main(String[] args) throws Exception {
  final int status = ToolRunner.run(new TestJsonMapperReducer(), args);
  System.exit(status);
}
 
Example 16
Source Project: big-c   File: LargeSorter.java    License: Apache License 2.0 4 votes vote down vote up
/** Runs LargeSorter under a default Configuration and exits with its status. */
public static void main(String[] args) throws Exception {
  final Configuration conf = new Configuration();
  System.exit(ToolRunner.run(conf, new LargeSorter(), args));
}
 
Example 17
Source Project: kylin-on-parquet-v2   File: NDCuboidJob.java    License: Apache License 2.0 4 votes vote down vote up
/** Entry point: executes NDCuboidJob and propagates its exit code to the JVM. */
public static void main(String[] args) throws Exception {
    final CuboidJob job = new NDCuboidJob();
    System.exit(ToolRunner.run(job, args));
}
 
Example 18
/**
 * Runs an embedded Jetty server that exposes the traces and spans servlets
 * over HTTP.
 *
 * @param args command-line arguments forwarded to the Hadoop tool runner
 * @throws Exception propagated from the embedded Jetty server via ToolRunner
 */
public static void main(String[] args) throws Exception {
  // NOTE(review): the return value of ToolRunner.run is discarded and no
  // System.exit is called — presumably the server blocks until the JVM is
  // terminated, so the status is irrelevant; confirm against HBaseSpanViewerServer.
  ToolRunner.run(HBaseConfiguration.create(), new HBaseSpanViewerServer(), args);
}
 
Example 19
Source Project: hiped2   File: SmallFilesRead.java    License: Apache License 2.0 2 votes vote down vote up
/**
 * Main entry point for the example: runs SmallFilesRead through ToolRunner
 * and exits the JVM with the tool's status code.
 *
 * @param args command-line arguments forwarded to the tool
 * @throws Exception when something goes wrong
 */
public static void main(final String[] args) throws Exception {
  final Configuration conf = new Configuration();
  System.exit(ToolRunner.run(conf, new SmallFilesRead(), args));
}
 
Example 20
/**
 * Launch the solver on a 9x10 board with the one-sided pentominoes. This takes
 * about 2.5 hours on 20 nodes with 2 cpus/node, splitting the job into 2000
 * maps and 1 reduce.
 */
public static void main(String[] args) throws Exception {
    final DistributedPentomino solver = new DistributedPentomino();
    System.exit(ToolRunner.run(new Configuration(), solver, args));
}