Java Code Examples for org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption#REGULAR

The following examples show how to use org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption#REGULAR. You can vote up the examples you like or vote down the ones you don't, and go to the original project or source file by following the links above each example. You may check out the related API usage in the sidebar.
Example 1
Source File: DataNode.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Parse and verify command line arguments and set configuration parameters.
 *
 * Recognizes {@code -rollback} and {@code -regular}; the legacy
 * {@code -r}/{@code --rack} arguments are rejected with an error since
 * rack resolution moved to the NameNode. At most one command may be given.
 *
 * @param args command line arguments; may be {@code null} or empty
 * @param conf configuration that receives the parsed startup option
 * @return false if the passed arguments are incorrect
 */
@VisibleForTesting
static boolean parseArguments(String[] args, Configuration conf) {
  StartupOption startOpt = StartupOption.REGULAR;
  int i = 0;

  if (args != null && args.length != 0) {
    String cmd = args[i++];
    if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
      LOG.error("-r, --rack arguments are not supported anymore. RackID " +
          "resolution is handled by the NameNode.");
      return false;
    } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else {
      return false;
    }
  }

  setStartupOption(conf, startOpt);
  // Only the first argument is consumed above, so any remaining argument
  // means more than one command was specified — fail in that case.
  return (args == null || i == args.length);
}
 
Example 2
Source File: DataNode.java    From big-c with Apache License 2.0 6 votes vote down vote up
/**
 * Parse and verify command line arguments and set configuration parameters.
 *
 * Recognizes {@code -rollback} and {@code -regular}; the legacy
 * {@code -r}/{@code --rack} arguments are rejected with an error since
 * rack resolution moved to the NameNode. At most one command may be given.
 *
 * @param args command line arguments; may be {@code null} or empty
 * @param conf configuration that receives the parsed startup option
 * @return false if the passed arguments are incorrect
 */
@VisibleForTesting
static boolean parseArguments(String[] args, Configuration conf) {
  StartupOption startOpt = StartupOption.REGULAR;
  int i = 0;

  if (args != null && args.length != 0) {
    String cmd = args[i++];
    if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
      LOG.error("-r, --rack arguments are not supported anymore. RackID " +
          "resolution is handled by the NameNode.");
      return false;
    } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else {
      return false;
    }
  }

  setStartupOption(conf, startOpt);
  // Only the first argument is consumed above, so any remaining argument
  // means more than one command was specified — fail in that case.
  return (args == null || i == args.length);
}
 
Example 3
Source File: MiniDFSCluster.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Build the startup argument array for a node being launched with the
 * given operation.
 *
 * ROLLINGUPGRADE yields two arguments (the option name plus its
 * rolling-upgrade sub-option); null, FORMAT, and REGULAR yield no
 * arguments; any other option yields just its name.
 */
private static String[] createArgs(StartupOption operation) {
  if (operation == StartupOption.ROLLINGUPGRADE) {
    String[] rollingUpgradeArgs = {
        operation.getName(),
        operation.getRollingUpgradeStartupOption().name()
    };
    return rollingUpgradeArgs;
  }
  boolean startsWithoutArgs = operation == null
      || operation == StartupOption.FORMAT
      || operation == StartupOption.REGULAR;
  if (startsWithoutArgs) {
    return new String[] {};
  }
  return new String[] {operation.getName()};
}
 
Example 4
Source File: TestJournal.java    From hadoop with Apache License 2.0 5 votes vote down vote up
@Before
public void setup() throws Exception {
  // Start each test from a clean slate: wipe any journal storage left
  // behind by a previous run.
  FileUtil.fullyDelete(TEST_LOG_DIR);
  conf = new Configuration();
  // Open the journal with the REGULAR startup option (normal startup,
  // no rollback/upgrade processing).
  journal = new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR,
    mockErrorReporter);
  // Format the fresh storage directory with the fake namespace info.
  journal.format(FAKE_NSINFO);
}
 
Example 5
Source File: TestJournal.java    From hadoop with Apache License 2.0 5 votes vote down vote up
// Verifies that a Journal holds an exclusive lock on its storage directory
// for its whole lifetime, and releases it on close().
@Test (timeout = 10000)
public void testJournalLocking() throws Exception {
  // Skip this test on platforms/filesystems without file-lock support.
  Assume.assumeTrue(journal.getStorage().getStorageDir(0).isLockSupported());
  StorageDirectory sd = journal.getStorage().getStorageDir(0);
  File lockFile = new File(sd.getRoot(), Storage.STORAGE_FILE_LOCK);
  
  // Journal should be locked, since the format() call locks it.
  GenericTestUtils.assertExists(lockFile);

  journal.newEpoch(FAKE_NSINFO,  1);
  try {
    // A second Journal over the same directory must fail to acquire
    // the lock while the first is still open.
    new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR,
        mockErrorReporter);
    fail("Did not fail to create another journal in same dir");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "Cannot lock storage", ioe);
  }
  
  journal.close();
  
  // Journal should no longer be locked after the close() call.
  // Hence, should be able to create a new Journal in the same dir.
  Journal journal2 = new Journal(conf, TEST_LOG_DIR, JID,
      StartupOption.REGULAR, mockErrorReporter);
  journal2.newEpoch(FAKE_NSINFO, 2);
  journal2.close();
}
 
Example 6
Source File: MiniDFSCluster.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Build the startup argument array for a node being launched with the
 * given operation.
 *
 * ROLLINGUPGRADE yields two arguments (the option name plus its
 * rolling-upgrade sub-option); null, FORMAT, and REGULAR yield no
 * arguments; any other option yields just its name.
 */
private static String[] createArgs(StartupOption operation) {
  if (operation == StartupOption.ROLLINGUPGRADE) {
    String[] rollingUpgradeArgs = {
        operation.getName(),
        operation.getRollingUpgradeStartupOption().name()
    };
    return rollingUpgradeArgs;
  }
  boolean startsWithoutArgs = operation == null
      || operation == StartupOption.FORMAT
      || operation == StartupOption.REGULAR;
  if (startsWithoutArgs) {
    return new String[] {};
  }
  return new String[] {operation.getName()};
}
 
Example 7
Source File: TestJournal.java    From big-c with Apache License 2.0 5 votes vote down vote up
@Before
public void setup() throws Exception {
  // Start each test from a clean slate: wipe any journal storage left
  // behind by a previous run.
  FileUtil.fullyDelete(TEST_LOG_DIR);
  conf = new Configuration();
  // Open the journal with the REGULAR startup option (normal startup,
  // no rollback/upgrade processing).
  journal = new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR,
    mockErrorReporter);
  // Format the fresh storage directory with the fake namespace info.
  journal.format(FAKE_NSINFO);
}
 
Example 8
Source File: TestJournal.java    From big-c with Apache License 2.0 5 votes vote down vote up
// Verifies that a Journal holds an exclusive lock on its storage directory
// for its whole lifetime, and releases it on close().
@Test (timeout = 10000)
public void testJournalLocking() throws Exception {
  // Skip this test on platforms/filesystems without file-lock support.
  Assume.assumeTrue(journal.getStorage().getStorageDir(0).isLockSupported());
  StorageDirectory sd = journal.getStorage().getStorageDir(0);
  File lockFile = new File(sd.getRoot(), Storage.STORAGE_FILE_LOCK);
  
  // Journal should be locked, since the format() call locks it.
  GenericTestUtils.assertExists(lockFile);

  journal.newEpoch(FAKE_NSINFO,  1);
  try {
    // A second Journal over the same directory must fail to acquire
    // the lock while the first is still open.
    new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR,
        mockErrorReporter);
    fail("Did not fail to create another journal in same dir");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "Cannot lock storage", ioe);
  }
  
  journal.close();
  
  // Journal should no longer be locked after the close() call.
  // Hence, should be able to create a new Journal in the same dir.
  Journal journal2 = new Journal(conf, TEST_LOG_DIR, JID,
      StartupOption.REGULAR, mockErrorReporter);
  journal2.newEpoch(FAKE_NSINFO, 2);
  journal2.close();
}
 
Example 9
Source File: MiniHadoopClusterManager.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Parses arguments and fills out the member variables.
 *
 * @param args Command-line arguments.
 * @return true on successful parse; false to indicate that the program
 *         should exit.
 */
private boolean parseArguments(String[] args) {
  Options options = makeOptions();
  CommandLine cli;
  try {
    cli = new GnuParser().parse(options, args);
  } catch (ParseException e) {
    LOG.warn("options parsing failed:  " + e.getMessage());
    new HelpFormatter().printHelp("...", options);
    return false;
  }

  if (cli.hasOption("help")) {
    new HelpFormatter().printHelp("...", options);
    return false;
  }
  // Any leftover positional argument is unsupported: report the first one,
  // print usage, and signal the caller to exit.
  for (String leftover : cli.getArgs()) {
    System.err.println("Unrecognized option: " + leftover);
    new HelpFormatter().printHelp("...", options);
    return false;
  }

  // MapReduce/YARN settings.
  noMR = cli.hasOption("nomr");
  numNodeManagers = intArgument(cli, "nodemanagers", 1);
  rmPort = intArgument(cli, "rmport", 0);
  jhsPort = intArgument(cli, "jhsport", 0);
  fs = cli.getOptionValue("namenode");

  // HDFS settings.
  noDFS = cli.hasOption("nodfs");
  numDataNodes = intArgument(cli, "datanodes", 1);
  nnPort = intArgument(cli, "nnport", 0);
  if (cli.hasOption("format")) {
    dfsOpts = StartupOption.FORMAT;
  } else {
    dfsOpts = StartupOption.REGULAR;
  }

  // Runner output locations.
  writeDetails = cli.getOptionValue("writeDetails");
  writeConfig = cli.getOptionValue("writeConfig");

  // General configuration, including -D overrides.
  conf = new JobConf();
  updateConfiguration(conf, cli.getOptionValues("D"));

  return true;
}
 
Example 10
Source File: MiniDFSClusterManager.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Parses arguments and fills out the member variables.
 *
 * @param args Command-line arguments.
 * @return true on successful parse; false to indicate that the
 * program should exit.
 */
private boolean parseArguments(String[] args) {
  Options options = makeOptions();
  CommandLine cli;
  try {
    cli = new GnuParser().parse(options, args);
  } catch(ParseException e) {
    LOG.warn("options parsing failed:  "+e.getMessage());
    new HelpFormatter().printHelp("...", options);
    return false;
  }

  if (cli.hasOption("help")) {
    new HelpFormatter().printHelp("...", options);
    return false;
  }

  // Any leftover positional argument is unsupported: report the first one,
  // print usage, and signal the caller to exit.
  for (String leftover : cli.getArgs()) {
    LOG.error("Unrecognized option: " + leftover);
    new HelpFormatter().printHelp("...", options);
    return false;
  }

  // HDFS settings.
  numDataNodes = intArgument(cli, "datanodes", 1);
  nameNodePort = intArgument(cli, "nnport", 0);
  format = cli.hasOption("format");
  dfsOpts = format ? StartupOption.FORMAT : StartupOption.REGULAR;

  // Runner output locations.
  writeDetails = cli.getOptionValue("writeDetails");
  writeConfig = cli.getOptionValue("writeConfig");

  // General configuration, including -D overrides.
  conf = new HdfsConfiguration();
  updateConfiguration(conf, cli.getOptionValues("D"));

  return true;
}
 
Example 11
Source File: MiniHadoopClusterManager.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * Parses arguments and fills out the member variables.
 *
 * @param args Command-line arguments.
 * @return true on successful parse; false to indicate that the program
 *         should exit.
 */
private boolean parseArguments(String[] args) {
  Options options = makeOptions();
  CommandLine cli;
  try {
    cli = new GnuParser().parse(options, args);
  } catch (ParseException e) {
    LOG.warn("options parsing failed:  " + e.getMessage());
    new HelpFormatter().printHelp("...", options);
    return false;
  }

  if (cli.hasOption("help")) {
    new HelpFormatter().printHelp("...", options);
    return false;
  }
  // Any leftover positional argument is unsupported: report the first one,
  // print usage, and signal the caller to exit.
  for (String leftover : cli.getArgs()) {
    System.err.println("Unrecognized option: " + leftover);
    new HelpFormatter().printHelp("...", options);
    return false;
  }

  // MapReduce/YARN settings.
  noMR = cli.hasOption("nomr");
  numNodeManagers = intArgument(cli, "nodemanagers", 1);
  rmPort = intArgument(cli, "rmport", 0);
  jhsPort = intArgument(cli, "jhsport", 0);
  fs = cli.getOptionValue("namenode");

  // HDFS settings.
  noDFS = cli.hasOption("nodfs");
  numDataNodes = intArgument(cli, "datanodes", 1);
  nnPort = intArgument(cli, "nnport", 0);
  if (cli.hasOption("format")) {
    dfsOpts = StartupOption.FORMAT;
  } else {
    dfsOpts = StartupOption.REGULAR;
  }

  // Runner output locations.
  writeDetails = cli.getOptionValue("writeDetails");
  writeConfig = cli.getOptionValue("writeConfig");

  // General configuration, including -D overrides.
  conf = new JobConf();
  updateConfiguration(conf, cli.getOptionValues("D"));

  return true;
}
 
Example 12
Source File: MiniDFSClusterManager.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * Parses arguments and fills out the member variables.
 *
 * @param args Command-line arguments.
 * @return true on successful parse; false to indicate that the
 * program should exit.
 */
private boolean parseArguments(String[] args) {
  Options options = makeOptions();
  CommandLine cli;
  try {
    cli = new GnuParser().parse(options, args);
  } catch(ParseException e) {
    LOG.warn("options parsing failed:  "+e.getMessage());
    new HelpFormatter().printHelp("...", options);
    return false;
  }

  if (cli.hasOption("help")) {
    new HelpFormatter().printHelp("...", options);
    return false;
  }

  // Any leftover positional argument is unsupported: report the first one,
  // print usage, and signal the caller to exit.
  for (String leftover : cli.getArgs()) {
    LOG.error("Unrecognized option: " + leftover);
    new HelpFormatter().printHelp("...", options);
    return false;
  }

  // HDFS settings.
  numDataNodes = intArgument(cli, "datanodes", 1);
  nameNodePort = intArgument(cli, "nnport", 0);
  format = cli.hasOption("format");
  dfsOpts = format ? StartupOption.FORMAT : StartupOption.REGULAR;

  // Runner output locations.
  writeDetails = cli.getOptionValue("writeDetails");
  writeConfig = cli.getOptionValue("writeConfig");

  // General configuration, including -D overrides.
  conf = new HdfsConfiguration();
  updateConfiguration(conf, cli.getOptionValues("D"));

  return true;
}