Java Code Examples for org.apache.hadoop.conf.Configuration#reloadConfiguration()

The following examples show how to use org.apache.hadoop.conf.Configuration#reloadConfiguration(). You can go to the original project or source file by following the links above each example.
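
Before diving into the examples: reloadConfiguration() clears the properties a Configuration has already cached, so the next property lookup re-reads every registered resource. The minimal, self-contained sketch below illustrates the pattern the examples share; the resource path and property name are placeholders, not taken from any of the projects:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class ReloadConfigurationSketch {
    public static void main(String[] args) {
        // Start from an empty configuration so only explicitly added resources contribute values.
        Configuration conf = new Configuration(false);

        // Register a resource file (placeholder path).
        conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"));
        String before = conf.get("fs.defaultFS");

        // If the file changed on disk after the first read, the cached value is stale.
        // reloadConfiguration() drops the cached properties; the next get() re-reads
        // every registered resource.
        conf.reloadConfiguration();
        String after = conf.get("fs.defaultFS");

        System.out.println(before + " -> " + after);
    }
}

Note that recent Hadoop versions already trigger a reload inside addResource(), so the explicit reloadConfiguration() calls in the examples below act as a defensive re-read rather than a strict requirement.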
Example 1
Source File: BaseTestParent.java    From pxf with Apache License 2.0
protected void trySecureLogin(Hdfs hdfs, String kerberosPrincipal) throws Exception {
    if (StringUtils.isEmpty(kerberosPrincipal)) return;

    String testUser = kerberosPrincipal.split("@")[0];
    String testUserKeytabPath = String.format(testUserkeyTabPathFormat, testUser);
    if (!new File(testUserKeytabPath).exists()) {
        throw new Exception(String.format("Keytab file %s not found", testUserKeytabPath));
    }
    if (StringUtils.isEmpty(hdfs.getHadoopRoot())) {
        throw new Exception("SUT parameter hadoopRoot in hdfs component is not defined");
    }
    // set up the Kerberos security context
    Configuration config = new Configuration();
    config.addResource(new Path(hdfs.getHadoopRoot() + "/conf/hdfs-site.xml"));
    config.addResource(new Path(hdfs.getHadoopRoot() + "/conf/core-site.xml"));
    config.reloadConfiguration();
    config.set("hadoop.security.authentication", "Kerberos");
    UserGroupInformation.setConfiguration(config);
    UserGroupInformation.loginUserFromKeytab(kerberosPrincipal, testUserKeytabPath);

    // Initialize HDFS system object again, after login
    hdfs.init();
}
 
Example 2
Source File: HistoryLogUtils.java    From spydra with Apache License 2.0
/**
 * Creates a specialized hadoop configuration for spydra. This configuration is
 * special in the sense that it configures hadoop tooling to be able to access GCS
 * for logs, history and is able to run a read-only job-history server (not moving
 * or deleting logs or history files). This configuration is dependent on client
 * and username due to how this information is stored in GCS.
 *
 * @param clientId client id to generate configuration for
 * @param username username to generate configuration for
 * @param bucket   name of the bucket storing logs and history information
 */
public static Configuration generateHadoopConfig(
    String clientId, String username,
    String bucket) {
  // We want minimal, clean options that are unlikely to collide with anything,
  // which is why the defaults are not loaded
  Configuration cfg = new Configuration(false);
  cfg.addResource(HISTORY_LOG_CONFIG_NAME);
  cfg.reloadConfiguration();
  cfg.set(SPYDRA_HISTORY_CLIENT_ID_PROPERTY, clientId);
  cfg.set(SPYDRA_HISTORY_USERNAME_PROPERTY, username);
  cfg.set(SPYDRA_HISTORY_BUCKET_PROPERTY, bucket);

  if (logger.isDebugEnabled()) {
    logger.debug("Dumping generated config to be applied for log/history tools");
    logger.debug(
        StreamSupport.stream(cfg.spliterator(), false)
            .map(Object::toString)
            .collect(Collectors.joining("\n")));
  }

  return cfg;
}
 
Example 3
Source File: SolrMaster.java    From yarn-proto with Apache License 2.0
public SolrMaster(CommandLine cli) throws Exception {
  this.cli = cli;

  // Layer an optional site file (passed via -conf) over the default configuration
  // and force a re-read of its properties.
  Configuration hadoopConf = new Configuration();
  if (cli.hasOption("conf")) {
    hadoopConf.addResource(new Path(cli.getOptionValue("conf")));
    hadoopConf.reloadConfiguration();
  }
  conf = new YarnConfiguration(hadoopConf);

  // Bring up the NodeManager client used to launch containers.
  nmClient = NMClient.createNMClient();
  nmClient.init(conf);
  nmClient.start();

  numContainersToWaitFor = Integer.parseInt(cli.getOptionValue("nodes"));
  memory = Integer.parseInt(cli.getOptionValue("memory", "512"));
  port = Integer.parseInt(cli.getOptionValue("port"));
  nextPort = port;

  // Generate a random base-32 stop key (130 bits of entropy).
  SecureRandom random = new SecureRandom();
  this.randomStopKey = new BigInteger(130, random).toString(32);

  this.inetAddresses = getMyInetAddresses();
}
 
Example 4
Source File: HadoopXmlResourceParser.java    From knox with Apache License 2.0
/**
 * Produces a set of {@link SimpleDescriptor}s from the specified file.
 *
 * @param path
 *          The path to the configuration file which holds descriptor information in a pre-defined format.
 * @param topologyName
 *          if set, the parser should only parse a descriptor with the same name
 * @return A HadoopXmlResourceParserResult containing the descriptors parsed from the given file.
 */
public HadoopXmlResourceParserResult parse(String path, String topologyName) {
  try {
    log.parseHadoopXmlResource(path, topologyName == null ? "all topologies" : topologyName);
    final Configuration xmlConfiguration = new Configuration(false);
    xmlConfiguration.addResource(Paths.get(path).toUri().toURL());
    xmlConfiguration.reloadConfiguration();
    final HadoopXmlResourceParserResult parserResult = parseXmlConfig(xmlConfiguration, topologyName);
    logParserResult(path, parserResult);
    return parserResult;
  } catch (Exception e) {
    log.failedToParseXmlConfiguration(path, e.getMessage(), e);
    return new HadoopXmlResourceParserResult();
  }
}