Java Code Examples for org.apache.hadoop.yarn.conf.YarnConfiguration#get()

The following examples show how to use org.apache.hadoop.yarn.conf.YarnConfiguration#get(). Each snippet is taken from an open-source project; the source file, originating project, and license are noted above each example.
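
Before the project examples, here is a minimal, self-contained sketch of the two lookup forms used throughout this page: get(key), which returns null for an unset property, and get(key, default), which falls back to the supplied value. The keys are standard YarnConfiguration constants; the printed values depend on whatever yarn-site.xml is on your classpath.

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class YarnConfGetSketch {
  public static void main(String[] args) {
    // Layers yarn-default.xml and yarn-site.xml (if present) over the core Hadoop defaults.
    YarnConfiguration conf = new YarnConfiguration();

    // get(key) returns null when the property is not set anywhere.
    String rmAddress = conf.get(YarnConfiguration.RM_ADDRESS);

    // get(key, default) substitutes the supplied default instead of returning null.
    String rmWebApp = conf.get(YarnConfiguration.RM_WEBAPP_ADDRESS,
        YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS);

    System.out.println("yarn.resourcemanager.address = " + rmAddress);
    System.out.println("yarn.resourcemanager.webapp.address = " + rmWebApp);
  }
}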
Example 1
Source File: AmIpFilter.java    From hadoop with Apache License 2.0
protected String findRedirectUrl() throws ServletException {
  String addr;
  if (proxyUriBases.size() == 1) {  // external proxy or not RM HA
    addr = proxyUriBases.values().iterator().next();
  } else {                          // RM HA
    YarnConfiguration conf = new YarnConfiguration();
    String activeRMId = RMHAUtils.findActiveRMHAId(conf);
    String addressPropertyPrefix = YarnConfiguration.useHttps(conf)
        ? YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS
        : YarnConfiguration.RM_WEBAPP_ADDRESS;
    String host = conf.get(
        HAUtil.addSuffix(addressPropertyPrefix, activeRMId));
    addr = proxyUriBases.get(host);
  }
  if (addr == null) {
    throw new ServletException(
        "Could not determine the proxy server for redirection");
  }
  return addr;
}
 
Example 2
Source File: AmIpFilter.java    From big-c with Apache License 2.0
protected String findRedirectUrl() throws ServletException {
  String addr;
  if (proxyUriBases.size() == 1) {  // external proxy or not RM HA
    addr = proxyUriBases.values().iterator().next();
  } else {                          // RM HA
    YarnConfiguration conf = new YarnConfiguration();
    String activeRMId = RMHAUtils.findActiveRMHAId(conf);
    String addressPropertyPrefix = YarnConfiguration.useHttps(conf)
        ? YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS
        : YarnConfiguration.RM_WEBAPP_ADDRESS;
    String host = conf.get(
        HAUtil.addSuffix(addressPropertyPrefix, activeRMId));
    addr = proxyUriBases.get(host);
  }
  if (addr == null) {
    throw new ServletException(
        "Could not determine the proxy server for redirection");
  }
  return addr;
}
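
The detail worth noting in Examples 1 and 2 is how the HA-qualified property name is built before calling get(): the active ResourceManager's id is appended to the base webapp-address key. Below is a hedged sketch of just that lookup; the id "rm1" is an illustrative stand-in for the value RMHAUtils.findActiveRMHAId(conf) returns, and it must match an entry in yarn.resourcemanager.ha.rm-ids.

import org.apache.hadoop.yarn.conf.HAUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ActiveRmWebAddressSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    String activeRMId = "rm1";  // stand-in for RMHAUtils.findActiveRMHAId(conf)

    String addressPropertyPrefix = YarnConfiguration.useHttps(conf)
        ? YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS  // yarn.resourcemanager.webapp.https.address
        : YarnConfiguration.RM_WEBAPP_ADDRESS;       // yarn.resourcemanager.webapp.address

    // addSuffix appends ".rm1", e.g. "yarn.resourcemanager.webapp.address.rm1".
    String haKey = HAUtil.addSuffix(addressPropertyPrefix, activeRMId);

    // Returns null unless the per-RM address is configured for that id.
    String host = conf.get(haKey);
    System.out.println(haKey + " = " + host);
  }
}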
 
Example 3
Source File: ConfigUtils.java    From Bats with Apache License 2.0
public static String getRawContainerLogsUrl(YarnConfiguration conf, String nodeHttpAddress, String appId, String containerId)
{
  String logDirs = conf.get(YarnConfiguration.NM_LOG_DIRS);
  if (logDirs.startsWith("${yarn.log.dir}")) {
    return ConfigUtils.getSchemePrefix(conf) + nodeHttpAddress + "/logs" + logDirs.substring("${yarn.log.dir}".length()) + "/" + appId + "/" + containerId;
  } else {
    try {
      String logDirsPath = new File(logDirs).getCanonicalPath();
      String yarnLogDirPath = new File(getYarnLogDir()).getCanonicalPath();
      if (logDirsPath.startsWith(yarnLogDirPath)) {
        return ConfigUtils.getSchemePrefix(conf) + nodeHttpAddress + "/logs" + logDirsPath
            .substring(yarnLogDirPath.length()) + "/" + appId + "/" + containerId;
      } else {
        if (!rawContainerLogWarningPrinted) {
          LOG.warn("Cannot determine the location of container logs because of incompatible node manager log location ({}) and yarn log location ({})",
              logDirsPath, yarnLogDirPath);
          rawContainerLogWarningPrinted = true;
        }
      }
    } catch (Exception ex) {
      if (!rawContainerLogWarningPrinted) {
        LOG.warn("Cannot determine the location of container logs because of error: ", ex);
        rawContainerLogWarningPrinted = true;
      }
    }
  }
  return null;
}
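
Example 3 (repeated verbatim as Example 7, from a different project) branches on whether the returned value still contains the literal ${yarn.log.dir} placeholder. That can happen because Configuration#get() expands ${...} references only when the referenced system property or configuration property is resolvable, while getRaw() never expands them. A small sketch of the difference; the output depends on how yarn.log.dir is defined in your JVM:

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class LogDirsLookupSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();

    // yarn-default.xml sets yarn.nodemanager.log-dirs to "${yarn.log.dir}/userlogs".
    // get() expands the placeholder if yarn.log.dir resolves; otherwise the literal
    // text remains, which is the case the example above detects with startsWith().
    String expanded = conf.get(YarnConfiguration.NM_LOG_DIRS);

    // getRaw() always returns the unexpanded value.
    String raw = conf.getRaw(YarnConfiguration.NM_LOG_DIRS);

    System.out.println("get():    " + expanded);
    System.out.println("getRaw(): " + raw);
  }
}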
 
Example 4
Source File: YarnController.java    From dremio-oss with Apache License 2.0
public TwillRunnerService startTwillRunner(YarnConfiguration yarnConfiguration) {
  String zkStr = dremioConfig.getString(DremioConfig.ZOOKEEPER_QUORUM);
  String clusterId = yarnConfiguration.get(YARN_CLUSTER_ID);
  Preconditions.checkNotNull(clusterId, "Cluster ID can not be null");
  TwillRunnerService twillRunner = new YarnTwillRunnerService(yarnConfiguration, zkStr);
  TwillRunnerService previousOne = twillRunners.putIfAbsent(new ClusterId(clusterId), twillRunner);
  if (previousOne == null) {
    // start one we are planning to add - if it is already in collection it should be started
    twillRunner.start();
    return twillRunner;
  }
  return previousOne;
}
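
Example 4 reads an application-defined key (YARN_CLUSTER_ID is a Dremio constant, not part of YarnConfiguration) and fails fast when it is absent. A minimal sketch of the same pattern, using a hypothetical key name:

import java.util.Objects;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class CustomKeySketch {
  // Hypothetical application-defined key; not a YarnConfiguration constant.
  static final String CLUSTER_ID_KEY = "example.cluster.id";

  static String requireClusterId(YarnConfiguration conf) {
    // get() returns null for keys that are neither configured nor given a default.
    return Objects.requireNonNull(conf.get(CLUSTER_ID_KEY), "Cluster ID can not be null");
  }

  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    conf.set(CLUSTER_ID_KEY, "demo-cluster");
    System.out.println(requireClusterId(conf));
  }
}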
 
Example 5
Source File: Hadoop2ClusterManagerUtil.java    From rubix with Apache License 2.0
public static URL getNodeURL(YarnConfiguration yConf) throws MalformedURLException
{
  // addressConf and localHostAddress are constants defined elsewhere in Hadoop2ClusterManagerUtil.
  String address = yConf.get(addressConf, localHostAddress);
  String serverAddress = address.substring(0, address.indexOf(":"));
  int serverPort = Integer.parseInt(address.substring(address.indexOf(":") + 1));

  return new URL("http://" + serverAddress + ":" + serverPort + "/ws/v1/cluster/nodes");
}
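
Example 5 depends on two fields that are not shown (addressConf and localHostAddress) and splits the host:port string by hand. The sketch below is an assumption-laden variant of the same lookup: it guesses the ResourceManager web address for addressConf, since /ws/v1/cluster/nodes is an RM REST endpoint, and lets Configuration#getSocketAddr() do the parsing.

import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.URL;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class NodeUrlSketch {
  public static URL getNodeURL(YarnConfiguration yConf) throws MalformedURLException {
    // Assumed stand-ins for the example's addressConf / localHostAddress fields.
    InetSocketAddress addr = yConf.getSocketAddr(
        YarnConfiguration.RM_WEBAPP_ADDRESS,
        YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS,
        YarnConfiguration.DEFAULT_RM_WEBAPP_PORT);

    return new URL("http://" + addr.getHostName() + ":" + addr.getPort()
        + "/ws/v1/cluster/nodes");
  }

  public static void main(String[] args) throws MalformedURLException {
    System.out.println(getNodeURL(new YarnConfiguration()));
  }
}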
 
Example 6
Source File: HelloWorld.java    From twill with Apache License 2.0
public static void main(String[] args) {
  if (args.length < 1) {
    System.err.println("Arguments format: <host:port of zookeeper server>");
    System.exit(1);
  }

  String zkStr = args[0];
  YarnConfiguration yarnConfiguration = new YarnConfiguration();
  final TwillRunnerService twillRunner = new YarnTwillRunnerService(yarnConfiguration, zkStr);
  twillRunner.start();

  String yarnClasspath =
    yarnConfiguration.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
                          Joiner.on(",").join(YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH));
  List<String> applicationClassPaths = Lists.newArrayList();
  Iterables.addAll(applicationClassPaths, Splitter.on(",").split(yarnClasspath));
  final TwillController controller = twillRunner.prepare(new HelloWorldRunnable())
      .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out, true)))
      .withApplicationClassPaths(applicationClassPaths)
      .withBundlerClassAcceptor(new HadoopClassExcluder())
      .start();

  Runtime.getRuntime().addShutdownHook(new Thread() {
    @Override
    public void run() {
      try {
        Futures.getUnchecked(controller.terminate());
      } finally {
        twillRunner.stop();
      }
    }
  });

  try {
    controller.awaitTerminated();
  } catch (ExecutionException e) {
    e.printStackTrace();
  }
}
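
Example 6 fetches the application classpath with get(key, default), joining the default array into one comma-separated string and then splitting the result with Guava. When only the split entries are needed, Configuration#getTrimmedStrings() (or getStrings(), which Example 8 below uses) performs the same lookup in one call; a hedged sketch:

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ClasspathLookupSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();

    // Splits the comma-separated value and trims whitespace around each entry.
    String[] entries = conf.getTrimmedStrings(
        YarnConfiguration.YARN_APPLICATION_CLASSPATH,
        YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH);

    List<String> applicationClassPaths = Arrays.asList(entries);
    applicationClassPaths.forEach(System.out::println);
  }
}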
 
Example 7
Source File: ConfigUtils.java    From attic-apex-core with Apache License 2.0
public static String getRawContainerLogsUrl(YarnConfiguration conf, String nodeHttpAddress, String appId, String containerId)
{
  String logDirs = conf.get(YarnConfiguration.NM_LOG_DIRS);
  if (logDirs.startsWith("${yarn.log.dir}")) {
    return ConfigUtils.getSchemePrefix(conf) + nodeHttpAddress + "/logs" + logDirs.substring("${yarn.log.dir}".length()) + "/" + appId + "/" + containerId;
  } else {
    try {
      String logDirsPath = new File(logDirs).getCanonicalPath();
      String yarnLogDirPath = new File(getYarnLogDir()).getCanonicalPath();
      if (logDirsPath.startsWith(yarnLogDirPath)) {
        return ConfigUtils.getSchemePrefix(conf) + nodeHttpAddress + "/logs" + logDirsPath
            .substring(yarnLogDirPath.length()) + "/" + appId + "/" + containerId;
      } else {
        if (!rawContainerLogWarningPrinted) {
          LOG.warn("Cannot determine the location of container logs because of incompatible node manager log location ({}) and yarn log location ({})",
              logDirsPath, yarnLogDirPath);
          rawContainerLogWarningPrinted = true;
        }
      }
    } catch (Exception ex) {
      if (!rawContainerLogWarningPrinted) {
        LOG.warn("Cannot determine the location of container logs because of error: ", ex);
        rawContainerLogWarningPrinted = true;
      }
    }
  }
  return null;
}
 
Example 8
Source File: YarnController.java    From dremio-oss with Apache License 2.0
protected TwillPreparer createPreparer(YarnConfiguration yarnConfiguration, List<Property> propertyList) {
  AppBundleRunnable.Arguments discoveryArgs = new AppBundleRunnable.Arguments(
      YARN_BUNDLED_JAR_NAME,
      "com.dremio.dac.daemon.YarnDaemon",
      new String[] {});

  DacDaemonYarnApplication dacDaemonApp = new DacDaemonYarnApplication(dremioConfig, yarnConfiguration,
    new DacDaemonYarnApplication.Environment());

  TwillRunnerService twillRunner = startTwillRunner(yarnConfiguration);

  Map<String, String> envVars = Maps.newHashMap();
  envVars.put("MALLOC_ARENA_MAX", "4");
  envVars.put("MALLOC_MMAP_THRESHOLD_", "131072");
  envVars.put("MALLOC_TRIM_THRESHOLD_", "131072");
  envVars.put("MALLOC_TOP_PAD_", "131072");
  envVars.put("MALLOC_MMAP_MAX_", "65536");
  // Set ${DREMIO_HOME} for YarnDaemon to avoid config substitution failure
  envVars.put(DREMIO_HOME, ".");

  try {
    String userName = UserGroupInformation.getCurrentUser().getUserName();
    envVars.put("HADOOP_USER_NAME", userName);
  } catch (IOException e) {
    logger.error("Exception while trying to fill out HADOOP_USER_NAME with current user", e);
  }

  for (Property prop : propertyList) {
    // add if it is env var
    if (PropertyType.ENV_VAR.equals(prop.getType())) {
      envVars.put(prop.getKey(), prop.getValue());
    }
  }
  String[] yarnClasspath = yarnConfiguration.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
    YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH);
  final TwillPreparer preparer = twillRunner.prepare(dacDaemonApp)
    .addLogHandler(new YarnTwillLogHandler())
    .withApplicationClassPaths(yarnClasspath)
    .withBundlerClassAcceptor(new HadoopClassExcluder())
    .setLogLevels(ImmutableMap.of(Logger.ROOT_LOGGER_NAME, yarnContainerLogLevel()))
    .withEnv(envVars)
    .withMaxRetries(YARN_RUNNABLE_NAME, MAX_APP_RESTART_RETRIES)
    .withArguments(YARN_RUNNABLE_NAME, discoveryArgs.toArray());

  for (String classpathJar : dacDaemonApp.getJarNames()) {
    preparer.withClassPaths(classpathJar);
  }

  preparer.addJVMOptions(prepareCommandOptions(yarnConfiguration, propertyList));

  String queue = yarnConfiguration.get(DacDaemonYarnApplication.YARN_QUEUE_NAME);
  if (queue != null) {
    preparer.setSchedulerQueue(queue);
  }

  if (dremioConfig.getBoolean(DremioConfig.DEBUG_YARN_ENABLED)) {
    preparer.enableDebugging(true, YARN_RUNNABLE_NAME);
  }

  return preparer;
}
 
Example 9
Source File: HBaseServerTestInstance.java    From Halyard with Apache License 2.0
public static synchronized Configuration getInstanceConfig() throws Exception {
    if (conf == null) {
        File zooRoot = File.createTempFile("hbase-zookeeper", "");
        zooRoot.delete();
        ZooKeeperServer zookeeper = new ZooKeeperServer(zooRoot, zooRoot, 2000);
        ServerCnxnFactory factory = ServerCnxnFactory.createFactory(new InetSocketAddress("localhost", 0), 5000);
        factory.startup(zookeeper);

        YarnConfiguration yconf = new YarnConfiguration();
        String argLine = System.getProperty("argLine");
        if (argLine != null) {
            yconf.set("yarn.app.mapreduce.am.command-opts", argLine.replace("jacoco.exec", "jacocoMR.exec"));
        }
        yconf.setBoolean(MRConfig.MAPREDUCE_MINICLUSTER_CONTROL_RESOURCE_MONITORING, false);
        yconf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
        MiniMRYarnCluster miniCluster = new MiniMRYarnCluster("testCluster");
        miniCluster.init(yconf);
        String resourceManagerLink = yconf.get(YarnConfiguration.RM_ADDRESS);
        yconf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, true);
        miniCluster.start();
        miniCluster.waitForNodeManagersToConnect(10000);
        // following condition set in MiniYarnCluster:273
        while (resourceManagerLink.endsWith(":0")) {
            Thread.sleep(100);
            resourceManagerLink = yconf.get(YarnConfiguration.RM_ADDRESS);
        }

        File hbaseRoot = File.createTempFile("hbase-root", "");
        hbaseRoot.delete();
        conf = HBaseConfiguration.create(miniCluster.getConfig());
        conf.set(HConstants.HBASE_DIR, hbaseRoot.toURI().toURL().toString());
        conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, factory.getLocalPort());
        conf.set("hbase.master.hostname", "localhost");
        conf.set("hbase.regionserver.hostname", "localhost");
        conf.setInt("hbase.master.info.port", -1);
        conf.set("hbase.fs.tmp.dir", new File(System.getProperty("java.io.tmpdir")).toURI().toURL().toString());
        LocalHBaseCluster cluster = new LocalHBaseCluster(conf);
        cluster.startup();
    }
    return new Configuration(conf);
}