Java Code Examples for org.apache.hadoop.yarn.api.ApplicationConstants#LOG_DIR_EXPANSION_VAR

The following examples show how to use org.apache.hadoop.yarn.api.ApplicationConstants#LOG_DIR_EXPANSION_VAR. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: LaunchContainerThread.java    From TensorFlowOnYARN with Apache License 2.0 6 votes vote down vote up
/**
 * Assembles the shell command that launches one TF server task inside its
 * YARN container. stdout/stderr are redirected into the container log
 * directory; YARN expands the {@code <LOG_DIR>} token at container launch.
 *
 * @param containerMemory heap limit for the task JVM, in MB (becomes -Xmx).
 * @param clusterSpec serialized TF cluster spec passed to the server.
 * @param jobName TF job name ("worker"/"ps") for this task.
 * @param taskIndex index of this task within its job.
 * @return the full command line, parts joined with single spaces.
 */
private String makeContainerCommand(long containerMemory, String clusterSpec,
    String jobName, int taskIndex) {
  String[] commands = new String[]{
      ApplicationConstants.Environment.JAVA_HOME.$$() + "/bin/java",
      "-Xmx" + containerMemory + "m",
      // Fix: the original appended a trailing space to the class name,
      // which produced a double space once the parts were joined with " ".
      TFServerRunner.class.getName(),
      Utils.mkOption(Constants.OPT_CLUSTER_SPEC, clusterSpec),
      Utils.mkOption(Constants.OPT_JOB_NAME, jobName),
      Utils.mkOption(Constants.OPT_TASK_INDEX, taskIndex),
      "1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/TFServerRunner." +
          ApplicationConstants.STDOUT,
      "2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/TFServerRunner." +
          ApplicationConstants.STDERR
  };

  return Utils.mkString(commands, " ");
}
 
Example 2
Source File: LaunchCluster.java    From TensorFlowOnYARN with Apache License 2.0 6 votes vote down vote up
/**
 * Builds the command line that boots the ApplicationMaster JVM inside the
 * AM container. Output streams are redirected into the YARN container log
 * directory (the {@code <LOG_DIR>} token is expanded by YARN at launch).
 *
 * @param tfLib path option forwarded to the AM as OPT_TF_LIB.
 * @param tfJar path option forwarded to the AM as OPT_TF_JAR.
 * @return the full command line, parts joined with single spaces.
 */
private String makeAppMasterCommand(String tfLib, String tfJar) {
  final String javaBin = ApplicationConstants.Environment.JAVA_HOME.$$() + "/bin/java";
  // Xmx is derived from the configured AM memory.
  final String heapOpt = "-Xmx" + amMemory + "m";
  final String stdoutRedirect =
      "1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout";
  final String stderrRedirect =
      "2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr";
  return Utils.mkString(new String[]{
      javaBin,
      heapOpt,
      ApplicationMaster.class.getName(),
      Utils.mkOption(Constants.OPT_TF_CONTAINER_MEMORY, containerMemory),
      Utils.mkOption(Constants.OPT_TF_CONTAINER_VCORES, containerVCores),
      Utils.mkOption(Constants.OPT_TF_WORKER_NUM, workerNum),
      Utils.mkOption(Constants.OPT_TF_PS_NUM, psNum),
      Utils.mkOption(Constants.OPT_TF_LIB, tfLib),
      Utils.mkOption(Constants.OPT_TF_JAR, tfJar),
      stdoutRedirect,
      stderrRedirect
  }, " ");
}
 
Example 3
Source File: TestContainerLaunch.java    From hadoop with Apache License 2.0 6 votes vote down vote up
// Verifies that ContainerLaunch.expandEnvironment rewrites the
// cross-platform env markers and the <LOG_DIR> token into the
// platform-specific form (%VAR%/';' on Windows, $VAR/':' elsewhere).
@Test(timeout = 10000)
public void testEnvExpansion() throws IOException {
  final Path logPath = new Path("/nm/container/logs");
  final String hadoopHome = Apps.crossPlatformify("HADOOP_HOME");
  final String hadoopLogHome = Apps.crossPlatformify("HADOOP_LOG_HOME");
  final String sep = ApplicationConstants.CLASS_PATH_SEPARATOR;

  // Two classpath entries plus a log-dir entry, all using expandable markers.
  final String input = hadoopHome + "/share/hadoop/common/*" + sep
      + hadoopHome + "/share/hadoop/common/lib/*" + sep
      + hadoopLogHome + ApplicationConstants.LOG_DIR_EXPANSION_VAR;

  final String res = ContainerLaunch.expandEnvironment(input, logPath);

  if (Shell.WINDOWS) {
    Assert.assertEquals("%HADOOP_HOME%/share/hadoop/common/*;"
        + "%HADOOP_HOME%/share/hadoop/common/lib/*;"
        + "%HADOOP_LOG_HOME%/nm/container/logs", res);
  } else {
    Assert.assertEquals("$HADOOP_HOME/share/hadoop/common/*:"
        + "$HADOOP_HOME/share/hadoop/common/lib/*:"
        + "$HADOOP_LOG_HOME/nm/container/logs", res);
  }
  System.out.println(res);
}
 
Example 4
Source File: TestContainerLaunch.java    From big-c with Apache License 2.0 6 votes vote down vote up
// Verifies that ContainerLaunch.expandEnvironment substitutes the
// cross-platform environment markers produced by Apps.crossPlatformify and
// the <LOG_DIR> token with platform-specific syntax: %VAR% plus ';' on
// Windows, $VAR plus ':' elsewhere.
@Test(timeout = 10000)
public void testEnvExpansion() throws IOException {
  Path logPath = new Path("/nm/container/logs");
  // Two classpath entries and a log-home entry, joined by the
  // cross-platform classpath separator; ends with the <LOG_DIR> token.
  String input =
      Apps.crossPlatformify("HADOOP_HOME") + "/share/hadoop/common/*"
          + ApplicationConstants.CLASS_PATH_SEPARATOR
          + Apps.crossPlatformify("HADOOP_HOME") + "/share/hadoop/common/lib/*"
          + ApplicationConstants.CLASS_PATH_SEPARATOR
          + Apps.crossPlatformify("HADOOP_LOG_HOME")
          + ApplicationConstants.LOG_DIR_EXPANSION_VAR;

  String res = ContainerLaunch.expandEnvironment(input, logPath);

  // <LOG_DIR> must expand to the concrete logPath on both platforms.
  if (Shell.WINDOWS) {
    Assert.assertEquals("%HADOOP_HOME%/share/hadoop/common/*;"
        + "%HADOOP_HOME%/share/hadoop/common/lib/*;"
        + "%HADOOP_LOG_HOME%/nm/container/logs", res);
  } else {
    Assert.assertEquals("$HADOOP_HOME/share/hadoop/common/*:"
        + "$HADOOP_HOME/share/hadoop/common/lib/*:"
        + "$HADOOP_LOG_HOME/nm/container/logs", res);
  }
  System.out.println(res);
}
 
Example 5
Source File: YarnSubmissionHelper.java    From reef with Apache License 2.0 5 votes vote down vote up
/**
 * Prepares a YARN submission for the REEF driver: records where the driver's
 * stdout/stderr should land (under YARN's {@code <LOG_DIR>}, expanded at
 * container launch), starts a {@link YarnClient} against the given
 * configuration, and obtains a new application id and submission context
 * from the ResourceManager.
 *
 * @param yarnConfiguration cluster configuration used to init the YARN client.
 * @param fileNames supplies driver stdout/stderr file names and the driver
 *                  configuration path.
 * @param classpath classpath provider stored for later use.
 * @param yarnProxyUser proxy user stored for later use.
 * @param tokenProvider security token provider stored for later use.
 * @param isUnmanaged whether the AM runs unmanaged (set on the context).
 * @param commandPrefixList prefix commands stored for later use.
 * @throws IOException, YarnException propagated from the YARN client calls.
 */
public YarnSubmissionHelper(final YarnConfiguration yarnConfiguration,
                            final REEFFileNames fileNames,
                            final ClasspathProvider classpath,
                            final YarnProxyUser yarnProxyUser,
                            final SecurityTokenProvider tokenProvider,
                            final boolean isUnmanaged,
                            final List<String> commandPrefixList) throws IOException, YarnException {

  this.classpath = classpath;
  this.yarnProxyUser = yarnProxyUser;
  this.isUnmanaged = isUnmanaged;

  // <LOG_DIR>/driver.stdout — the token is expanded by YARN, not here.
  this.driverStdoutFilePath =
      ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/" + fileNames.getDriverStdoutFileName();

  this.driverStderrFilePath =
      ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/" + fileNames.getDriverStderrFileName();

  // Side effect: starts the client's services before any RM interaction.
  LOG.log(Level.FINE, "Initializing YARN Client");
  this.yarnClient = YarnClient.createYarnClient();
  this.yarnClient.init(yarnConfiguration);
  this.yarnClient.start();
  LOG.log(Level.FINE, "Initialized YARN Client");

  // RM round-trip: allocates the application id used for submission.
  LOG.log(Level.FINE, "Requesting Application ID from YARN.");
  final YarnClientApplication yarnClientApplication = this.yarnClient.createApplication();
  this.applicationResponse = yarnClientApplication.getNewApplicationResponse();
  this.applicationSubmissionContext = yarnClientApplication.getApplicationSubmissionContext();
  this.applicationSubmissionContext.setUnmanagedAM(isUnmanaged);
  this.applicationId = this.applicationSubmissionContext.getApplicationId();
  this.tokenProvider = tokenProvider;
  this.commandPrefixList = commandPrefixList;
  this.configurationFilePaths = Collections.singletonList(fileNames.getDriverConfigurationPath());
  LOG.log(Level.INFO, "YARN Application ID: {0}", this.applicationId);
}
 
Example 6
Source File: AbstractYarnClusterDescriptor.java    From Flink-CEPplus with Apache License 2.0 4 votes vote down vote up
/**
 * Creates the ContainerLaunchContext that starts the Flink ApplicationMaster
 * (JobManager) JVM. The start command is rendered from the configurable
 * YARN_CONTAINER_START_COMMAND_TEMPLATE with placeholders for java binary,
 * heap, JVM options, logging properties, main class, args and redirects.
 *
 * @param yarnClusterEntrypoint fully-qualified main class of the AM.
 * @param hasLogback whether a logback config file is shipped (adds -Dlogback...).
 * @param hasLog4j whether a log4j config file is shipped (adds -Dlog4j...).
 * @param hasKrb5 whether a krb5.conf local resource exists (secure test runs).
 * @param jobManagerMemoryMb container memory used to size the JVM heap.
 * @return launch context with the rendered AM start command set.
 */
protected ContainerLaunchContext setupApplicationMasterContainer(
		String yarnClusterEntrypoint,
		boolean hasLogback,
		boolean hasLog4j,
		boolean hasKrb5,
		int jobManagerMemoryMb) {
	// ------------------ Prepare Application Master Container  ------------------------------

	// respect custom JVM options in the YAML file
	String javaOpts = flinkConfiguration.getString(CoreOptions.FLINK_JVM_OPTIONS);
	if (flinkConfiguration.getString(CoreOptions.FLINK_JM_JVM_OPTIONS).length() > 0) {
		javaOpts += " " + flinkConfiguration.getString(CoreOptions.FLINK_JM_JVM_OPTIONS);
	}
	//applicable only for YarnMiniCluster secure test run
	//krb5.conf file will be available as local resource in JM/TM container
	if (hasKrb5) {
		javaOpts += " -Djava.security.krb5.conf=krb5.conf";
	}

	// Set up the container launch context for the application master
	ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

	final  Map<String, String> startCommandValues = new HashMap<>();
	startCommandValues.put("java", "$JAVA_HOME/bin/java");

	// Heap is derived from the container size minus cutoff (see calculateHeapSize).
	int heapSize = Utils.calculateHeapSize(jobManagerMemoryMb, flinkConfiguration);
	String jvmHeapMem = String.format("-Xms%sm -Xmx%sm", heapSize, heapSize);
	startCommandValues.put("jvmmem", jvmHeapMem);

	startCommandValues.put("jvmopts", javaOpts);
	String logging = "";

	// Log file goes to the YARN container log dir; <LOG_DIR> is expanded by YARN.
	if (hasLogback || hasLog4j) {
		logging = "-Dlog.file=\"" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.log\"";

		if (hasLogback) {
			logging += " -Dlogback.configurationFile=file:" + CONFIG_FILE_LOGBACK_NAME;
		}

		if (hasLog4j) {
			logging += " -Dlog4j.configuration=file:" + CONFIG_FILE_LOG4J_NAME;
		}
	}

	startCommandValues.put("logging", logging);
	startCommandValues.put("class", yarnClusterEntrypoint);
	// stdout/stderr of the AM process are captured next to the log file.
	startCommandValues.put("redirects",
		"1> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.out " +
		"2> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.err");
	startCommandValues.put("args", "");

	final String commandTemplate = flinkConfiguration
		.getString(ConfigConstants.YARN_CONTAINER_START_COMMAND_TEMPLATE,
			ConfigConstants.DEFAULT_YARN_CONTAINER_START_COMMAND_TEMPLATE);
	final String amCommand =
		BootstrapTools.getStartCommand(commandTemplate, startCommandValues);

	amContainer.setCommands(Collections.singletonList(amCommand));

	LOG.debug("Application Master start command: " + amCommand);

	return amContainer;
}
 
Example 7
Source File: AbstractYarnClusterDescriptor.java    From flink with Apache License 2.0 4 votes vote down vote up
/**
 * Builds the launch context for the Flink ApplicationMaster (JobManager)
 * container. Placeholder values (java, jvmmem, jvmopts, logging, class,
 * redirects, args) are substituted into the configured start-command
 * template to produce the container command.
 *
 * @param yarnClusterEntrypoint fully-qualified main class of the AM.
 * @param hasLogback whether a logback config file is shipped.
 * @param hasLog4j whether a log4j config file is shipped.
 * @param hasKrb5 whether a krb5.conf local resource exists (secure test runs).
 * @param jobManagerMemoryMb container memory used to size the JVM heap.
 * @return launch context with the rendered AM start command set.
 */
protected ContainerLaunchContext setupApplicationMasterContainer(
		String yarnClusterEntrypoint,
		boolean hasLogback,
		boolean hasLog4j,
		boolean hasKrb5,
		int jobManagerMemoryMb) {
	// Collect JVM options: base options, then JM-specific ones from the YAML config.
	String javaOpts = flinkConfiguration.getString(CoreOptions.FLINK_JVM_OPTIONS);
	if (flinkConfiguration.getString(CoreOptions.FLINK_JM_JVM_OPTIONS).length() > 0) {
		javaOpts += " " + flinkConfiguration.getString(CoreOptions.FLINK_JM_JVM_OPTIONS);
	}
	// Only for secure YarnMiniCluster test runs; krb5.conf is a local resource.
	if (hasKrb5) {
		javaOpts += " -Djava.security.krb5.conf=krb5.conf";
	}

	final int heapSize = Utils.calculateHeapSize(jobManagerMemoryMb, flinkConfiguration);

	final Map<String, String> startCommandValues = new HashMap<>();
	startCommandValues.put("java", "$JAVA_HOME/bin/java");
	startCommandValues.put("jvmmem", String.format("-Xms%sm -Xmx%sm", heapSize, heapSize));
	startCommandValues.put("jvmopts", javaOpts);

	// Logging properties: point log.file into the YARN container log dir
	// (<LOG_DIR> is expanded by YARN) plus the shipped framework config.
	String logging = "";
	if (hasLogback || hasLog4j) {
		final StringBuilder loggingOpts = new StringBuilder()
			.append("-Dlog.file=\"")
			.append(ApplicationConstants.LOG_DIR_EXPANSION_VAR)
			.append("/jobmanager.log\"");
		if (hasLogback) {
			loggingOpts.append(" -Dlogback.configurationFile=file:").append(CONFIG_FILE_LOGBACK_NAME);
		}
		if (hasLog4j) {
			loggingOpts.append(" -Dlog4j.configuration=file:").append(CONFIG_FILE_LOG4J_NAME);
		}
		logging = loggingOpts.toString();
	}
	startCommandValues.put("logging", logging);

	startCommandValues.put("class", yarnClusterEntrypoint);
	startCommandValues.put("redirects",
		"1> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.out " +
		"2> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.err");
	startCommandValues.put("args", "");

	final String commandTemplate = flinkConfiguration
		.getString(ConfigConstants.YARN_CONTAINER_START_COMMAND_TEMPLATE,
			ConfigConstants.DEFAULT_YARN_CONTAINER_START_COMMAND_TEMPLATE);
	final String amCommand =
		BootstrapTools.getStartCommand(commandTemplate, startCommandValues);

	final ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
	amContainer.setCommands(Collections.singletonList(amCommand));

	LOG.debug("Application Master start command: " + amCommand);

	return amContainer;
}
 
Example 8
Source File: MapReduceChildJVM.java    From hadoop with Apache License 2.0 4 votes vote down vote up
// Returns "<LOG_DIR>/<filter>"; the <LOG_DIR> token is expanded by YARN to
// the real container log directory when the launch command is materialized.
private static String getTaskLogFile(LogName filter) {
  final String logDirToken = ApplicationConstants.LOG_DIR_EXPANSION_VAR;
  return logDirToken + Path.SEPARATOR + filter.toString();
}
 
Example 9
Source File: MapReduceChildJVM.java    From big-c with Apache License 2.0 4 votes vote down vote up
// Builds the path "<LOG_DIR>/<filter>" for the given log stream; YARN
// expands the <LOG_DIR> token at container launch time.
private static String getTaskLogFile(LogName filter) {
  return ApplicationConstants.LOG_DIR_EXPANSION_VAR + Path.SEPARATOR + 
      filter.toString();
}
 
Example 10
Source File: LaunchContainer.java    From metron with Apache License 2.0 4 votes vote down vote up
/**
 * Launches the model-serving container: localizes the model script and app
 * jar as YARN local resources, builds the container environment and java
 * command (with stdout/stderr redirected into the YARN log dir), and asks
 * the NodeManager to start the container asynchronously.
 */
@Override
public void run() {
  LOG.info("Setting up container launch container for containerid="
          + container.getId());
  // Set the local resources
  Map<String, LocalResource> localResources = new HashMap<>();
  LOG.info("Local Directory Contents");
  // Fix: File.listFiles() returns null on I/O error or when "." is not a
  // directory; iterating over null would throw an NPE and kill this thread.
  File[] localFiles = new File(".").listFiles();
  if (localFiles != null) {
    for (File f : localFiles) {
      LOG.info("  " + f.length() + " - " + f.getName());
    }
  }
  LOG.info("Localizing " + request.getPath());
  String modelScript = localizeResources(localResources, new Path(request.getPath()), appJarLocation);
  for (Map.Entry<String, LocalResource> entry : localResources.entrySet()) {
    LOG.info(entry.getKey() + " localized: " + entry.getValue().getResource());
  }

  // Set the necessary command to execute on the allocated container
  Map<String, String> env = new HashMap<>();
  // Classes/jars are available as local resources in the working directory,
  // so append "./*". Keep $CLASSPATH first so hadoop's entries survive, and
  // append this JVM's classpath so our classes resolve remotely too.
  // StringBuilder instead of StringBuffer: no cross-thread sharing here.
  StringBuilder classPathEnv = new StringBuilder("$CLASSPATH:./*:");
  classPathEnv.append(System.getProperty("java.class.path"));
  env.put("CLASSPATH", classPathEnv.toString());

  // Construct the command to be executed on the launched container
  String command = ApplicationConstants.Environment.JAVA_HOME.$$() + "/bin/java "
          + Runner.class.getName() + " "
          + RunnerOptions.toArgs(RunnerOptions.CONTAINER_ID.of(container.getId().getContainerId() + "")
          ,RunnerOptions.ZK_QUORUM.of(zkQuorum)
          ,RunnerOptions.ZK_ROOT.of(zkRoot)
          ,RunnerOptions.SCRIPT.of(modelScript)
          ,RunnerOptions.NAME.of(request.getName())
          ,RunnerOptions.HOSTNAME.of(containerHostname())
          ,RunnerOptions.VERSION.of(request.getVersion())
  )
          // <LOG_DIR> is expanded by YARN to the container log directory.
          + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
          + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr";
  List<String> commands = new ArrayList<>();
  LOG.info("Executing container command: " + command);
  commands.add(command);

  // Note for tokens: populated mainly so NodeManagers can download files
  // from the distributed file-system on our behalf; also needed if the
  // container itself runs e.g. "hadoop dfs" commands.
  ContainerLaunchContext ctx = ContainerLaunchContext.newInstance(
          localResources, env, commands, null, allTokens.duplicate(), null);
  //TODO: Add container to listener so it can be removed
  nmClientAsync.startContainerAsync(container, ctx);
}
 
Example 11
Source File: TezRuntimeChildJVM.java    From incubator-tez with Apache License 2.0 4 votes vote down vote up
// Log-file path for the given stream, rooted at the <LOG_DIR> token that
// YARN substitutes with the container's log directory at launch.
private static String getTaskLogFile(LogName filter) {
  final String logDir = ApplicationConstants.LOG_DIR_EXPANSION_VAR;
  return logDir + Path.SEPARATOR + filter.toString();
}
 
Example 12
Source File: TezRuntimeChildJVM.java    From tez with Apache License 2.0 4 votes vote down vote up
// Builds the path "<LOG_DIR>/<filter>" for the given log stream; YARN
// expands the <LOG_DIR> token at container launch time.
private static String getTaskLogFile(LogName filter) {
  return ApplicationConstants.LOG_DIR_EXPANSION_VAR + Path.SEPARATOR +
      filter.toString();
}