Java Code Examples for org.apache.hadoop.yarn.api.records.ContainerLaunchContext#setCommands()

The following examples show how to use org.apache.hadoop.yarn.api.records.ContainerLaunchContext#setCommands(). Each example is taken from an open source project; the source file, project, and license are noted above the code.
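Before the project examples, here is a minimal sketch of the pattern they all share: create a ContainerLaunchContext with Records.newRecord(), hand the shell command list to setCommands() (typically with stdout/stderr redirected into the container log directory), and attach local resources and environment variables before the context is submitted to YARN. The class name SetCommandsSketch and the command parameter are illustrative assumptions rather than code from any of the projects below.

import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.util.Records;

public class SetCommandsSketch {

  /**
   * Builds a ContainerLaunchContext that runs the given shell command and
   * redirects its output into the YARN container log directory.
   */
  public static ContainerLaunchContext newLaunchContext(String command) {
    ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);

    // The commands are plain shell strings executed inside the container.
    List<String> commands = Collections.singletonList(
        command
            + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
            + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr");
    ctx.setCommands(commands);

    // Most callers also localize resources (jars, archives, config files)
    // and set environment variables before submitting the context.
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    ctx.setLocalResources(localResources);

    Map<String, String> env = new HashMap<String, String>();
    ctx.setEnvironment(env);

    return ctx;
  }
}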
Example 1
Source File: YarnTypes.java    From reef with Apache License 2.0
/**
 * Gets a LaunchContext and sets the environment variable
 * {@link YarnUtilities#REEF_YARN_APPLICATION_ID_ENV_VAR} for REEF Evaluators.
 * @return a ContainerLaunchContext with the given commands, LocalResources and environment map.
 */
public static ContainerLaunchContext getContainerLaunchContext(
    final List<String> commands,
    final Map<String, LocalResource> localResources,
    final byte[] securityTokenBuffer,
    final Map<String, String> envMap,
    final ApplicationId applicationId) {
  final ContainerLaunchContext context = Records.newRecord(ContainerLaunchContext.class);
  context.setLocalResources(localResources);
  context.setCommands(commands);
  if (applicationId != null) {
    envMap.put(YarnUtilities.REEF_YARN_APPLICATION_ID_ENV_VAR, applicationId.toString());
  }

  for (final Map.Entry<String, String> entry : envMap.entrySet()) {
    LOG.log(Level.FINE, "Key : {0}, Value : {1}", new Object[] {entry.getKey(), entry.getValue()});
  }
  context.setEnvironment(envMap);
  if (securityTokenBuffer != null) {
    context.setTokens(ByteBuffer.wrap(securityTokenBuffer));
    LOG.log(Level.INFO, "Added tokens to container launch context");
  }
  return context;
}
 
Example 2
Source File: YarnManager.java    From Scribengin with GNU Affero General Public License v3.0
public void startContainer(Container container, VMConfig appVMConfig) throws YarnException, IOException {
  String command = appVMConfig.buildCommand();
  ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
  if(vmConfig.getVmResources().size() > 0) {
    appVMConfig.getVmResources().putAll(vmConfig.getVmResources());
    VMResources vmResources = new VMResources(conf, appVMConfig);
    ctx.setLocalResources(vmResources);
  }
  Map<String, String> appEnv = new HashMap<String, String>();
  boolean miniClusterEnv = vmConfig.getEnvironment() == VMConfig.Environment.YARN_MINICLUSTER;
  setupAppClasspath(miniClusterEnv , conf, appEnv);
  ctx.setEnvironment(appEnv);
  
  StringBuilder sb = new StringBuilder();
  List<String> commands = Collections.singletonList(
      sb.append(command).
      append(" 1> ").append(ApplicationConstants.LOG_DIR_EXPANSION_VAR).append("/stdout").
      append(" 2> ").append(ApplicationConstants.LOG_DIR_EXPANSION_VAR).append("/stderr").toString()
  );
  ctx.setCommands(commands);
  nmClient.startContainer(container, ctx);
  //TODO: update vm descriptor status
}
 
Example 3
Source File: BuilderUtils.java    From hadoop with Apache License 2.0
public static ContainerLaunchContext newContainerLaunchContext(
    Map<String, LocalResource> localResources,
    Map<String, String> environment, List<String> commands,
    Map<String, ByteBuffer> serviceData, ByteBuffer tokens,
    Map<ApplicationAccessType, String> acls) {
  ContainerLaunchContext container = recordFactory
      .newRecordInstance(ContainerLaunchContext.class);
  container.setLocalResources(localResources);
  container.setEnvironment(environment);
  container.setCommands(commands);
  container.setServiceData(serviceData);
  container.setTokens(tokens);
  container.setApplicationACLs(acls);
  return container;
}
 
Example 4
Source File: AppClient.java    From Scribengin with GNU Affero General Public License v3.0
public void run(VMConfig vmConfig, Configuration conf) throws Exception {
  try {
    vmConfig.overrideHadoopConfiguration(conf);
    System.out.println("Create YarnClient") ;
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    System.out.println("Create YarnClientApplication via YarnClient") ;
    YarnClientApplication app = yarnClient.createApplication();
    String appId =  app.getApplicationSubmissionContext().getApplicationId().toString() ;
    System.out.println("Application Id = " + appId) ;
    System.out.println("Set up the container launch context for the application master") ;
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
    StringBuilder sb = new StringBuilder();
    List<String> commands = Collections.singletonList(
        sb.append(vmConfig.buildCommand()).
        append(" 1> ").append(ApplicationConstants.LOG_DIR_EXPANSION_VAR).append("/stdout").
        append(" 2> ").append(ApplicationConstants.LOG_DIR_EXPANSION_VAR).append("/stderr")
        .toString()
    );
    amContainer.setCommands(commands) ;

    System.out.println("Setup the app classpath and resources") ;
    if(vmConfig.getVmResources().size() > 0) {
      amContainer.setLocalResources(new VMResources(conf, vmConfig));
    }
    
    System.out.println("Setup the classpath for ApplicationMaster, environment = " + vmConfig.getEnvironment()) ;
    Map<String, String> appMasterEnv = new HashMap<String, String>();
    boolean jvmEnv = vmConfig.getEnvironment() != VMConfig.Environment.YARN;
    Util.setupAppMasterEnv(jvmEnv , conf, appMasterEnv);
    amContainer.setEnvironment(appMasterEnv);

    System.out.println("Set up resource type requirements for ApplicationMaster") ;
    Resource resource = Records.newRecord(Resource.class);
    resource.setMemory(256);
    resource.setVirtualCores(1);

    System.out.println("Finally, set-up ApplicationSubmissionContext for the application");
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    appContext.setApplicationName(vmConfig.getName()); // application name
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(resource);
    appContext.setQueue("default"); // queue 

    // Submit application
    ApplicationId applicationId = appContext.getApplicationId();
    System.out.println("Submitting application " + applicationId);
    yarnClient.submitApplication(appContext);
  } catch(Exception ex) {
    ex.printStackTrace(); 
    throw ex ;
  }
}
 
Example 5
Source File: YarnRemoteInterpreterProcess.java    From zeppelin with Apache License 2.0
private ContainerLaunchContext setUpAMLaunchContext() throws IOException {
  ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

  // Set the resources to localize
  this.stagingDir = new Path(fs.getHomeDirectory() + "/.zeppelinStaging", appId.toString());
  Map<String, LocalResource> localResources = new HashMap<>();

  File interpreterZip = createInterpreterZip();
  Path srcPath = localFs.makeQualified(new Path(interpreterZip.toURI()));
  Path destPath = copyFileToRemote(stagingDir, srcPath, (short) 1);
  addResource(fs, destPath, localResources, LocalResourceType.ARCHIVE, "zeppelin");
  FileUtils.forceDelete(interpreterZip);

  // TODO(zjffdu) Should not add interpreter specific logic here.
  if (launchContext.getInterpreterSettingGroup().equals("flink")) {
    File flinkZip = createFlinkZip();
    srcPath = localFs.makeQualified(new Path(flinkZip.toURI()));
    destPath = copyFileToRemote(stagingDir, srcPath, (short) 1);
    addResource(fs, destPath, localResources, LocalResourceType.ARCHIVE, "flink");
    FileUtils.forceDelete(flinkZip);
  }
  amContainer.setLocalResources(localResources);

  // Setup the command to run the AM
  List<String> vargs = new ArrayList<>();
  vargs.add(ApplicationConstants.Environment.PWD.$() + "/zeppelin/bin/interpreter.sh");
  vargs.add("-d");
  vargs.add(ApplicationConstants.Environment.PWD.$() + "/zeppelin/interpreter/"
          + launchContext.getInterpreterSettingGroup());
  vargs.add("-c");
  vargs.add(launchContext.getIntpEventServerHost());
  vargs.add("-p");
  vargs.add(launchContext.getIntpEventServerPort() + "");
  vargs.add("-r");
  vargs.add(zConf.getInterpreterPortRange() + "");
  vargs.add("-i");
  vargs.add(launchContext.getInterpreterGroupId());
  vargs.add("-l");
  vargs.add(ApplicationConstants.Environment.PWD.$() + "/zeppelin/" +
          ZeppelinConfiguration.ConfVars.ZEPPELIN_INTERPRETER_LOCALREPO.getStringValue()
          + "/" + launchContext.getInterpreterSettingName());
  vargs.add("-g");
  vargs.add(launchContext.getInterpreterSettingName());

  vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR +
          File.separator + ApplicationConstants.STDOUT);
  vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR +
          File.separator + ApplicationConstants.STDERR);

  // Setup ContainerLaunchContext for AM container
  amContainer.setCommands(vargs);

  // pass the interpreter ENV to yarn container and also add hadoop jars to CLASSPATH
  populateHadoopClasspath(this.envs);
  if (this.launchContext.getInterpreterSettingGroup().equals("flink")) {
    // Update the Flink related env variables because they are different in the yarn container
    this.envs.put("FLINK_HOME", ApplicationConstants.Environment.PWD.$() + "/flink");
    this.envs.put("FLINK_CONF_DIR", ApplicationConstants.Environment.PWD.$() + "/flink/conf");
    this.envs.put("FLINK_LIB_DIR", ApplicationConstants.Environment.PWD.$() + "/flink/lib");
    this.envs.put("FLINK_PLUGINS_DIR", ApplicationConstants.Environment.PWD.$() + "/flink/plugins");
  }
  // set -Xmx
  int memory = Integer.parseInt(
          properties.getProperty("zeppelin.interpreter.yarn.resource.memory", "1024"));
  this.envs.put("ZEPPELIN_INTP_MEM", "-Xmx" + memory + "m");
  amContainer.setEnvironment(this.envs);

  return amContainer;
}
 
Example 6
Source File: Client.java    From Scribengin with GNU Affero General Public License v3.0
public ApplicationId run() throws IOException, YarnException {
  LOG.info("calling run.");
  yarnClient.start();

  // YarnClientApplication is used to populate:
  //   1. GetNewApplication Response
  //   2. ApplicationSubmissionContext
  YarnClientApplication app = yarnClient.createApplication();
  
  // GetNewApplicationResponse can be used to determine the resources available.
  GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
  
  ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
  this.appId = appContext.getApplicationId();
  appContext.setApplicationName(this.appname);

  // Set up the container launch context for AM.
  ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

  LocalResource appMasterJar;
  FileSystem fs = FileSystem.get(this.conf);
  
  amContainer.setLocalResources(
      Collections.singletonMap("master.jar",
          Util.newYarnAppResource(fs, new Path(this.hdfsJar), LocalResourceType.FILE,
              LocalResourceVisibility.APPLICATION)));
  // Set up CLASSPATH for ApplicationMaster
  Map<String, String> appMasterEnv = new HashMap<String, String>();
  setupAppMasterEnv(appMasterEnv);
  amContainer.setEnvironment(appMasterEnv);

  // Set up resource requirements for ApplicationMaster
  Resource capability = Records.newRecord(Resource.class);
  capability.setMemory(this.applicationMasterMem);
  capability.setVirtualCores(1); //TODO: Can we really setVirtualCores ?
  amContainer.setCommands(Collections.singletonList(this.getAppMasterCommand()));

  // put everything together.
  appContext.setAMContainerSpec(amContainer);
  appContext.setResource(capability);
  appContext.setQueue("default"); // TODO: Need to investigate more on queuing an scheduling.

  // Submit application
  yarnClient.submitApplication(appContext);
  LOG.info("APPID: "+this.appId.toString());
  return this.appId;
  //return this.monitorApplication(appId);
}
 
Example 7
Source File: SolrMaster.java    From yarn-proto with Apache License 2.0
public synchronized void onContainersAllocated(List<Container> containers) {
  String zkHost = cli.getOptionValue("zkHost");
  String solrArchive = cli.getOptionValue("solr");
  String hdfsHome = cli.getOptionValue("hdfs_home");

  Path pathToRes = new Path(solrArchive);
  FileStatus jarStat = null;
  try {
    jarStat = FileSystem.get(conf).getFileStatus(pathToRes);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  LocalResource solrPackageRes = Records.newRecord(LocalResource.class);
  solrPackageRes.setResource(ConverterUtils.getYarnUrlFromPath(pathToRes));
  solrPackageRes.setSize(jarStat.getLen());
  solrPackageRes.setTimestamp(jarStat.getModificationTime());
  solrPackageRes.setType(LocalResourceType.ARCHIVE);
  solrPackageRes.setVisibility(LocalResourceVisibility.APPLICATION);

  Map<String, LocalResource> localResourcesMap = new HashMap<String, LocalResource>();
  localResourcesMap.put("solr", solrPackageRes);

  String acceptShutdownFrom = "-Dyarn.acceptShutdownFrom=" + inetAddresses;

  log.info("Using " + acceptShutdownFrom);

  String dasha = "";
  if (hdfsHome != null) {
    dasha += " -a '-Dsolr.hdfs.home=" + hdfsHome + " -Dsolr.directoryFactory=HdfsDirectoryFactory -Dsolr.lock.type=hdfs %s'";
  } else {
    dasha += "-a '%s'";
  }

  dasha = String.format(dasha, acceptShutdownFrom);

  String command = "/bin/bash ./solr/bin/solr -f -c -p %d -k %s -m " + memory + "m -z " + zkHost + dasha + " -V";
  for (Container container : containers) {
    ContainerId containerId = container.getId();

    // increment the port if running on the same host
    int jettyPort = nextPort++;
    String jettyHost = container.getNodeId().getHost();
    Set<Integer> portsOnHost = solrHosts.get(jettyHost);
    if (portsOnHost == null) {
      portsOnHost = new HashSet<Integer>();
      solrHosts.put(jettyHost, portsOnHost);
    }
    portsOnHost.add(jettyPort);
    log.info("Added port " + jettyPort + " to host: " + jettyHost);

    try {
      // Launch container by creating a ContainerLaunchContext
      ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
      ctx.setLocalResources(localResourcesMap);

      String cmd = String.format(command, jettyPort, randomStopKey);
      log.info("\n\nRunning command: " + cmd);

      ctx.setCommands(Collections.singletonList(
              cmd + " >" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout 2>&1"
      ));
      log.info("Launching container " + containerId);
      nmClient.startContainer(container, ctx);
    } catch (Exception exc) {
      log.error("Failed to start container to run Solr on port " + jettyPort + " due to: " + exc, exc);
    }
  }
}
 
Example 8
Source File: GobblinYarnAppLauncher.java    From incubator-gobblin with Apache License 2.0
/**
 * Setup and submit the Gobblin Yarn application.
 *
 * @throws IOException if there's anything wrong setting up and submitting the Yarn application
 * @throws YarnException if there's anything wrong setting up and submitting the Yarn application
 */
@VisibleForTesting
ApplicationId setupAndSubmitApplication() throws IOException, YarnException {
  YarnClientApplication gobblinYarnApp = this.yarnClient.createApplication();
  ApplicationSubmissionContext appSubmissionContext = gobblinYarnApp.getApplicationSubmissionContext();
  appSubmissionContext.setApplicationType(GOBBLIN_YARN_APPLICATION_TYPE);
  appSubmissionContext.setMaxAppAttempts(ConfigUtils.getInt(config, GobblinYarnConfigurationKeys.APP_MASTER_MAX_ATTEMPTS_KEY, GobblinYarnConfigurationKeys.DEFAULT_APP_MASTER_MAX_ATTEMPTS_KEY));
  ApplicationId applicationId = appSubmissionContext.getApplicationId();

  GetNewApplicationResponse newApplicationResponse = gobblinYarnApp.getNewApplicationResponse();
  // Set up resource type requirements for ApplicationMaster
  Resource resource = prepareContainerResource(newApplicationResponse);

  // Add lib jars, and jars and files that the ApplicationMaster needs as LocalResources
  Map<String, LocalResource> appMasterLocalResources = addAppMasterLocalResources(applicationId);

  ContainerLaunchContext amContainerLaunchContext = Records.newRecord(ContainerLaunchContext.class);
  amContainerLaunchContext.setLocalResources(appMasterLocalResources);
  amContainerLaunchContext.setEnvironment(YarnHelixUtils.getEnvironmentVariables(this.yarnConfiguration));
  amContainerLaunchContext.setCommands(Lists.newArrayList(buildApplicationMasterCommand(applicationId.toString(), resource.getMemory())));

  Map<ApplicationAccessType, String> acls = new HashMap<>(1);
  acls.put(ApplicationAccessType.VIEW_APP, this.appViewAcl);
  amContainerLaunchContext.setApplicationACLs(acls);

  if (UserGroupInformation.isSecurityEnabled()) {
    setupSecurityTokens(amContainerLaunchContext);
  }

  // Setup the application submission context
  appSubmissionContext.setApplicationName(this.applicationName);
  appSubmissionContext.setResource(resource);
  appSubmissionContext.setQueue(this.appQueueName);
  appSubmissionContext.setPriority(Priority.newInstance(0));
  appSubmissionContext.setAMContainerSpec(amContainerLaunchContext);
  // Also set up container local resources by copying local jars and files the container needs to HDFS
  addContainerLocalResources(applicationId);

  // Submit the application
  LOGGER.info("Submitting application " + sanitizeApplicationId(applicationId.toString()));
  this.yarnClient.submitApplication(appSubmissionContext);

  LOGGER.info("Application successfully submitted and accepted");
  ApplicationReport applicationReport = this.yarnClient.getApplicationReport(applicationId);
  LOGGER.info("Application Name: " + applicationReport.getName());
  LOGGER.info("Application Tracking URL: " + applicationReport.getTrackingUrl());
  LOGGER.info("Application User: " + applicationReport.getUser() + " Queue: " + applicationReport.getQueue());

  return applicationId;
}
 
Example 9
Source File: YarnClusterDescriptor.java    From flink with Apache License 2.0
ContainerLaunchContext setupApplicationMasterContainer(
		String yarnClusterEntrypoint,
		boolean hasKrb5,
		JobManagerProcessSpec processSpec) {
	// ------------------ Prepare Application Master Container  ------------------------------

	// respect custom JVM options in the YAML file
	String javaOpts = flinkConfiguration.getString(CoreOptions.FLINK_JVM_OPTIONS);
	if (flinkConfiguration.getString(CoreOptions.FLINK_JM_JVM_OPTIONS).length() > 0) {
		javaOpts += " " + flinkConfiguration.getString(CoreOptions.FLINK_JM_JVM_OPTIONS);
	}
	//applicable only for YarnMiniCluster secure test run
	//krb5.conf file will be available as local resource in JM/TM container
	if (hasKrb5) {
		javaOpts += " -Djava.security.krb5.conf=krb5.conf";
	}

	// Set up the container launch context for the application master
	ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

	final  Map<String, String> startCommandValues = new HashMap<>();
	startCommandValues.put("java", "$JAVA_HOME/bin/java");

	String jvmHeapMem = JobManagerProcessUtils.generateJvmParametersStr(processSpec, flinkConfiguration);
	startCommandValues.put("jvmmem", jvmHeapMem);

	startCommandValues.put("jvmopts", javaOpts);
	startCommandValues.put("logging", YarnLogConfigUtil.getLoggingYarnCommand(flinkConfiguration));

	startCommandValues.put("class", yarnClusterEntrypoint);
	startCommandValues.put("redirects",
		"1> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.out " +
		"2> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.err");
	startCommandValues.put("args", "");

	final String commandTemplate = flinkConfiguration
			.getString(ConfigConstants.YARN_CONTAINER_START_COMMAND_TEMPLATE,
					ConfigConstants.DEFAULT_YARN_CONTAINER_START_COMMAND_TEMPLATE);
	final String amCommand =
		BootstrapTools.getStartCommand(commandTemplate, startCommandValues);

	amContainer.setCommands(Collections.singletonList(amCommand));

	LOG.debug("Application Master start command: " + amCommand);

	return amContainer;
}
 
Example 10
Source File: Client.java    From hadoop-mini-clusters with Apache License 2.0
public void run(String[] args) throws Exception {
    final String command = args[0];
    final int n = Integer.valueOf(args[1]);
    final Path jarPath = new Path(args[2]);
    final String resourceManagerAddress = args[3];
    final String resourceManagerHostname = args[4];
    final String resourceManagerSchedulerAddress = args[5];
    final String resourceManagerResourceTrackerAddress = args[6];

    // Create yarnClient
    YarnConfiguration conf = new YarnConfiguration();
    conf.set("yarn.resourcemanager.address", resourceManagerAddress);
    conf.set("yarn.resourcemanager.hostname", resourceManagerHostname);
    conf.set("yarn.resourcemanager.scheduler.address", resourceManagerSchedulerAddress);
    conf.set("yarn.resourcemanager.resource-tracker.address", resourceManagerResourceTrackerAddress);
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Create application via yarnClient
    YarnClientApplication app = yarnClient.createApplication();

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer =
            Records.newRecord(ContainerLaunchContext.class);
    amContainer.setCommands(
            Collections.singletonList(
                    "$JAVA_HOME/bin/java" +
                            " -Xmx256M" +
                            " com.hortonworks.simpleyarnapp.ApplicationMaster" +
                            " " + command +
                            " " + String.valueOf(n) +
                            " " + resourceManagerAddress +
                            " " + resourceManagerHostname +
                            " " + resourceManagerSchedulerAddress +
                            " " + resourceManagerResourceTrackerAddress +
                            " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout" +
                            " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"
            )
    );

    // Setup jar for ApplicationMaster
    LocalResource appMasterJar = Records.newRecord(LocalResource.class);
    setupAppMasterJar(jarPath, appMasterJar);
    amContainer.setLocalResources(
            Collections.singletonMap("simple-yarn-app-1.1.0.jar", appMasterJar));

    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = new HashMap<String, String>();
    setupAppMasterEnv(appMasterEnv);
    amContainer.setEnvironment(appMasterEnv);

    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(256);
    capability.setVirtualCores(1);

    // Finally, set-up ApplicationSubmissionContext for the application
    ApplicationSubmissionContext appContext =
            app.getApplicationSubmissionContext();
    appContext.setApplicationName("simple-yarn-app"); // application name
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    appContext.setQueue("default"); // queue

    // Submit application
    ApplicationId appId = appContext.getApplicationId();
    System.out.println("Submitting application " + appId);
    yarnClient.submitApplication(appContext);

    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    YarnApplicationState appState = appReport.getYarnApplicationState();
    while (appState != YarnApplicationState.FINISHED &&
            appState != YarnApplicationState.KILLED &&
            appState != YarnApplicationState.FAILED) {
        Thread.sleep(100);
        appReport = yarnClient.getApplicationReport(appId);
        appState = appReport.getYarnApplicationState();
    }

    System.out.println(
            "Application " + appId + " finished with" +
                    " state " + appState +
                    " at " + appReport.getFinishTime());

}
 
Example 11
Source File: ApplicationMaster.java    From ignite with Apache License 2.0
/** {@inheritDoc} */
@Override public synchronized void onContainersAllocated(List<Container> conts) {
    for (Container c : conts) {
        if (checkContainer(c)) {
            log.log(Level.INFO, "Container {0} allocated", c.getId());

            try {
                ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);

                if (UserGroupInformation.isSecurityEnabled())
                    // Set the tokens to the newly allocated container:
                    ctx.setTokens(allTokens.duplicate());

                Map<String, String> env = new HashMap<>(ctx.getEnvironment());

                Map<String, String> systemEnv = System.getenv();

                for (String key : systemEnv.keySet()) {
                    if (key.matches("^IGNITE_[_0-9A-Z]+$"))
                        env.put(key, systemEnv.get(key));
                }

                env.put("IGNITE_TCP_DISCOVERY_ADDRESSES", getAddress(c.getNodeId().getHost()));

                if (props.jvmOpts() != null && !props.jvmOpts().isEmpty())
                    env.put("JVM_OPTS", props.jvmOpts());

                ctx.setEnvironment(env);

                Map<String, LocalResource> resources = new HashMap<>();

                resources.put("ignite", IgniteYarnUtils.setupFile(ignitePath, fs, LocalResourceType.ARCHIVE));
                resources.put("ignite-config.xml", IgniteYarnUtils.setupFile(cfgPath, fs, LocalResourceType.FILE));

                if (props.licencePath() != null)
                    resources.put("gridgain-license.xml",
                        IgniteYarnUtils.setupFile(new Path(props.licencePath()), fs, LocalResourceType.FILE));

                if (props.userLibs() != null)
                    resources.put("libs", IgniteYarnUtils.setupFile(new Path(props.userLibs()), fs,
                        LocalResourceType.FILE));

                ctx.setLocalResources(resources);

                ctx.setCommands(
                    Collections.singletonList(
                        (props.licencePath() != null ? "cp gridgain-license.xml ./ignite/*/ || true && " : "")
                        + "cp -r ./libs/* ./ignite/*/libs/ || true && "
                        + "./ignite/*/bin/ignite.sh "
                        + "./ignite-config.xml"
                        + " -J-Xmx" + ((int)props.memoryPerNode()) + "m"
                        + " -J-Xms" + ((int)props.memoryPerNode()) + "m"
                        + IgniteYarnUtils.YARN_LOG_OUT
                    ));

                log.log(Level.INFO, "Launching container: {0}.", c.getId());

                nmClient.startContainer(c, ctx);

                containers.put(c.getId(),
                    new IgniteContainer(
                        c.getId(),
                        c.getNodeId(),
                        c.getResource().getVirtualCores(),
                        c.getResource().getMemory()));
            }
            catch (Exception ex) {
                log.log(Level.WARNING, "Error launching container " + c.getId(), ex);
            }
        }
        else {
            log.log(Level.WARNING, "Container {0} check failed. Releasing...", c.getId());

            rmClient.releaseAssignedContainer(c.getId());
        }
    }
}
 
Example 12
Source File: AbstractYarnClusterDescriptor.java    From Flink-CEPplus with Apache License 2.0
protected ContainerLaunchContext setupApplicationMasterContainer(
		String yarnClusterEntrypoint,
		boolean hasLogback,
		boolean hasLog4j,
		boolean hasKrb5,
		int jobManagerMemoryMb) {
	// ------------------ Prepare Application Master Container  ------------------------------

	// respect custom JVM options in the YAML file
	String javaOpts = flinkConfiguration.getString(CoreOptions.FLINK_JVM_OPTIONS);
	if (flinkConfiguration.getString(CoreOptions.FLINK_JM_JVM_OPTIONS).length() > 0) {
		javaOpts += " " + flinkConfiguration.getString(CoreOptions.FLINK_JM_JVM_OPTIONS);
	}
	//applicable only for YarnMiniCluster secure test run
	//krb5.conf file will be available as local resource in JM/TM container
	if (hasKrb5) {
		javaOpts += " -Djava.security.krb5.conf=krb5.conf";
	}

	// Set up the container launch context for the application master
	ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

	final  Map<String, String> startCommandValues = new HashMap<>();
	startCommandValues.put("java", "$JAVA_HOME/bin/java");

	int heapSize = Utils.calculateHeapSize(jobManagerMemoryMb, flinkConfiguration);
	String jvmHeapMem = String.format("-Xms%sm -Xmx%sm", heapSize, heapSize);
	startCommandValues.put("jvmmem", jvmHeapMem);

	startCommandValues.put("jvmopts", javaOpts);
	String logging = "";

	if (hasLogback || hasLog4j) {
		logging = "-Dlog.file=\"" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.log\"";

		if (hasLogback) {
			logging += " -Dlogback.configurationFile=file:" + CONFIG_FILE_LOGBACK_NAME;
		}

		if (hasLog4j) {
			logging += " -Dlog4j.configuration=file:" + CONFIG_FILE_LOG4J_NAME;
		}
	}

	startCommandValues.put("logging", logging);
	startCommandValues.put("class", yarnClusterEntrypoint);
	startCommandValues.put("redirects",
		"1> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.out " +
		"2> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.err");
	startCommandValues.put("args", "");

	final String commandTemplate = flinkConfiguration
		.getString(ConfigConstants.YARN_CONTAINER_START_COMMAND_TEMPLATE,
			ConfigConstants.DEFAULT_YARN_CONTAINER_START_COMMAND_TEMPLATE);
	final String amCommand =
		BootstrapTools.getStartCommand(commandTemplate, startCommandValues);

	amContainer.setCommands(Collections.singletonList(amCommand));

	LOG.debug("Application Master start command: " + amCommand);

	return amContainer;
}
 
Example 13
Source File: AMSimulator.java    From big-c with Apache License 2.0
private void submitApp()
        throws YarnException, InterruptedException, IOException {
  // ask for new application
  GetNewApplicationRequest newAppRequest =
      Records.newRecord(GetNewApplicationRequest.class);
  GetNewApplicationResponse newAppResponse = 
      rm.getClientRMService().getNewApplication(newAppRequest);
  appId = newAppResponse.getApplicationId();
  
  // submit the application
  final SubmitApplicationRequest subAppRequest =
      Records.newRecord(SubmitApplicationRequest.class);
  ApplicationSubmissionContext appSubContext = 
      Records.newRecord(ApplicationSubmissionContext.class);
  appSubContext.setApplicationId(appId);
  appSubContext.setMaxAppAttempts(1);
  appSubContext.setQueue(queue);
  appSubContext.setPriority(Priority.newInstance(0));
  ContainerLaunchContext conLauContext = 
      Records.newRecord(ContainerLaunchContext.class);
  conLauContext.setApplicationACLs(
      new HashMap<ApplicationAccessType, String>());
  conLauContext.setCommands(new ArrayList<String>());
  conLauContext.setEnvironment(new HashMap<String, String>());
  conLauContext.setLocalResources(new HashMap<String, LocalResource>());
  conLauContext.setServiceData(new HashMap<String, ByteBuffer>());
  appSubContext.setAMContainerSpec(conLauContext);
  appSubContext.setUnmanagedAM(true);
  subAppRequest.setApplicationSubmissionContext(appSubContext);
  UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
  ugi.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws YarnException {
      rm.getClientRMService().submitApplication(subAppRequest);
      return null;
    }
  });
  LOG.info(MessageFormat.format("Submit a new application {0}", appId));
  
  // waiting until application ACCEPTED
  RMApp app = rm.getRMContext().getRMApps().get(appId);
  while(app.getState() != RMAppState.ACCEPTED) {
    Thread.sleep(10);
  }

  // Waiting until the application attempt reaches LAUNCHED
  // "Unmanaged AM must register after AM attempt reaches LAUNCHED state"
  this.appAttemptId = rm.getRMContext().getRMApps().get(appId)
      .getCurrentAppAttempt().getAppAttemptId();
  RMAppAttempt rmAppAttempt = rm.getRMContext().getRMApps().get(appId)
      .getCurrentAppAttempt();
  while (rmAppAttempt.getAppAttemptState() != RMAppAttemptState.LAUNCHED) {
    Thread.sleep(10);
  }
}
 
Example 14
Source File: TestContainerManager.java    From big-c with Apache License 2.0
private void testContainerLaunchAndExit(int exitCode) throws IOException,
    InterruptedException, YarnException {

  File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
  PrintWriter fileWriter = new PrintWriter(scriptFile);
  File processStartFile =
      new File(tmpDir, "start_file.txt").getAbsoluteFile();

  // ////// Construct the Container-id
  ContainerId cId = createContainerId(0);

  if (Shell.WINDOWS) {
    fileWriter.println("@echo Hello World!> " + processStartFile);
    fileWriter.println("@echo " + cId + ">> " + processStartFile);
    if (exitCode != 0) {
      fileWriter.println("@exit " + exitCode);
    }
  } else {
    fileWriter.write("\numask 0"); // So that start file is readable by the test
    fileWriter.write("\necho Hello World! > " + processStartFile);
    fileWriter.write("\necho $$ >> " + processStartFile);
    // Have script throw an exit code at the end
    if (exitCode != 0) {
      fileWriter.write("\nexit " + exitCode);
    }
  }

  fileWriter.close();

  ContainerLaunchContext containerLaunchContext =
      recordFactory.newRecordInstance(ContainerLaunchContext.class);

  URL resource_alpha =
      ConverterUtils.getYarnUrlFromPath(localFS
          .makeQualified(new Path(scriptFile.getAbsolutePath())));
  LocalResource rsrc_alpha =
      recordFactory.newRecordInstance(LocalResource.class);
  rsrc_alpha.setResource(resource_alpha);
  rsrc_alpha.setSize(-1);
  rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
  rsrc_alpha.setType(LocalResourceType.FILE);
  rsrc_alpha.setTimestamp(scriptFile.lastModified());
  String destinationFile = "dest_file";
  Map<String, LocalResource> localResources =
      new HashMap<String, LocalResource>();
  localResources.put(destinationFile, rsrc_alpha);
  containerLaunchContext.setLocalResources(localResources);
  List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
  containerLaunchContext.setCommands(commands);

  StartContainerRequest scRequest =
      StartContainerRequest.newInstance(
          containerLaunchContext,
          createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(),
              user, context.getContainerTokenSecretManager()));
  List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
  list.add(scRequest);
  StartContainersRequest allRequests =
      StartContainersRequest.newInstance(list);
  containerManager.startContainers(allRequests);

  BaseContainerManagerTest.waitForContainerState(containerManager, cId,
      ContainerState.COMPLETE);

  List<ContainerId> containerIds = new ArrayList<ContainerId>();
  containerIds.add(cId);
  GetContainerStatusesRequest gcsRequest =
      GetContainerStatusesRequest.newInstance(containerIds);
  ContainerStatus containerStatus =
      containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);

  // Verify exit status matches exit state of script
  Assert.assertEquals(exitCode, containerStatus.getExitStatus());
}
 
Example 15
Source File: TestLogAggregationService.java    From big-c with Apache License 2.0
@Test
public void testLogAggregationForRealContainerLaunch() throws IOException,
    InterruptedException, YarnException {

  this.containerManager.start();


  File scriptFile = new File(tmpDir, "scriptFile.sh");
  PrintWriter fileWriter = new PrintWriter(scriptFile);
  fileWriter.write("\necho Hello World! Stdout! > "
      + new File(localLogDir, "stdout"));
  fileWriter.write("\necho Hello World! Stderr! > "
      + new File(localLogDir, "stderr"));
  fileWriter.write("\necho Hello World! Syslog! > "
      + new File(localLogDir, "syslog"));
  fileWriter.close();

  ContainerLaunchContext containerLaunchContext =
      recordFactory.newRecordInstance(ContainerLaunchContext.class);
  // ////// Construct the Container-id
  ApplicationId appId = ApplicationId.newInstance(0, 0);
  ApplicationAttemptId appAttemptId =
      BuilderUtils.newApplicationAttemptId(appId, 1);
  ContainerId cId = BuilderUtils.newContainerId(appAttemptId, 0);

  URL resource_alpha =
      ConverterUtils.getYarnUrlFromPath(localFS
          .makeQualified(new Path(scriptFile.getAbsolutePath())));
  LocalResource rsrc_alpha =
      recordFactory.newRecordInstance(LocalResource.class);
  rsrc_alpha.setResource(resource_alpha);
  rsrc_alpha.setSize(-1);
  rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
  rsrc_alpha.setType(LocalResourceType.FILE);
  rsrc_alpha.setTimestamp(scriptFile.lastModified());
  String destinationFile = "dest_file";
  Map<String, LocalResource> localResources = 
      new HashMap<String, LocalResource>();
  localResources.put(destinationFile, rsrc_alpha);
  containerLaunchContext.setLocalResources(localResources);
  List<String> commands = new ArrayList<String>();
  commands.add("/bin/bash");
  commands.add(scriptFile.getAbsolutePath());
  containerLaunchContext.setCommands(commands);

  StartContainerRequest scRequest =
      StartContainerRequest.newInstance(containerLaunchContext,
        TestContainerManager.createContainerToken(
          cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user,
          context.getContainerTokenSecretManager()));
  List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
  list.add(scRequest);
  StartContainersRequest allRequests =
      StartContainersRequest.newInstance(list);
  this.containerManager.startContainers(allRequests);
  
  BaseContainerManagerTest.waitForContainerState(this.containerManager,
      cId, ContainerState.COMPLETE);

  this.containerManager.handle(new CMgrCompletedAppsEvent(Arrays
      .asList(appId), CMgrCompletedAppsEvent.Reason.ON_SHUTDOWN));
  this.containerManager.stop();
}
 
Example 16
Source File: AMSimulator.java    From hadoop with Apache License 2.0
private void submitApp()
        throws YarnException, InterruptedException, IOException {
  // ask for new application
  GetNewApplicationRequest newAppRequest =
      Records.newRecord(GetNewApplicationRequest.class);
  GetNewApplicationResponse newAppResponse = 
      rm.getClientRMService().getNewApplication(newAppRequest);
  appId = newAppResponse.getApplicationId();
  
  // submit the application
  final SubmitApplicationRequest subAppRequest =
      Records.newRecord(SubmitApplicationRequest.class);
  ApplicationSubmissionContext appSubContext = 
      Records.newRecord(ApplicationSubmissionContext.class);
  appSubContext.setApplicationId(appId);
  appSubContext.setMaxAppAttempts(1);
  appSubContext.setQueue(queue);
  appSubContext.setPriority(Priority.newInstance(0));
  ContainerLaunchContext conLauContext = 
      Records.newRecord(ContainerLaunchContext.class);
  conLauContext.setApplicationACLs(
      new HashMap<ApplicationAccessType, String>());
  conLauContext.setCommands(new ArrayList<String>());
  conLauContext.setEnvironment(new HashMap<String, String>());
  conLauContext.setLocalResources(new HashMap<String, LocalResource>());
  conLauContext.setServiceData(new HashMap<String, ByteBuffer>());
  appSubContext.setAMContainerSpec(conLauContext);
  appSubContext.setUnmanagedAM(true);
  subAppRequest.setApplicationSubmissionContext(appSubContext);
  UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
  ugi.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws YarnException {
      rm.getClientRMService().submitApplication(subAppRequest);
      return null;
    }
  });
  LOG.info(MessageFormat.format("Submit a new application {0}", appId));
  
  // waiting until application ACCEPTED
  RMApp app = rm.getRMContext().getRMApps().get(appId);
  while(app.getState() != RMAppState.ACCEPTED) {
    Thread.sleep(10);
  }

  // Waiting until the application attempt reaches LAUNCHED
  // "Unmanaged AM must register after AM attempt reaches LAUNCHED state"
  this.appAttemptId = rm.getRMContext().getRMApps().get(appId)
      .getCurrentAppAttempt().getAppAttemptId();
  RMAppAttempt rmAppAttempt = rm.getRMContext().getRMApps().get(appId)
      .getCurrentAppAttempt();
  while (rmAppAttempt.getAppAttemptState() != RMAppAttemptState.LAUNCHED) {
    Thread.sleep(10);
  }
}
 
Example 17
Source File: TestContainerManager.java    From hadoop with Apache License 2.0
private void testContainerLaunchAndExit(int exitCode) throws IOException,
    InterruptedException, YarnException {

  File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
  PrintWriter fileWriter = new PrintWriter(scriptFile);
  File processStartFile =
      new File(tmpDir, "start_file.txt").getAbsoluteFile();

  // ////// Construct the Container-id
  ContainerId cId = createContainerId(0);

  if (Shell.WINDOWS) {
    fileWriter.println("@echo Hello World!> " + processStartFile);
    fileWriter.println("@echo " + cId + ">> " + processStartFile);
    if (exitCode != 0) {
      fileWriter.println("@exit " + exitCode);
    }
  } else {
    fileWriter.write("\numask 0"); // So that start file is readable by the test
    fileWriter.write("\necho Hello World! > " + processStartFile);
    fileWriter.write("\necho $$ >> " + processStartFile);
    // Have script throw an exit code at the end
    if (exitCode != 0) {
      fileWriter.write("\nexit " + exitCode);
    }
  }

  fileWriter.close();

  ContainerLaunchContext containerLaunchContext =
      recordFactory.newRecordInstance(ContainerLaunchContext.class);

  URL resource_alpha =
      ConverterUtils.getYarnUrlFromPath(localFS
          .makeQualified(new Path(scriptFile.getAbsolutePath())));
  LocalResource rsrc_alpha =
      recordFactory.newRecordInstance(LocalResource.class);
  rsrc_alpha.setResource(resource_alpha);
  rsrc_alpha.setSize(-1);
  rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
  rsrc_alpha.setType(LocalResourceType.FILE);
  rsrc_alpha.setTimestamp(scriptFile.lastModified());
  String destinationFile = "dest_file";
  Map<String, LocalResource> localResources =
      new HashMap<String, LocalResource>();
  localResources.put(destinationFile, rsrc_alpha);
  containerLaunchContext.setLocalResources(localResources);
  List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
  containerLaunchContext.setCommands(commands);

  StartContainerRequest scRequest =
      StartContainerRequest.newInstance(
          containerLaunchContext,
          createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(),
              user, context.getContainerTokenSecretManager()));
  List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
  list.add(scRequest);
  StartContainersRequest allRequests =
      StartContainersRequest.newInstance(list);
  containerManager.startContainers(allRequests);

  BaseContainerManagerTest.waitForContainerState(containerManager, cId,
      ContainerState.COMPLETE);

  List<ContainerId> containerIds = new ArrayList<ContainerId>();
  containerIds.add(cId);
  GetContainerStatusesRequest gcsRequest =
      GetContainerStatusesRequest.newInstance(containerIds);
  ContainerStatus containerStatus =
      containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);

  // Verify exit status matches exit state of script
  Assert.assertEquals(exitCode, containerStatus.getExitStatus());
}
 
Example 18
Source File: TestLogAggregationService.java    From hadoop with Apache License 2.0
@Test
public void testLogAggregationForRealContainerLaunch() throws IOException,
    InterruptedException, YarnException {

  this.containerManager.start();


  File scriptFile = new File(tmpDir, "scriptFile.sh");
  PrintWriter fileWriter = new PrintWriter(scriptFile);
  fileWriter.write("\necho Hello World! Stdout! > "
      + new File(localLogDir, "stdout"));
  fileWriter.write("\necho Hello World! Stderr! > "
      + new File(localLogDir, "stderr"));
  fileWriter.write("\necho Hello World! Syslog! > "
      + new File(localLogDir, "syslog"));
  fileWriter.close();

  ContainerLaunchContext containerLaunchContext =
      recordFactory.newRecordInstance(ContainerLaunchContext.class);
  // ////// Construct the Container-id
  ApplicationId appId = ApplicationId.newInstance(0, 0);
  ApplicationAttemptId appAttemptId =
      BuilderUtils.newApplicationAttemptId(appId, 1);
  ContainerId cId = BuilderUtils.newContainerId(appAttemptId, 0);

  URL resource_alpha =
      ConverterUtils.getYarnUrlFromPath(localFS
          .makeQualified(new Path(scriptFile.getAbsolutePath())));
  LocalResource rsrc_alpha =
      recordFactory.newRecordInstance(LocalResource.class);
  rsrc_alpha.setResource(resource_alpha);
  rsrc_alpha.setSize(-1);
  rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
  rsrc_alpha.setType(LocalResourceType.FILE);
  rsrc_alpha.setTimestamp(scriptFile.lastModified());
  String destinationFile = "dest_file";
  Map<String, LocalResource> localResources = 
      new HashMap<String, LocalResource>();
  localResources.put(destinationFile, rsrc_alpha);
  containerLaunchContext.setLocalResources(localResources);
  List<String> commands = new ArrayList<String>();
  commands.add("/bin/bash");
  commands.add(scriptFile.getAbsolutePath());
  containerLaunchContext.setCommands(commands);

  StartContainerRequest scRequest =
      StartContainerRequest.newInstance(containerLaunchContext,
        TestContainerManager.createContainerToken(
          cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user,
          context.getContainerTokenSecretManager()));
  List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
  list.add(scRequest);
  StartContainersRequest allRequests =
      StartContainersRequest.newInstance(list);
  this.containerManager.startContainers(allRequests);
  
  BaseContainerManagerTest.waitForContainerState(this.containerManager,
      cId, ContainerState.COMPLETE);

  this.containerManager.handle(new CMgrCompletedAppsEvent(Arrays
      .asList(appId), CMgrCompletedAppsEvent.Reason.ON_SHUTDOWN));
  this.containerManager.stop();
}
 
Example 19
Source File: AbstractYarnClusterDescriptor.java    From flink with Apache License 2.0
protected ContainerLaunchContext setupApplicationMasterContainer(
		String yarnClusterEntrypoint,
		boolean hasLogback,
		boolean hasLog4j,
		boolean hasKrb5,
		int jobManagerMemoryMb) {
	// ------------------ Prepare Application Master Container  ------------------------------

	// respect custom JVM options in the YAML file
	String javaOpts = flinkConfiguration.getString(CoreOptions.FLINK_JVM_OPTIONS);
	if (flinkConfiguration.getString(CoreOptions.FLINK_JM_JVM_OPTIONS).length() > 0) {
		javaOpts += " " + flinkConfiguration.getString(CoreOptions.FLINK_JM_JVM_OPTIONS);
	}
	//applicable only for YarnMiniCluster secure test run
	//krb5.conf file will be available as local resource in JM/TM container
	if (hasKrb5) {
		javaOpts += " -Djava.security.krb5.conf=krb5.conf";
	}

	// Set up the container launch context for the application master
	ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

	final  Map<String, String> startCommandValues = new HashMap<>();
	startCommandValues.put("java", "$JAVA_HOME/bin/java");

	int heapSize = Utils.calculateHeapSize(jobManagerMemoryMb, flinkConfiguration);
	String jvmHeapMem = String.format("-Xms%sm -Xmx%sm", heapSize, heapSize);
	startCommandValues.put("jvmmem", jvmHeapMem);

	startCommandValues.put("jvmopts", javaOpts);
	String logging = "";

	if (hasLogback || hasLog4j) {
		logging = "-Dlog.file=\"" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.log\"";

		if (hasLogback) {
			logging += " -Dlogback.configurationFile=file:" + CONFIG_FILE_LOGBACK_NAME;
		}

		if (hasLog4j) {
			logging += " -Dlog4j.configuration=file:" + CONFIG_FILE_LOG4J_NAME;
		}
	}

	startCommandValues.put("logging", logging);
	startCommandValues.put("class", yarnClusterEntrypoint);
	startCommandValues.put("redirects",
		"1> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.out " +
		"2> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.err");
	startCommandValues.put("args", "");

	final String commandTemplate = flinkConfiguration
		.getString(ConfigConstants.YARN_CONTAINER_START_COMMAND_TEMPLATE,
			ConfigConstants.DEFAULT_YARN_CONTAINER_START_COMMAND_TEMPLATE);
	final String amCommand =
		BootstrapTools.getStartCommand(commandTemplate, startCommandValues);

	amContainer.setCommands(Collections.singletonList(amCommand));

	LOG.debug("Application Master start command: " + amCommand);

	return amContainer;
}
 
Example 20
Source File: UnmanagedAmTest.java    From reef with Apache License 2.0
@Test
public void testAmShutdown() throws IOException, YarnException {

  Assume.assumeTrue(
      "This test requires a YARN Resource Manager to connect to",
      Boolean.parseBoolean(System.getenv("REEF_TEST_YARN")));

  final YarnConfiguration yarnConfig = new YarnConfiguration();

  // Start YARN client and register the application

  final YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(yarnConfig);
  yarnClient.start();

  final ContainerLaunchContext containerContext = Records.newRecord(ContainerLaunchContext.class);
  containerContext.setCommands(Collections.<String>emptyList());
  containerContext.setLocalResources(Collections.<String, LocalResource>emptyMap());
  containerContext.setEnvironment(Collections.<String, String>emptyMap());
  containerContext.setTokens(getTokens());

  final ApplicationSubmissionContext appContext = yarnClient.createApplication().getApplicationSubmissionContext();
  appContext.setApplicationName("REEF_Unmanaged_AM_Test");
  appContext.setAMContainerSpec(containerContext);
  appContext.setUnmanagedAM(true);
  appContext.setQueue("default");

  final ApplicationId applicationId = appContext.getApplicationId();
  LOG.log(Level.INFO, "Registered YARN application: {0}", applicationId);

  yarnClient.submitApplication(appContext);

  LOG.log(Level.INFO, "YARN application submitted: {0}", applicationId);

  addToken(yarnClient.getAMRMToken(applicationId));

  // Start the AM

  final AMRMClientAsync<AMRMClient.ContainerRequest> rmClient = AMRMClientAsync.createAMRMClientAsync(1000, this);
  rmClient.init(yarnConfig);
  rmClient.start();

  final NMClientAsync nmClient = new NMClientAsyncImpl(this);
  nmClient.init(yarnConfig);
  nmClient.start();

  final RegisterApplicationMasterResponse registration =
      rmClient.registerApplicationMaster(NetUtils.getHostname(), -1, null);

  LOG.log(Level.INFO, "Unmanaged AM is running: {0}", registration);

  rmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, "Success!", null);

  LOG.log(Level.INFO, "Unregistering AM: state {0}", rmClient.getServiceState());

  // Shutdown the AM

  rmClient.stop();
  nmClient.stop();

  // Get the final application report

  final ApplicationReport appReport = yarnClient.getApplicationReport(applicationId);
  final YarnApplicationState appState = appReport.getYarnApplicationState();
  final FinalApplicationStatus finalAttemptStatus = appReport.getFinalApplicationStatus();

  LOG.log(Level.INFO, "Application {0} final attempt {1} status: {2}/{3}", new Object[] {
      applicationId, appReport.getCurrentApplicationAttemptId(), appState, finalAttemptStatus});

  Assert.assertEquals("Application must be in FINISHED state", YarnApplicationState.FINISHED, appState);
  Assert.assertEquals("Final status must be SUCCEEDED", FinalApplicationStatus.SUCCEEDED, finalAttemptStatus);

  // Shutdown YARN client

  yarnClient.stop();
}