org.apache.oozie.client.OozieClient Java Examples

The following examples show how to use org.apache.oozie.client.OozieClient. Each example is taken from an open-source project; the source file, project, and license are noted above the code.
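In outline, every use of the client follows the same pattern: construct an OozieClient against the server URL, build a Properties configuration, run or submit the job, and poll its status. Below is a minimal, self-contained sketch of that pattern; the server URL, HDFS paths, and port numbers are illustrative placeholders, not values from any of the projects that follow.

import java.util.Properties;
import org.apache.oozie.client.OozieClient;
import org.apache.oozie.client.OozieClientException;
import org.apache.oozie.client.WorkflowJob;

public class OozieClientSketch {
	public static void main(String[] args) throws OozieClientException, InterruptedException {
		// Placeholder URL and paths; substitute your cluster's values.
		OozieClient client = new OozieClient("http://localhost:11000/oozie");
		Properties conf = client.createConfiguration();
		conf.setProperty(OozieClient.APP_PATH, "hdfs://localhost:8020/user/me/my-app");
		conf.setProperty("nameNode", "hdfs://localhost:8020");
		conf.setProperty("jobTracker", "localhost:8032");

		// run() submits and starts the workflow in one call.
		String jobId = client.run(conf);

		// Poll until the job leaves the PREP/RUNNING states.
		WorkflowJob.Status status;
		do {
			Thread.sleep(10 * 1000);
			status = client.getJobInfo(jobId).getStatus();
		} while (status == WorkflowJob.Status.PREP || status == WorkflowJob.Status.RUNNING);
		System.out.println("Job " + jobId + " finished with status " + status);
	}
}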
Example #1
Source File: OozieUtil.java    From EasyML with Apache License 2.0
/**
 * Submit an Oozie workflow job via the shared {@code wc} client.
 *
 * @param app_path the HDFS path of the workflow application
 * @return the id of the submitted job
 * @throws OozieClientException
 * @throws IOException
 */
public static String submit(String app_path) throws OozieClientException,
		IOException {
	// create a workflow job configuration and set the workflow application path
	Properties conf = wc.createConfiguration();

	conf.setProperty(OozieClient.APP_PATH, app_path);

	// setting workflow parameters
	conf.setProperty("queueName", Constants.QUEUE_NAME);
	conf.setProperty("nameNode", Constants.NAME_NODE);
	conf.setProperty("jobTracker", Constants.JOB_TRACKER);
	conf.setProperty("appPath", app_path);
	String jobId = wc.run(conf);
	logger.info("submit workflow job:" + jobId);

	return jobId;
}
 
Example #2
Source File: DownloadRequestServiceImpl.java    From occurrence with Apache License 2.0
@Inject
public DownloadRequestServiceImpl(OozieClient client,
                                  @Named("oozie.default_properties") Map<String, String> defaultProperties,
                                  @Named("ws.url") String wsUrl,
                                  @Named("ws.mount") String wsMountDir,
                                  OccurrenceDownloadService occurrenceDownloadService,
                                  DownloadEmailUtils downloadEmailUtils,
                                  DownloadLimitsService downloadLimitsService) {

  this.client = client;
  this.wsUrl = wsUrl;
  downloadMount = new File(wsMountDir);
  this.occurrenceDownloadService = occurrenceDownloadService;
  this.downloadEmailUtils = downloadEmailUtils;
  parametersBuilder = new DownloadWorkflowParametersBuilder(defaultProperties);
  this.downloadLimitsService = downloadLimitsService;
}
 
Example #3
Source File: OccurrenceDownloadServiceModule.java    From occurrence with Apache License 2.0
@Provides
@Singleton
@Named("oozie.default_properties")
Map<String,String> providesDefaultParameters(@Named("environment") String environment,
                                             @Named("ws.url") String wsUrl,
                                             @Named("hdfs.namenode") String nameNode,
                                             @Named("user.name") String userName) {
  return new ImmutableMap.Builder<String, String>()
    .put(OozieClient.LIBPATH, String.format(DownloadWorkflowParameters.WORKFLOWS_LIB_PATH_FMT, environment))
    .put(OozieClient.APP_PATH, nameNode + String.format(DownloadWorkflowParameters.DOWNLOAD_WORKFLOW_PATH_FMT,
                                                        environment))
    .put(OozieClient.WORKFLOW_NOTIFICATION_URL,
         DownloadUtils.concatUrlPaths(wsUrl, "occurrence/download/request/callback?job_id=$jobId&status=$status"))
    .put(OozieClient.USER_NAME, userName)
    .putAll(DownloadWorkflowParameters.CONSTANT_PARAMETERS).build();
}
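Oozie itself expands the $jobId and $status tokens when it issues the notification callback, so the URL above needs no client-side templating. A minimal sketch of feeding such a defaults map into a job configuration, assuming an OozieClient named client and the map above in scope:

// Copy the injected defaults into a fresh job configuration.
Properties conf = client.createConfiguration();
for (Map.Entry<String, String> entry : defaultProperties.entrySet()) {
  conf.setProperty(entry.getKey(), entry.getValue());
}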
 
Example #4
Source File: OozieUtil.java    From EasyML with Apache License 2.0
/**
 * Rerun an Oozie job, then record its updated status through SecureDao.
 *
 * @param jobID the id of the job to rerun
 * @throws OozieClientException
 * @throws IOException
 */
public static void reRun(String jobID) throws OozieClientException,
		IOException {
	logger.info("rerun job:" + jobID);
	// create a workflow job configuration and set the workflow application path
	Properties conf = wc.createConfiguration();
	String app_path = wc.getJobInfo(jobID).getAppPath();

	// Rebuild the configuration with the job's original application path
	conf.setProperty(OozieClient.APP_PATH, app_path);
	conf.setProperty("queueName", Constants.QUEUE_NAME);
	conf.setProperty("nameNode", Constants.NAME_NODE);
	conf.setProperty("jobTracker", Constants.JOB_TRACKER);
	conf.setProperty("appPath", app_path);
	conf.setProperty("oozie.wf.rerun.failnodes", "false");

	wc.reRun(jobID, conf);
	OozieJob job = new OozieJob();
	job.setId(jobID);
	try {
		OozieJob temp = SecureDao.getObject(job);
		if ( temp != null ) {
			job = temp;
		}
		job.setStatus(getJob(jobID).getStatus());
		job.setCreatedTime(TimeUtils.getTime());

		logger.info("[reRun Status]" + job.getStatus());
		String[] setFields = {"status", "createtime", "endtime"};
		String[] condFields = {"jobid"};
		SecureDao.update(job, setFields, condFields);
	} catch (Exception e) {
		logger.error("Failed to update rerun job status", e);
	}

}
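Setting oozie.wf.rerun.failnodes to "false", as above, asks Oozie to rerun all nodes rather than only the failed ones. A minimal sketch of the two standard rerun modes follows, assuming the RERUN_FAIL_NODES and RERUN_SKIP_NODES constants of the OozieClient API; the variable names mirror the example above.

Properties conf = wc.createConfiguration();
conf.setProperty(OozieClient.APP_PATH, app_path);

// Mode 1: rerun only the nodes that failed.
conf.setProperty(OozieClient.RERUN_FAIL_NODES, "true");

// Mode 2 (mutually exclusive with mode 1): rerun everything
// except the named nodes.
// conf.setProperty(OozieClient.RERUN_SKIP_NODES, "nodeA,nodeB");

wc.reRun(jobID, conf);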
 
Example #5
Source File: CallbackServiceTest.java    From occurrence with Apache License 2.0
@Before
public void setup() {
  downloadEmailUtils = mock(DownloadEmailUtils.class);
  occurrenceDownloadService = mock(OccurrenceDownloadService.class);
  downloadLimitsService = mock(DownloadLimitsService.class);
  when(downloadLimitsService.exceedsSimultaneousDownloadLimit(any(String.class))).thenReturn(null);
  when(downloadLimitsService.exceedsDownloadComplexity(any(DownloadRequest.class))).thenReturn(null);
  when(occurrenceDownloadService.get(anyString())).thenReturn(mockDownload());
  oozieClient = mock(OozieClient.class);
  service =
    new DownloadRequestServiceImpl(oozieClient, Maps.<String, String>newHashMap(), "http://localhost:8080/",
      "", occurrenceDownloadService, downloadEmailUtils,downloadLimitsService);
}
 
Example #6
Source File: RunAppMojo.java    From kite with Apache License 2.0
private String getAppPathPropertyName() {
  if ("coordinator".equals(applicationType)) {
    return OozieClient.COORDINATOR_APP_PATH;
  } else if ("bundle".equals(applicationType)) {
    return OozieClient.BUNDLE_APP_PATH;
  } else {
    return OozieClient.APP_PATH;
  }
}
 
Example #7
Source File: RunAppMojo.java    From kite with Apache License 2.0
public void execute() throws MojoExecutionException, MojoFailureException {
  OozieClient oozieClient = new OozieClient(oozieUrl);
  Properties conf = oozieClient.createConfiguration();
  if (jobProperties != null) {
    conf.putAll(jobProperties);
  }
  if (hadoopConfiguration != null) {
    conf.putAll(hadoopConfiguration);
    String hadoopFs = hadoopConfiguration.getProperty("fs.default.name");
    if (hadoopFs == null) {
      throw new MojoExecutionException("Missing property 'fs.default.name' in " +
          "hadoopConfiguration");
    }
    String hadoopJobTracker = hadoopConfiguration.getProperty("mapred.job.tracker");
    if (hadoopJobTracker == null) {
      throw new MojoExecutionException("Missing property 'mapred.job.tracker' in " +
          "hadoopConfiguration");
    }
    conf.put(NAMENODE_PROPERTY, hadoopFs);
    conf.put(JOBTRACKER_PROPERTY, hadoopJobTracker);
  }

  String appPath = getAppPath().toString();
  conf.setProperty(getAppPathPropertyName(), appPath);
  conf.setProperty(APP_PATH_PROPERTY, appPath); // used in coordinator.xml
  getLog().info("App path: " + appPath);
  try {
    String jobId = oozieClient.run(conf);
    getLog().info("Running Oozie job " + jobId);
  } catch (OozieClientException e) {
    throw new MojoExecutionException("Error running Oozie job", e);
  }
}
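fs.default.name and mapred.job.tracker are the legacy Hadoop 1 property keys; on Hadoop 2+ clusters the same values are usually configured under fs.defaultFS and a YARN ResourceManager address. A hedged sketch of a lookup that tolerates either generation (the helper is illustrative and not part of the mojo):

// Prefer the legacy key the mojo expects, falling back to the
// Hadoop 2+ equivalent when only that one is set.
private static String resolveFileSystem(Properties hadoopConfiguration) {
  String fs = hadoopConfiguration.getProperty("fs.default.name"); // Hadoop 1 key
  return fs != null ? fs : hadoopConfiguration.getProperty("fs.defaultFS"); // Hadoop 2+ key
}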
 
Example #8
Source File: HadoopClientServicesImpl.java    From pentaho-hadoop-shims with Apache License 2.0
public HadoopClientServicesImpl( NamedCluster namedCluster, HadoopShim hadoopShim, BundleContext bundleContext ) {
  this.bundleContext = bundleContext;
  this.hadoopShim = hadoopShim;
  this.namedCluster = namedCluster;
  this.oozieClient = new OozieClient( namedCluster.getOozieUrl() );
  this.writerAppenderManagerFactory = new WriterAppenderManager.Factory();
  this.bytesUtil = new CommonHBaseBytesUtil();
}
 
Example #9
Source File: OozieJobsServiceImpl.java    From searchanalytics-bigdata with MIT License
private void submitWorkflowJob(String workFlowRoot)
		throws OozieClientException, InterruptedException {
	String oozieURL = System.getProperty("oozie.base.url");
	LOG.debug("Oozie BaseURL is: {} ", oozieURL);
	OozieClient client = new OozieClient(oozieURL);

	DateTime now = new DateTime();
	int monthOfYear = now.getMonthOfYear();
	int dayOfMonth = now.getDayOfMonth();
	int hourOfDay = now.getHourOfDay();
	String year = String.valueOf(now.getYear());
	String month = monthOfYear < 10 ? "0" + String.valueOf(monthOfYear)
			: String.valueOf(monthOfYear);
	String day = dayOfMonth < 10 ? "0" + String.valueOf(dayOfMonth)
			: String.valueOf(dayOfMonth);
	String hour = hourOfDay < 10 ? "0" + String.valueOf(hourOfDay) : String
			.valueOf(hourOfDay);

	Properties conf = client.createConfiguration();
	conf.setProperty(OozieClient.APP_PATH, workFlowRoot
			+ "/hive-action-add-partition.xml");
	conf.setProperty("nameNode", hadoopClusterService.getHDFSUri());
	conf.setProperty("jobTracker", hadoopClusterService.getJobTRackerUri());
	conf.setProperty("workflowRoot", workFlowRoot);
	conf.setProperty("YEAR", year);
	conf.setProperty("MONTH", month);
	conf.setProperty("DAY", day);
	conf.setProperty("HOUR", hour);
	conf.setProperty("oozie.use.system.libpath", "true");

	// run() submits and starts the workflow in one call
	// (client.submit(conf) alone would leave the job in PREP)
	client.setDebugMode(1);
	// client.dryrun(conf);
	String jobId = client.run(conf);

	LOG.debug("Workflow job submitted");
	// Poll the job status up to three times, sleeping 60 seconds between checks.
	int retries = 3;
	for (int i = 1; i <= retries; i++) {
		Thread.sleep(60 * 1000);

		WorkflowJob jobInfo = client.getJobInfo(jobId);
		Status jobStatus = jobInfo.getStatus();
		LOG.debug("Workflow job running ...");
		LOG.debug("HiveActionWorkflowJob Status Try: {}", i);
		LOG.debug("HiveActionWorkflowJob Id: {}", jobInfo.getId());
		LOG.debug("HiveActionWorkflowJob StartTime: {}",
				jobInfo.getStartTime());
		LOG.debug("HiveActionWorkflowJob EndTime: {}", jobInfo.getEndTime());
		LOG.debug("HiveActionWorkflowJob ConsoleURL: {}",
				jobInfo.getConsoleUrl());
		LOG.debug("HiveActionWorkflowJob Status: {}", jobInfo.getStatus());

		WorkflowAction workflowAction = jobInfo.getActions().get(0);

		LOG.debug("HiveActionWorkflowJob Action consoleURL: {}",
				workflowAction.getConsoleUrl());
		LOG.debug("HiveActionWorkflowJob Action Name: {}",
				workflowAction.getName());
		LOG.debug("HiveActionWorkflowJob Action error message: {}",
				workflowAction.getErrorMessage());
		LOG.debug("HiveActionWorkflowJob Action Status: {}",
				workflowAction.getStats());
		LOG.debug("HiveActionWorkflowJob Action data: {}",
				workflowAction.getData());
		LOG.debug("HiveActionWorkflowJob Action conf: {}",
				workflowAction.getConf());
		LOG.debug("HiveActionWorkflowJob Action retries: {}",
				workflowAction.getRetries());
		LOG.debug("HiveActionWorkflowJob Action id: {}",
				workflowAction.getId());
		LOG.debug("HiveActionWorkflowJob Action start time: {}",
				workflowAction.getStartTime());
		LOG.debug("HiveActionWorkflowJob Action end time: {}",
				workflowAction.getEndTime());
		LOG.debug("HiveActionWorkflowJob Oozie Url: {}",
				client.getOozieUrl());

		if (jobStatus == WorkflowJob.Status.SUCCEEDED) {
			LOG.info("Oozie workflow job was successful! " + jobStatus);
			break;
		} else if (jobStatus == WorkflowJob.Status.PREP
				|| jobStatus == WorkflowJob.Status.RUNNING) {
			if (i == retries) {
				throw new RuntimeException("Error executing workflow job! "
						+ jobStatus);
			} else {
				continue;
			}
		} else {
			throw new RuntimeException("Error executing workflow job! "
					+ jobStatus);
		}
	}
}
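The fixed three-iteration loop above can abandon a job that is merely slow. A more robust pattern, sketched below against the same OozieClient API, polls until the workflow reaches a terminal state or a deadline passes; the class and method names are illustrative, not from the original source.

import org.apache.oozie.client.OozieClient;
import org.apache.oozie.client.OozieClientException;
import org.apache.oozie.client.WorkflowJob;

public final class OozieWait {
	// Poll the workflow until it leaves PREP/RUNNING or the timeout elapses,
	// returning the last observed status either way.
	public static WorkflowJob.Status awaitCompletion(OozieClient client, String jobId,
			long timeoutMillis, long pollIntervalMillis)
			throws OozieClientException, InterruptedException {
		long deadline = System.currentTimeMillis() + timeoutMillis;
		while (true) {
			WorkflowJob.Status status = client.getJobInfo(jobId).getStatus();
			boolean terminal = status != WorkflowJob.Status.PREP
					&& status != WorkflowJob.Status.RUNNING;
			if (terminal || System.currentTimeMillis() >= deadline) {
				return status;
			}
			Thread.sleep(pollIntervalMillis);
		}
	}
}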
 
Example #10
Source File: OozieJobsServiceImpl.java    From searchanalytics-bigdata with MIT License
private void submitCoordJob(String workFlowRoot)
		throws OozieClientException, InterruptedException {
	// OozieClient client = LocalOozie.getCoordClient();
	String oozieURL = System.getProperty("oozie.base.url");
	LOG.debug("Oozie BaseURL is: {} ", oozieURL);
	OozieClient client = new OozieClient(oozieURL);
	Properties conf = client.createConfiguration();
	conf.setProperty(OozieClient.COORDINATOR_APP_PATH, workFlowRoot
			+ "/coord-app-hive-add-partition.xml");
	conf.setProperty("nameNode", hadoopClusterService.getHDFSUri());
	conf.setProperty("jobTracker", hadoopClusterService.getJobTRackerUri());
	conf.setProperty("workflowRoot", workFlowRoot);
	Date nowMinusOneMin = new DateTime().minusMinutes(1).toDate();
	Date now = new DateTime().toDate();
	conf.setProperty("jobStart",
			DateUtils.formatDateOozieTZ(nowMinusOneMin));
	conf.setProperty("jobEnd", DateUtils.formatDateOozieTZ(new DateTime()
			.plusHours(2).toDate()));
	conf.setProperty("initialDataset", DateUtils.formatDateOozieTZ(now));
	conf.setProperty("tzOffset", "2");

	// submit the coordinator job (submit() leaves it in PREP;
	// run() would submit and start it)
	String jobId = client.submit(conf);

	LOG.debug("Workflow job submitted");
	// wait until the workflow job finishes printing the status every 10
	// seconds
	int retries = 2;
	for (int i = 1; i <= retries; i++) {
		// Sleep 60 sec./ 3 mins
		Thread.sleep(60 * 1000);

		CoordinatorJob coordJobInfo = client.getCoordJobInfo(jobId);
		LOG.debug("Workflow job running ...");
		LOG.debug("coordJobInfo Try: {}", i);
		LOG.debug("coordJobInfo StartTime: {}", coordJobInfo.getStartTime());
		LOG.debug("coordJobInfo NextMaterizedTime: {}",
				coordJobInfo.getNextMaterializedTime());
		LOG.debug("coordJobInfo EndTime: {}", coordJobInfo.getEndTime());
		LOG.debug("coordJobInfo Frequency: {}", coordJobInfo.getFrequency());
		LOG.debug("coordJobInfo ConsoleURL: {}",
				coordJobInfo.getConsoleUrl());
		LOG.debug("coordJobInfo Status: {}", coordJobInfo.getStatus());
		for (CoordinatorAction action : coordJobInfo.getActions()) {
			LOG.debug("coordJobInfo Action Id: {}", action.getId());
			LOG.debug("coordJobInfo Action NominalTimeL: {}",
					action.getNominalTime());
			LOG.debug("coordJobInfo Action Runconf: {}",
					action.getRunConf());
			LOG.debug("coordJobInfo Action Status: {}", action.getStatus());
			LOG.debug("coordJobInfo ActionConsoleURL: {}",
					action.getConsoleUrl());
			LOG.debug("coordJobInfo ActionErrorMessage: {}",
					action.getErrorMessage());
		}
		if (coordJobInfo.getStatus() == Job.Status.RUNNING) {
			// The job stays RUNNING even if the Hive action fails, so a
			// stable RUNNING state after the last retry is treated as success.
			if (i == retries) {
				LOG.info("Coord Job in running state!");
				break;
			} else {
				continue;
			}
		} else if (coordJobInfo.getStatus() == Job.Status.PREMATER
				|| coordJobInfo.getStatus() == Job.Status.PREP) {
			// still preparing.
			continue;
		} else {
			throw new RuntimeException(
					"Error occurred while running coord job!");
		}
	}
}
 
Example #11
Source File: OozieJobInfoImpl.java    From pentaho-hadoop-shims with Apache License 2.0
public OozieJobInfoImpl( String id, OozieClient oozieClient ) {
  this.id = id;
  this.oozieClient = oozieClient;
}
 
Example #12
Source File: OozieJobsServiceImpl.java    From searchanalytics-bigdata with MIT License
private void submitTopQueriesBundleCoordJob(String workFlowRoot)
		throws OozieClientException, InterruptedException {
	// OozieClient client = LocalOozie.getCoordClient();
	String oozieURL = System.getProperty("oozie.base.url");
	LOG.debug("Oozie BaseURL is: {} ", oozieURL);
	OozieClient client = new OozieClient(oozieURL);
	Properties conf = client.createConfiguration();
	conf.setProperty(OozieClient.BUNDLE_APP_PATH, workFlowRoot
			+ "/load-and-index-customerqueries-bundle-configuration.xml");
	conf.setProperty("coordAppPathLoadCustomerQueries", workFlowRoot
			+ "/coord-app-load-customerqueries.xml");
	conf.setProperty("coordAppPathIndexTopQueriesES", workFlowRoot
			+ "/coord-app-index-topqueries-es.xml");

	conf.setProperty("nameNode", hadoopClusterService.getHDFSUri());
	conf.setProperty("jobTracker", hadoopClusterService.getJobTRackerUri());
	conf.setProperty("workflowRoot", workFlowRoot);
	String userName = System.getProperty("user.name");
	String oozieWorkFlowRoot = hadoopClusterService.getHDFSUri() + "/usr/"
			+ userName + "/oozie";
	conf.setProperty("oozieWorkflowRoot", oozieWorkFlowRoot);
	Date now = new Date();
	conf.setProperty("jobStart", DateUtils.formatDateOozieTZ(new DateTime(
			now).minusDays(1).toDate()));
	conf.setProperty("jobStartIndex", DateUtils
			.formatDateOozieTZ(new DateTime(now).minusDays(1).plusMinutes(1).toDate()));
	conf.setProperty("jobEnd", DateUtils.formatDateOozieTZ(new DateTime()
			.plusDays(2).toDate()));
	conf.setProperty("initialDataset", DateUtils.formatDateOozieTZ(now));
	conf.setProperty("tzOffset", "2");

	// submit the bundle job (submit() leaves it in PREP;
	// run() would submit and start it)
	String jobId = client.submit(conf);

	LOG.debug("Bundle job submitted");
	// Poll the bundle status up to three times, sleeping 60 seconds between checks.
	int retries = 3;
	for (int i = 1; i <= retries; i++) {
		Thread.sleep(60 * 1000);

		BundleJob bundleJobInfo = client.getBundleJobInfo(jobId);
		LOG.debug("Bundle job running ...");
		LOG.debug("bundleJobInfo Try: {}", i);
		LOG.debug("bundleJobInfo StartTime: {}",
				bundleJobInfo.getStartTime());
		LOG.debug("bundleJobInfo EndTime: {}", bundleJobInfo.getEndTime());
		LOG.debug("bundleJobInfo ConsoleURL: {}",
				bundleJobInfo.getConsoleUrl());
		LOG.debug("bundleJobInfo Status: {}", bundleJobInfo.getStatus());

		for (CoordinatorJob coordinatorJob : bundleJobInfo
				.getCoordinators()) {
			LOG.debug("bundleJobInfo Coord StartTime: {}",
					coordinatorJob.getStartTime());
			LOG.debug("bundleJobInfo Coord EndTime: {}",
					coordinatorJob.getEndTime());
			LOG.debug("bundleJobInfo Coord NextMaterizedTime: {}",
					coordinatorJob.getNextMaterializedTime());
			LOG.debug("bundleJobInfo Frequency: {}",
					coordinatorJob.getFrequency());
			LOG.debug("bundleJobInfo Coord Status: {}",
					coordinatorJob.getStatus());
			for (CoordinatorAction action : coordinatorJob.getActions()) {
				LOG.debug("bundleJobInfo Action Id: {}", action.getId());
				LOG.debug("bundleJobInfo Action NominalTimeL: {}",
						action.getNominalTime());
				LOG.debug("bundleJobInfo Action Runconf: {}",
						action.getRunConf());
				LOG.debug("bundleJobInfo Action Status: {}",
						action.getStatus());
				LOG.debug("bundleJobInfo ActionConsoleURL: {}",
						action.getConsoleUrl());
				LOG.debug("bundleJobInfo ActionErrorMessage: {}",
						action.getErrorMessage());
			}
		}

		if (bundleJobInfo.getStatus() == Job.Status.RUNNING) {
			// The job stays RUNNING even if the Hive action fails, so a
			// stable RUNNING state after the last retry is treated as success.
			if (i == retries) {
				LOG.info("Bundle Job in running state! "
						+ bundleJobInfo.getStatus());
				break;
			} else {
				continue;
			}
		} else if (bundleJobInfo.getStatus() == Job.Status.PREMATER
				|| bundleJobInfo.getStatus() == Job.Status.PREP) {
			// still preparing.
			continue;
		} else {
			throw new RuntimeException(
					"Error occurred while running customer top queries bundle job! "
							+ bundleJobInfo.getStatus());
		}
	}
}
 
Example #13
Source File: OozieLocalServerIntegrationTest.java    From hadoop-mini-clusters with Apache License 2.0
@Test
public void testSubmitCoordinator() throws Exception {

    LOG.info("OOZIE: Test Submit Coordinator Start");

    FileSystem hdfsFs = hdfsLocalCluster.getHdfsFileSystemHandle();
    OozieClient oozie = oozieLocalServer.getOozieCoordClient();

    Path appPath = new Path(hdfsFs.getHomeDirectory(), "testApp");
    hdfsFs.mkdirs(new Path(appPath, "lib"));
    Path workflow = new Path(appPath, "workflow.xml");
    Path coordinator = new Path(appPath, "coordinator.xml");

    //write workflow.xml
    String wfApp =
            "<workflow-app xmlns='uri:oozie:workflow:0.1' name='test-wf'>" +
                    "    <start to='end'/>" +
                    "    <end name='end'/>" +
                    "</workflow-app>";

    String coordApp =
            "<coordinator-app timezone='UTC' end='2016-07-26T02:26Z' start='2016-07-26T01:26Z' frequency='${coord:hours(1)}' name='test-coordinator' xmlns='uri:oozie:coordinator:0.4'>" +
                    "    <action>" +
                    "        <workflow>" +
                    "            <app-path>" + workflow.toString() + "</app-path>" +
                    "        </workflow>" +
                    "    </action>" +
                    "</coordinator-app>";

    Writer writer = new OutputStreamWriter(hdfsFs.create(workflow));
    writer.write(wfApp);
    writer.close();

    Writer coordWriter = new OutputStreamWriter(hdfsFs.create(coordinator));
    coordWriter.write(coordApp);
    coordWriter.close();

    // create the job configuration (the equivalent of job.properties)
    Properties conf = oozie.createConfiguration();
    conf.setProperty(OozieClient.COORDINATOR_APP_PATH, coordinator.toString());
    conf.setProperty(OozieClient.USER_NAME, UserGroupInformation.getCurrentUser().getUserName());

    //submit and check
    final String jobId = oozie.submit(conf);
    CoordinatorJob coord = oozie.getCoordJobInfo(jobId);
    assertNotNull(coord);
    assertEquals(Job.Status.PREP, coord.getStatus());

    LOG.info("OOZIE: Coordinator: {}", coord.toString());
    hdfsFs.close();
}
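As the assertion shows, submit() leaves the coordinator in PREP; starting it is a separate call. A minimal continuation sketch, assuming the same oozie client handle and job id:

    // Start the prepared coordinator, then re-read its status.
    oozie.start(jobId);
    CoordinatorJob started = oozie.getCoordJobInfo(jobId);
    LOG.info("OOZIE: Coordinator status after start: {}", started.getStatus());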
 
Example #14
Source File: OccurrenceDownloadServiceModule.java    From occurrence with Apache License 2.0
@Provides
@Singleton
OozieClient providesOozieClient(@Named("oozie.url") String url) {
  return new OozieClient(url);
}
 
Example #15
Source File: OozieLocalServerIntegrationTest.java    From hadoop-mini-clusters with Apache License 2.0
@Test
public void testSubmitWorkflow() throws Exception {

    LOG.info("OOZIE: Test Submit Workflow Start");

    FileSystem hdfsFs = hdfsLocalCluster.getHdfsFileSystemHandle();
    OozieClient oozie = oozieLocalServer.getOozieClient();

    Path appPath = new Path(hdfsFs.getHomeDirectory(), "testApp");
    hdfsFs.mkdirs(new Path(appPath, "lib"));
    Path workflow = new Path(appPath, "workflow.xml");

    // Setup input directory and file
    hdfsFs.mkdirs(new Path(TEST_INPUT_DIR));
    hdfsFs.copyFromLocalFile(
            new Path(getClass().getClassLoader().getResource(TEST_INPUT_FILE).toURI()), new Path(TEST_INPUT_DIR));

    //write workflow.xml
    String wfApp = "<workflow-app name=\"sugar-option-decision\" xmlns=\"uri:oozie:workflow:0.5\">\n" +
            "  <global>\n" +
            "    <job-tracker>${jobTracker}</job-tracker>\n" +
            "    <name-node>${nameNode}</name-node>\n" +
            "    <configuration>\n" +
            "      <property>\n" +
            "        <name>mapreduce.output.fileoutputformat.outputdir</name>\n" +
            "        <value>" + TEST_OUTPUT_DIR + "</value>\n" +
            "      </property>\n" +
            "      <property>\n" +
            "        <name>mapreduce.input.fileinputformat.inputdir</name>\n" +
            "        <value>" + TEST_INPUT_DIR + "</value>\n" +
            "      </property>\n" +
            "    </configuration>\n" +
            "  </global>\n" +
            "  <start to=\"first\"/>\n" +
            "  <action name=\"first\">\n" +
            "    <map-reduce> <prepare><delete path=\"" + TEST_OUTPUT_DIR + "\"/></prepare></map-reduce>\n" +
            "    <ok to=\"decision-second-option\"/>\n" +
            "    <error to=\"kill\"/>\n" +
            "  </action>\n" +
            "  <decision name=\"decision-second-option\">\n" +
            "    <switch>\n" +
            "      <case to=\"option\">${doOption}</case>\n" +
            "      <default to=\"second\"/>\n" +
            "    </switch>\n" +
            "  </decision>\n" +
            "  <action name=\"option\">\n" +
            "    <map-reduce> <prepare><delete path=\"" + TEST_OUTPUT_DIR + "\"/></prepare></map-reduce>\n" +
            "    <ok to=\"second\"/>\n" +
            "    <error to=\"kill\"/>\n" +
            "  </action>\n" +
            "  <action name=\"second\">\n" +
            "    <map-reduce> <prepare><delete path=\"" + TEST_OUTPUT_DIR + "\"/></prepare></map-reduce>\n" +
            "    <ok to=\"end\"/>\n" +
            "    <error to=\"kill\"/>\n" +
            "  </action>\n" +
            "  <kill name=\"kill\">\n" +
            "    <message>\n" +
            "      Failed to workflow, error message[${wf: errorMessage (wf: lastErrorNode ())}]\n" +
            "    </message>\n" +
            "  </kill>\n" +
            "  <end name=\"end\"/>\n" +
            "</workflow-app>";

    Writer writer = new OutputStreamWriter(hdfsFs.create(workflow));
    writer.write(wfApp);
    writer.close();

    // create the job configuration (the equivalent of job.properties)
    Properties conf = oozie.createConfiguration();
    conf.setProperty(OozieClient.APP_PATH, workflow.toString());
    conf.setProperty(OozieClient.USER_NAME, UserGroupInformation.getCurrentUser().getUserName());
    conf.setProperty("nameNode", "hdfs://localhost:" + hdfsLocalCluster.getHdfsNamenodePort());
    conf.setProperty("jobTracker", mrLocalCluster.getResourceManagerAddress());
    conf.setProperty("doOption", "true");

    //submit and check
    final String jobId = oozie.run(conf);
    WorkflowJob wf = oozie.getJobInfo(jobId);
    assertNotNull(wf);
    assertEquals(WorkflowJob.Status.RUNNING, wf.getStatus());

    // Poll every second until the workflow reaches a terminal state.
    while (true) {
        Thread.sleep(1000);
        wf = oozie.getJobInfo(jobId);
        if (wf.getStatus() == WorkflowJob.Status.FAILED || wf.getStatus() == WorkflowJob.Status.KILLED
                || wf.getStatus() == WorkflowJob.Status.PREP || wf.getStatus() == WorkflowJob.Status.SUCCEEDED) {
            break;
        }
    }

    wf = oozie.getJobInfo(jobId);
    assertEquals(WorkflowJob.Status.SUCCEEDED, wf.getStatus());

    LOG.info("OOZIE: Workflow: {}", wf.toString());
    hdfsFs.close();

}
 
Example #16
Source File: OozieLocalServer.java    From hadoop-mini-clusters with Apache License 2.0
public OozieClient getOozieCoordClient() {
    return oozieCoordClient;
}
 
Example #17
Source File: OozieLocalServer.java    From hadoop-mini-clusters with Apache License 2.0
public OozieClient getOozieClient() {
    return oozieClient;
}