/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.mapred;

import java.io.File;
import java.io.IOException;
import java.net.URI;

import junit.framework.TestCase;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;

/**
 * A JUnit test to test Map-Reduce empty jobs.
 */
public class TestEmptyJob extends TestCase {
  // Logger for test progress/diagnostic messages.
  private static final Log LOG =
      LogFactory.getLog(TestEmptyJob.class.getName());

  // Root directory for test data, taken from "test.build.data" (default
  // /tmp) and converted to a URI string. Spaces are replaced with '+' so
  // the value remains usable as a Path string.
  private static String TEST_ROOT_DIR =
      new File(System.getProperty("test.build.data", "/tmp")).toURI()
          .toString().replace(' ', '+');

  // Mini MapReduce cluster under test; expected to be initialized by the
  // test harness before launchEmptyJob() is invoked.
  MiniMRCluster mr = null;

  /**
   * Output committer whose job commit is deferred until the test signals it.
   * The commit polls for the existence of the file named by the "share"
   * configuration property and only then delegates to the normal
   * {@link FileOutputCommitter#commitJob} behavior.
   */
  static class CommitterWithDelayCleanup extends FileOutputCommitter {
    @Override
    public void commitJob(JobContext context) throws IOException {
      Configuration conf = context.getConfiguration();
      Path signalFile = new Path(conf.get("share"));
      FileSystem fs = FileSystem.get(conf);

      // Block until the test creates the signal file, polling every 100 ms.
      while (!fs.exists(signalFile)) {
        UtilsForTests.waitFor(100);
      }
      super.commitJob(context);
    }
  }

  /**
   * Simple method running a MapReduce job with no input data. Used to test that
   * such a job is successful.
   * 
   * @param fileSys
   * @param numMaps
   * @param numReduces
   * @return true if the MR job is successful, otherwise false
   * @throws IOException
   */
  private boolean launchEmptyJob(URI fileSys, int numMaps, int numReduces)
      throws IOException {
    // create an empty input dir
    final Path inDir = new Path(TEST_ROOT_DIR, "testing/empty/input");
    final Path outDir = new Path(TEST_ROOT_DIR, "testing/empty/output");
    final Path inDir2 = new Path(TEST_ROOT_DIR, "testing/dummy/input");
    final Path outDir2 = new Path(TEST_ROOT_DIR, "testing/dummy/output");
    final Path share = new Path(TEST_ROOT_DIR, "share");

    JobConf conf = mr.createJobConf();
    FileSystem fs = FileSystem.get(fileSys, conf);
    fs.delete(new Path(TEST_ROOT_DIR), true);
    fs.delete(outDir, true);
    if (!fs.mkdirs(inDir)) {
      LOG.warn("Can't create " + inDir);
      return false;
    }

    // use WordCount example
    FileSystem.setDefaultUri(conf, fileSys);
    conf.setJobName("empty");
    // use an InputFormat which returns no split
    conf.setInputFormat(EmptyInputFormat.class);
    conf.setOutputCommitter(CommitterWithDelayCleanup.class);
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(IntWritable.class);
    conf.setMapperClass(IdentityMapper.class);
    conf.setReducerClass(IdentityReducer.class);
    FileInputFormat.setInputPaths(conf, inDir);
    FileOutputFormat.setOutputPath(conf, outDir);
    conf.setNumMapTasks(numMaps);
    conf.setNumReduceTasks(numReduces);
    conf.set("share", share.toString());

    // run job and wait for completion
    JobClient jc = new JobClient(conf);
    RunningJob runningJob = jc.submitJob(conf);
    JobInProgress job = mr.getJobTrackerRunner().getJobTracker().getJob(runningJob.getID());
    
    while (true) {
      if (job.isCleanupLaunched()) {
        LOG.info("Waiting for cleanup to be launched for job " 
                 + runningJob.getID());
        break;
      }
      UtilsForTests.waitFor(100);
    }
    
    // submit another job so that the map load increases and scheduling happens
    LOG.info("Launching dummy job ");
    RunningJob dJob = null;
    try {
      JobConf dConf = new JobConf(conf);
      dConf.setOutputCommitter(FileOutputCommitter.class);
      dJob = UtilsForTests.runJob(dConf, inDir2, outDir2, 2, 0);
    } catch (Exception e) {
      LOG.info("Exception ", e);
      throw new IOException(e);
    }
    
    while (true) {
      LOG.info("Waiting for job " + dJob.getID() + " to complete");
      try {
        Thread.sleep(100);
      } catch (InterruptedException e) {
      }
      if (dJob.isComplete()) {
        break;
      }
    }
    
    // check if the second job is successful
    assertTrue(dJob.isSuccessful());

    // signal the cleanup
    fs.create(share).close();
    
    while (true) {
      LOG.info("Waiting for job " + runningJob.getID() + " to complete");
      try {
        Thread.sleep(100);
      } catch (InterruptedException e) {
      }
      if (runningJob.isComplete()) {
        break;
      }
    }

    assertTrue(runningJob.isComplete());
    assertTrue(runningJob.isSuccessful());
    JobID jobID = runningJob.getID();

    TaskReport[] jobSetupTasks = jc.getSetupTaskReports(jobID);
    assertTrue("Number of job-setup tips is not 2!", jobSetupTasks.length == 2);
    assertTrue("Setup progress is " + runningJob.setupProgress()
        + " and not 1.0", runningJob.setupProgres