/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs;

import java.io.BufferedReader;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.net.InetAddress;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Iterator;
import java.util.StringTokenizer;

import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.SequenceFile;

import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reducer;

import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * This program executes a specified operation that applies load to 
 * the NameNode.
 * 
 * When run simultaneously on multiple nodes, this program functions
 * as a stress test and benchmark for the NameNode, especially when
 * the number of bytes written to each file is small.
 * 
 * Valid operations are:
 *   create_write
 *   open_read
 *   rename
 *   delete
 * 
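 * A typical invocation might look like the following (a sketch only:
 * the flag names mirror this class's configurable fields, and the jar
 * name is illustrative):
 *   hadoop jar hadoop-test.jar nnbench -operation create_write \
 *       -maps 12 -bytesToWrite 0 -numberOfFiles 1000 \
 *       -replicationFactorPerFile 3 -baseDir /benchmarks/NNBench
 * 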
 * NOTE: The open_read, rename, and delete operations assume that the
 *       files they operate on already exist. Run create_write first to
 *       create them before running any of the other operations.
 */
public class NNBench extends Configured implements Tool {
  private static final Log LOG = LogFactory.getLog(NNBench.class);
  
  protected static final String CONTROL_DIR_NAME = "control";
  protected static final String OUTPUT_DIR_NAME = "output";
  protected static final String DATA_DIR_NAME = "data";
  protected static final String DEFAULT_RES_FILE_NAME = "NNBench_results.log";
  protected static final String NNBENCH_VERSION = "NameNode Benchmark 0.4";
  
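  // Benchmark parameters; the defaults noted inline are expected to be
  // overridden from the command line before the job is submitted.
  // startTime defaults to two minutes in the future so that map tasks
  // launched at different moments can begin the measured work together.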
  public static String operation = "none";
  public static long numberOfMaps = 1L; // default is 1
  public static long numberOfReduces = 1L; // default is 1
  public static long startTime =
          System.currentTimeMillis() + (120 * 1000); // default is 'now' + 2min
  public static long blockSize = 1L; // default is 1
  public static int bytesToWrite = 0; // default is 0
  public static long bytesPerChecksum = 1L; // default is 1
  public static long numberOfFiles = 1L; // default is 1
  public static short replicationFactorPerFile = 1; // default is 1
  public static String baseDir = "/benchmarks/NNBench";  // default
  public static boolean readFileAfterOpen = false; // default is to not read
  
  // Supported operations
  private static final String OP_CREATE_WRITE = "create_write";
  private static final String OP_OPEN_READ = "open_read";
  private static final String OP_RENAME = "rename";
  private static final String OP_DELETE = "delete";
  
  // To display in the format that matches the NN and DN log format
  // Example: 2007-10-26 00:01:19,853
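  // Note: SimpleDateFormat is not thread-safe, so this shared static
  // instance must not be used from multiple threads concurrently.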
  static SimpleDateFormat sdf = 
          new SimpleDateFormat("yyyy-MM-dd' 'HH:mm:ss','S");

  /**
   * Clean up the files before a test run
   * 
   * @throws IOException on error
   */
  private static void cleanupBeforeTestrun(
    Configuration config
  ) throws IOException {

    FileSystem tempFS = FileSystem.get(config);
    
    // Delete the data directory only when running the create_write operation
    if (operation.equals(OP_CREATE_WRITE)) {
      LOG.info("Deleting data directory");
      tempFS.delete(new Path(baseDir, DATA_DIR_NAME), true);
    }
    tempFS.delete(new Path(baseDir, CONTROL_DIR_NAME), true);
    tempFS.delete(new Path(baseDir, OUTPUT_DIR_NAME), true);
  }
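
  /**
   * Illustrative sketch, not part of the original benchmark: reads back a
   * control file written by createControlFiles below, using the matching
   * SequenceFile reader API. The helper name is hypothetical; in the real
   * job flow, map tasks receive these records via SequenceFileInputFormat
   * rather than through a method like this.
   * 
   * @throws IOException on error
   */
  private static void dumpControlFile(
    Configuration config, Path filePath
  ) throws IOException {

    FileSystem tempFS = FileSystem.get(config);
    SequenceFile.Reader reader = null;
    try {
      reader = new SequenceFile.Reader(tempFS, filePath, config);
      Text key = new Text();
      LongWritable value = new LongWritable();

      // Each control file holds a single (file name, 0) record
      while (reader.next(key, value)) {
        LOG.info("Control record: " + key + " -> " + value);
      }
    } finally {
      if (reader != null) {
        reader.close();
      }
    }
  }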
  
  /**
   * Create control files before a test run.
 * The number of files created equals the number of map tasks specified.
   * 
   * @throws IOException on error
   */
  private static void createControlFiles(
    Configuration config
  ) throws IOException {

    FileSystem tempFS = FileSystem.get(config);
    LOG.info("Creating " + numberOfMaps + " control files");

    for (int i = 0; i < numberOfMaps; i++) {
      String strFileName = "NNBench_Controlfile_" + i;
      Path filePath = new Path(new Path(baseDir, CONTROL_DIR_NAME),
              strFileName);

      SequenceFile.Writer writer = null;
      try {
        writer = SequenceFile.createWriter(tempFS, config, filePath, Text.class, 
                LongWritable.class, CompressionType.NONE);
        writer.append(new Text(strFileName), new LongWritable(0L));
      } finally {
        if (writer != null) {
          writer.close();
        }