/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import javax.security.auth.login.LoginException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedBlockInfo;
import org.apache.hadoop.net.DNS;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.security.UnixUserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Level;

/**
 * Main class for a series of name-node benchmarks.
 * 
 * Each benchmark measures throughput and average execution time 
 * of a specific name-node operation, e.g. file creation or block reports.
 * 
 * The benchmark does not involve any other Hadoop components
 * except for the name-node. Each operation is executed
 * by directly calling the respective name-node method.
 * The name-node here is real; all other components are simulated.
 * 
 * Command line arguments for the benchmark include:<br>
 * 1) the total number of operations to be performed,<br>
 * 2) the number of threads to run these operations,<br>
 * 3) operation-specific input parameters.
 * 
 * The benchmark first generates inputs for each thread so that the
 * input generation overhead does not affect the resulting statistics.
 * The numbers of operations performed by the threads are practically
 * the same; precisely, the difference between the numbers of operations
 * performed by any two threads does not exceed 1.
 * 
 * Then the benchmark executes the specified number of operations using 
 * the specified number of threads and outputs the resulting stats.
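 * 
 * For example, a run of all operations might look like the following;
 * the thread and file counts here are purely illustrative:
 * <pre>
 *   hadoop org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark \
 *       -op all -threads 4 -files 1000
 * </pre>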
 */
public class NNThroughputBenchmark {
  private static final Log LOG = LogFactory.getLog(NNThroughputBenchmark.class);
  private static final int BLOCK_SIZE = 16;

  static Configuration config;
  static NameNode nameNode;

  private final UserGroupInformation ugi;

  NNThroughputBenchmark(Configuration conf) throws IOException, LoginException {
    config = conf;
    ugi = UnixUserGroupInformation.login(config);
    UserGroupInformation.setCurrentUser(ugi);

    // We do not need many handlers, since each thread simulates a handler
    // by calling name-node methods directly
    config.setInt("dfs.namenode.handler.count", 1);
    // set exclude file
    config.set("dfs.hosts.exclude", "${hadoop.tmp.dir}/dfs/hosts/exclude");
    File excludeFile = new File(config.get("dfs.hosts.exclude", "exclude"));
    if(!excludeFile.exists()) {
      File excludeDir = excludeFile.getParentFile();
      // mkdirs() returns false when the directory already exists,
      // so only fail if the directory is actually missing
      if(!excludeDir.exists() && !excludeDir.mkdirs())
        throw new IOException("NNThroughputBenchmark: cannot mkdir " + excludeFile);
    }
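    // create the exclude file, or truncate an existing one to empty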
    new FileOutputStream(excludeFile).close();
    // Start the NameNode
    String[] argv = new String[] {};
    nameNode = NameNode.createNameNode(argv, config);
  }

  void close() throws IOException {
    nameNode.stop();
  }

  static void turnOffNameNodeLogging() {
    // change log level to ERROR: NameNode.LOG & NameNode.stateChangeLog
    ((Log4JLogger)NameNode.LOG).getLogger().setLevel(Level.ERROR);
    ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ERROR);
    ((Log4JLogger)NetworkTopology.LOG).getLogger().setLevel(Level.ERROR);
    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ERROR);
    ((Log4JLogger)FSNamesystem.auditLog).getLogger().setLevel(Level.ERROR);
    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ERROR);
  }

  /**
   * Base class for collecting operation statistics.
   * 
   * Subclass this class in order to collect statistics for a 
   * specific name-node operation.
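   * 
   * A minimal outline of such a subclass (the class name and body are
   * illustrative only; the actual abstract methods to implement are
   * declared below):
   * <pre>
   *   abstract class CreateFileStats extends OperationStatsBase {
   *     // implement the operation-specific abstract methods here:
   *     // argument parsing, input generation, the name-node call
   *     // itself, and result reporting
   *   }
   * </pre>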
   */
  abstract class OperationStatsBase {
    protected static final String BASE_DIR_NAME = "/nnThroughputBenchmark";
    protected static final String OP_ALL_NAME = "all";
    protected static final String OP_ALL_USAGE = "-op all " +
                                  "<other ops options> [-keepResults]";

    protected String baseDir;
    protected short replication;
    protected int  numThreads = 0;        // number of threads