/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs;

import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.RandomAccessFile;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Random;

import javax.security.auth.login.LoginException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;
import org.apache.hadoop.hdfs.server.datanode.NameSpaceSliceStorage;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.net.*;
import org.apache.hadoop.security.*;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;

/**
 * This class creates a single-process DFS cluster for JUnit testing.
 * The data directories for non-simulated DFS are under the testing directory.
 * For simulated data nodes, no underlying fs storage is used.
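 * <p>
 * A minimal usage sketch (hypothetical test; the multi-argument constructor
 * and the accessors used here are declared further down in this class):
 * <pre>{@code
 *   Configuration conf = new Configuration();
 *   MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
 *   try {
 *     FileSystem fs = cluster.getFileSystem();
 *     // exercise the cluster through fs ...
 *   } finally {
 *     cluster.shutdown();
 *   }
 * }</pre>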
 */
public class MiniDFSCluster {
  static final Log LOG = LogFactory.getLog(MiniDFSCluster.class);
  public static final String NAMESERVICE_ID_PREFIX = "nameserviceId";
  public static int currNSId = 0;
  
  private static final int PORT_START = 10000;
  // the next port that will be handed out (if it is free)
  private static volatile int nextPort = PORT_START;

  /**
   * Check whether a port is free by attempting to bind a ServerSocket to it.
   */
  static boolean isPortFree(int port) {
    ServerSocket socket = null;
    try {
      socket = new ServerSocket();
      socket.bind(new InetSocketAddress(port));
    } catch (IOException e) {
      return false;
    } finally {
      try {
        if (socket != null) {
          socket.close();
        }
      } catch (IOException ignore) {
        // do nothing
      }
    }
    return true;
  }

  /**
   * Get a free port.
   */
  public static int getFreePort() {
    return getFreePorts(1);
  }

  /**
   * Get the specified number of consecutive free ports.
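   * <p>
   * A minimal sketch of the intended use (illustrative only; the range is
   * only known to have been free at probe time, so a racing process may
   * still claim a port before the caller binds it):
   * <pre>{@code
   *   int base = MiniDFSCluster.getFreePorts(3);
   *   // base, base + 1 and base + 2 were all free when probed
   * }</pre>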
   * @return the first free port of the range
   */
  public static int getFreePorts(int num) {
    int port = nextPort;

    boolean found;
    do {
      // Reset for each candidate range; without this, a single busy port
      // would leave found == false forever and the loop would never end.
      found = true;
      for (int i = port; i < port + num; i++) {
        if (!isPortFree(i)) {
          port = i + 1;
          found = false;
          break; // from for loop
        }
      }
    } while (!found);

    nextPort = port + num;
    LOG.info("using free port " + port + " (+" + (num - 1) + ")");
    return port;
  }

  /**
   * Bundles a DataNode with the configuration and arguments it was started
   * with, so the node can be stopped and later restarted in place.
   */
  public class DataNodeProperties {
    public DataNode datanode;
    Configuration conf;
    String[] dnArgs;

    DataNodeProperties(DataNode node, Configuration conf, String[] args) {
      this.datanode = node;
      this.conf = conf;
      this.dnArgs = args;
    }
  }
  
  boolean federation = false;
  private Configuration conf;
  private NameNodeInfo[] nameNodes;
  
  /**
   * Stores the information related to a namenode in the cluster
   */
  static class NameNodeInfo {
    final NameNode nameNode;
    final Configuration conf;
    NameNodeInfo(NameNode nn, Configuration conf) {
      this.nameNode = nn;
      this.conf = new Configuration(conf);
    }
  }

  private int numDataNodes;
  private ArrayList<DataNodeProperties> dataNodes = 
                         new ArrayList<DataNodeProperties>();
  private File base_dir;
  private File data_dir;

  public final static String FINALIZED_DIR_NAME = "/current/finalized/";
  public final static String RBW_DIR_NAME = "/current/rbw/";
  public final static String CURRENT_DIR_NAME = "/current";
  public final static String DFS_CLUSTER_ID = "dfs.cluster.id";

  // wait until namenode has left safe mode?
  private boolean waitSafeMode = true;  
  
  /**
   * This null constructor is used only when wishing to start a data-node cluster
   * without a name node (i.e., when the name node is started elsewhere).
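   * <p>
   * A sketch of that mode (hypothetical; it assumes one of the startDataNodes
   * overloads declared later in this class and a conf pointing at the
   * externally started name node):
   * <pre>{@code
   *   MiniDFSCluster cluster = new MiniDFSCluster();
   *   cluster.startDataNodes(conf, 2, true, null, null);
   * }</pre>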
   */
  public MiniDFSCluster() {
    nameNodes = new NameNodeInfo[0]; // No namenode in the cluster
  }
  
  /**
   * Modify the config and start up the servers with the given operation.
   * Servers will be started on free ports.
   * <p>
   * The caller must manage the creation of NameNode and DataNode directories
   * and have already set dfs.name.dir and dfs.data.dir in the given conf.