/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.tools;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.net.SocketException;
import java.net.SocketTimeoutException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.LeaseRenewal;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlockWithMetaInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.LocatedBlocksWithMetaInfo;
import org.apache.hadoop.hdfs.server.common.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.ipc.ProtocolProxy;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.security.UnixUserGroupInformation;

/**
 * There is a need to perform fast file copy on HDFS (primarily for the purpose
 * of HBase Snapshot). The fast copy mechanism for a file works as follows:
 *
 * 1) Query metadata for all blocks of the source file.
 *
 * 2) For each block 'b' of the file, find out its datanode locations.
 *
 * 3) For each block of the file, add an empty block to the namesystem for the
 * destination file.
 *
 * 4) For each location of the block, instruct the datanode to make a local copy
 * of that block.
 *
 * 5) Once each datanode has copied over its respective blocks, it reports
 * back to the namenode.
 *
 * 6) Wait for all blocks to be copied and exit.
 *
 * This speeds up the copying process considerably by eliminating transfers
 * through the top-of-rack switch: each block is copied locally on the
 * datanodes that already host it.
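 *
 * A minimal usage sketch (hypothetical: the copy(String, String) and
 * shutdown() methods shown here are assumptions and are not part of this
 * excerpt):
 *
 * <pre>
 *   Configuration conf = new Configuration();
 *   FastCopy fastCopy = new FastCopy(conf);
 *   try {
 *     // Copies the source file to the destination within the same cluster.
 *     fastCopy.copy("/src/part-00000", "/dst/part-00000");
 *   } finally {
 *     fastCopy.shutdown();
 *   }
 * </pre>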
 **/

public class FastCopy {

  private static final Log LOG = LogFactory.getLog(FastCopy.class);
  protected final Configuration conf;

  private final Random random = new Random();
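  // Name identifying this FastCopy client; randomized per instance to avoid collisions.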
  private String clientName;
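  // Default number of threads used for copy operations.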
  public static int THREAD_POOL_SIZE = 5;
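  // Thread pool that runs the copy tasks.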
  private final ExecutorService executor;
  // Map used to store the status of each block.
  private final Map<Block, BlockStatus> blockStatusMap =
    new ConcurrentHashMap<Block, BlockStatus>();
  // Map used to store the error count for each datanode.
  private final Map<DatanodeInfo, Integer> datanodeErrors =
    new ConcurrentHashMap<DatanodeInfo, Integer>();
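  // Cache of ClientDatanodeProtocol RPC proxies, keyed by datanode.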
  private final Map <String, ClientDatanodeProtocol> datanodeMap =
    new ConcurrentHashMap<String, ClientDatanodeProtocol>();
  // Maximum time to wait for a file copy to complete.
  public final long MAX_WAIT_TIME;
  public static final long WAIT_SLEEP_TIME = 5000; // 5 seconds
  // Map used to store the file status of each file being copied by this tool.
  private final Map<String, FastCopyFileStatus> fileStatusMap =
    new ConcurrentHashMap<String, FastCopyFileStatus>();
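  // Maximum number of errors tolerated for a single datanode.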
  private final int maxDatanodeErrors;
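  // Timeout for RPC calls made to datanodes.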
  private final int rpcTimeout;
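  // Minimum replication a destination block must reach before it is considered copied.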
  private final short minReplication;
  // Maximum time to wait for a block to be reported to the namenode.
  private final long BLK_WAIT_TIME;

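  // Source and destination file systems; non-null only when supplied through
  // the corresponding constructor.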
  private DistributedFileSystem srcFileSystem = null;
  private DistributedFileSystem dstFileSystem = null;

  public FastCopy() throws Exception {
    this(new Configuration());
  }

  public FastCopy(Configuration conf) throws Exception {
    this(conf, THREAD_POOL_SIZE);
  }

  public FastCopy(Configuration conf, DistributedFileSystem srcFileSystem,
      DistributedFileSystem dstFileSystem) throws Exception {
    this(conf, THREAD_POOL_SIZE);
    this.srcFileSystem = srcFileSystem;
    this.dstFileSystem = dstFileSystem;
  }

  public FastCopy(Configuration conf, int threadPoolSize) throws Exception {
    this.conf = conf;
    this.executor = Executors.newFixedThreadPool(threadPoolSize);
    this.clientName = "FastCopy" + random.nextInt();
    MAX_WAIT_TIME = conf.getInt("dfs.fastcopy.file.wait_time",
        5 * 60 * 1000); // 5 minutes default

    BLK_WAIT_TIME = conf.getInt("dfs.fastcopy.block.wait_time",
        5 * 60 * 1000); // 5 minutes.
    minReplication = (short) conf.getInt("dfs.repli