/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.raid.RaidNode;
import org.apache.hadoop.raid.Codec;
import org.apache.hadoop.util.HostsFileReader;
import org.apache.hadoop.util.StringUtils;

/**
 * This BlockPlacementPolicy uses a simple heuristic: the replicas of a
 * newly created block are placed at random, so that the group of blocks
 * RAID uses to recover one another is spread across the cluster. This is
 * important for the availability of those blocks.
 *
 * Replication of an existing block continues to use the default placement
 * policy.
 *
 * This simple block placement policy does not guarantee that the blocks of
 * a RAID stripe land on different nodes. However, the BlockMonitor
 * periodically scans the raided files and fixes the placement when it
 * detects a violation.
 *
 * This class may be used by multiple threads concurrently, so it must be
 * thread safe.
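 *
 * A minimal sketch of how this policy might be enabled, assuming the
 * cluster selects its placement policy through the
 * dfs.block.replicator.classname key in hdfs-site.xml:
 * <pre>
 * &lt;property&gt;
 *   &lt;name&gt;dfs.block.replicator.classname&lt;/name&gt;
 *   &lt;value&gt;org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyRaid&lt;/value&gt;
 * &lt;/property&gt;
 * </pre>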
 */
public class BlockPlacementPolicyRaid extends BlockPlacementPolicyDefault {
  public static final Log LOG =
    LogFactory.getLog(BlockPlacementPolicyRaid.class);
  Configuration conf;
  private FSNamesystem namesystem = null;

  private CachedLocatedBlocks cachedLocatedBlocks;
  protected CachedFullPathNames cachedFullPathNames;
  private org.apache.hadoop.raid.ConfigManager policyConfig = null;

  /** {@inheritDoc} */
  @Override
  public void initialize(Configuration conf,  FSClusterStats stats,
                         NetworkTopology clusterMap, HostsFileReader hostsReader,
                         DNSToSwitchMapping dnsToSwitchMapping, FSNamesystem namesystem) {
    super.initialize(conf, stats, clusterMap, 
                     hostsReader, dnsToSwitchMapping, namesystem);
    this.conf = conf;
    this.namesystem = namesystem;
    this.cachedLocatedBlocks = new CachedLocatedBlocks(this.conf, namesystem);
    this.cachedFullPathNames = new CachedFullPathNames(this.conf, namesystem);
  }
  
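  /**
   * Loads the RAID policy configuration from raid-default.xml and
   * raid-site.xml into a ConfigManager. Failures are logged rather than
   * propagated, so target selection can still proceed without it.
   */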
  private void initPolicyConfig() {
    try {
      this.conf.addResource("raid-default.xml");
      this.conf.addResource("raid-site.xml");
      this.policyConfig = new org.apache.hadoop.raid.ConfigManager(this.conf);
    } catch (Exception e) {
      LOG.error("Init policyConfig failed", e);
    }
  }

  @Override
  public DatanodeDescriptor[] chooseTarget(String srcPath, int numOfReplicas,
      DatanodeDescriptor writer, List<DatanodeDescriptor> chosenNodes,
      long blocksize) {
    return chooseTarget(srcPath, numOfReplicas, writer, chosenNodes, null, 
        blocksize);
  }
  
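  /**
   * Chooses targets for a new block of a RAID-related file. Paths that are
   * not involved in RAID fall through to the default policy. When the first
   * replica of a block is being placed, the companion source and parity
   * files are resolved and their cached block locations fetched so that
   * placement can spread the blocks of a stripe across nodes.
   */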
  @Override
  public DatanodeDescriptor[] chooseTarget(String srcPath, int numOfReplicas,
      DatanodeDescriptor writer, List<DatanodeDescriptor> chosenNodes,
      List<Node> exclNodes, long blocksize) {
    try {
      FileInfo info = getFileInfo(srcPath);
      if (LOG.isDebugEnabled()) {
        LOG.debug("FileType:" + srcPath + " " + info.type.name());
      }
      if (info.type == FileType.NOT_RAID) {
        return super.chooseTarget(srcPath, numOfReplicas, writer,
            chosenNodes, exclNodes, blocksize);
      }

      ArrayList<DatanodeDescriptor> results =
          new ArrayList<DatanodeDescriptor>();
      HashMap<Node, Node> excludedNodes = new HashMap<Node, Node>();
      if (exclNodes != null) {
        for (Node node : exclNodes) {
          excludedNodes.put(node, node);
        }
      }
      for (Node node : chosenNodes) {
        excludedNodes.put(node, node);
      }

      if (chosenNodes.size() == 0) {
        // Placing the first replica: resolve the companion source and
        // parity paths for this file so their block holders can be avoided.
        String src, parity;
        if (info.type == FileType.TO_RAID) {
          src = srcPath;
          parity = null;
        } else if (info.type == FileType.SOURCE) {
          src = srcPath;
          parity = getParityFile(info.codec, src);
        } else if (info.type == FileType.TEMP_PARITY) {
          String tmpStr =
              srcPath.substring(info.codec.tmpParityDirectory.length() + 1);
          String str = tmpStr.substring(tmpStr.indexOf("/"));
          src = getSourceFile(str, info.codec.parityDirectory);
          parity = srcPath;
        } else {
          src = getSourceFile(srcPath, info);
          parity = srcPath;
        }

        List<LocatedBlock> srcBlocksList = cachedLocatedBlocks.get(src);
        LocatedBlock[] srcBlocks =
            srcBlocksList.toArray(new LocatedBlock[0]);
        List<LocatedBlock> parityBlocksList = null;
        LocatedBlock[] parityBlocks = null;
        if (parity != null) {
          parityBlocksList = cachedLocatedBlocks.get(parity);
          parityBlocks =