/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs.server.datanode;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileLock;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.HardLink;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.DiskChecker;

/**
 * Data storage information for a DataNode.
 * <p>
 * Manages the DataNode's storage directories and the storage state
 * recorded in each of them.
 * <p>
 * @see Storage
 */
public class DataStorage extends Storage {
  // Constants
  final static String BLOCK_SUBDIR_PREFIX = "subdir";
  final static String BLOCK_FILE_PREFIX = "blk_";
  final static String COPY_FILE_PREFIX = "dncp_";
  final static String STORAGE_DIR_DETACHED = "detach";
  public final static String STORAGE_DIR_TMP = "tmp";
  
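  // Property key under which the storage ID is recorded in the VERSION file.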
  private final static String STORAGE_ID = "storageID";
  
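  // Unique storage identifier of this DataNode, persisted in the VERSION
  // file of each storage directory.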
  private String storageID;

  // flag to ensure storage initialization occurs only once
  private boolean initialized = false;
  
  // Maps namespace ID to the corresponding NameSpaceSliceStorage.
  private Map<Integer, NameSpaceSliceStorage> nsStorageMap
    = new HashMap<Integer, NameSpaceSliceStorage>();

  private final DataNode datanode;

  // Map of top-level storage directory to its layout version.
  Map<File, Integer> layoutMap = new HashMap<File, Integer>();

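  /**
   * Creates an uninitialized DataStorage for the given DataNode with an
   * empty storage ID.
   */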
  DataStorage(DataNode datanode) {
    super(NodeType.DATA_NODE);
    storageID = "";
    this.datanode = datanode;
  }
  
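  /**
   * Creates a DataStorage from existing storage information and an
   * existing storage ID.
   */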
  public DataStorage(StorageInfo storageInfo, String strgID, DataNode datanode) {
    super(NodeType.DATA_NODE, storageInfo);
    this.storageID = strgID;
    this.datanode = datanode;
  }

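  /**
   * Returns the per-namespace slice storage for the given namespace ID,
   * or null if that namespace has not been loaded.
   */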
  public NameSpaceSliceStorage getNStorage(int namespaceId) {
    return nsStorageMap.get(namespaceId);
  }
  
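  /** Returns the storage ID of this DataNode. */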
  public String getStorageID() {
    return storageID;
  }
  
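  /** Sets the storage ID of this DataNode. */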
  void setStorageID(String newStorageID) {
    this.storageID = newStorageID;
  }

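  /**
   * Creates a new storage ID based on the DataNode port, unless a
   * non-empty storage ID has already been assigned.
   */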
  synchronized void createStorageID(int datanodePort) {
    if (storageID != null && !storageID.isEmpty()) {
      return;
    }
    storageID = DataNode.createNewStorageId(datanodePort);
  }
  
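  /**
   * Analyzes the state of each given data directory for the supplied
   * namespace: missing directories are skipped, empty unformatted
   * directories are formatted, and inconsistent directories are recovered.
   * Directories that fail analysis are unlocked, logged, and ignored.
   * Returns the storage directories that were successfully analyzed.
   */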
  ArrayList<StorageDirectory> analyzeStorageDirs(NamespaceInfo nsInfo,
      Collection<File> dataDirs, StartupOption startOpt) throws IOException {
    
    if (storageID == null) {
      this.storageID = "";
    }

    if (storageDirs == null) {
      this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size());
    } else {
      ((ArrayList<StorageDirectory>) storageDirs)
          .ensureCapacity(storageDirs.size() + dataDirs.size());
    }

    ArrayList<StorageDirectory> newDirs = new ArrayList<StorageDirectory>(
        dataDirs.size());
    ArrayList<StorageState> dataDirStates = new ArrayList<StorageState>(dataDirs.size());
    for(Iterator<File> it = dataDirs.iterator(); it.hasNext();) {
      File dataDir = it.next();
      StorageDirectory sd = new StorageDirectory(dataDir);
      StorageState curState;
      try {
        curState = sd.analyzeStorage(startOpt);
        // sd is locked but not opened
        switch(curState) {
          case NORMAL:
            break;
          case NON_EXISTENT:
            // ignore this storage
            LOG.info("Storage directory " + dataDir + " does not exist.");
            it.remove();
            continue;
          case NOT_FORMATTED: // format
            LOG.info("Storage directory " + dataDir + " is not formatted.");
            if (!sd.isEmpty()) {
              LOG.error("Storage directory " + dataDir
                + " is not empty, and will not be formatted! Exiting.");
              throw new IOException(
                "Storage directory " + dataDir + " is not empty!");
            }
            LOG.info("Formatting ...");
            format(sd, nsInfo);
            break;
          default:  // recovery part is common
            sd.doRecover(curState);
        }
      } catch (IOException ioe) {
        try {
          sd.unlock();
        } catch (IOException e) {
          LOG.warn("Exception when unlocking storage directory", e);
        }
        LOG.warn("Ignoring storage directory " + dataDir, ioe);
        //continue with other good dirs
        continue;
      }
      //