Java Code Examples for org.apache.hadoop.fs.permission.PermissionStatus#read()

The following examples show how to use org.apache.hadoop.fs.permission.PermissionStatus#read(). Each example is taken from an open-source project (Apache Hadoop or one of its derivatives); the source file and license are noted above each snippet.
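Before the project examples, a minimal, self-contained round-trip sketch may help illustrate the call: it serializes a PermissionStatus to an in-memory stream and reads it back with PermissionStatus.read(). This sketch is not taken from any of the projects below; the user name, group name, and mode are arbitrary illustrative values, and it assumes a Hadoop (hadoop-common) dependency on the classpath.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;

public class PermissionStatusReadDemo {
  public static void main(String[] args) throws IOException {
    // Serialize a PermissionStatus (user, group, mode) to an in-memory stream.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    PermissionStatus original =
        new PermissionStatus("hdfs", "supergroup", new FsPermission((short) 0755));
    original.write(out);
    out.flush();

    // Deserialize it again with the static read() factory, as the examples below do
    // when loading fsimage and edit-log records.
    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
    PermissionStatus restored = PermissionStatus.read(in);

    System.out.println(restored.getUserName());   // hdfs
    System.out.println(restored.getGroupName());  // supergroup
    System.out.println(restored.getPermission()); // rwxr-xr-x
  }
}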
Example 1
Source File: FSImageFormat.java    From hadoop with Apache License 2.0
/** Load {@link INodeFileAttributes}. */
public INodeFileAttributes loadINodeFileAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();
  
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asFile();
  }
  
  final byte[] name = FSImageSerialization.readLocalName(in);
  final PermissionStatus permissions = PermissionStatus.read(in);
  final long modificationTime = in.readLong();
  final long accessTime = in.readLong();
  
  final short replication = namesystem.getBlockManager().adjustReplication(
      in.readShort());
  final long preferredBlockSize = in.readLong();

  return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime,
      accessTime, replication, preferredBlockSize, (byte) 0, null);
}
 
Example 2
Source File: FSImageFormat.java    From hadoop with Apache License 2.0
public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();
  
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asDirectory();
  }
  
  final byte[] name = FSImageSerialization.readLocalName(in);
  final PermissionStatus permissions = PermissionStatus.read(in);
  final long modificationTime = in.readLong();
  
  // Read quotas: quota by storage type does not need to be processed below.
  // It is handled only in protobuf based FsImagePBINode class for newer
  // fsImages. Tools using this class such as legacy-mode of offline image viewer
  // should only load legacy FSImages without newer features.
  final long nsQuota = in.readLong();
  final long dsQuota = in.readLong();

  return nsQuota == -1L && dsQuota == -1L ? new INodeDirectoryAttributes.SnapshotCopy(
      name, permissions, null, modificationTime, null)
    : new INodeDirectoryAttributes.CopyWithQuota(name, permissions,
        null, modificationTime, nsQuota, dsQuota, null, null);
}
 
Example 3
Source File: FSImageFormat.java    From big-c with Apache License 2.0
/** Load {@link INodeFileAttributes}. */
public INodeFileAttributes loadINodeFileAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();
  
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asFile();
  }
  
  final byte[] name = FSImageSerialization.readLocalName(in);
  final PermissionStatus permissions = PermissionStatus.read(in);
  final long modificationTime = in.readLong();
  final long accessTime = in.readLong();
  
  final short replication = namesystem.getBlockManager().adjustReplication(
      in.readShort());
  final long preferredBlockSize = in.readLong();

  return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime,
      accessTime, replication, preferredBlockSize, (byte) 0, null);
}
 
Example 4
Source File: FSImageFormat.java    From big-c with Apache License 2.0
public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)
    throws IOException {
  final int layoutVersion = getLayoutVersion();
  
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
    return loadINodeWithLocalName(true, in, false).asDirectory();
  }
  
  final byte[] name = FSImageSerialization.readLocalName(in);
  final PermissionStatus permissions = PermissionStatus.read(in);
  final long modificationTime = in.readLong();
  
  // Read quotas: quota by storage type does not need to be processed below.
  // It is handled only in protobuf based FsImagePBINode class for newer
  // fsImages. Tools using this class such as legacy-mode of offline image viewer
  // should only load legacy FSImages without newer features.
  final long nsQuota = in.readLong();
  final long dsQuota = in.readLong();

  return nsQuota == -1L && dsQuota == -1L ? new INodeDirectoryAttributes.SnapshotCopy(
      name, permissions, null, modificationTime, null)
    : new INodeDirectoryAttributes.CopyWithQuota(name, permissions,
        null, modificationTime, nsQuota, dsQuota, null, null);
}
 
Example 5
Source File: FSEditLogOp.java    From hadoop with Apache License 2.0
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
    if (this.length != 4) {
      throw new IOException("Incorrect data format. "
          + "symlink operation.");
    }
  }
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
    this.inodeId = FSImageSerialization.readLong(in);
  } else {
    // This id should be updated when the editLogOp is applied
    this.inodeId = INodeId.GRANDFATHER_INODE_ID;
  }
  this.path = FSImageSerialization.readString(in);
  this.value = FSImageSerialization.readString(in);

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.mtime = FSImageSerialization.readLong(in);
    this.atime = FSImageSerialization.readLong(in);
  } else {
    this.mtime = readLong(in);
    this.atime = readLong(in);
  }
  this.permissionStatus = PermissionStatus.read(in);
  
  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
 
Example 6
Source File: FSEditLogOp.java    From big-c with Apache License 2.0
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
    if (this.length != 4) {
      throw new IOException("Incorrect data format. "
          + "symlink operation.");
    }
  }
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
    this.inodeId = FSImageSerialization.readLong(in);
  } else {
    // This id should be updated when the editLogOp is applied
    this.inodeId = INodeId.GRANDFATHER_INODE_ID;
  }
  this.path = FSImageSerialization.readString(in);
  this.value = FSImageSerialization.readString(in);

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.mtime = FSImageSerialization.readLong(in);
    this.atime = FSImageSerialization.readLong(in);
  } else {
    this.mtime = readLong(in);
    this.atime = readLong(in);
  }
  this.permissionStatus = PermissionStatus.read(in);
  
  // read RPC ids if necessary
  readRpcIds(in, logVersion);
}
 
Example 7
Source File: FSImageSerialization.java    From RDFS with Apache License 2.0
static INodeFileUnderConstruction readINodeUnderConstruction(
                          DataInputStream in) throws IOException {
  byte[] name = readBytes(in);
  String path = DFSUtil.bytes2String(name);
  short blockReplication = in.readShort();
  long modificationTime = in.readLong();
  long preferredBlockSize = in.readLong();
  int numBlocks = in.readInt();
  BlockInfo[] blocks = new BlockInfo[numBlocks];
  Block blk = new Block();
  for (int i = 0; i < numBlocks; i++) {
    blk.readFields(in);
    blocks[i] = new BlockInfo(blk, blockReplication);
  }
  PermissionStatus perm = PermissionStatus.read(in);
  String clientName = readString(in);
  String clientMachine = readString(in);

  // These locations are not used at all
  int numLocs = in.readInt();
  DatanodeDescriptor[] locations = new DatanodeDescriptor[numLocs];
  for (int i = 0; i < numLocs; i++) {
    locations[i] = new DatanodeDescriptor();
    locations[i].readFields(in);
  }

  return new INodeFileUnderConstruction(name, 
                                        blockReplication, 
                                        modificationTime,
                                        preferredBlockSize,
                                        blocks,
                                        perm,
                                        clientName,
                                        clientMachine,
                                        null);
}
 
Example 8
Source File: FSEditLogOp.java    From RDFS with Apache License 2.0
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {

  this.length = in.readInt();
  if (-17 < logVersion && length != 2 ||
      logVersion <= -17 && length != 3) {
    throw new IOException("Incorrect data format. "
                          + "Mkdir operation.");
  }
  this.path = FSImageSerialization.readString(in);
  this.timestamp = readLong(in);

  // The disk format stores atimes for directories as well.
  // However, currently this is not being updated/used because of
  // performance reasons.
  if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, logVersion)) {
    /* unused this.atime = */
    readLong(in);
  }

  if (logVersion <= -11) {
    this.permissions = PermissionStatus.read(in);
  } else {
    this.permissions = null;
  }
}
 
Example 9
Source File: FSImage.java    From hadoop-gpu with Apache License 2.0
static INodeFileUnderConstruction readINodeUnderConstruction(
                          DataInputStream in) throws IOException {
  byte[] name = readBytes(in);
  short blockReplication = in.readShort();
  long modificationTime = in.readLong();
  long preferredBlockSize = in.readLong();
  int numBlocks = in.readInt();
  BlockInfo[] blocks = new BlockInfo[numBlocks];
  Block blk = new Block();
  for (int i = 0; i < numBlocks; i++) {
    blk.readFields(in);
    blocks[i] = new BlockInfo(blk, blockReplication);
  }
  PermissionStatus perm = PermissionStatus.read(in);
  String clientName = readString(in);
  String clientMachine = readString(in);

  // These locations are not used at all
  int numLocs = in.readInt();
  DatanodeDescriptor[] locations = new DatanodeDescriptor[numLocs];
  for (int i = 0; i < numLocs; i++) {
    locations[i] = new DatanodeDescriptor();
    locations[i].readFields(in);
  }

  return new INodeFileUnderConstruction(name, 
                                        blockReplication, 
                                        modificationTime,
                                        preferredBlockSize,
                                        blocks,
                                        perm,
                                        clientName,
                                        clientMachine,
                                        null);
}
 
Example 10
Source File: FSEditLogOp.java    From hadoop with Apache License 2.0
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
  }
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
    this.inodeId = in.readLong();
  } else {
    // The inodeId should be updated when this editLogOp is applied
    this.inodeId = INodeId.GRANDFATHER_INODE_ID;
  }
  if ((-17 < logVersion && length != 4) ||
      (logVersion <= -17 && length != 5 && !NameNodeLayoutVersion.supports(
          LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion))) {
    throw new IOException("Incorrect data format."  +
                          " logVersion is " + logVersion +
                          " but writables.length is " +
                          length + ". ");
  }
  this.path = FSImageSerialization.readString(in);

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.replication = FSImageSerialization.readShort(in);
    this.mtime = FSImageSerialization.readLong(in);
  } else {
    this.replication = readShort(in);
    this.mtime = readLong(in);
  }

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.FILE_ACCESS_TIME, logVersion)) {
    if (NameNodeLayoutVersion.supports(
        LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
      this.atime = FSImageSerialization.readLong(in);
    } else {
      this.atime = readLong(in);
    }
  } else {
    this.atime = 0;
  }

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.blockSize = FSImageSerialization.readLong(in);
  } else {
    this.blockSize = readLong(in);
  }

  this.blocks = readBlocks(in, logVersion);
  this.permissions = PermissionStatus.read(in);

  if (this.opCode == OP_ADD) {
    aclEntries = AclEditLogUtil.read(in, logVersion);
    this.xAttrs = readXAttrsFromEditLog(in, logVersion);
    this.clientName = FSImageSerialization.readString(in);
    this.clientMachine = FSImageSerialization.readString(in);
    if (NameNodeLayoutVersion.supports(
        NameNodeLayoutVersion.Feature.CREATE_OVERWRITE, logVersion)) {
      this.overwrite = FSImageSerialization.readBoolean(in);
    } else {
      this.overwrite = false;
    }
    if (NameNodeLayoutVersion.supports(
        NameNodeLayoutVersion.Feature.BLOCK_STORAGE_POLICY, logVersion)) {
      this.storagePolicyId = FSImageSerialization.readByte(in);
    } else {
      this.storagePolicyId = BlockStoragePolicySuite.ID_UNSPECIFIED;
    }
    // read clientId and callId
    readRpcIds(in, logVersion);
  } else {
    this.clientName = "";
    this.clientMachine = "";
  }
}
 
Example 11
Source File: FSEditLogOp.java    From hadoop with Apache License 2.0
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
  }
  if (-17 < logVersion && length != 2 ||
      logVersion <= -17 && length != 3
      && !NameNodeLayoutVersion.supports(
          LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    throw new IOException("Incorrect data format. Mkdir operation.");
  }
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
    this.inodeId = FSImageSerialization.readLong(in);
  } else {
    // This id should be updated when this editLogOp is applied
    this.inodeId = INodeId.GRANDFATHER_INODE_ID;
  }
  this.path = FSImageSerialization.readString(in);
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.timestamp = FSImageSerialization.readLong(in);
  } else {
    this.timestamp = readLong(in);
  }

  // The disk format stores atimes for directories as well.
  // However, currently this is not being updated/used because of
  // performance reasons.
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.FILE_ACCESS_TIME, logVersion)) {
    if (NameNodeLayoutVersion.supports(
        LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
      FSImageSerialization.readLong(in);
    } else {
      readLong(in);
    }
  }

  this.permissions = PermissionStatus.read(in);
  aclEntries = AclEditLogUtil.read(in, logVersion);

  xAttrs = readXAttrsFromEditLog(in, logVersion);
}
 
Example 12
Source File: FSEditLogOp.java    From big-c with Apache License 2.0
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
  }
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
    this.inodeId = in.readLong();
  } else {
    // The inodeId should be updated when this editLogOp is applied
    this.inodeId = INodeId.GRANDFATHER_INODE_ID;
  }
  if ((-17 < logVersion && length != 4) ||
      (logVersion <= -17 && length != 5 && !NameNodeLayoutVersion.supports(
          LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion))) {
    throw new IOException("Incorrect data format."  +
                          " logVersion is " + logVersion +
                          " but writables.length is " +
                          length + ". ");
  }
  this.path = FSImageSerialization.readString(in);

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.replication = FSImageSerialization.readShort(in);
    this.mtime = FSImageSerialization.readLong(in);
  } else {
    this.replication = readShort(in);
    this.mtime = readLong(in);
  }

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.FILE_ACCESS_TIME, logVersion)) {
    if (NameNodeLayoutVersion.supports(
        LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
      this.atime = FSImageSerialization.readLong(in);
    } else {
      this.atime = readLong(in);
    }
  } else {
    this.atime = 0;
  }

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.blockSize = FSImageSerialization.readLong(in);
  } else {
    this.blockSize = readLong(in);
  }

  this.blocks = readBlocks(in, logVersion);
  this.permissions = PermissionStatus.read(in);

  if (this.opCode == OP_ADD) {
    aclEntries = AclEditLogUtil.read(in, logVersion);
    this.xAttrs = readXAttrsFromEditLog(in, logVersion);
    this.clientName = FSImageSerialization.readString(in);
    this.clientMachine = FSImageSerialization.readString(in);
    if (NameNodeLayoutVersion.supports(
        NameNodeLayoutVersion.Feature.CREATE_OVERWRITE, logVersion)) {
      this.overwrite = FSImageSerialization.readBoolean(in);
    } else {
      this.overwrite = false;
    }
    if (NameNodeLayoutVersion.supports(
        NameNodeLayoutVersion.Feature.BLOCK_STORAGE_POLICY, logVersion)) {
      this.storagePolicyId = FSImageSerialization.readByte(in);
    } else {
      this.storagePolicyId = BlockStoragePolicySuite.ID_UNSPECIFIED;
    }
    // read clientId and callId
    readRpcIds(in, logVersion);
  } else {
    this.clientName = "";
    this.clientMachine = "";
  }
}
 
Example 13
Source File: FSEditLogOp.java    From big-c with Apache License 2.0
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
  if (!NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.length = in.readInt();
  }
  if (-17 < logVersion && length != 2 ||
      logVersion <= -17 && length != 3
      && !NameNodeLayoutVersion.supports(
          LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    throw new IOException("Incorrect data format. Mkdir operation.");
  }
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
    this.inodeId = FSImageSerialization.readLong(in);
  } else {
    // This id should be updated when this editLogOp is applied
    this.inodeId = INodeId.GRANDFATHER_INODE_ID;
  }
  this.path = FSImageSerialization.readString(in);
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
    this.timestamp = FSImageSerialization.readLong(in);
  } else {
    this.timestamp = readLong(in);
  }

  // The disk format stores atimes for directories as well.
  // However, currently this is not being updated/used because of
  // performance reasons.
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.FILE_ACCESS_TIME, logVersion)) {
    if (NameNodeLayoutVersion.supports(
        LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
      FSImageSerialization.readLong(in);
    } else {
      readLong(in);
    }
  }

  this.permissions = PermissionStatus.read(in);
  aclEntries = AclEditLogUtil.read(in, logVersion);

  xAttrs = readXAttrsFromEditLog(in, logVersion);
}
 
Example 14
Source File: FSEditLogOp.java    From RDFS with Apache License 2.0
@Override
void readFields(DataInputStream in, int logVersion)
    throws IOException {
  // versions > 0 support per file replication
  // get name and replication
  this.length = in.readInt();
  if (-7 == logVersion && length != 3 ||
      -17 < logVersion && logVersion < -7 && length != 4 ||
      (logVersion <= -17 && length != 5)) {
    throw new IOException("Incorrect data format."  +
                          " logVersion is " + logVersion +
                          " but writables.length is " +
                          length + ". ");
  }
  this.path = FSImageSerialization.readString(in);

  this.replication = readShort(in);
  this.mtime = readLong(in);

  if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, logVersion)) {
    this.atime = readLong(in);
  } else {
    this.atime = 0;
  }
  if (logVersion < -7) {
    this.blockSize = readLong(in);
  } else {
    this.blockSize = 0;
  }

  // get blocks
  this.blocks = readBlocks(in, logVersion);

  if (logVersion <= -11) {
    this.permissions = PermissionStatus.read(in);
  } else {
    this.permissions = null;
  }

  // clientname, clientMachine and block locations of last block.
  if (this.opCode == OP_ADD && logVersion <= -12) {
    this.clientName = FSImageSerialization.readString(in);
    this.clientMachine = FSImageSerialization.readString(in);
    if (-13 <= logVersion) {
      readDatanodeDescriptorArray(in);
    }
  } else {
    this.clientName = "";
    this.clientMachine = "";
  }
}
 
Example 15
Source File: FSImageFormat.java    From RDFS with Apache License 2.0
/**
 * load an inode from fsimage except for its name
 * 
 * @param in data input stream from which image is read
 * @return an inode
 */
private INode loadINode(DataInputStream in)
    throws IOException {
  long modificationTime = 0;
  long atime = 0;
  long blockSize = 0;
  
  int imgVersion = getLayoutVersion();
  short replication = in.readShort();
  replication = namesystem.adjustReplication(replication);
  modificationTime = in.readLong();
  if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, imgVersion)) {
    atime = in.readLong();
  }
  if (imgVersion <= -8) {
    blockSize = in.readLong();
  }
  int numBlocks = in.readInt();
  BlockInfo blocks[] = null;

  // for older versions, a blocklist of size 0
  // indicates a directory.
  if ((-9 <= imgVersion && numBlocks > 0) ||
      (imgVersion < -9 && numBlocks >= 0)) {
    blocks = new BlockInfo[numBlocks];
    for (int j = 0; j < numBlocks; j++) {
      blocks[j] = new BlockInfo(replication);
      if (-14 < imgVersion) {
        blocks[j].set(in.readLong(), in.readLong(), 
                      Block.GRANDFATHER_GENERATION_STAMP);
      } else {
        blocks[j].readFields(in);
      }
    }
  }
  // Older versions of HDFS do not store the block size in the inode.
  // If the file has more than one block, use the size of the 
  // first block as the blocksize. Otherwise use the default block size.
  //
  if (-8 <= imgVersion && blockSize == 0) {
    if (numBlocks > 1) {
      blockSize = blocks[0].getNumBytes();
    } else {
      long first = ((numBlocks == 1) ? blocks[0].getNumBytes(): 0);
      blockSize = Math.max(namesystem.getDefaultBlockSize(), first);
    }
  }
  
  // get quota only when the node is a directory
  long nsQuota = -1L;
  if (LayoutVersion.supports(Feature.NAMESPACE_QUOTA, imgVersion)
      && blocks == null) {
    nsQuota = in.readLong();
  }
  long dsQuota = -1L;
  if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imgVersion)
      && blocks == null) {
    dsQuota = in.readLong();
  }

  PermissionStatus permissions = namesystem.getUpgradePermission();
  if (imgVersion <= -11) {
    permissions = PermissionStatus.read(in);
  }

  return INode.newINode(permissions, blocks, replication,
      modificationTime, atime, nsQuota, dsQuota, blockSize);
}