Java Code Examples for org.apache.hadoop.hdfs.protocol.HdfsFileStatus#getLen()

The following examples show how to use org.apache.hadoop.hdfs.protocol.HdfsFileStatus#getLen(). They are drawn from open-source projects; the source file, originating project, and license are noted above each example.
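Before the project examples, here is a minimal usage sketch (not taken from any of the projects below): it obtains an HdfsFileStatus through DFSClient#getFileInfo(), the same pattern Example 9 uses, checks for null, and reads the length in bytes with getLen(). The NameNode URI and file path are placeholders chosen for illustration.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

public class GetLenDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode URI; adjust to the target cluster.
    DFSClient client = new DFSClient(new URI("hdfs://localhost:8020"), conf);
    try {
      // getFileInfo returns null when the path does not exist.
      HdfsFileStatus status = client.getFileInfo("/tmp/example.txt");
      if (status == null) {
        System.out.println("File not found");
      } else {
        // getLen() returns the file length in bytes.
        System.out.println("Length: " + status.getLen());
      }
    } finally {
      client.close();
    }
  }
}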
Example 1
Source File: Nfs3Utils.java    From hadoop with Apache License 2.0
public static Nfs3FileAttributes getNfs3FileAttrFromFileStatus(
    HdfsFileStatus fs, IdMappingServiceProvider iug) {
  /**
   * Some 32-bit Linux clients have a problem with 64-bit fileIds: the client
   * appears to take only the lower 32 bits of the fileId and treat them as a
   * signed int. When the 32nd bit is 1, the client considers the id invalid.
   */
  NfsFileType fileType = fs.isDir() ? NfsFileType.NFSDIR : NfsFileType.NFSREG;
  fileType = fs.isSymlink() ? NfsFileType.NFSLNK : fileType;
  int nlink = (fileType == NfsFileType.NFSDIR) ? fs.getChildrenNum() + 2 : 1;
  long size = (fileType == NfsFileType.NFSDIR) ? getDirSize(fs
      .getChildrenNum()) : fs.getLen();
  return new Nfs3FileAttributes(fileType, nlink,
      fs.getPermission().toShort(), iug.getUidAllowingUnknown(fs.getOwner()),
      iug.getGidAllowingUnknown(fs.getGroup()), size, 0 /* fsid */,
      fs.getFileId(), fs.getModificationTime(), fs.getAccessTime(),
      new Nfs3FileAttributes.Specdata3());
}
 
Example 2
Source File: DFSOutputStream.java    From hadoop with Apache License 2.0
/** Construct a new output stream for append. */
private DFSOutputStream(DFSClient dfsClient, String src,
    EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock,
    HdfsFileStatus stat, DataChecksum checksum) throws IOException {
  this(dfsClient, src, progress, stat, checksum);
  initialFileSize = stat.getLen(); // length of file when opened
  this.shouldSyncBlock = flags.contains(CreateFlag.SYNC_BLOCK);

  boolean toNewBlock = flags.contains(CreateFlag.NEW_BLOCK);

  // The last partial block of the file has to be filled.
  if (!toNewBlock && lastBlock != null) {
    // indicate that we are appending to an existing block
    bytesCurBlock = lastBlock.getBlockSize();
    streamer = new DataStreamer(lastBlock, stat, bytesPerChecksum);
  } else {
    computePacketChunkSize(dfsClient.getConf().writePacketSize,
        bytesPerChecksum);
    streamer = new DataStreamer(stat,
        lastBlock != null ? lastBlock.getBlock() : null);
  }
  this.fileEncryptionInfo = stat.getFileEncryptionInfo();
}
 
Example 3
Source File: Nfs3Utils.java    From hadoop with Apache License 2.0
public static WccAttr getWccAttr(DFSClient client, String fileIdPath)
    throws IOException {
  HdfsFileStatus fstat = getFileStatus(client, fileIdPath);
  if (fstat == null) {
    return null;
  }

  long size = fstat.isDir() ? getDirSize(fstat.getChildrenNum()) : fstat
      .getLen();
  return new WccAttr(size, new NfsTime(fstat.getModificationTime()),
      new NfsTime(fstat.getModificationTime()));
}
 
Example 4
Source File: WebHdfsFileSystem.java    From hadoop with Apache License 2.0
private FileStatus makeQualified(HdfsFileStatus f, Path parent) {
  return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
      f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
      f.getPermission(), f.getOwner(), f.getGroup(),
      f.isSymlink() ? new Path(f.getSymlink()) : null,
      f.getFullPath(parent).makeQualified(getUri(), getWorkingDirectory()));
}
 
Example 5
Source File: TestJsonUtil.java    From big-c with Apache License 2.0
static FileStatus toFileStatus(HdfsFileStatus f, String parent) {
  return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
      f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
      f.getPermission(), f.getOwner(), f.getGroup(),
      f.isSymlink() ? new Path(f.getSymlink()) : null,
      new Path(f.getFullName(parent)));
}
 
Example 6
Source File: FileDataServlet.java    From hadoop with Apache License 2.0
/** Select a datanode to service this request.
 * Currently, this looks at no more than the first five blocks of a file,
 * selecting a datanode randomly from the most represented.
 * @param blks located blocks of the file
 * @param i file status of the file
 * @param conf the configuration
 */
private DatanodeID pickSrcDatanode(LocatedBlocks blks, HdfsFileStatus i,
    Configuration conf) throws IOException {
  if (i.getLen() == 0 || blks.getLocatedBlocks().size() <= 0) {
    // pick a random datanode
    NameNode nn = NameNodeHttpServer.getNameNodeFromContext(
        getServletContext());
    return NamenodeJspHelper.getRandomDatanode(nn);
  }
  return JspHelper.bestNode(blks, conf);
}
 
Example 7
Source File: TestJsonUtil.java    From hadoop with Apache License 2.0
static FileStatus toFileStatus(HdfsFileStatus f, String parent) {
  return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
      f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
      f.getPermission(), f.getOwner(), f.getGroup(),
      f.isSymlink() ? new Path(f.getSymlink()) : null,
      new Path(f.getFullName(parent)));
}
 
Example 8
Source File: DFSClient.java    From RDFS with Apache License 2.0
/**
 * Convert an HdfsFileStatus and its block locations to a LocatedFileStatus
 * @param stat an HdfsFileStatus
 * @param locs the file's block locations
 * @param src parent path in string representation
 * @return a FileStatus object
 */
private static LocatedFileStatus toLocatedFileStatus(
    HdfsFileStatus stat, LocatedBlocks locs, String src) {
  if (stat == null) {
    return null;
  }
  return new LocatedFileStatus(stat.getLen(),
      stat.isDir(), stat.getReplication(),
      stat.getBlockSize(), stat.getModificationTime(),
      stat.getAccessTime(),
      stat.getPermission(), stat.getOwner(), stat.getGroup(),
      stat.getFullPath(new Path(src)), // full path
      DFSUtil.locatedBlocks2Locations(locs));
}
 
Example 9
Source File: TestFsck.java    From hadoop with Apache License 2.0
public void checkSalvagedRemains() throws IOException {
  int chainIdx = 0;
  HdfsFileStatus status = dfsClient.getFileInfo(name);
  long length = status.getLen();
  int numBlocks = (int)((length + blockSize - 1) / blockSize);
  DFSInputStream in = null;
  byte[] blockBuffer = new byte[blockSize];

  try {
    for (int blockIdx = 0; blockIdx < numBlocks; blockIdx++) {
      if (blocksToCorrupt.contains(blockIdx)) {
        if (in != null) {
          in.close();
          in = null;
        }
        continue;
      }
      if (in == null) {
        in = dfsClient.open("/lost+found" + name + "/" + chainIdx);
        chainIdx++;
      }
      int len = blockBuffer.length;
      if (blockIdx == (numBlocks - 1)) {
        // The last block might not be full-length
        len = (int)(in.getFileLength() % blockSize);
        if (len == 0) len = blockBuffer.length;
      }
      IOUtils.readFully(in, blockBuffer, 0, len);
      int startIdx = blockIdx * blockSize;
      for (int i = 0; i < len; i++) {
        if (initialContents[startIdx + i] != blockBuffer[i]) {
          throw new IOException("salvaged file " + name + " differed " +
          "from what we expected on block " + blockIdx);
        }
      }
    }
  } finally {
    IOUtils.cleanup(null, in);
  }
}
 
Example 10
Source File: FileDataServlet.java    From big-c with Apache License 2.0
/** Select a datanode to service this request.
 * Currently, this looks at no more than the first five blocks of a file,
 * selecting a datanode randomly from the most represented.
 * @param blks located blocks of the file
 * @param i file status of the file
 * @param conf the configuration
 */
private DatanodeID pickSrcDatanode(LocatedBlocks blks, HdfsFileStatus i,
    Configuration conf) throws IOException {
  if (i.getLen() == 0 || blks.getLocatedBlocks().size() <= 0) {
    // pick a random datanode
    NameNode nn = NameNodeHttpServer.getNameNodeFromContext(
        getServletContext());
    return NamenodeJspHelper.getRandomDatanode(nn);
  }
  return JspHelper.bestNode(blks, conf);
}
 
Example 11
Source File: DFSClient.java    From RDFS with Apache License 2.0
/**
 * Convert an HdfsFileStatus to a FileStatus
 * @param stat an HdfsFileStatus
 * @param src parent path in string representation
 * @return a FileStatus object
 */
private static FileStatus toFileStatus(HdfsFileStatus stat, String src) {
  if (stat == null) {
    return null;
  }
  return new FileStatus(stat.getLen(), stat.isDir(), stat.getReplication(),
      stat.getBlockSize(), stat.getModificationTime(),
      stat.getAccessTime(),
      stat.getPermission(), stat.getOwner(), stat.getGroup(),
      stat.getFullPath(new Path(src))); // full path
}
 
Example 12
Source File: Nfs3Utils.java    From big-c with Apache License 2.0
public static WccAttr getWccAttr(DFSClient client, String fileIdPath)
    throws IOException {
  HdfsFileStatus fstat = getFileStatus(client, fileIdPath);
  if (fstat == null) {
    return null;
  }

  long size = fstat.isDir() ? getDirSize(fstat.getChildrenNum()) : fstat
      .getLen();
  return new WccAttr(size, new NfsTime(fstat.getModificationTime()),
      new NfsTime(fstat.getModificationTime()));
}
 
Example 13
Source File: TestFsck.java    From big-c with Apache License 2.0
public void checkSalvagedRemains() throws IOException {
  int chainIdx = 0;
  HdfsFileStatus status = dfsClient.getFileInfo(name);
  long length = status.getLen();
  int numBlocks = (int)((length + blockSize - 1) / blockSize);
  DFSInputStream in = null;
  byte[] blockBuffer = new byte[blockSize];

  try {
    for (int blockIdx = 0; blockIdx < numBlocks; blockIdx++) {
      if (blocksToCorrupt.contains(blockIdx)) {
        if (in != null) {
          in.close();
          in = null;
        }
        continue;
      }
      if (in == null) {
        in = dfsClient.open("/lost+found" + name + "/" + chainIdx);
        chainIdx++;
      }
      int len = blockBuffer.length;
      if (blockIdx == (numBlocks - 1)) {
        // The last block might not be full-length
        len = (int)(in.getFileLength() % blockSize);
        if (len == 0) len = blockBuffer.length;
      }
      IOUtils.readFully(in, blockBuffer, 0, len);
      int startIdx = blockIdx * blockSize;
      for (int i = 0; i < len; i++) {
        if (initialContents[startIdx + i] != blockBuffer[i]) {
          throw new IOException("salvaged file " + name + " differed " +
          "from what we expected on block " + blockIdx);
        }
      }
    }
  } finally {
    IOUtils.cleanup(null, in);
  }
}
 
Example 14
Source File: WebHdfsFileSystem.java    From big-c with Apache License 2.0
private FileStatus makeQualified(HdfsFileStatus f, Path parent) {
  return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
      f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
      f.getPermission(), f.getOwner(), f.getGroup(),
      f.isSymlink() ? new Path(f.getSymlink()) : null,
      f.getFullPath(parent).makeQualified(getUri(), getWorkingDirectory()));
}
 
Example 15
Source File: TestHDFSConcat.java    From big-c with Apache License 2.0
@Test
public void testConcatNotCompleteBlock() throws IOException {
  long trgFileLen = blockSize*3;
  long srcFileLen = blockSize*3+20; // block at the end - not full

  
  // create first file
  String name1="/trg", name2="/src";
  Path filePath1 = new Path(name1);
  DFSTestUtil.createFile(dfs, filePath1, trgFileLen, REPL_FACTOR, 1);
  
  HdfsFileStatus fStatus = nn.getFileInfo(name1);
  long fileLen = fStatus.getLen();
  assertEquals(fileLen, trgFileLen);
  
  //read the file
  FSDataInputStream stm = dfs.open(filePath1);
  byte[] byteFile1 = new byte[(int)trgFileLen];
  stm.readFully(0, byteFile1);
  stm.close();
  
  LocatedBlocks lb1 = nn.getBlockLocations(name1, 0, trgFileLen);
  
  Path filePath2 = new Path(name2);
  DFSTestUtil.createFile(dfs, filePath2, srcFileLen, REPL_FACTOR, 1);
  fStatus = nn.getFileInfo(name2);
  fileLen = fStatus.getLen();
  assertEquals(srcFileLen, fileLen);
  
  // read the file
  stm = dfs.open(filePath2);
  byte[] byteFile2 = new byte[(int)srcFileLen];
  stm.readFully(0, byteFile2);
  stm.close();
  
  LocatedBlocks lb2 = nn.getBlockLocations(name2, 0, srcFileLen);
  
  
  System.out.println("trg len="+trgFileLen+"; src len="+srcFileLen);
  
  // move the blocks
  dfs.concat(filePath1, new Path [] {filePath2});
  
  long totalLen = trgFileLen + srcFileLen;
  fStatus = nn.getFileInfo(name1);
  fileLen = fStatus.getLen();
  
  // read the resulting file
  stm = dfs.open(filePath1);
  byte[] byteFileConcat = new byte[(int)fileLen];
  stm.readFully(0, byteFileConcat);
  stm.close();
  
  LocatedBlocks lbConcat = nn.getBlockLocations(name1, 0, fileLen);
  
  //verifications
  // 1. number of blocks
  assertEquals(lbConcat.locatedBlockCount(), 
      lb1.locatedBlockCount() + lb2.locatedBlockCount());
  
  // 2. file lengths
  System.out.println("file1 len="+fileLen+"; total len="+totalLen);
  assertEquals(fileLen, totalLen);
  
  // 3. removal of the src file
  fStatus = nn.getFileInfo(name2);
  assertNull("File "+name2+ "still exists", fStatus); // file shouldn't exist

  // 4. content
  checkFileContent(byteFileConcat, new byte [] [] {byteFile1, byteFile2});
}
 
Example 16
Source File: NamenodeWebHdfsMethods.java    From big-c with Apache License 2.0
@VisibleForTesting
static DatanodeInfo chooseDatanode(final NameNode namenode,
    final String path, final HttpOpParam.Op op, final long openOffset,
    final long blocksize, final String excludeDatanodes) throws IOException {
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  
  HashSet<Node> excludes = new HashSet<Node>();
  if (excludeDatanodes != null) {
    for (String host : StringUtils
        .getTrimmedStringCollection(excludeDatanodes)) {
      int idx = host.indexOf(":");
      if (idx != -1) {          
        excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(
            host.substring(0, idx), Integer.parseInt(host.substring(idx + 1))));
      } else {
        excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
      }
    }
  }

  if (op == PutOpParam.Op.CREATE) {
    //choose a datanode near to client 
    final DatanodeDescriptor clientNode = bm.getDatanodeManager(
        ).getDatanodeByHost(getRemoteAddress());
    if (clientNode != null) {
      final DatanodeStorageInfo[] storages = bm.chooseTarget4WebHDFS(
          path, clientNode, excludes, blocksize);
      if (storages.length > 0) {
        return storages[0].getDatanodeDescriptor();
      }
    }
  } else if (op == GetOpParam.Op.OPEN
      || op == GetOpParam.Op.GETFILECHECKSUM
      || op == PostOpParam.Op.APPEND) {
    //choose a datanode containing a replica 
    final NamenodeProtocols np = getRPCServer(namenode);
    final HdfsFileStatus status = np.getFileInfo(path);
    if (status == null) {
      throw new FileNotFoundException("File " + path + " not found.");
    }
    final long len = status.getLen();
    if (op == GetOpParam.Op.OPEN) {
      if (openOffset < 0L || (openOffset >= len && len > 0)) {
        throw new IOException("Offset=" + openOffset
            + " out of the range [0, " + len + "); " + op + ", path=" + path);
      }
    }

    if (len > 0) {
      final long offset = op == GetOpParam.Op.OPEN? openOffset: len - 1;
      final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
      final int count = locations.locatedBlockCount();
      if (count > 0) {
        return bestNode(locations.get(0).getLocations(), excludes);
      }
    }
  } 

  return (DatanodeDescriptor)bm.getDatanodeManager().getNetworkTopology(
      ).chooseRandom(NodeBase.ROOT);
}
 
Example 17
Source File: DFSOutputStream.java    From big-c with Apache License 2.0
/**
 * Construct a data streamer for appending to the last partial block
 * @param lastBlock last block of the file to be appended
 * @param stat status of the file to be appended
 * @param bytesPerChecksum number of bytes per checksum
 * @throws IOException if error occurs
 */
private DataStreamer(LocatedBlock lastBlock, HdfsFileStatus stat,
    int bytesPerChecksum) throws IOException {
  isAppend = true;
  stage = BlockConstructionStage.PIPELINE_SETUP_APPEND;
  block = lastBlock.getBlock();
  bytesSent = block.getNumBytes();
  accessToken = lastBlock.getBlockToken();
  isLazyPersistFile = isLazyPersist(stat);
  long usedInLastBlock = stat.getLen() % blockSize;
  int freeInLastBlock = (int)(blockSize - usedInLastBlock);

  // calculate the amount of free space in the pre-existing 
  // last crc chunk
  int usedInCksum = (int)(stat.getLen() % bytesPerChecksum);
  int freeInCksum = bytesPerChecksum - usedInCksum;

  // if there is space in the last block, then we have to 
  // append to that block
  if (freeInLastBlock == blockSize) {
    throw new IOException("The last block for file " + 
        src + " is full.");
  }

  if (usedInCksum > 0 && freeInCksum > 0) {
    // if there is space in the last partial chunk, then 
    // setup in such a way that the next packet will have only 
    // one chunk that fills up the partial chunk.
    //
    computePacketChunkSize(0, freeInCksum);
    setChecksumBufSize(freeInCksum);
    appendChunk = true;
  } else {
    // if the remaining space in the block is smaller than 
    // the expected size of a packet, then create 
    // smaller size packet.
    //
    computePacketChunkSize(Math.min(dfsClient.getConf().writePacketSize, freeInLastBlock), 
        bytesPerChecksum);
  }

  // setup pipeline to append to the last block XXX retries??
  setPipeline(lastBlock);
  errorIndex = -1;   // no errors yet.
  if (nodes.length < 1) {
    throw new IOException("Unable to retrieve blocks locations " +
        " for last block " + block +
        "of file " + src);

  }
}
 
Example 18
Source File: TestHDFSConcat.java    From hadoop with Apache License 2.0
@Test
public void testConcatNotCompleteBlock() throws IOException {
  long trgFileLen = blockSize*3;
  long srcFileLen = blockSize*3+20; // block at the end - not full

  
  // create first file
  String name1="/trg", name2="/src";
  Path filePath1 = new Path(name1);
  DFSTestUtil.createFile(dfs, filePath1, trgFileLen, REPL_FACTOR, 1);
  
  HdfsFileStatus fStatus = nn.getFileInfo(name1);
  long fileLen = fStatus.getLen();
  assertEquals(fileLen, trgFileLen);
  
  //read the file
  FSDataInputStream stm = dfs.open(filePath1);
  byte[] byteFile1 = new byte[(int)trgFileLen];
  stm.readFully(0, byteFile1);
  stm.close();
  
  LocatedBlocks lb1 = nn.getBlockLocations(name1, 0, trgFileLen);
  
  Path filePath2 = new Path(name2);
  DFSTestUtil.createFile(dfs, filePath2, srcFileLen, REPL_FACTOR, 1);
  fStatus = nn.getFileInfo(name2);
  fileLen = fStatus.getLen();
  assertEquals(srcFileLen, fileLen);
  
  // read the file
  stm = dfs.open(filePath2);
  byte[] byteFile2 = new byte[(int)srcFileLen];
  stm.readFully(0, byteFile2);
  stm.close();
  
  LocatedBlocks lb2 = nn.getBlockLocations(name2, 0, srcFileLen);
  
  
  System.out.println("trg len="+trgFileLen+"; src len="+srcFileLen);
  
  // move the blocks
  dfs.concat(filePath1, new Path [] {filePath2});
  
  long totalLen = trgFileLen + srcFileLen;
  fStatus = nn.getFileInfo(name1);
  fileLen = fStatus.getLen();
  
  // read the resulting file
  stm = dfs.open(filePath1);
  byte[] byteFileConcat = new byte[(int)fileLen];
  stm.readFully(0, byteFileConcat);
  stm.close();
  
  LocatedBlocks lbConcat = nn.getBlockLocations(name1, 0, fileLen);
  
  //verifications
  // 1. number of blocks
  assertEquals(lbConcat.locatedBlockCount(), 
      lb1.locatedBlockCount() + lb2.locatedBlockCount());
  
  // 2. file lengths
  System.out.println("file1 len="+fileLen+"; total len="+totalLen);
  assertEquals(fileLen, totalLen);
  
  // 3. removal of the src file
  fStatus = nn.getFileInfo(name2);
  assertNull("File "+name2+ "still exists", fStatus); // file shouldn't exist

  // 4. content
  checkFileContent(byteFileConcat, new byte [] [] {byteFile1, byteFile2});
}
 
Example 19
Source File: NamenodeWebHdfsMethods.java    From hadoop with Apache License 2.0
@VisibleForTesting
static DatanodeInfo chooseDatanode(final NameNode namenode,
    final String path, final HttpOpParam.Op op, final long openOffset,
    final long blocksize, final String excludeDatanodes) throws IOException {
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  
  HashSet<Node> excludes = new HashSet<Node>();
  if (excludeDatanodes != null) {
    for (String host : StringUtils
        .getTrimmedStringCollection(excludeDatanodes)) {
      int idx = host.indexOf(":");
      if (idx != -1) {          
        excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(
            host.substring(0, idx), Integer.parseInt(host.substring(idx + 1))));
      } else {
        excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
      }
    }
  }

  if (op == PutOpParam.Op.CREATE) {
    //choose a datanode near to client 
    final DatanodeDescriptor clientNode = bm.getDatanodeManager(
        ).getDatanodeByHost(getRemoteAddress());
    if (clientNode != null) {
      final DatanodeStorageInfo[] storages = bm.chooseTarget4WebHDFS(
          path, clientNode, excludes, blocksize);
      if (storages.length > 0) {
        return storages[0].getDatanodeDescriptor();
      }
    }
  } else if (op == GetOpParam.Op.OPEN
      || op == GetOpParam.Op.GETFILECHECKSUM
      || op == PostOpParam.Op.APPEND) {
    //choose a datanode containing a replica 
    final NamenodeProtocols np = getRPCServer(namenode);
    final HdfsFileStatus status = np.getFileInfo(path);
    if (status == null) {
      throw new FileNotFoundException("File " + path + " not found.");
    }
    final long len = status.getLen();
    if (op == GetOpParam.Op.OPEN) {
      if (openOffset < 0L || (openOffset >= len && len > 0)) {
        throw new IOException("Offset=" + openOffset
            + " out of the range [0, " + len + "); " + op + ", path=" + path);
      }
    }

    if (len > 0) {
      final long offset = op == GetOpParam.Op.OPEN? openOffset: len - 1;
      final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
      final int count = locations.locatedBlockCount();
      if (count > 0) {
        return bestNode(locations.get(0).getLocations(), excludes);
      }
    }
  } 

  return (DatanodeDescriptor)bm.getDatanodeManager().getNetworkTopology(
      ).chooseRandom(NodeBase.ROOT);
}
 
Example 20
Source File: DFSOutputStream.java    From hadoop with Apache License 2.0
/**
 * Construct a data streamer for appending to the last partial block
 * @param lastBlock last block of the file to be appended
 * @param stat status of the file to be appended
 * @param bytesPerChecksum number of bytes per checksum
 * @throws IOException if error occurs
 */
private DataStreamer(LocatedBlock lastBlock, HdfsFileStatus stat,
    int bytesPerChecksum) throws IOException {
  isAppend = true;
  stage = BlockConstructionStage.PIPELINE_SETUP_APPEND;
  block = lastBlock.getBlock();
  bytesSent = block.getNumBytes();
  accessToken = lastBlock.getBlockToken();
  isLazyPersistFile = isLazyPersist(stat);
  long usedInLastBlock = stat.getLen() % blockSize;
  int freeInLastBlock = (int)(blockSize - usedInLastBlock);

  // calculate the amount of free space in the pre-existing 
  // last crc chunk
  int usedInCksum = (int)(stat.getLen() % bytesPerChecksum);
  int freeInCksum = bytesPerChecksum - usedInCksum;

  // if there is space in the last block, then we have to 
  // append to that block
  if (freeInLastBlock == blockSize) {
    throw new IOException("The last block for file " + 
        src + " is full.");
  }

  if (usedInCksum > 0 && freeInCksum > 0) {
    // if there is space in the last partial chunk, then 
    // setup in such a way that the next packet will have only 
    // one chunk that fills up the partial chunk.
    //
    computePacketChunkSize(0, freeInCksum);
    setChecksumBufSize(freeInCksum);
    appendChunk = true;
  } else {
    // if the remaining space in the block is smaller than 
    // the expected size of a packet, then create 
    // smaller size packet.
    //
    computePacketChunkSize(Math.min(dfsClient.getConf().writePacketSize, freeInLastBlock), 
        bytesPerChecksum);
  }

  // setup pipeline to append to the last block XXX retries??
  setPipeline(lastBlock);
  errorIndex = -1;   // no errors yet.
  if (nodes.length < 1) {
    throw new IOException("Unable to retrieve blocks locations " +
        " for last block " + block +
        "of file " + src);

  }
}