Java Code Examples for org.apache.hadoop.hdfs.protocol.Block#getNumBytes()

The following examples show how to use org.apache.hadoop.hdfs.protocol.Block#getNumBytes(). Each example notes the project and source file it was taken from, so you can refer to the original code for full context.
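
Block#getNumBytes() simply returns the length of the block in bytes as recorded in the Block object. As a minimal sketch (with hypothetical id, length, and generation-stamp values, not taken from any of the projects below), the accessor pair looks like this:

import org.apache.hadoop.hdfs.protocol.Block;

public class BlockLengthDemo {
  public static void main(String[] args) {
    // Hypothetical values: block id, length in bytes, generation stamp.
    Block block = new Block(42L, 134217728L, 1001L);
    System.out.println(block + " has " + block.getNumBytes() + " bytes");

    // The recorded length can be reset, e.g. while a replica is still being written.
    block.setNumBytes(0);
  }
}
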
Example 1
Source File: FSImageSerialization.java    From big-c with Apache License 2.0
/**
 * Write an array of blocks as compactly as possible. This uses
 * delta-encoding for the generation stamp and size, following
 * the principle that genstamp increases relatively slowly,
 * and size is equal for all but the last block of a file.
 */
public static void writeCompactBlockArray(
    Block[] blocks, DataOutputStream out) throws IOException {
  WritableUtils.writeVInt(out, blocks.length);
  Block prev = null;
  for (Block b : blocks) {
    long szDelta = b.getNumBytes() -
        (prev != null ? prev.getNumBytes() : 0);
    long gsDelta = b.getGenerationStamp() -
        (prev != null ? prev.getGenerationStamp() : 0);
    out.writeLong(b.getBlockId()); // blockid is random
    WritableUtils.writeVLong(out, szDelta);
    WritableUtils.writeVLong(out, gsDelta);
    prev = b;
  }
}
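
To make the delta encoding concrete, the following sketch (hypothetical block ids, sizes, and generation stamps; it assumes FSImageSerialization from org.apache.hadoop.hdfs.server.namenode is on the classpath) serializes three blocks into an in-memory stream. Only the last block produces non-trivial size and generation-stamp deltas:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;

public class CompactBlockArrayWriteDemo {
  public static void main(String[] args) throws IOException {
    Block[] blocks = new Block[] {
        new Block(101L, 134217728L, 1000L), // szDelta = 134217728, gsDelta = 1000
        new Block(102L, 134217728L, 1000L), // szDelta = 0,          gsDelta = 0
        new Block(103L,   1048576L, 1001L)  // szDelta = -133169152, gsDelta = 1
    };
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    FSImageSerialization.writeCompactBlockArray(blocks, new DataOutputStream(bytes));
    System.out.println("serialized " + blocks.length + " blocks into "
        + bytes.size() + " bytes");
  }
}
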
 
Example 2
Source File: INodeFile.java    From RDFS with Apache License 2.0
long diskspaceConsumed(Block[] blkArr) {
  long size = 0;
  if(blkArr == null) {
    return 0;
  }
  for (Block blk : blkArr) {
    if (blk != null) {
      size += blk.getNumBytes();
    }
  }
  /* If the last block is being written to, use preferredBlockSize
   * rather than the actual block size.
   */
  if (blkArr.length > 0 && blkArr[blkArr.length-1] != null &&
      isUnderConstruction()) {
    size += getPreferredBlockSize() - blkArr[blkArr.length-1].getNumBytes();
  }
  return size * getReplication();
}
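
The accounting above can be checked by hand. A self-contained sketch with assumed values (not tied to INodeFile): two finalized 128 MB blocks plus a 1 MB last block that is still under construction, a preferred block size of 128 MB, and replication 3 are charged as if the last block were already full:

public class DiskspaceConsumedDemo {
  public static void main(String[] args) {
    long preferredBlockSize = 128L * 1024 * 1024;  // 134217728 bytes
    long[] blockLengths = { preferredBlockSize, preferredBlockSize, 1L * 1024 * 1024 };
    boolean underConstruction = true;
    short replication = 3;

    long size = 0;
    for (long len : blockLengths) {
      size += len;                                  // actual bytes so far
    }
    if (blockLengths.length > 0 && underConstruction) {
      // Charge the full preferred size for the last, still-growing block.
      size += preferredBlockSize - blockLengths[blockLengths.length - 1];
    }
    System.out.println(size * replication);         // 3 * (3 * 128 MB) = 1207959552
  }
}
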
 
Example 3
Source File: SimulatedFSDataset.java    From big-c with Apache License 2.0
BInfo(String bpid, Block b, boolean forWriting) throws IOException {
  theBlock = new Block(b);
  if (theBlock.getNumBytes() < 0) {
    theBlock.setNumBytes(0);
  }
  if (!storage.alloc(bpid, theBlock.getNumBytes())) { 
    // expected length - actual length may
    // be more - we find out at finalize
    DataNode.LOG.warn("Lack of free storage on a block alloc");
    throw new IOException("Creating block, no free space available");
  }

  if (forWriting) {
    finalized = false;
    oStream = new SimulatedOutputStream();
  } else {
    finalized = true;
    oStream = null;
  }
}
 
Example 4
Source File: TestBalancer.java    From hadoop-gpu with Apache License 2.0
private Block[] generateBlocks(long size, short numNodes) throws IOException {
  cluster = new MiniDFSCluster( CONF, numNodes, true, null);
  try {
    cluster.waitActive();
    client = DFSClient.createNamenode(CONF);

    short replicationFactor = (short)(numNodes-1);
    long fileLen = size/replicationFactor;
    createFile(fileLen, replicationFactor);

    List<LocatedBlock> locatedBlocks = client.getBlockLocations(
        fileName, 0, fileLen).getLocatedBlocks();

    int numOfBlocks = locatedBlocks.size();
    Block[] blocks = new Block[numOfBlocks];
    for(int i=0; i<numOfBlocks; i++) {
      Block b = locatedBlocks.get(i).getBlock();
      blocks[i] = new Block(b.getBlockId(), b.getNumBytes(), b.getGenerationStamp());
    }

    return blocks;
  } finally {
    cluster.shutdown();
  }
}
 
Example 5
Source File: FSImageSerialization.java    From big-c with Apache License 2.0
public static Block[] readCompactBlockArray(
    DataInput in, int logVersion) throws IOException {
  int num = WritableUtils.readVInt(in);
  if (num < 0) {
    throw new IOException("Invalid block array length: " + num);
  }
  Block prev = null;
  Block[] ret = new Block[num];
  for (int i = 0; i < num; i++) {
    long id = in.readLong();
    long sz = WritableUtils.readVLong(in) +
        ((prev != null) ? prev.getNumBytes() : 0);
    long gs = WritableUtils.readVLong(in) +
        ((prev != null) ? prev.getGenerationStamp() : 0);
    ret[i] = new Block(id, sz, gs);
    prev = ret[i];
  }
  return ret;
}
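
writeCompactBlockArray (Example 1) and readCompactBlockArray are inverses, so a quick round-trip sketch can be built on in-memory streams. This assumes FSImageSerialization from org.apache.hadoop.hdfs.server.namenode; the logVersion argument is not used by the body shown above, so a placeholder value is passed:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;

public class CompactBlockArrayRoundTrip {
  public static void main(String[] args) throws IOException {
    Block[] original = new Block[] {
        new Block(7L, 134217728L, 2000L),
        new Block(8L, 65536L, 2001L)
    };
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    FSImageSerialization.writeCompactBlockArray(original, new DataOutputStream(bytes));

    Block[] restored = FSImageSerialization.readCompactBlockArray(
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())), 0);

    // The ids, lengths, and generation stamps survive the round trip.
    for (int i = 0; i < restored.length; i++) {
      assert restored[i].getBlockId() == original[i].getBlockId();
      assert restored[i].getNumBytes() == original[i].getNumBytes();
      assert restored[i].getGenerationStamp() == original[i].getGenerationStamp();
    }
  }
}
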
 
Example 6
Source File: SimulatedFSDataset.java    From hadoop with Apache License 2.0
BInfo(String bpid, Block b, boolean forWriting) throws IOException {
  theBlock = new Block(b);
  if (theBlock.getNumBytes() < 0) {
    theBlock.setNumBytes(0);
  }
  if (!storage.alloc(bpid, theBlock.getNumBytes())) { 
    // expected length - actual length may
    // be more - we find out at finalize
    DataNode.LOG.warn("Lack of free storage on a block alloc");
    throw new IOException("Creating block, no free space available");
  }

  if (forWriting) {
    finalized = false;
    oStream = new SimulatedOutputStream();
  } else {
    finalized = true;
    oStream = null;
  }
}
 
Example 7
Source File: FSImageSerialization.java    From hadoop with Apache License 2.0
public static Block[] readCompactBlockArray(
    DataInput in, int logVersion) throws IOException {
  int num = WritableUtils.readVInt(in);
  if (num < 0) {
    throw new IOException("Invalid block array length: " + num);
  }
  Block prev = null;
  Block[] ret = new Block[num];
  for (int i = 0; i < num; i++) {
    long id = in.readLong();
    long sz = WritableUtils.readVLong(in) +
        ((prev != null) ? prev.getNumBytes() : 0);
    long gs = WritableUtils.readVLong(in) +
        ((prev != null) ? prev.getGenerationStamp() : 0);
    ret[i] = new Block(id, sz, gs);
    prev = ret[i];
  }
  return ret;
}
 
Example 8
Source File: InotifyFSEditLogOpTranslator.java    From big-c with Apache License 2.0
private static long getSize(FSEditLogOp.AddCloseOp acOp) {
  long size = 0;
  for (Block b : acOp.getBlocks()) {
    size += b.getNumBytes();
  }
  return size;
}
 
Example 9
Source File: Dispatcher.java    From hadoop with Apache License 2.0
@Override
public String toString() {
  final Block b = block != null ? block.getBlock() : null;
  String bStr = b != null ? (b + " with size=" + b.getNumBytes() + " ")
      : " ";
  return bStr + "from " + source.getDisplayName() + " to " + target
      .getDisplayName() + " through " + (proxySource != null ? proxySource
      .datanode : "");
}
 
Example 10
Source File: INodeFile.java    From big-c with Apache License 2.0
public final long storagespaceConsumedNoReplication() {
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if(sf == null) {
    return computeFileSize(true, true);
  }

  // Collect all distinct blocks
  long size = 0;
  Set<Block> allBlocks = new HashSet<Block>(Arrays.asList(getBlocks()));
  List<FileDiff> diffs = sf.getDiffs().asList();
  for(FileDiff diff : diffs) {
    BlockInfoContiguous[] diffBlocks = diff.getBlocks();
    if (diffBlocks != null) {
      allBlocks.addAll(Arrays.asList(diffBlocks));
    }
  }
  for(Block block : allBlocks) {
    size += block.getNumBytes();
  }
  // check if the last block is under construction
  BlockInfoContiguous lastBlock = getLastBlock();
  if(lastBlock != null &&
      lastBlock instanceof BlockInfoContiguousUnderConstruction) {
    size += getPreferredBlockSize() - lastBlock.getNumBytes();
  }
  return size;
}
 
Example 11
Source File: DataNode.java    From RDFS with Apache License 2.0
private void transferBlock(int namespaceId, Block block,
    DatanodeInfo xferTargets[]) throws IOException {
  DatanodeProtocol nn = getNSNamenode(namespaceId);
  DatanodeRegistration nsReg = getDNRegistrationForNS(namespaceId);

  if (!data.isValidBlock(namespaceId, block, true)) {
    // block does not exist or is under-construction
    String errStr = "Can't send invalid block " + block;
    LOG.info(errStr);
    nn.errorReport(nsReg, DatanodeProtocol.INVALID_BLOCK, errStr);
    return;
  }

  // Check if NN recorded length matches on-disk length
  long onDiskLength = data.getFinalizedBlockLength(namespaceId, block);
  if (block.getNumBytes() > onDiskLength) {
    // A shorter on-disk length indicates corruption, so report the corrupt block to the NN
    nn.reportBadBlocks(new LocatedBlock[] { new LocatedBlock(block,
        new DatanodeInfo[] { new DatanodeInfo(nsReg) }) });
    LOG.info("Can't replicate block " + block + " because on-disk length "
        + onDiskLength + " is shorter than NameNode recorded length "
        + block.getNumBytes());
    return;
  }

  int numTargets = xferTargets.length;
  if (numTargets > 0) {
    if (LOG.isInfoEnabled()) {
      StringBuilder xfersBuilder = new StringBuilder();
      for (int i = 0; i < numTargets; i++) {
        xfersBuilder.append(xferTargets[i].getName());
        xfersBuilder.append(" ");
      }
      LOG.info(nsReg + " Starting thread to transfer block " + block + " to "
          + xfersBuilder);
    }

    blockCopyExecutor.submit(new DataTransfer(namespaceId, xferTargets, block, this));
  }
}
 
Example 12
Source File: DataNode.java    From RDFS with Apache License 2.0
@Override
public void copyBlock(int srcNamespaceId, Block srcBlock, int dstNamespaceId,
    Block destinationBlock, DatanodeInfo target, boolean async)
    throws IOException {

  if (!data.isValidBlock(srcNamespaceId, srcBlock, true)) {
    // block does not exist or is under-construction
    String errStr = "copyBlock: Can't send invalid block " + srcBlock 
                  + " at " + srcNamespaceId;
    LOG.info(errStr);
    throw new IOException(errStr);
  }

  // Check if specified length matches on-disk length 
  long onDiskLength = data.getFinalizedBlockLength(srcNamespaceId, srcBlock);
  if (srcBlock.getNumBytes() > onDiskLength) {
    // A shorter on-disk length indicates corruption, so report the corrupt block to the NN
    String msg = "copyBlock: Can't replicate block " + srcBlock
        + " at " + srcNamespaceId
        + " because on-disk length " + onDiskLength
        + " is shorter than provided length " + srcBlock.getNumBytes();
    LOG.info(msg);
    throw new IOException(msg);
  }

  LOG.info(getDatanodeInfo() + " copyBlock: Starting thread to transfer: " +
           "srcNamespaceId: " + srcNamespaceId + " block: " +
           srcBlock + " to " + target.getName());
  DatanodeInfo[] targets = new DatanodeInfo[1];
  targets[0] = target;

  // Use IP Address and port number to determine locality. Relying on the
  // DatanodeID of both the target machine and the local machine to
  // determine locality. This guarantees uniformity in comparison.
  String targetMachine = target.getHost();
  int targetPort = target.getPort();
  DatanodeRegistration dnRegistration = getDNRegistrationForNS(srcNamespaceId);
  int localPort = dnRegistration.getPort();
  String localMachine = dnRegistration.getHost();

  Future<Boolean> result;
  // If the target datanode is our datanode itself, then perform local copy.
  if (targetMachine.equals(localMachine) && targetPort == localPort) {
    LOG.info("Performing local block copy since source and "
        + "destination datanodes are same for  block "
        + srcBlock.getBlockName());
    result = blockCopyExecutor.submit(new LocalBlockCopy(srcNamespaceId,
        srcBlock, dstNamespaceId, destinationBlock));
  } else if (targetMachine.equals(localMachine)) {
    LOG.info("Performing cross datanode local block copy since source " +
        "and destination hosts are same for block "
        + srcBlock.getBlockName());
    result = blockCopyExecutor.submit(new CrossDatanodeLocalBlockCopy(
        srcNamespaceId, srcBlock, dstNamespaceId, destinationBlock, target));
  } else {
    result = blockCopyExecutor.submit(new DataTransfer(targets, srcNamespaceId, srcBlock,
        dstNamespaceId, destinationBlock, this));
  }

  // If this is not an async request, wait for the task to complete; if the
  // task fails this will throw an exception that is propagated to the
  // client.
  if (!async) {
    try {
      // Wait for 5 minutes.
      result.get(this.blockCopyRPCWaitTime, TimeUnit.SECONDS);
    } catch (Exception e) {
      LOG.error(e);
      throw new IOException(e);
    }
  }
}
 
Example 13
Source File: TestBlockReportProcessingTime.java    From RDFS with Apache License 2.0
/** Test the case where block report processing at namenode
 * startup time is fast.
 */
public void testFasterBlockReports() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster(conf, 40, true, null);
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    NameNode namenode = cluster.getNameNode();
    LOG.info("Cluster Alive."); 

    // create a single file with one block.
    Path file1 = new Path("/filestatus.dat");
    final long FILE_LEN = 1L;
    DFSTestUtil.createFile(fs, file1, FILE_LEN, (short)2, 1L);
    LocatedBlocks locations = namenode.getBlockLocations(
                                file1.toString(), 0, Long.MAX_VALUE);
    assertTrue(locations.locatedBlockCount() == 1);
    Block block = locations.get(0).getBlock();
    long blkid = block.getBlockId();
    long genstamp = block.getGenerationStamp();
    long length = block.getNumBytes();
    
    // put namenode in safemode
    namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    DatanodeInfo[] dinfo = namenode.getDatanodeReport(DatanodeReportType.ALL);
    LOG.info("Found " + dinfo.length + " number of datanodes.");

    // create artificial block replicas on each datanode
    final int NUMBLOCKS = 1000;
    final int LONGS_PER_BLOCK = 3;
    long tmpblocks[] = new long[NUMBLOCKS * LONGS_PER_BLOCK];
    for (int i = 0; i < NUMBLOCKS; i++) {
      tmpblocks[i * LONGS_PER_BLOCK] = blkid;
      tmpblocks[i * LONGS_PER_BLOCK + 1] = length;
      tmpblocks[i * LONGS_PER_BLOCK + 2] = genstamp;
    }
    BlockListAsLongs blkList = new BlockListAsLongs(tmpblocks);

    // process block report from all machines
    long total = 0;
    for (int i = 0; i < dinfo.length; i++) {
      long start = now();
      namenode.namesystem.processReport(dinfo[i], blkList);
      total += now() - start;
      LOG.info("Processed block report from " + dinfo[i]);
    }
    LOG.info("Average of all block report processing time " +
             " from " + dinfo.length + " datanodes is " +
             (total/dinfo.length) + " milliseconds.");
    
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
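
The test above lays each artificial replica out as three consecutive longs (block id, length, generation stamp) before wrapping them in BlockListAsLongs. A small sketch of that triplet layout, decoded back into Block objects with the (id, numBytes, genStamp) constructor (hypothetical values; it does not touch BlockListAsLongs itself):

import org.apache.hadoop.hdfs.protocol.Block;

public class BlockTripletDemo {
  public static void main(String[] args) {
    // Two replicas, laid out as [id, length, genstamp, id, length, genstamp].
    long[] triplets = { 500L, 1L, 3000L, 501L, 67108864L, 3001L };

    Block[] blocks = new Block[triplets.length / 3];
    for (int i = 0; i < blocks.length; i++) {
      blocks[i] = new Block(triplets[i * 3],      // block id
                            triplets[i * 3 + 1],  // length in bytes
                            triplets[i * 3 + 2]); // generation stamp
      System.out.println(blocks[i] + " numBytes=" + blocks[i].getNumBytes());
    }
  }
}
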
 
Example 14
Source File: FSDataset.java    From hadoop-gpu with Apache License 2.0
/** {@inheritDoc} */
public void validateBlockMetadata(Block b) throws IOException {
  DatanodeBlockInfo info = volumeMap.get(b);
  if (info == null) {
    throw new IOException("Block " + b + " does not exist in volumeMap.");
  }
  FSVolume v = info.getVolume();
  File tmp = v.getTmpFile(b);
  File f = getFile(b);
  if (f == null) {
    f = tmp;
  }
  if (f == null) {
    throw new IOException("Block " + b + " does not exist on disk.");
  }
  if (!f.exists()) {
    throw new IOException("Block " + b + 
                          " block file " + f +
                          " does not exist on disk.");
  }
  if (b.getNumBytes() != f.length()) {
    throw new IOException("Block " + b + 
                          " length is " + b.getNumBytes()  +
                          " does not match block file length " +
                          f.length());
  }
  File meta = getMetaFile(f, b);
  if (meta == null) {
    throw new IOException("Block " + b + 
                          " metafile does not exist.");
  }
  if (!meta.exists()) {
    throw new IOException("Block " + b + 
                          " metafile " + meta +
                          " does not exist on disk.");
  }
  if (meta.length() == 0) {
    throw new IOException("Block " + b + " metafile " + meta + " is empty.");
  }
  long stamp = parseGenerationStamp(f, meta);
  if (stamp != b.getGenerationStamp()) {
    throw new IOException("Block " + b + 
                          " genstamp is " + b.getGenerationStamp()  +
                          " does not match meta file stamp " +
                          stamp);
  }
}
 
Example 15
Source File: DataNode.java    From hadoop-gpu with Apache License 2.0
private void transferBlock( Block block, 
                            DatanodeInfo xferTargets[] 
                            ) throws IOException {
  if (!data.isValidBlock(block)) {
    // block does not exist or is under-construction
    String errStr = "Can't send invalid block " + block;
    LOG.info(errStr);
    namenode.errorReport(dnRegistration, 
                         DatanodeProtocol.INVALID_BLOCK, 
                         errStr);
    return;
  }

  // Check if NN recorded length matches on-disk length 
  long onDiskLength = data.getLength(block);
  if (block.getNumBytes() > onDiskLength) {
    // A shorter on-disk length indicates corruption, so report the corrupt block to the NN
    namenode.reportBadBlocks(new LocatedBlock[]{
        new LocatedBlock(block, new DatanodeInfo[] {
            new DatanodeInfo(dnRegistration)})});
    LOG.info("Can't replicate block " + block
        + " because on-disk length " + onDiskLength 
        + " is shorter than NameNode recorded length " + block.getNumBytes());
    return;
  }
  
  int numTargets = xferTargets.length;
  if (numTargets > 0) {
    if (LOG.isInfoEnabled()) {
      StringBuilder xfersBuilder = new StringBuilder();
      for (int i = 0; i < numTargets; i++) {
        xfersBuilder.append(xferTargets[i].getName());
        xfersBuilder.append(" ");
      }
      LOG.info(dnRegistration + " Starting thread to transfer block " + 
               block + " to " + xfersBuilder);                       
    }

    new Daemon(new DataTransfer(xferTargets, block, this)).start();
  }
}
 
Example 16
Source File: FSDataset.java    From RDFS with Apache License 2.0
/**
 * Try to update an old block to a new block.
 * If there are ongoing create threads running for the old block,
 * the threads will be returned without updating the block.
 *
 * @return ongoing create threads if there is any. Otherwise, return null.
 */
private List<Thread> tryUpdateBlock(int namespaceId, 
    Block oldblock, Block newblock) throws IOException {
  lock.writeLock().lock();
  try {
    //check ongoing create threads
    ArrayList<Thread> activeThreads = getActiveThreads(namespaceId, oldblock);
    if (activeThreads != null) {
      return activeThreads;
    }

    if (volumeMap.get(namespaceId, oldblock) == null) {
      throw new IOException("Block " + oldblock
          + " doesn't exist or has been recovered to a new generation ");
    }

    // No ongoing create thread is alive. Update the block.
    File blockFile = findBlockFile(namespaceId, oldblock.getBlockId());
    if (blockFile == null) {
      throw new IOException("Block " + oldblock + " does not exist.");
    }

    File oldMetaFile = findMetaFile(blockFile);
    long oldgs = parseGenerationStamp(blockFile, oldMetaFile);
    
    // First validate the update

    //update generation stamp
    if (oldgs > newblock.getGenerationStamp()) {
      throw new IOException("Cannot update block (id=" + newblock.getBlockId()
          + ") generation stamp from " + oldgs
          + " to " + newblock.getGenerationStamp());
    }
    
    //update length
    if (newblock.getNumBytes() > oldblock.getNumBytes()) {
      throw new IOException("Cannot update block file (=" + blockFile
          + ") length from " + oldblock.getNumBytes() + " to " + newblock.getNumBytes());
    }

    // Although we've waited for all active threads to die before updating
    // the map, so there should be no data race there, we still create a new
    // ActiveFile object in case another thread still holds a reference to
    // the old one.
    //
    try {
      volumeMap.copyOngoingCreates(namespaceId, oldblock);
    } catch (CloneNotSupportedException e) {
      // It should never happen.
      throw new IOException("Cannot clone ActiveFile object", e);
    }

    // Now perform the update

    // rename meta file to a tmp file
    File tmpMetaFile = new File(oldMetaFile.getParent(),
        oldMetaFile.getName() + "_tmp" + newblock.getGenerationStamp());
    if (!oldMetaFile.renameTo(tmpMetaFile)) {
      throw new IOException("Cannot rename block meta file to " + tmpMetaFile);
    }

    long oldFileLength = blockFile.length();
    if (newblock.getNumBytes() < oldFileLength) {
      truncateBlock(blockFile, tmpMetaFile, oldFileLength,
          newblock.getNumBytes());
      ActiveFile file = volumeMap.getOngoingCreates(namespaceId, oldblock);
      if (file != null) {
        file.setBytesAcked(newblock.getNumBytes());
        file.setBytesOnDisk(newblock.getNumBytes());
      } else {
        // This should never happen unless called from unit tests.
        this.getDatanodeBlockInfo(namespaceId, oldblock).syncInMemorySize();
      }
    }

    //rename the tmp file to the new meta file (with new generation stamp)
    File newMetaFile = getMetaFile(blockFile, newblock);
    if (!tmpMetaFile.renameTo(newMetaFile)) {
      throw new IOException("Cannot rename tmp meta file to " + newMetaFile);
    }

    if(volumeMap.getOngoingCreates(namespaceId, oldblock) != null){
      ActiveFile af = volumeMap.removeOngoingCreates(namespaceId, oldblock);
      volumeMap.addOngoingCreates(namespaceId, newblock, af);
    }
    volumeMap.update(namespaceId, oldblock, newblock);

    // paranoia! verify that the contents of the stored block 
    // matches the block file on disk.
    validateBlockMetadata(namespaceId, newblock);
    return null;
  } finally {
    lock.writeLock().unlock();
  }
}
 
Example 17
Source File: FSDataset.java    From RDFS with Apache License 2.0
/** {@inheritDoc} */
public void validateBlockMetadata(int namespaceId, Block b) throws IOException {
  DatanodeBlockInfo info;
  lock.readLock().lock();
  try {
    info = volumeMap.get(namespaceId, b);
  } finally {
    lock.readLock().unlock();
  }
  if (info == null) {
    throw new IOException("Block " + b + " does not exist in volumeMap.");
  }
  FSVolume v = info.getVolume();
  File tmp = v.getTmpFile(namespaceId, b);
  File f = info.getFile();
  long fileSize;
  if (f == null) {
    f = tmp;
    if (f == null) {
      throw new IOException("Block " + b + " does not exist on disk.");
    }
    if (!f.exists()) {
      throw new IOException("Block " + b + 
                            " block file " + f +
                            " does not exist on disk.");
    }
    fileSize = f.length();
  } else {
    if (info.isFinalized()) {
      info.verifyFinalizedSize();
      fileSize = info.getFinalizedSize();
    } else {
      fileSize = f.length();
    }
  }
  if (b.getNumBytes() > fileSize) {
    throw new IOException("Block " + b + 
                          " length is " + b.getNumBytes()  +
                          " does not match block file length " +
                          f.length());
  }
  File meta = getMetaFile(f, b);
  if (meta == null) {
    throw new IOException("Block " + b + 
                          " metafile does not exist.");
  }
  if (!meta.exists()) {
    throw new IOException("Block " + b + 
                          " metafile " + meta +
                          " does not exist on disk.");
  }
  long metaFileSize = meta.length();
  if (metaFileSize == 0 && fileSize > 0) {
    throw new IOException("Block " + b + " metafile " + meta + " is empty.");
  }
  long stamp = parseGenerationStamp(f, meta);
  if (stamp != b.getGenerationStamp()) {
    throw new IOException("Block " + b + 
                          " genstamp is " + b.getGenerationStamp()  +
                          " does not match meta file stamp " +
                          stamp);
  }
  if (metaFileSize == 0) {
    // no need to check metadata size for 0 size file
    return;
  }
  // verify that the checksum file has an integral number of checksum values.
  DataChecksum dcs = BlockMetadataHeader.readHeader(meta).getChecksum();
  int checksumsize = dcs.getChecksumSize();
  long actual = metaFileSize - BlockMetadataHeader.getHeaderSize();
  long numChunksInMeta = actual/checksumsize;
  if (actual % checksumsize != 0) {
    throw new IOException("Block " + b +
                          " has a checksum file of size " + metaFileSize +
                          " but it does not align with checksum size of " +
                          checksumsize);
  }
  int bpc = dcs.getBytesPerChecksum();
  long minDataSize = (numChunksInMeta - 1) * bpc;
  long maxDataSize = numChunksInMeta * bpc;
  if (fileSize > maxDataSize || fileSize <= minDataSize) {
    throw new IOException("Block " + b +
                          " is of size " + f.length() +
                          " but has " + (numChunksInMeta + 1) +
                          " checksums and each checksum size is " +
                          checksumsize + " bytes.");
  }
  // We could crc-check the entire block here, but it will be a costly 
  // operation. Instead we rely on the above check (file length mismatch)
  // to detect corrupt blocks.
}
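
The checksum-alignment check at the end of this method is plain arithmetic on the meta file length. A standalone sketch with assumed values (the header size, checksum size, and bytes-per-checksum here are placeholders, not read from a real meta file):

public class ChecksumAlignmentDemo {
  public static void main(String[] args) {
    long metaFileSize = 4007;    // assumed length of the meta file
    int headerSize = 7;          // assumed header length
    int checksumSize = 4;        // e.g. a CRC-32 checksum is 4 bytes
    int bytesPerChecksum = 512;  // bytes of block data covered per checksum

    long actual = metaFileSize - headerSize;          // 4000 bytes of checksums
    long numChunksInMeta = actual / checksumSize;     // 1000 chunks covered
    boolean aligned = (actual % checksumSize == 0);   // true for these values

    long minDataSize = (numChunksInMeta - 1) * bytesPerChecksum;  // 511488
    long maxDataSize = numChunksInMeta * bytesPerChecksum;        // 512000
    System.out.println("block file length must be in (" + minDataSize + ", "
        + maxDataSize + "]; checksums aligned: " + aligned);
  }
}
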
 
Example 18
Source File: DFSInputStream.java    From RDFS with Apache License 2.0
/**
 * Grab the open-file info from namenode
 */
synchronized void openInfo() throws IOException {
  if (src == null && blocks == null) {
    throw new IOException("No fine provided to open");
  }

  LocatedBlocks newInfo = src != null ? 
                          getLocatedBlocks(src, 0, prefetchSize) : blocks;
  if (newInfo == null) {
    throw new IOException("Cannot open filename " + src);
  }

  // I think this check is not correct. A file could have been appended to
  // between two calls to openInfo().
  if (locatedBlocks != null && !locatedBlocks.isUnderConstruction() &&
      !newInfo.isUnderConstruction()) {
    Iterator<LocatedBlock> oldIter = locatedBlocks.getLocatedBlocks().iterator();
    Iterator<LocatedBlock> newIter = newInfo.getLocatedBlocks().iterator();
    while (oldIter.hasNext() && newIter.hasNext()) {
      if (! oldIter.next().getBlock().equals(newIter.next().getBlock())) {
        throw new IOException("Blocklist for " + src + " has changed!");
      }
    }
  }

  // if the file is under construction, then fetch size of last block
  // from datanode.
  if (newInfo.isUnderConstruction() && newInfo.locatedBlockCount() > 0) {
    LocatedBlock last = newInfo.get(newInfo.locatedBlockCount()-1);
    if (last.getLocations().length > 0) {
      try {
        Block newBlock = getBlockInfo(last);
        // only if the block has data (not null)
        if (newBlock != null) {
          long newBlockSize = newBlock.getNumBytes();
          newInfo.setLastBlockSize(newBlock.getBlockId(), newBlockSize);
        }
      } catch (IOException e) {
        DFSClient.LOG.debug("DFSClient file " + src + 
                  " is being concurrently append to" +
                  " but datanodes probably does not have block " +
                  last.getBlock(), e);
      }
    }
  }
  this.locatedBlocks = new DFSLocatedBlocks(newInfo);
  this.currentNode = null;
}
 
Example 19
Source File: ReplicaInPipeline.java    From hadoop with Apache License 2.0
/**
 * Constructor
 * @param block a block
 * @param vol volume where replica is located
 * @param dir directory path where block and meta files are located
 * @param writer a thread that is writing to this replica
 */
ReplicaInPipeline(Block block, 
    FsVolumeSpi vol, File dir, Thread writer) {
  this( block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(),
      vol, dir, writer, 0L);
}
 
Example 20
Source File: ReplicaInPipeline.java    From big-c with Apache License 2.0
/**
 * Constructor
 * @param block a block
 * @param vol volume where replica is located
 * @param dir directory path where block and meta files are located
 * @param writer a thread that is writing to this replica
 */
ReplicaInPipeline(Block block, 
    FsVolumeSpi vol, File dir, Thread writer) {
  this( block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(),
      vol, dir, writer, 0L);
}