Java Code Examples for org.apache.hadoop.hdfs.protocol.Block#getGenerationStamp()

The following examples show how to use org.apache.hadoop.hdfs.protocol.Block#getGenerationStamp(). Each example is drawn from an open-source project; the line above each example names the source file, the project it comes from, and its license.
Example 1
Source File: FSImageSerialization.java    From hadoop with Apache License 2.0
/**
 * Write an array of blocks as compactly as possible. This uses
 * delta-encoding for the generation stamp and size, following
 * the principle that genstamp increases relatively slowly,
 * and size is equal for all but the last block of a file.
 */
public static void writeCompactBlockArray(
    Block[] blocks, DataOutputStream out) throws IOException {
  WritableUtils.writeVInt(out, blocks.length);
  Block prev = null;
  for (Block b : blocks) {
    long szDelta = b.getNumBytes() -
        (prev != null ? prev.getNumBytes() : 0);
    long gsDelta = b.getGenerationStamp() -
        (prev != null ? prev.getGenerationStamp() : 0);
    out.writeLong(b.getBlockId()); // blockid is random
    WritableUtils.writeVLong(out, szDelta);
    WritableUtils.writeVLong(out, gsDelta);
    prev = b;
  }
}
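
As a worked example: for a three-block file with sizes (134217728, 134217728, 1048576) and generation stamps (1001, 1001, 1001), the encoded deltas are szDelta = 134217728, 0, -133169152 and gsDelta = 1001, 0, 0. The repeated full-block size and the unchanged stamps each shrink to a single zero-byte VLong, which is where the compaction comes from.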
 
Example 2
Source File: FSImageSerialization.java    From big-c with Apache License 2.0
public static Block[] readCompactBlockArray(
    DataInput in, int logVersion) throws IOException {
  int num = WritableUtils.readVInt(in);
  if (num < 0) {
    throw new IOException("Invalid block array length: " + num);
  }
  Block prev = null;
  Block[] ret = new Block[num];
  for (int i = 0; i < num; i++) {
    long id = in.readLong();
    long sz = WritableUtils.readVLong(in) +
        ((prev != null) ? prev.getNumBytes() : 0);
    long gs = WritableUtils.readVLong(in) +
        ((prev != null) ? prev.getGenerationStamp() : 0);
    ret[i] = new Block(id, sz, gs);
    prev = ret[i];
  }
  return ret;
}
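
A minimal round-trip sketch of the two methods above (assuming both are accessible to the caller, e.g. from a test in the same package; the logVersion argument is unused by the body shown, so any supported value will do):

Block[] blocks = {
    new Block(1L, 134217728L, 1001L),  // full block
    new Block(2L, 134217728L, 1001L),  // same size, so szDelta == 0
    new Block(3L, 1048576L, 1002L)     // short last block
};
ByteArrayOutputStream bos = new ByteArrayOutputStream();
FSImageSerialization.writeCompactBlockArray(blocks, new DataOutputStream(bos));
Block[] back = FSImageSerialization.readCompactBlockArray(
    new DataInputStream(new ByteArrayInputStream(bos.toByteArray())), 0);
assert back[2].getNumBytes() == 1048576L
    && back[2].getGenerationStamp() == 1002L;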
 
Example 3
Source File: ReplicaMap.java    From hadoop with Apache License 2.0
/**
 * Remove the replica's meta information from the map that matches
 * the input block's id and generation stamp
 * @param bpid block pool id
 * @param block block with its id as the key
 * @return the removed replica's meta information
 * @throws IllegalArgumentException if the input block is null
 */
ReplicaInfo remove(String bpid, Block block) {
  checkBlockPool(bpid);
  checkBlock(block);
  synchronized(mutex) {
    Map<Long, ReplicaInfo> m = map.get(bpid);
    if (m != null) {
      Long key = Long.valueOf(block.getBlockId());
      ReplicaInfo replicaInfo = m.get(key);
      if (replicaInfo != null &&
          block.getGenerationStamp() == replicaInfo.getGenerationStamp()) {
        return m.remove(key);
      } 
    }
  }
  
  return null;
}
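
The generation-stamp guard matters because a stale request (for example, a deletion queued before the replica was recovered to a newer stamp) must not remove the newer replica. The same pattern, sketched standalone against a plain java.util.Map (a hypothetical helper, not part of ReplicaMap):

static Block removeIfStampMatches(Map<Long, Block> m, Block block) {
  Block stored = m.get(block.getBlockId());
  if (stored != null &&
      stored.getGenerationStamp() == block.getGenerationStamp()) {
    return m.remove(block.getBlockId());
  }
  return null; // unknown id, or a different (e.g. newer) generation
}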
 
Example 4
Source File: InvalidateBlocks.java    From big-c with Apache License 2.0
/**
 * @return true if the given storage has the given block listed for
 * invalidation. Blocks are compared including their generation stamps:
 * if a block is pending invalidation but with a different generation stamp,
 * returns false.
 */
synchronized boolean contains(final DatanodeInfo dn, final Block block) {
  final LightWeightHashSet<Block> s = node2blocks.get(dn);
  if (s == null) {
    return false; // no invalidate blocks for this storage ID
  }
  Block blockInSet = s.getElement(block);
  return blockInSet != null &&
      block.getGenerationStamp() == blockInSet.getGenerationStamp();
}
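
The explicit stamp comparison is needed because Block equality in this code line is keyed on the block id alone, so the set lookup can return an element whose generation stamp differs from the query's. A minimal illustration, assuming those equals() semantics:

Block a = new Block(1073741825L, 0L, 1001L);
Block b = new Block(1073741825L, 0L, 1002L); // same id, newer stamp
System.out.println(a.equals(b));             // true: id-only equality
System.out.println(a.getGenerationStamp()
    == b.getGenerationStamp());              // false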
 
Example 5
Source File: BlockRecoveryCommand.java    From hadoop with Apache License 2.0
/**
 * Create RecoveringBlock with copy-on-truncate option.
 */
public RecoveringBlock(ExtendedBlock b, DatanodeInfo[] locs,
    Block recoveryBlock) {
  super(b, locs, -1, false); // startOffset is unknown
  this.newGenerationStamp = recoveryBlock.getGenerationStamp();
  this.recoveryBlock = recoveryBlock;
}
 
Example 6
Source File: DataNode.java    From hadoop with Apache License 2.0
/**
 * Transfer a replica to the datanode targets.
 * @param b the block to transfer.
 *          The corresponding replica must be an RBW or a Finalized.
 *          Its GS and numBytes will be set to
 *          the stored GS and the visible length. 
 * @param targets targets to transfer the block to
 * @param client client name
 */
void transferReplicaForPipelineRecovery(final ExtendedBlock b,
    final DatanodeInfo[] targets, final StorageType[] targetStorageTypes,
    final String client) throws IOException {
  final long storedGS;
  final long visible;
  final BlockConstructionStage stage;

  //get replica information
  synchronized(data) {
    Block storedBlock = data.getStoredBlock(b.getBlockPoolId(),
        b.getBlockId());
    if (null == storedBlock) {
      throw new IOException(b + " not found in datanode.");
    }
    storedGS = storedBlock.getGenerationStamp();
    if (storedGS < b.getGenerationStamp()) {
      throw new IOException(storedGS
          + " = storedGS < b.getGenerationStamp(), b=" + b);
    }
    // Update the genstamp with storedGS
    b.setGenerationStamp(storedGS);
    if (data.isValidRbw(b)) {
      stage = BlockConstructionStage.TRANSFER_RBW;
    } else if (data.isValidBlock(b)) {
      stage = BlockConstructionStage.TRANSFER_FINALIZED;
    } else {
      final String r = data.getReplicaString(b.getBlockPoolId(), b.getBlockId());
      throw new IOException(b + " is neither a RBW nor a Finalized, r=" + r);
    }
    visible = data.getReplicaVisibleLength(b);
  }
  //set visible length
  b.setNumBytes(visible);

  if (targets.length > 0) {
    new DataTransfer(targets, targetStorageTypes, b, stage, client).run();
  }
}
 
Example 7
Source File: TestFileCorruption.java    From hadoop with Apache License 2.0
public static ExtendedBlock getBlock(String bpid, File dataDir) {
  List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(dataDir);
  if (metadataFiles == null || metadataFiles.isEmpty()) {
    return null;
  }
  File metadataFile = metadataFiles.get(0);
  File blockFile = Block.metaToBlockFile(metadataFile);
  return new ExtendedBlock(bpid, Block.getBlockId(blockFile.getName()),
      blockFile.length(), Block.getGenerationStamp(metadataFile.getName()));
}
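
This works because a replica's meta file name encodes both the block id and the generation stamp (the on-disk pattern is blk_<id>_<genstamp>.meta), and Block exposes static parsers for both parts. A small sketch using hypothetical file names:

String metaName = "blk_1073741825_1001.meta";  // hypothetical meta file name
long gs = Block.getGenerationStamp(metaName);  // 1001
long id = Block.getBlockId("blk_1073741825");  // 1073741825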
 
Example 8
Source File: ReplicaMap.java    From big-c with Apache License 2.0
/**
 * Get the meta information of the replica that matches both block id 
 * and generation stamp
 * @param bpid block pool id
 * @param block block with its id as the key
 * @return the replica's meta information
 * @throws IllegalArgumentException if the input block or block pool is null
 */
ReplicaInfo get(String bpid, Block block) {
  checkBlockPool(bpid);
  checkBlock(block);
  ReplicaInfo replicaInfo = get(bpid, block.getBlockId());
  if (replicaInfo != null && 
      block.getGenerationStamp() == replicaInfo.getGenerationStamp()) {
    return replicaInfo;
  }
  return null;
}
 
Example 9
Source File: FSEditLogLoader.java    From hadoop with Apache License 2.0
/**
 * Add a new block into the given INodeFile
 */
private void addNewBlock(FSDirectory fsDir, AddBlockOp op, INodeFile file)
    throws IOException {
  BlockInfoContiguous[] oldBlocks = file.getBlocks();
  Block pBlock = op.getPenultimateBlock();
  Block newBlock = op.getLastBlock();
  
  if (pBlock != null) { // the penultimate block is not null
    Preconditions.checkState(oldBlocks != null && oldBlocks.length > 0);
    // compare pBlock with the last block of oldBlocks
    Block oldLastBlock = oldBlocks[oldBlocks.length - 1];
    if (oldLastBlock.getBlockId() != pBlock.getBlockId()
        || oldLastBlock.getGenerationStamp() != pBlock.getGenerationStamp()) {
      throw new IOException(
          "Mismatched block IDs or generation stamps for the old last block of file "
              + op.getPath() + ", the old last block is " + oldLastBlock
              + ", and the block read from editlog is " + pBlock);
    }
    
    oldLastBlock.setNumBytes(pBlock.getNumBytes());
    if (oldLastBlock instanceof BlockInfoContiguousUnderConstruction) {
      fsNamesys.getBlockManager().forceCompleteBlock(file,
          (BlockInfoContiguousUnderConstruction) oldLastBlock);
      fsNamesys.getBlockManager().processQueuedMessagesForBlock(pBlock);
    }
  } else { // the penultimate block is null
    Preconditions.checkState(oldBlocks == null || oldBlocks.length == 0);
  }
  // add the new block
  BlockInfoContiguous newBI = new BlockInfoContiguousUnderConstruction(
        newBlock, file.getBlockReplication());
  fsNamesys.getBlockManager().addBlockCollection(newBI, file);
  file.addBlock(newBI);
  fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
}
 
Example 10
Source File: TestBlockReportProcessingTime.java    From RDFS with Apache License 2.0
/** Test the case when a block report processing at namenode
 * startup time is fast.
 */
public void testFasterBlockReports() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster(conf, 40, true, null);
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    NameNode namenode = cluster.getNameNode();
    LOG.info("Cluster Alive."); 

    // create a single file with one block.
    Path file1 = new Path("/filestatus.dat");
    final long FILE_LEN = 1L;
    DFSTestUtil.createFile(fs, file1, FILE_LEN, (short)2, 1L);
    LocatedBlocks locations = namenode.getBlockLocations(
                                file1.toString(), 0, Long.MAX_VALUE);
    assertTrue(locations.locatedBlockCount() == 1);
    Block block = locations.get(0).getBlock();
    long blkid = block.getBlockId();
    long genstamp = block.getGenerationStamp();
    long length = block.getNumBytes();
    
    // put namenode in safemode
    namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    DatanodeInfo[] dinfo = namenode.getDatanodeReport(DatanodeReportType.ALL);
    LOG.info("Found " + dinfo.length + " number of datanodes.");

    // create artificial block replicas on each datanode
    final int NUMBLOCKS = 1000;
    final int LONGS_PER_BLOCK = 3;
    long tmpblocks[] = new long[NUMBLOCKS * LONGS_PER_BLOCK];
    for (int i = 0; i < NUMBLOCKS; i++) {
      tmpblocks[i * LONGS_PER_BLOCK] = blkid;
      tmpblocks[i * LONGS_PER_BLOCK + 1] = length;
      tmpblocks[i * LONGS_PER_BLOCK + 2] = genstamp;
    }
    BlockListAsLongs blkList = new BlockListAsLongs(tmpblocks);

    // process block report from all machines
    long total = 0;
    for (int i = 0; i < dinfo.length; i++) {
      long start = now();
      namenode.namesystem.processReport(dinfo[i], blkList);
      total += now() - start;
      LOG.info("Processed block report from " + dinfo[i]);
    }
    LOG.info("Average of all block report processing time " +
             " from " + dinfo.length + " datanodes is " +
             (total/dinfo.length) + " milliseconds.");
    
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
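
In this RDFS-era format, BlockListAsLongs wraps a flat long[] in which each replica occupies three consecutive longs: block id, length, and generation stamp (hence LONGS_PER_BLOCK = 3 above). The report of 1000 artificial replicas built here therefore serializes to 1000 * 3 * 8 = 24,000 bytes.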
 
Example 11
Source File: FSDataset.java    From RDFS with Apache License 2.0
/** {@inheritDoc} */
public void validateBlockMetadata(int namespaceId, Block b) throws IOException {
  DatanodeBlockInfo info;
  lock.readLock().lock();
  try {
    info = volumeMap.get(namespaceId, b);
  } finally {
    lock.readLock().unlock();
  }
  if (info == null) {
    throw new IOException("Block " + b + " does not exist in volumeMap.");
  }
  FSVolume v = info.getVolume();
  File tmp = v.getTmpFile(namespaceId, b);
  File f = info.getFile();
  long fileSize;
  if (f == null) {
    f = tmp;
    if (f == null) {
      throw new IOException("Block " + b + " does not exist on disk.");
    }
    if (!f.exists()) {
      throw new IOException("Block " + b + 
                            " block file " + f +
                            " does not exist on disk.");
    }
    fileSize = f.length();
  } else {
    if (info.isFinalized()) {
      info.verifyFinalizedSize();
      fileSize = info.getFinalizedSize();
    } else {
      fileSize = f.length();
    }
  }
  if (b.getNumBytes() > fileSize) {
    throw new IOException("Block " + b + 
                          " length is " + b.getNumBytes()  +
                          " does not match block file length " +
                          f.length());
  }
  File meta = getMetaFile(f, b);
  if (meta == null) {
    throw new IOException("Block " + b + 
                          " metafile does not exist.");
  }
  if (!meta.exists()) {
    throw new IOException("Block " + b + 
                          " metafile " + meta +
                          " does not exist on disk.");
  }
  long metaFileSize = meta.length();
  if (metaFileSize == 0 && fileSize > 0) {
    throw new IOException("Block " + b + " metafile " + meta + " is empty.");
  }
  long stamp = parseGenerationStamp(f, meta);
  if (stamp != b.getGenerationStamp()) {
    throw new IOException("Block " + b + 
                          " genstamp is " + b.getGenerationStamp()  +
                          " does not match meta file stamp " +
                          stamp);
  }
  if (metaFileSize == 0) {
    // no need to check metadata size for 0 size file
    return;
  }
  // verify that the checksum file has an integral number of checksum values.
  DataChecksum dcs = BlockMetadataHeader.readHeader(meta).getChecksum();
  int checksumsize = dcs.getChecksumSize();
  long actual = metaFileSize - BlockMetadataHeader.getHeaderSize();
  long numChunksInMeta = actual/checksumsize;
  if (actual % checksumsize != 0) {
    throw new IOException("Block " + b +
                          " has a checksum file of size " + metaFileSize +
                          " but it does not align with checksum size of " +
                          checksumsize);
  }
  int bpc = dcs.getBytesPerChecksum();
  long minDataSize = (numChunksInMeta - 1) * bpc;
  long maxDataSize = numChunksInMeta * bpc;
  if (fileSize > maxDataSize || fileSize <= minDataSize) {
    throw new IOException("Block " + b +
                          " is of size " + f.length() +
                          " but has " + (numChunksInMeta + 1) +
                          " checksums and each checksum size is " +
                          checksumsize + " bytes.");
  }
  // We could crc-check the entire block here, but it will be a costly 
  // operation. Instead we rely on the above check (file length mismatch)
  // to detect corrupt blocks.
}
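
As a worked example of the final size check: with the common 512 bytes per checksum and 4-byte CRC values, a meta file whose payload holds 12 bytes of checksums gives numChunksInMeta = 3, minDataSize = 2 * 512 = 1024, and maxDataSize = 3 * 512 = 1536, so any block file length outside the range (1024, 1536] is rejected as inconsistent with its meta file.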
 
Example 12
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
/** static version of {@link #initReplicaRecovery(RecoveringBlock)}. */
static ReplicaRecoveryInfo initReplicaRecovery(String bpid, ReplicaMap map,
    Block block, long recoveryId, long xceiverStopTimeout) throws IOException {
  final ReplicaInfo replica = map.get(bpid, block.getBlockId());
  LOG.info("initReplicaRecovery: " + block + ", recoveryId=" + recoveryId
      + ", replica=" + replica);

  //check replica
  if (replica == null) {
    return null;
  }

  //stop writer if there is any
  if (replica instanceof ReplicaInPipeline) {
    final ReplicaInPipeline rip = (ReplicaInPipeline)replica;
    rip.stopWriter(xceiverStopTimeout);

    //check replica bytes on disk.
    if (rip.getBytesOnDisk() < rip.getVisibleLength()) {
      throw new IOException("THIS IS NOT SUPPOSED TO HAPPEN:"
          + " getBytesOnDisk() < getVisibleLength(), rip=" + rip);
    }

    //check the replica's files
    checkReplicaFiles(rip);
  }

  //check generation stamp
  if (replica.getGenerationStamp() < block.getGenerationStamp()) {
    throw new IOException(
        "replica.getGenerationStamp() < block.getGenerationStamp(), block="
        + block + ", replica=" + replica);
  }

  //check recovery id
  if (replica.getGenerationStamp() >= recoveryId) {
    throw new IOException("THIS IS NOT SUPPOSED TO HAPPEN:"
        + " replica.getGenerationStamp() >= recoveryId = " + recoveryId
        + ", block=" + block + ", replica=" + replica);
  }

  //check RUR
  final ReplicaUnderRecovery rur;
  if (replica.getState() == ReplicaState.RUR) {
    rur = (ReplicaUnderRecovery)replica;
    if (rur.getRecoveryID() >= recoveryId) {
      throw new RecoveryInProgressException(
          "rur.getRecoveryID() >= recoveryId = " + recoveryId
          + ", block=" + block + ", rur=" + rur);
    }
    final long oldRecoveryID = rur.getRecoveryID();
    rur.setRecoveryID(recoveryId);
    LOG.info("initReplicaRecovery: update recovery id for " + block
        + " from " + oldRecoveryID + " to " + recoveryId);
  }
  else {
    rur = new ReplicaUnderRecovery(replica, recoveryId);
    map.add(bpid, rur);
    LOG.info("initReplicaRecovery: changing replica state for "
        + block + " from " + replica.getState()
        + " to " + rur.getState());
  }
  return rur.createInfo();
}
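
Taken together, the two stamp checks pin the replica's generation stamp into the half-open interval [block GS, recoveryId): a replica older than the requested block is stale, and the recovery id must be strictly newer than every stamp already on record. A standalone sketch of just that ordering (a hypothetical helper, not part of FsDatasetImpl):

static void checkStampOrdering(long replicaGS, long blockGS, long recoveryId)
    throws IOException {
  if (replicaGS < blockGS) {
    throw new IOException("stale replica: " + replicaGS + " < " + blockGS);
  }
  if (replicaGS >= recoveryId) {
    throw new IOException("recovery id " + recoveryId
        + " is not newer than existing stamp " + replicaGS);
  }
}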
 
Example 13
Source File: DirectoryScanner.java    From big-c with Apache License 2.0
public long getGenStamp() {
  return metaSuffix != null ? Block.getGenerationStamp(
      getMetaFile().getName()) : 
        GenerationStamp.GRANDFATHER_GENERATION_STAMP;
}
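
When a scanned replica has no meta file at all (metaSuffix == null), the method falls back to GenerationStamp.GRANDFATHER_GENERATION_STAMP, the sentinel stamp assigned to blocks that predate generation stamps, rather than failing; such a replica then stands out as out of date in later stamp comparisons.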
 
Example 14
Source File: AvatarNode.java    From RDFS with Apache License 2.0
/**
 * @inheritDoc
 */
public long[] blockReceivedAndDeletedNew(DatanodeRegistration nodeReg,
      IncrementalBlockReport receivedAndDeletedBlocks) throws IOException {
  long[] failedMap = null;
  if (runInfo.shutdown || !runInfo.isRunning) {
    // Do not attempt to process blocks when
    // the namenode is not running
    if (currentAvatar == Avatar.STANDBY) {
      return new long[0];
    } else {
      return null;
    }
  }
  HashSet<Long> failedIds;
  if (currentAvatar == Avatar.STANDBY) {
    int noAck = receivedAndDeletedBlocks.getLength();
    
    // retry all block if the standby is behind consuming edits
    if (ignoreDatanodes()) {
      LOG.info("Standby fell behind. Telling " + nodeReg.toString() +
      " to retry incremental block report of " + noAck
      + " blocks later.");
      failedMap = LightWeightBitSet.getBitSet(noAck);
      for (int i = 0; i < noAck; i++)
        LightWeightBitSet.set(failedMap, i);
      return failedMap;
    }
    
    Block blockRD = new Block();
    failedIds = new HashSet<Long>();
    failedMap = LightWeightBitSet.getBitSet(noAck);
    namesystem.writeLock();
    try {
      receivedAndDeletedBlocks.resetIterator();
      for (int currentBlock = 0; currentBlock < noAck; currentBlock++) {
        receivedAndDeletedBlocks.getNext(blockRD);
        if(failedIds.contains(blockRD.getBlockId())){
          // check if there was no other blocking failed request
          blockRD.setNumBytes(BlockFlags.IGNORE);
          receivedAndDeletedBlocks.setBlock(blockRD, currentBlock);
          LightWeightBitSet.set(failedMap, currentBlock);
          continue;
        }
        BlockInfo storedBlock = namesystem.blocksMap.getStoredBlock(blockRD);
        if (!DFSUtil.isDeleted(blockRD) && (storedBlock == null) &&
            (!namesystem.getPersistBlocks() ||
            blockRD.getGenerationStamp() >= namesystem.getGenerationStamp())) {
          // If this block does not belong to any file and its GS
          // is no less than the avatar node's GS,
          // the AvatarNode may not have consumed the file/block creation
          // edit log yet, so add it to the failed list.
          // - do not process any requests for blocks with the same block id
          //   (also add them to the failed list)
          // - do not block other requests
          blockRD.setNumBytes(BlockFlags.IGNORE);
          receivedAndDeletedBlocks.setBlock(blockRD, currentBlock);
          LightWeightBitSet.set(failedMap, currentBlock);
          failedIds.add(blockRD.getBlockId());
        }
      }
    } finally {
      namesystem.writeUnlock();
      if (failedMap != null && LightWeightBitSet.cardinality(failedMap) != 0) {
        LOG.info("*BLOCK* NameNode.blockReceivedAndDeleted: "
          + "from " + nodeReg.getName() + " has to retry "
          + LightWeightBitSet.cardinality(failedMap) + " blocks.");
      }
      receivedAndDeletedBlocks.resetIterator();
      for (int currentBlock = 0; currentBlock < noAck; currentBlock++) {
        receivedAndDeletedBlocks.getNext(blockRD);
        if (!LightWeightBitSet.get(failedMap, currentBlock))
          continue;
        LOG.info("blockReceivedDeleted " + (DFSUtil.isDeleted(blockRD) ? "DELETED" : "RECEIVED")
            + " request received for "
            + blockRD + " on " + nodeReg.getName() + " size "
            + blockRD.getNumBytes()
            + " But it does not belong to any file." + " Retry later.");
      }
    }
  }
  super.blockReceivedAndDeleted(nodeReg, receivedAndDeletedBlocks);
  return failedMap;
}
 
Example 15
Source File: FSDataset.java    From RDFS with Apache License 2.0
/**
 * Try to update an old block to a new block.
 * If there are ongoing create threads running for the old block,
 * the threads will be returned without updating the block.
 *
 * @return ongoing create threads if there is any. Otherwise, return null.
 */
private List<Thread> tryUpdateBlock(int namespaceId, 
    Block oldblock, Block newblock) throws IOException {
  lock.writeLock().lock();
  try {
    //check ongoing create threads
    ArrayList<Thread> activeThreads = getActiveThreads(namespaceId, oldblock);
    if (activeThreads != null) {
      return activeThreads;
    }

    if (volumeMap.get(namespaceId, oldblock) == null) {
      throw new IOException("Block " + oldblock
          + " doesn't exist or has been recovered to a new generation ");
    }

    //No ongoing create threads is alive.  Update block.
    File blockFile = findBlockFile(namespaceId, oldblock.getBlockId());
    if (blockFile == null) {
      throw new IOException("Block " + oldblock + " does not exist.");
    }

    File oldMetaFile = findMetaFile(blockFile);
    long oldgs = parseGenerationStamp(blockFile, oldMetaFile);
    
    // First validate the update

    //update generation stamp
    if (oldgs > newblock.getGenerationStamp()) {
      throw new IOException("Cannot update block (id=" + newblock.getBlockId()
          + ") generation stamp from " + oldgs
          + " to " + newblock.getGenerationStamp());
    }
    
    //update length
    if (newblock.getNumBytes() > oldblock.getNumBytes()) {
      throw new IOException("Cannot update block file (=" + blockFile
          + ") length from " + oldblock.getNumBytes() + " to " + newblock.getNumBytes());
    }

    // Although we've waited for the active threads all dead before updating
    // the map so there should be no data race there, we still create new
    // ActiveFile object to make sure in case another thread holds it,
    // it won't cause any problem for us.
    //
    try {
      volumeMap.copyOngoingCreates(namespaceId, oldblock);
    } catch (CloneNotSupportedException e) {
      // It should never happen.
      throw new IOException("Cannot clone ActiveFile object", e);
    }

    // Now perform the update

    // rename meta file to a tmp file
    File tmpMetaFile = new File(oldMetaFile.getParent(),
        oldMetaFile.getName() + "_tmp" + newblock.getGenerationStamp());
    if (!oldMetaFile.renameTo(tmpMetaFile)) {
      throw new IOException("Cannot rename block meta file to " + tmpMetaFile);
    }

    long oldFileLength = blockFile.length();
    if (newblock.getNumBytes() < oldFileLength) {
      truncateBlock(blockFile, tmpMetaFile, oldFileLength,
          newblock.getNumBytes());
      ActiveFile file = volumeMap.getOngoingCreates(namespaceId, oldblock);
      if (file != null) {
        file.setBytesAcked(newblock.getNumBytes());
        file.setBytesOnDisk(newblock.getNumBytes());
      } else {
        // This should never happen unless called from unit tests.
        this.getDatanodeBlockInfo(namespaceId, oldblock).syncInMemorySize();
      }
    }

    //rename the tmp file to the new meta file (with new generation stamp)
    File newMetaFile = getMetaFile(blockFile, newblock);
    if (!tmpMetaFile.renameTo(newMetaFile)) {
      throw new IOException("Cannot rename tmp meta file to " + newMetaFile);
    }

    if(volumeMap.getOngoingCreates(namespaceId, oldblock) != null){
      ActiveFile af = volumeMap.removeOngoingCreates(namespaceId, oldblock);
      volumeMap.addOngoingCreates(namespaceId, newblock, af);
    }
    volumeMap.update(namespaceId, oldblock, newblock);

    // paranoia! verify that the contents of the stored block 
    // matches the block file on disk.
    validateBlockMetadata(namespaceId, newblock);
    return null;
  } finally {
    lock.writeLock().unlock();
  }
}
 
Example 16
Source File: ReplicaInPipeline.java    From hadoop with Apache License 2.0
/**
 * Constructor
 * @param block a block
 * @param vol volume where replica is located
 * @param dir directory path where block and meta files are located
 * @param writer a thread that is writing to this replica
 */
ReplicaInPipeline(Block block, 
    FsVolumeSpi vol, File dir, Thread writer) {
  this( block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(),
      vol, dir, writer, 0L);
}
 
Example 17
Source File: ReplicaInfo.java    From hadoop with Apache License 2.0
/**
 * Constructor
 * @param block a block
 * @param vol volume where replica is located
 * @param dir directory path where block and meta files are located
 */
ReplicaInfo(Block block, FsVolumeSpi vol, File dir) {
  this(block.getBlockId(), block.getNumBytes(), 
      block.getGenerationStamp(), vol, dir);
}