org.apache.hadoop.hdfs.server.datanode.ReplicaInfo Java Examples

The following examples show how to use org.apache.hadoop.hdfs.server.datanode.ReplicaInfo. The source file and originating project are noted above each example.
Example #1
Source File: BlockPoolSlice.java    From lucene-solr with Apache License 2.0
/**
 * Move a persisted replica from lazypersist directory to a subdirectory
 * under finalized.
 */
ReplicaInfo activateSavedReplica(ReplicaInfo replicaInfo,
                                 RamDiskReplica replicaState) throws IOException {
  File metaFile = replicaState.getSavedMetaFile();
  File blockFile = replicaState.getSavedBlockFile();
  final long blockId = replicaInfo.getBlockId();
  final File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, blockId);
  final File targetBlockFile = new File(blockDir, blockFile.getName());
  final File targetMetaFile = new File(blockDir, metaFile.getName());
  fileIoProvider.moveFile(volume, blockFile, targetBlockFile);
  FsDatasetImpl.LOG.info("Moved " + blockFile + " to " + targetBlockFile);
  fileIoProvider.moveFile(volume, metaFile, targetMetaFile);
  FsDatasetImpl.LOG.info("Moved " + metaFile + " to " + targetMetaFile);

  return new ReplicaBuilder(ReplicaState.FINALIZED)
      .setBlockId(blockId)
      .setLength(replicaInfo.getBytesOnDisk())
      .setGenerationStamp(replicaInfo.getGenerationStamp())
      .setFsVolume(replicaState.getLazyPersistVolume())
      .setDirectoryToUse(targetBlockFile.getParentFile())
      .build();
}
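
For orientation, the finalized subdirectory that DatanodeUtil.idToBlockDir picks can be sketched as below. This is a sketch only, assuming the Hadoop 2.x layout in which two "subdirNN" levels are derived from bits of the block id; treat the exact bit positions as an assumption, not a contract.

// Sketch only (assumed Hadoop 2.x scheme): two directory levels come
// from bits 16-20 and 8-12 of the block id.
long blockId = 1073741825L;                       // hypothetical id
int d1 = (int) ((blockId >> 16) & 0x1F);
int d2 = (int) ((blockId >> 8) & 0x1F);
File blockDir = new File(finalizedDir,
    "subdir" + d1 + File.separator + "subdir" + d2);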
 
Example #2
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
/**
 * This should be primarily used for testing.
 * @return clone of replica store in datanode memory
 */
ReplicaInfo fetchReplicaInfo(String bpid, long blockId) {
  ReplicaInfo r = volumeMap.get(bpid, blockId);
  if (r == null) {
    return null;
  }
  switch (r.getState()) {
  case FINALIZED:
    return new FinalizedReplica((FinalizedReplica)r);
  case RBW:
    return new ReplicaBeingWritten((ReplicaBeingWritten)r);
  case RWR:
    return new ReplicaWaitingToBeRecovered((ReplicaWaitingToBeRecovered)r);
  case RUR:
    return new ReplicaUnderRecovery((ReplicaUnderRecovery)r);
  case TEMPORARY:
    return new ReplicaInPipeline((ReplicaInPipeline)r);
  }
  return null;
}
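
Because the method returns a copy, a test can inspect the result without disturbing the DataNode's in-memory state. A minimal sketch, assuming same-package access to an FsDatasetImpl instance named dataSet:

// Sketch: the clone is detached from the live volume map.
ReplicaInfo snapshot = dataSet.fetchReplicaInfo(bpid, blockId);
if (snapshot != null) {
  long bytesOnDisk = snapshot.getBytesOnDisk();  // safe to inspect
}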
 
Example #3
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
/**
 * Check if a block is valid.
 *
 * @param b           The block to check.
 * @param minLength   The minimum length that the block must have.  May be 0.
 * @param state       If this is null, it is ignored.  If it is non-null, we
 *                        will check that the replica has this state.
 *
 * @throws ReplicaNotFoundException          If the replica is not found 
 *
 * @throws UnexpectedReplicaStateException   If the replica is not in the 
 *                                             expected state.
 * @throws FileNotFoundException             If the block file is not found or there
 *                                              was an error locating it.
 * @throws EOFException                      If the replica length is too short.
 * 
 * @throws IOException                       May be thrown from the methods called. 
 */
public void checkBlock(ExtendedBlock b, long minLength, ReplicaState state)
    throws ReplicaNotFoundException, UnexpectedReplicaStateException,
    FileNotFoundException, EOFException, IOException {
  final ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), 
      b.getLocalBlock());
  if (replicaInfo == null) {
    throw new ReplicaNotFoundException(b);
  }
  // a null state means "any state is acceptable" (see the javadoc above)
  if (state != null && replicaInfo.getState() != state) {
    throw new UnexpectedReplicaStateException(b, state);
  }
  if (!replicaInfo.getBlockFile().exists()) {
    throw new FileNotFoundException(replicaInfo.getBlockFile().getPath());
  }
  long onDiskLength = getLength(b);
  if (onDiskLength < minLength) {
    throw new EOFException(b + "'s on-disk length " + onDiskLength
        + " is shorter than minLength " + minLength);
  }
}
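
A hedged usage sketch, assuming an FsDatasetImpl instance named dataSet and an ExtendedBlock blk from a test fixture; each failure mode surfaces as its own exception type:

try {
  // require a finalized replica of at least 1024 bytes on disk
  dataSet.checkBlock(blk, 1024L, ReplicaState.FINALIZED);
} catch (ReplicaNotFoundException e) {
  // no replica in the volume map
} catch (UnexpectedReplicaStateException e) {
  // replica exists but is not FINALIZED
} catch (FileNotFoundException e) {
  // block file missing on disk
} catch (EOFException e) {
  // on-disk length shorter than 1024 bytes
} catch (IOException e) {
  // any other I/O failure from the underlying calls
}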
 
Example #4
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
/** Check the files of a replica. */
static void checkReplicaFiles(final ReplicaInfo r) throws IOException {
  //check replica's file
  final File f = r.getBlockFile();
  if (!f.exists()) {
    throw new FileNotFoundException("File " + f + " not found, r=" + r);
  }
  if (r.getBytesOnDisk() != f.length()) {
    throw new IOException("File length mismatched.  The length of "
        + f + " is " + f.length() + " but r=" + r);
  }

  //check replica's meta file
  final File metafile = FsDatasetUtil.getMetaFile(f, r.getGenerationStamp());
  if (!metafile.exists()) {
    throw new IOException("Metafile " + metafile + " does not exist, r=" + r);
  }
  if (metafile.length() == 0) {
    throw new IOException("Metafile " + metafile + " is empty, r=" + r);
  }
}
 
Example #5
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
/**
 * Bump a replica's generation stamp to a new one.
 * Its on-disk meta file name is renamed to be the new one too.
 * 
 * @param replicaInfo a replica
 * @param newGS new generation stamp
 * @throws IOException if rename fails
 */
private void bumpReplicaGS(ReplicaInfo replicaInfo, 
    long newGS) throws IOException { 
  long oldGS = replicaInfo.getGenerationStamp();
  File oldmeta = replicaInfo.getMetaFile();
  replicaInfo.setGenerationStamp(newGS);
  File newmeta = replicaInfo.getMetaFile();

  // rename meta file to new GS
  if (LOG.isDebugEnabled()) {
    LOG.debug("Renaming " + oldmeta + " to " + newmeta);
  }
  try {
    NativeIO.renameTo(oldmeta, newmeta);
  } catch (IOException e) {
    replicaInfo.setGenerationStamp(oldGS); // restore old GS
    throw new IOException("Block " + replicaInfo + " reopen failed. " +
                          " Unable to move meta file  " + oldmeta +
                          " to " + newmeta, e);
  }
}
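
The rename can be pictured with the helper used in Example #4: the generation stamp is encoded in the meta file name, while the block file itself is untouched. The ids and stamps below are hypothetical, and the blk_<id>_<gs>.meta naming is an assumption:

// Sketch: bumping GS 1001 -> 1002 for block id 42 renames, e.g.,
//   blk_42_1001.meta  ->  blk_42_1002.meta
File blockFile = replicaInfo.getBlockFile();
File oldMeta = FsDatasetUtil.getMetaFile(blockFile, 1001L);
File newMeta = FsDatasetUtil.getMetaFile(blockFile, 1002L);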
 
Example #6
Source File: TestDatanodeRestart.java    From big-c with Apache License 2.0
private static void createUnlinkTmpFile(ReplicaInfo replicaInfo, 
    boolean changeBlockFile, 
    boolean isRename) throws IOException {
  final File src = changeBlockFile
      ? replicaInfo.getBlockFile() : replicaInfo.getMetaFile();
  final File dst = DatanodeUtil.getUnlinkTmpFile(src);
  if (isRename) {
    if (!src.renameTo(dst)) {
      throw new IOException("Failed to rename " + src + " to " + dst);
    }
  } else {
    // try-with-resources guarantees both streams are closed
    try (FileInputStream in = new FileInputStream(src);
         FileOutputStream out = new FileOutputStream(dst)) {
      IOUtils.copyBytes(in, out, 1);
    }
  }
}
 
Example #7
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
/**
 * Remove the temporary block file (if any)
 */
@Override // FsDatasetSpi
public synchronized void unfinalizeBlock(ExtendedBlock b) throws IOException {
  ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), 
      b.getLocalBlock());
  if (replicaInfo != null && replicaInfo.getState() == ReplicaState.TEMPORARY) {
    // remove from volumeMap
    volumeMap.remove(b.getBlockPoolId(), b.getLocalBlock());
    
    // delete the on-disk temp file
    if (delBlockFromDisk(replicaInfo.getBlockFile(), 
        replicaInfo.getMetaFile(), b.getLocalBlock())) {
      LOG.warn("Block " + b + " unfinalized and removed. " );
    }
    if (replicaInfo.getVolume().isTransientStorage()) {
      ramDiskReplicaTracker.discardReplica(b.getBlockPoolId(), b.getBlockId(), true);
    }
  }
}
 
Example #8
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
@Override  // FsDatasetSpi
public synchronized ReplicaHandler recoverAppend(
    ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException {
  LOG.info("Recover failed append to " + b);

  ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);

  FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
  ReplicaBeingWritten replica;
  try {
    // change the replica's state/gs etc.
    if (replicaInfo.getState() == ReplicaState.FINALIZED) {
      replica = append(b.getBlockPoolId(), (FinalizedReplica) replicaInfo,
                       newGS, b.getNumBytes());
    } else { //RBW
      bumpReplicaGS(replicaInfo, newGS);
      replica = (ReplicaBeingWritten) replicaInfo;
    }
  } catch (IOException e) {
    IOUtils.cleanup(null, ref);
    throw e;
  }
  return new ReplicaHandler(replica, ref);
}
 
Example #9
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
/**
 * Invalidate a block but does not delete the actual on-disk block file.
 *
 * It should only be used when deactivating disks.
 *
 * @param bpid the block pool ID.
 * @param block The block to be invalidated.
 */
public void invalidate(String bpid, ReplicaInfo block) {
  // If a DFSClient has the replica in its cache of short-circuit file
  // descriptors (and the client is using ShortCircuitShm), invalidate it.
  // The short-circuit registry is null in the unit tests, because the
  // datanode is a mock object.
  if (datanode.getShortCircuitRegistry() != null) {
    datanode.getShortCircuitRegistry().processBlockInvalidation(
        new ExtendedBlockId(block.getBlockId(), bpid));

    // If the block is cached, start uncaching it.
    cacheManager.uncacheBlock(bpid, block.getBlockId());
  }

  datanode.notifyNamenodeDeletedBlock(new ExtendedBlock(bpid, block),
      block.getStorageUuid());
}
 
Example #10
Source File: ReplicaMap.java    From hadoop with Apache License 2.0
/**
 * Remove the replica's meta information from the map that matches
 * the input block's id and generation stamp
 * @param bpid block pool id
 * @param block block with its id as the key
 * @return the removed replica's meta information
 * @throws IllegalArgumentException if the input block is null
 */
ReplicaInfo remove(String bpid, Block block) {
  checkBlockPool(bpid);
  checkBlock(block);
  synchronized(mutex) {
    Map<Long, ReplicaInfo> m = map.get(bpid);
    if (m != null) {
      Long key = Long.valueOf(block.getBlockId());
      ReplicaInfo replicaInfo = m.get(key);
      if (replicaInfo != null &&
          block.getGenerationStamp() == replicaInfo.getGenerationStamp()) {
        return m.remove(key);
      } 
    }
  }
  
  return null;
}
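
A sketch of the generation-stamp guard, assuming a populated ReplicaMap named replicaMap; removal keyed by a stale Block is a no-op:

// Sketch: the entry is removed only when the key's GS matches.
Block staleKey = new Block(blockId, 0, oldGenStamp);  // hypothetical values
ReplicaInfo removed = replicaMap.remove(bpid, staleKey);
// removed == null if the map holds the same id under a different GS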
 
Example #11
Source File: BlockPoolSlice.java    From hadoop with Apache License 2.0
private void deleteReplica(final ReplicaInfo replicaToDelete) {
  // Delete the files on disk. Failure here is okay.
  final File blockFile = replicaToDelete.getBlockFile();
  if (!blockFile.delete()) {
    LOG.warn("Failed to delete block file " + blockFile);
  }
  final File metaFile = replicaToDelete.getMetaFile();
  if (!metaFile.delete()) {
    LOG.warn("Failed to delete meta file " + metaFile);
  }
}
 
Example #12
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
@Override // FsDatasetSpi
public synchronized String recoverClose(ExtendedBlock b, long newGS,
    long expectedBlockLen) throws IOException {
  LOG.info("Recover failed close " + b);
  // check replica's state
  ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);
  // bump the replica's GS
  bumpReplicaGS(replicaInfo, newGS);
  // finalize the replica if RBW
  if (replicaInfo.getState() == ReplicaState.RBW) {
    finalizeReplica(b.getBlockPoolId(), replicaInfo);
  }
  return replicaInfo.getStorageUuid();
}
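
A hedged usage sketch, assuming an FsDatasetImpl instance dataSet and an ExtendedBlock blk left open by a failed close: one call bumps the generation stamp past the current one and finalizes the replica if it was RBW.

// Sketch: recover the failed close, then report back the storage UUID.
String storageUuid = dataSet.recoverClose(
    blk, blk.getGenerationStamp() + 1, blk.getNumBytes());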
 
Example #13
Source File: ReplicaMap.java    From big-c with Apache License 2.0
/**
 * Remove the replica's meta information from the map if present
 * @param bpid block pool id
 * @param blockId block id of the replica to be removed
 * @return the removed replica's meta information
 */
ReplicaInfo remove(String bpid, long blockId) {
  checkBlockPool(bpid);
  synchronized(mutex) {
    Map<Long, ReplicaInfo> m = map.get(bpid);
    if (m != null) {
      return m.remove(blockId);
    }
  }
  return null;
}
 
Example #14
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
/**
 * Complete the block write!
 */
@Override // FsDatasetSpi
public synchronized void finalizeBlock(ExtendedBlock b) throws IOException {
  if (Thread.interrupted()) {
    // Don't allow data modifications from interrupted threads
    throw new IOException("Cannot finalize block from Interrupted Thread");
  }
  ReplicaInfo replicaInfo = getReplicaInfo(b);
  if (replicaInfo.getState() == ReplicaState.FINALIZED) {
    // this is legal, when recovery happens on a file that has
    // been opened for append but never modified
    return;
  }
  finalizeReplica(b.getBlockPoolId(), replicaInfo);
}
 
Example #15
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
private void removeOldReplica(ReplicaInfo replicaInfo,
    ReplicaInfo newReplicaInfo, File blockFile, File metaFile,
    long blockFileUsed, long metaFileUsed, final String bpid) {
  // Before deleting the files from old storage we must notify the
  // NN that the files are on the new storage. Else a blockReport from
  // the transient storage might cause the NN to think the blocks are lost.
  // Replicas must be evicted from client short-circuit caches, because the
  // storage will no longer be the same, so checksums would need to be
  // re-validated.  This also stops a client from holding file descriptors,
  // which would prevent the OS from reclaiming the memory.
  ExtendedBlock extendedBlock =
      new ExtendedBlock(bpid, newReplicaInfo);
  datanode.getShortCircuitRegistry().processBlockInvalidation(
      ExtendedBlockId.fromExtendedBlock(extendedBlock));
  datanode.notifyNamenodeReceivedBlock(
      extendedBlock, null, newReplicaInfo.getStorageUuid());

  // Remove the old replicas
  if (blockFile.delete() || !blockFile.exists()) {
    ((FsVolumeImpl) replicaInfo.getVolume()).decDfsUsed(bpid, blockFileUsed);
    if (metaFile.delete() || !metaFile.exists()) {
      ((FsVolumeImpl) replicaInfo.getVolume()).decDfsUsed(bpid, metaFileUsed);
    }
  }

  // If deletion failed then the directory scanner will cleanup the blocks
  // eventually.
}
 
Example #16
Source File: ReplicaMap.java    From hadoop with Apache License 2.0
/**
 * Get the meta information of the replica that matches both block id 
 * and generation stamp
 * @param bpid block pool id
 * @param block block with its id as the key
 * @return the replica's meta information
 * @throws IllegalArgumentException if the input block or block pool is null
 */
ReplicaInfo get(String bpid, Block block) {
  checkBlockPool(bpid);
  checkBlock(block);
  ReplicaInfo replicaInfo = get(bpid, block.getBlockId());
  if (replicaInfo != null && 
      block.getGenerationStamp() == replicaInfo.getGenerationStamp()) {
    return replicaInfo;
  }
  return null;
}
 
Example #17
Source File: ReplicaMap.java    From hadoop with Apache License 2.0
/**
 * Get the meta information of the replica that matches the block id
 * @param bpid block pool id
 * @param blockId a block's id
 * @return the replica's meta information
 */
ReplicaInfo get(String bpid, long blockId) {
  checkBlockPool(bpid);
  synchronized(mutex) {
    Map<Long, ReplicaInfo> m = map.get(bpid);
    return m != null ? m.get(blockId) : null;
  }
}
 
Example #18
Source File: ReplicaMap.java    From hadoop with Apache License 2.0
/**
 * Add a replica's meta information into the map 
 * 
 * @param bpid block pool id
 * @param replicaInfo a replica's meta information
 * @return previous meta information of the replica
 * @throws IllegalArgumentException if the input parameter is null
 */
ReplicaInfo add(String bpid, ReplicaInfo replicaInfo) {
  checkBlockPool(bpid);
  checkBlock(replicaInfo);
  synchronized(mutex) {
    Map<Long, ReplicaInfo> m = map.get(bpid);
    if (m == null) {
      // Add an entry for block pool if it does not exist already
      m = new HashMap<Long, ReplicaInfo>();
      map.put(bpid, m);
    }
    return m.put(replicaInfo.getBlockId(), replicaInfo);
  }
}
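
Putting the ReplicaMap operations together, a minimal lifecycle sketch; the volume and directory arguments are assumed to come from a test fixture like the TestWriteToReplica setup shown later:

// Sketch: init the pool, add a finalized replica, then look it up.
replicaMap.initBlockPool(bpid);
ReplicaInfo info = new FinalizedReplica(
    new Block(1, 1, 2001), vol, vol.getCurrentDir().getParentFile());
ReplicaInfo previous = replicaMap.add(bpid, info);  // null on first insert
ReplicaInfo found = replicaMap.get(bpid, 1L);       // returns info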
 
Example #19
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
/**
 * Turn the block identifier into a filename
 * @param bpid Block pool Id
 * @param blockId a block's id
 * @return on disk data file path; null if the replica does not exist
 */
File getFile(final String bpid, final long blockId, boolean touch) {
  ReplicaInfo info = volumeMap.get(bpid, blockId);
  if (info != null) {
    if (touch && info.getVolume().isTransientStorage()) {
      ramDiskReplicaTracker.touch(bpid, blockId);
      datanode.getMetrics().incrRamDiskBlocksReadHits();
    }
    return info.getBlockFile();
  }
  return null;    
}
 
Example #20
Source File: ReplicaMap.java    From hadoop with Apache License 2.0
void initBlockPool(String bpid) {
  checkBlockPool(bpid);
  synchronized(mutex) {
    Map<Long, ReplicaInfo> m = map.get(bpid);
    if (m == null) {
      // Add an entry for block pool if it does not exist already
      m = new HashMap<Long, ReplicaInfo>();
      map.put(bpid, m);
    }
  }
}
 
Example #21
Source File: TestWriteToReplica.java    From big-c with Apache License 2.0
/**
 * Generate testing environment and return a collection of blocks
 * on which to run the tests.
 * 
 * @param bpid Block pool ID to generate blocks for
 * @param dataSet Namespace in which to insert blocks
 * @return Contrived blocks for further testing.
 * @throws IOException
 */
private ExtendedBlock[] setup(String bpid, FsDatasetImpl dataSet) throws IOException {
  // setup replicas map
  
  ExtendedBlock[] blocks = new ExtendedBlock[] {
      new ExtendedBlock(bpid, 1, 1, 2001), new ExtendedBlock(bpid, 2, 1, 2002), 
      new ExtendedBlock(bpid, 3, 1, 2003), new ExtendedBlock(bpid, 4, 1, 2004),
      new ExtendedBlock(bpid, 5, 1, 2005), new ExtendedBlock(bpid, 6, 1, 2006)
  };
  
  ReplicaMap replicasMap = dataSet.volumeMap;
  FsVolumeImpl vol = (FsVolumeImpl) dataSet.volumes
      .getNextVolume(StorageType.DEFAULT, 0).getVolume();
  ReplicaInfo replicaInfo = new FinalizedReplica(
      blocks[FINALIZED].getLocalBlock(), vol, vol.getCurrentDir().getParentFile());
  replicasMap.add(bpid, replicaInfo);
  replicaInfo.getBlockFile().createNewFile();
  replicaInfo.getMetaFile().createNewFile();
  
  replicasMap.add(bpid, new ReplicaInPipeline(
      blocks[TEMPORARY].getBlockId(),
      blocks[TEMPORARY].getGenerationStamp(), vol,
      vol.createTmpFile(bpid, blocks[TEMPORARY].getLocalBlock()).getParentFile(), 0));
  
  replicaInfo = new ReplicaBeingWritten(blocks[RBW].getLocalBlock(), vol,
      vol.createRbwFile(bpid, blocks[RBW].getLocalBlock()).getParentFile(), null);
  replicasMap.add(bpid, replicaInfo);
  replicaInfo.getBlockFile().createNewFile();
  replicaInfo.getMetaFile().createNewFile();
  
  replicasMap.add(bpid, new ReplicaWaitingToBeRecovered(
      blocks[RWR].getLocalBlock(), vol, vol.createRbwFile(bpid,
          blocks[RWR].getLocalBlock()).getParentFile()));
  replicasMap.add(bpid, new ReplicaUnderRecovery(new FinalizedReplica(blocks[RUR]
      .getLocalBlock(), vol, vol.getCurrentDir().getParentFile()), 2007));    
  
  return blocks;
}
 
Example #22
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
@Override  // FsDatasetSpi
public synchronized ReplicaHandler append(ExtendedBlock b,
    long newGS, long expectedBlockLen) throws IOException {
  // The block may already have been finalized: all packets were
  // successfully processed at the DataNode, but the acks for some of
  // them never reached the client, so the client re-opens the
  // connection and retries sending those packets. The other reason
  // is that an "append" is occurring to this block.
  
  // check the validity of the parameter
  if (newGS < b.getGenerationStamp()) {
    throw new IOException("The new generation stamp " + newGS + 
        " should be greater than the replica " + b + "'s generation stamp");
  }
  ReplicaInfo replicaInfo = getReplicaInfo(b);
  LOG.info("Appending to " + replicaInfo);
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
  }
  if (replicaInfo.getNumBytes() != expectedBlockLen) {
    throw new IOException("Corrupted replica " + replicaInfo + 
        " with a length of " + replicaInfo.getNumBytes() + 
        " expected length is " + expectedBlockLen);
  }

  FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
  ReplicaBeingWritten replica = null;
  try {
    replica = append(b.getBlockPoolId(), (FinalizedReplica)replicaInfo, newGS,
        b.getNumBytes());
  } catch (IOException e) {
    IOUtils.cleanup(null, ref);
    throw e;
  }
  return new ReplicaHandler(replica, ref);
}
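
A hedged usage sketch, assuming an FsDatasetImpl instance dataSet: the caller supplies a generation stamp at least as new as the replica's and the exact finalized length, and is responsible for the volume reference held by the returned handler.

// Sketch: reopen a finalized replica for append under a newer GS;
// expectedBlockLen must equal the replica's current length.
ReplicaHandler handler = dataSet.append(
    blk, blk.getGenerationStamp() + 1, blk.getNumBytes());
// ... write through the pipeline, then finalize; release the handler's
// volume reference once the write completes.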
 
Example #23
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
/**
 * Get the meta info of a block stored in volumeMap. To find a block,
 * block pool Id, block Id and generation stamp must match.
 * @param b extended block
 * @return the meta replica information
 * @throws ReplicaNotFoundException if no entry is in the map or 
 *                        there is a generation stamp mismatch
 */
ReplicaInfo getReplicaInfo(ExtendedBlock b)
    throws ReplicaNotFoundException {
  ReplicaInfo info = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
  if (info == null) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.NON_EXISTENT_REPLICA + b);
  }
  return info;
}
 
Example #24
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
/**
 * Removes a set of volumes from FsDataset.
 * @param volumesToRemove a set of absolute root path of each volume.
 * @param clearFailure set true to clear failure information.
 *
 * DataNode should call this function before calling
 * {@link DataStorage#removeVolumes(java.util.Collection)}.
 */
@Override
public synchronized void removeVolumes(
    Set<File> volumesToRemove, boolean clearFailure) {
  // Make sure that all volumes are absolute path.
  for (File vol : volumesToRemove) {
    Preconditions.checkArgument(vol.isAbsolute(),
        String.format("%s is not absolute path.", vol.getPath()));
  }
  for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
    Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
    final File absRoot = sd.getRoot().getAbsoluteFile();
    if (volumesToRemove.contains(absRoot)) {
      LOG.info("Removing " + absRoot + " from FsDataset.");

      // Disable the volume from the service.
      asyncDiskService.removeVolume(sd.getCurrentDir());
      volumes.removeVolume(absRoot, clearFailure);

      // Remove all replica information for the blocks on the volume. Unlike
      // updating the volumeMap in addVolume(), this operation does not scan
      // disks.
      for (String bpid : volumeMap.getBlockPoolList()) {
        for (Iterator<ReplicaInfo> it = volumeMap.replicas(bpid).iterator();
             it.hasNext(); ) {
          ReplicaInfo block = it.next();
          final File absBasePath =
              new File(block.getVolume().getBasePath()).getAbsoluteFile();
          if (absBasePath.equals(absRoot)) {
            invalidate(bpid, block);
            it.remove();
          }
        }
      }

      storageMap.remove(sd.getStorageUuid());
    }
  }
  setupAsyncLazyPersistThreads();
}
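
A hedged invocation sketch, assuming an FsDatasetImpl instance dataSet; the path below is hypothetical, and per the precondition above it must be absolute:

// Sketch: retire one data directory and clear its failure record.
Set<File> volumesToRemove = new HashSet<>();
volumesToRemove.add(new File("/data/disk3").getAbsoluteFile());
dataSet.removeVolumes(volumesToRemove, true);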
 