Java Code Examples for org.apache.hadoop.hdfs.protocol.ExtendedBlock#getBlockId()

The following examples show how to use org.apache.hadoop.hdfs.protocol.ExtendedBlock#getBlockId(). The method returns the numeric id of the underlying block; paired with the block pool id, it uniquely identifies a block in a federated HDFS cluster. You can go to the original project or source file by following the link above each example.
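Before the project examples, here is a minimal, self-contained sketch. The pool id and block values are invented for illustration, but the four-argument constructor (block pool id, block id, length, generation stamp) is the same one used in Examples 1 and 5 below.

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class GetBlockIdDemo {
  public static void main(String[] args) {
    // Invented values: a block pool id plus block id, length, and generation stamp.
    ExtendedBlock b = new ExtendedBlock(
        "BP-1234567890-127.0.0.1-1400000000000", 1073741825L, 134217728L, 1001L);
    long id = b.getBlockId();
    // Paired with getBlockPoolId(), this id uniquely identifies the block.
    System.out.println("block " + id + " in pool " + b.getBlockPoolId());
  }
}
 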
Example 1
Source File: TestBalancerWithMultipleNameNodes.java    From hadoop and big-c with Apache License 2.0
private static ExtendedBlock[][] generateBlocks(Suite s, long size
    ) throws IOException, InterruptedException, TimeoutException {
  final ExtendedBlock[][] blocks = new ExtendedBlock[s.clients.length][];
  for(int n = 0; n < s.clients.length; n++) {
    final long fileLen = size/s.replication;
    createFile(s, n, fileLen);

    final List<LocatedBlock> locatedBlocks = s.clients[n].getBlockLocations(
        FILE_NAME, 0, fileLen).getLocatedBlocks();

    final int numOfBlocks = locatedBlocks.size();
    blocks[n] = new ExtendedBlock[numOfBlocks];
    for(int i = 0; i < numOfBlocks; i++) {
      final ExtendedBlock b = locatedBlocks.get(i).getBlock();
      blocks[n][i] = new ExtendedBlock(b.getBlockPoolId(), b.getBlockId(),
          b.getNumBytes(), b.getGenerationStamp());
    }
  }
  return blocks;
}
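Note that the inner loop copies each located block's pool id, getBlockId() value, length, and generation stamp into a fresh ExtendedBlock rather than holding on to the LocatedBlock's instance, so the balancer test keeps an independent snapshot of every block's identity.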
 
Example 2
Source File: BlockTokenSecretManager.java    From hadoop and big-c with Apache License 2.0
/** Generate a block token for a specified user */
public Token<BlockTokenIdentifier> generateToken(String userId,
    ExtendedBlock block, EnumSet<AccessMode> modes) throws IOException {
  BlockTokenIdentifier id = new BlockTokenIdentifier(userId, block
      .getBlockPoolId(), block.getBlockId(), modes);
  return new Token<BlockTokenIdentifier>(id, this);
}
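A hypothetical caller might look like the sketch below; the secretManager and block variables, and the user name "alice", are assumptions for illustration and not part of the original source.

// Hypothetical usage sketch: 'secretManager' is an initialized
// BlockTokenSecretManager and 'block' an ExtendedBlock already in scope.
Token<BlockTokenIdentifier> readToken =
    secretManager.generateToken("alice", block, EnumSet.of(AccessMode.READ));
// The identifier inside the token carries block.getBlockPoolId() and
// block.getBlockId(), which checkAccess() compares later (see Example 4).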
 
Example 3
Source File: RemoteBlockReader2.java    From hadoop and big-c with Apache License 2.0
static void checkSuccess(
    BlockOpResponseProto status, Peer peer,
    ExtendedBlock block, String file)
    throws IOException {
  String logInfo = "for OP_READ_BLOCK"
    + ", self=" + peer.getLocalAddressString()
    + ", remote=" + peer.getRemoteAddressString()
    + ", for file " + file
    + ", for pool " + block.getBlockPoolId()
    + " block " + block.getBlockId() + "_" + block.getGenerationStamp();
  DataTransferProtoUtil.checkBlockOpStatus(status, logInfo);
}
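Note that the log line renders the block as getBlockId() + "_" + generation stamp, the same id_genstamp pairing HDFS uses in block metadata file names.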
 
Example 4
Source File: BlockTokenSecretManager.java    From big-c with Apache License 2.0
/**
 * Check if access should be allowed. userID is not checked if null. This
 * method doesn't check if token password is correct. It should be used only
 * when token password has already been verified (e.g., in the RPC layer).
 */
public void checkAccess(BlockTokenIdentifier id, String userId,
    ExtendedBlock block, AccessMode mode) throws InvalidToken {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Checking access for user=" + userId + ", block=" + block
        + ", access mode=" + mode + " using " + id.toString());
  }
  if (userId != null && !userId.equals(id.getUserId())) {
    throw new InvalidToken("Block token with " + id.toString()
        + " doesn't belong to user " + userId);
  }
  if (!id.getBlockPoolId().equals(block.getBlockPoolId())) {
    throw new InvalidToken("Block token with " + id.toString()
        + " doesn't apply to block " + block);
  }
  if (id.getBlockId() != block.getBlockId()) {
    throw new InvalidToken("Block token with " + id.toString()
        + " doesn't apply to block " + block);
  }
  if (isExpired(id.getExpiryDate())) {
    throw new InvalidToken("Block token with " + id.toString()
        + " is expired.");
  }
  if (!id.getAccessModes().contains(mode)) {
    throw new InvalidToken("Block token with " + id.toString()
        + " doesn't have " + mode + " permission");
  }
}
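Since every check above signals failure by throwing InvalidToken, a hedged sketch of the calling side looks like this (the id, block, and secretManager variables are assumed to be in scope):

// Hypothetical usage sketch; names are illustrative, not from the original.
try {
  secretManager.checkAccess(id, "alice", block, AccessMode.READ);
} catch (InvalidToken e) {
  // A user, block pool, getBlockId(), expiry, or access-mode mismatch lands here.
}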
 
Example 5
Source File: TestInterDatanodeProtocol.java    From hadoop and big-c with Apache License 2.0
/**
 * The following test first creates a file.
 * It verifies the block information from a datanode.
 * Then, it updates the block with new information and verifies again.
 * @param useDnHostname whether DNs should connect to other DNs by hostname
 */
private void checkBlockMetaDataInfo(boolean useDnHostname) throws Exception {
  MiniDFSCluster cluster = null;

  conf.setBoolean(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, useDnHostname);
  if (useDnHostname) {
    // Since the mini cluster only listens on the loopback we have to
    // ensure the hostname used to access DNs maps to the loopback. We
    // do this by telling the DN to advertise localhost as its hostname
    // instead of the default hostname.
    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
  }

  try {
    cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3)
      .checkDataNodeHostConfig(true)
      .build();
    cluster.waitActive();

    //create a file
    DistributedFileSystem dfs = cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);
    assertTrue(dfs.exists(filepath));

    //get block info
    LocatedBlock locatedblock = getLastLocatedBlock(
        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
    DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    assertTrue(datanodeinfo.length > 0);

    //connect to a data node
    DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
    InterDatanodeProtocol idp = DataNodeTestUtils.createInterDatanodeProtocolProxy(
        datanode, datanodeinfo[0], conf, useDnHostname);
    
    // Stop the block scanners.
    datanode.getBlockScanner().removeAllVolumeScanners();

    //verify BlockMetaDataInfo
    ExtendedBlock b = locatedblock.getBlock();
    InterDatanodeProtocol.LOG.info("b=" + b + ", " + b.getClass());
    checkMetaInfo(b, datanode);
    long recoveryId = b.getGenerationStamp() + 1;
    idp.initReplicaRecovery(
        new RecoveringBlock(b, locatedblock.getLocations(), recoveryId));

    //verify updateBlock
    ExtendedBlock newblock = new ExtendedBlock(b.getBlockPoolId(),
        b.getBlockId(), b.getNumBytes()/2, b.getGenerationStamp()+1);
    idp.updateReplicaUnderRecovery(b, recoveryId, b.getBlockId(),
        newblock.getNumBytes());
    checkMetaInfo(newblock, datanode);
    
    // Verify correct null response trying to init recovery for a missing block
    ExtendedBlock badBlock = new ExtendedBlock("fake-pool",
        b.getBlockId(), 0, 0);
    assertNull(idp.initReplicaRecovery(
        new RecoveringBlock(badBlock,
            locatedblock.getLocations(), recoveryId)));
  }
  finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
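The badBlock at the end deliberately pairs the real b.getBlockId() with a made-up pool id ("fake-pool"), so the datanode cannot find a matching replica and initReplicaRecovery() is expected to return null.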
 
Example 6
Source File: FsDatasetImpl.java    From hadoop and big-c with Apache License 2.0
@Override // FsDatasetSpi
public synchronized ReplicaHandler createRbw(
    StorageType storageType, ExtendedBlock b, boolean allowLazyPersist)
    throws IOException {
  ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
      b.getBlockId());
  if (replicaInfo != null) {
    throw new ReplicaAlreadyExistsException("Block " + b +
    " already exists in state " + replicaInfo.getState() +
    " and thus cannot be created.");
  }
  // create a new block
  FsVolumeReference ref;
  while (true) {
    try {
      if (allowLazyPersist) {
        // First try to place the block on a transient volume.
        ref = volumes.getNextTransientVolume(b.getNumBytes());
        datanode.getMetrics().incrRamDiskBlocksWrite();
      } else {
        ref = volumes.getNextVolume(storageType, b.getNumBytes());
      }
    } catch (DiskOutOfSpaceException de) {
      if (allowLazyPersist) {
        datanode.getMetrics().incrRamDiskBlocksWriteFallback();
        allowLazyPersist = false;
        continue;
      }
      throw de;
    }
    break;
  }
  FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
  // create an rbw file to hold block in the designated volume
  File f;
  try {
    f = v.createRbwFile(b.getBlockPoolId(), b.getLocalBlock());
  } catch (IOException e) {
    IOUtils.cleanup(null, ref);
    throw e;
  }

  ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(b.getBlockId(), 
      b.getGenerationStamp(), v, f.getParentFile(), b.getNumBytes());
  volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
  return new ReplicaHandler(newReplicaInfo, ref);
}
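Here getBlockId(), combined with the pool id, is the lookup key into volumeMap: a hit means a replica already exists, so the RBW (replica-being-written) file is created only when the lookup misses.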
 
Example 7
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
@Override // FsDatasetSpi
public synchronized ReplicaInPipeline convertTemporaryToRbw(
    final ExtendedBlock b) throws IOException {
  final long blockId = b.getBlockId();
  final long expectedGs = b.getGenerationStamp();
  final long visible = b.getNumBytes();
  LOG.info("Convert " + b + " from Temporary to RBW, visible length="
      + visible);

  final ReplicaInPipeline temp;
  {
    // get replica
    final ReplicaInfo r = volumeMap.get(b.getBlockPoolId(), blockId);
    if (r == null) {
      throw new ReplicaNotFoundException(
          ReplicaNotFoundException.NON_EXISTENT_REPLICA + b);
    }
    // check the replica's state
    if (r.getState() != ReplicaState.TEMPORARY) {
      throw new ReplicaAlreadyExistsException(
          "r.getState() != ReplicaState.TEMPORARY, r=" + r);
    }
    temp = (ReplicaInPipeline)r;
  }
  // check generation stamp
  if (temp.getGenerationStamp() != expectedGs) {
    throw new ReplicaAlreadyExistsException(
        "temp.getGenerationStamp() != expectedGs = " + expectedGs
        + ", temp=" + temp);
  }

  // TODO: check writer?
  // set writer to the current thread
  // temp.setWriter(Thread.currentThread());

  // check length
  final long numBytes = temp.getNumBytes();
  if (numBytes < visible) {
    throw new IOException(numBytes + " = numBytes < visible = "
        + visible + ", temp=" + temp);
  }
  // check volume
  final FsVolumeImpl v = (FsVolumeImpl)temp.getVolume();
  if (v == null) {
    throw new IOException("r.getVolume() = null, temp="  + temp);
  }
  
  // move block files to the rbw directory
  BlockPoolSlice bpslice = v.getBlockPoolSlice(b.getBlockPoolId());
  final File dest = moveBlockFiles(b.getLocalBlock(), temp.getBlockFile(), 
      bpslice.getRbwDir());
  // create RBW
  final ReplicaBeingWritten rbw = new ReplicaBeingWritten(
      blockId, numBytes, expectedGs,
      v, dest.getParentFile(), Thread.currentThread(), 0);
  rbw.setBytesAcked(visible);
  // overwrite the RBW in the volume map
  volumeMap.add(b.getBlockPoolId(), rbw);
  return rbw;
}
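The same (getBlockPoolId(), getBlockId()) pair keys the volumeMap lookup here, and the id is then reused with the expected generation stamp to build the ReplicaBeingWritten that replaces the temporary replica's entry in the map.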
 
Example 8
Source File: FsDatasetImpl.java    From hadoop and big-c with Apache License 2.0
@Override // FsDatasetSpi
public ReplicaHandler createTemporary(
    StorageType storageType, ExtendedBlock b) throws IOException {
  long startTimeMs = Time.monotonicNow();
  long writerStopTimeoutMs = datanode.getDnConf().getXceiverStopTimeout();
  ReplicaInfo lastFoundReplicaInfo = null;
  do {
    synchronized (this) {
      ReplicaInfo currentReplicaInfo =
          volumeMap.get(b.getBlockPoolId(), b.getBlockId());
      if (currentReplicaInfo == lastFoundReplicaInfo) {
        if (lastFoundReplicaInfo != null) {
          invalidate(b.getBlockPoolId(), new Block[] { lastFoundReplicaInfo });
        }
        FsVolumeReference ref =
            volumes.getNextVolume(storageType, b.getNumBytes());
        FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
        // create a temporary file to hold block in the designated volume
        File f;
        try {
          f = v.createTmpFile(b.getBlockPoolId(), b.getLocalBlock());
        } catch (IOException e) {
          IOUtils.cleanup(null, ref);
          throw e;
        }
        ReplicaInPipeline newReplicaInfo =
            new ReplicaInPipeline(b.getBlockId(), b.getGenerationStamp(), v,
                f.getParentFile(), 0);
        volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
        return new ReplicaHandler(newReplicaInfo, ref);
      } else {
        if (!(currentReplicaInfo.getGenerationStamp() < b
            .getGenerationStamp() && currentReplicaInfo instanceof ReplicaInPipeline)) {
          throw new ReplicaAlreadyExistsException("Block " + b
              + " already exists in state " + currentReplicaInfo.getState()
              + " and thus cannot be created.");
        }
        lastFoundReplicaInfo = currentReplicaInfo;
      }
    }

    // Hang too long, just bail out. This is not supposed to happen.
    long writerStopMs = Time.monotonicNow() - startTimeMs;
    if (writerStopMs > writerStopTimeoutMs) {
      LOG.warn("Unable to stop existing writer for block " + b + " after " 
          + writerStopMs + " miniseconds.");
      throw new IOException("Unable to stop existing writer for block " + b
          + " after " + writerStopMs + " miniseconds.");
    }

    // Stop the previous writer
    ((ReplicaInPipeline) lastFoundReplicaInfo)
        .stopWriter(writerStopTimeoutMs);
  } while (true);
}
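The (pool id, getBlockId()) lookup drives the retry loop: when it finds an older-generation ReplicaInPipeline, the writer is stopped, and on the next pass the stale replica is invalidated before the temporary file is created; any other pre-existing replica aborts with ReplicaAlreadyExistsException.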
 
Example 9
Source File: BlockReaderLocalLegacy.java    From hadoop and big-c with Apache License 2.0
private BlockReaderLocalLegacy(DFSClient.Conf conf, String hdfsfile,
    ExtendedBlock block, Token<BlockTokenIdentifier> token, long startOffset,
    long length, BlockLocalPathInfo pathinfo, DataChecksum checksum,
    boolean verifyChecksum, FileInputStream dataIn, long firstChunkOffset,
    FileInputStream checksumIn) throws IOException {
  this.filename = hdfsfile;
  this.checksum = checksum;
  this.verifyChecksum = verifyChecksum;
  this.startOffset = Math.max(startOffset, 0);
  this.blockId = block.getBlockId();

  bytesPerChecksum = this.checksum.getBytesPerChecksum();
  checksumSize = this.checksum.getChecksumSize();

  this.dataIn = dataIn;
  this.checksumIn = checksumIn;
  this.offsetFromChunkBoundary = (int) (startOffset-firstChunkOffset);

  int chunksPerChecksumRead = getSlowReadBufferNumChunks(
      conf.shortCircuitBufferSize, bytesPerChecksum);
  slowReadBuff = bufferPool.getBuffer(bytesPerChecksum * chunksPerChecksumRead);
  checksumBuff = bufferPool.getBuffer(checksumSize * chunksPerChecksumRead);
  // Initially the buffers have nothing to read.
  slowReadBuff.flip();
  checksumBuff.flip();
  boolean success = false;
  try {
    // Skip both input streams to beginning of the chunk containing startOffset
    IOUtils.skipFully(dataIn, firstChunkOffset);
    if (checksumIn != null) {
      long checkSumOffset = (firstChunkOffset / bytesPerChecksum) * checksumSize;
      IOUtils.skipFully(checksumIn, checkSumOffset);
    }
    success = true;
  } finally {
    if (!success) {
      bufferPool.returnBuffer(slowReadBuff);
      bufferPool.returnBuffer(checksumBuff);
    }
  }
}
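Only block.getBlockId() is retained, in the reader's blockId field; once the local data and checksum FileInputStreams have been handed in, the legacy short-circuit reader no longer needs the rest of the ExtendedBlock.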
 
Example 10
Source File: RemoteBlockReader.java    From hadoop and big-c with Apache License 2.0
/**
 * Create a new BlockReader specifically to satisfy a read.
 * This method also sends the OP_READ_BLOCK request.
 *
 * @param file  File location
 * @param block  The block object
 * @param blockToken  The block token for security
 * @param startOffset  The read offset, relative to block head
 * @param len  The number of bytes to read
 * @param bufferSize  The IO buffer size (not the client buffer size)
 * @param verifyChecksum  Whether to verify checksum
 * @param clientName  Client name
 * @return New BlockReader instance, or null on error.
 */
public static RemoteBlockReader newBlockReader(String file,
                                   ExtendedBlock block, 
                                   Token<BlockTokenIdentifier> blockToken,
                                   long startOffset, long len,
                                   int bufferSize, boolean verifyChecksum,
                                   String clientName, Peer peer,
                                   DatanodeID datanodeID,
                                   PeerCache peerCache,
                                   CachingStrategy cachingStrategy)
                                     throws IOException {
  // in and out will be closed when sock is closed (by the caller)
  final DataOutputStream out =
      new DataOutputStream(new BufferedOutputStream(peer.getOutputStream()));
  new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
      verifyChecksum, cachingStrategy);
  
  //
  // Get bytes in block, set streams
  //

  DataInputStream in = new DataInputStream(
      new BufferedInputStream(peer.getInputStream(), bufferSize));
  
  BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
      PBHelper.vintPrefixed(in));
  RemoteBlockReader2.checkSuccess(status, peer, block, file);
  ReadOpChecksumInfoProto checksumInfo =
    status.getReadOpChecksumInfo();
  DataChecksum checksum = DataTransferProtoUtil.fromProto(
      checksumInfo.getChecksum());
  //Warning when we get CHECKSUM_NULL?
  
  // Read the first chunk offset.
  long firstChunkOffset = checksumInfo.getChunkOffset();
  
  if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
      firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
    throw new IOException("BlockReader: error in first chunk offset (" +
                          firstChunkOffset + ") startOffset is " + 
                          startOffset + " for file " + file);
  }

  return new RemoteBlockReader(file, block.getBlockPoolId(), block.getBlockId(),
      in, checksum, verifyChecksum, startOffset, firstChunkOffset, len,
      peer, datanodeID, peerCache);
}
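The checkSuccess() call delegates to the helper shown in Example 3, whose failure message includes the pool id, getBlockId(), and generation stamp of the requested block.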
 
Example 11
Source File: RemoteBlockReader2.java    From hadoop and big-c with Apache License 2.0
/**
 * Create a new BlockReader specifically to satisfy a read.
 * This method also sends the OP_READ_BLOCK request.
 *
 * @param file  File location
 * @param block  The block object
 * @param blockToken  The block token for security
 * @param startOffset  The read offset, relative to block head
 * @param len  The number of bytes to read
 * @param verifyChecksum  Whether to verify checksum
 * @param clientName  Client name
 * @param peer  The Peer to use
 * @param datanodeID  The DatanodeID this peer is connected to
 * @return New BlockReader instance, or null on error.
 */
public static BlockReader newBlockReader(String file,
                                   ExtendedBlock block,
                                   Token<BlockTokenIdentifier> blockToken,
                                   long startOffset, long len,
                                   boolean verifyChecksum,
                                   String clientName,
                                   Peer peer, DatanodeID datanodeID,
                                   PeerCache peerCache,
                                   CachingStrategy cachingStrategy) throws IOException {
  // in and out will be closed when sock is closed (by the caller)
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
        peer.getOutputStream()));
  new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
      verifyChecksum, cachingStrategy);

  //
  // Get bytes in block
  //
  DataInputStream in = new DataInputStream(peer.getInputStream());

  BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
      PBHelper.vintPrefixed(in));
  checkSuccess(status, peer, block, file);
  ReadOpChecksumInfoProto checksumInfo =
    status.getReadOpChecksumInfo();
  DataChecksum checksum = DataTransferProtoUtil.fromProto(
      checksumInfo.getChecksum());
  //Warning when we get CHECKSUM_NULL?

  // Read the first chunk offset.
  long firstChunkOffset = checksumInfo.getChunkOffset();

  if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
      firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
    throw new IOException("BlockReader: error in first chunk offset (" +
                          firstChunkOffset + ") startOffset is " +
                          startOffset + " for file " + file);
  }

  return new RemoteBlockReader2(file, block.getBlockPoolId(), block.getBlockId(),
      checksum, verifyChecksum, startOffset, firstChunkOffset, len, peer,
      datanodeID, peerCache);
}
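Compared with Example 10, this RemoteBlockReader2 variant reads the response directly from peer.getInputStream() (there is no bufferSize parameter or BufferedInputStream) and does not pass the input stream to the constructor, but it forwards the same block.getBlockPoolId() and block.getBlockId() values.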
 