org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto Java Examples

The following examples show how to use org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto. Each example is taken from an open-source project; the source file and project are noted above the example.
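
Before the project examples, here is a minimal self-contained sketch of the pattern they all share: one side builds a BlockOpResponseProto and writes it length-delimited to a stream, the other side parses it back and branches on the status. The in-memory streams and the class name are purely illustrative (real HDFS traffic flows over datanode sockets); several examples below read the same varint length prefix explicitly via PBHelper.vintPrefixed(in) plus parseFrom instead of parseDelimitedFrom.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

public class BlockOpResponseRoundTrip {
  public static void main(String[] args) throws IOException {
    // "Datanode" side: build a response and frame it with a varint length prefix.
    BlockOpResponseProto response = BlockOpResponseProto.newBuilder()
        .setStatus(Status.ERROR_ACCESS_TOKEN)
        .setMessage("token expired")          // optional human-readable detail
        .build();
    ByteArrayOutputStream wire = new ByteArrayOutputStream();
    response.writeDelimitedTo(wire);

    // "Client" side: parse the delimited message back and inspect the status.
    ByteArrayInputStream in = new ByteArrayInputStream(wire.toByteArray());
    BlockOpResponseProto reply = BlockOpResponseProto.parseDelimitedFrom(in);
    if (reply.getStatus() != Status.SUCCESS) {
      System.out.println("status=" + reply.getStatus()
          + ", message=" + reply.getMessage());
    }
  }
}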
Example #1
Source File: DFSClient.java    From big-c with Apache License 2.0
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);

  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
        HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    String logInfo = "trying to read " + lb.getBlock() + " from datanode " + dn;
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);

    return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
 
Example #2
Source File: DataTransferProtoUtil.java    From hadoop with Apache License 2.0
public static void checkBlockOpStatus(
        BlockOpResponseProto response,
        String logInfo) throws IOException {
  if (response.getStatus() != Status.SUCCESS) {
    if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
        "Got access token error"
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    } else {
      throw new IOException(
        "Got error"
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    }
  }
}
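
As a caller-side sketch of how checkBlockOpStatus is typically used (mirroring Example #1): parse the vint-prefixed reply from the datanode stream, then let the helper translate any non-SUCCESS status into the right exception. The readAndCheck wrapper and its context argument are illustrative names, not Hadoop API.

import java.io.DataInputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;

public class BlockOpReplyCheck {
  /**
   * Parse a vint-prefixed BlockOpResponseProto from a datanode stream and
   * fail fast on a non-SUCCESS status. Assumes the stream is positioned at
   * the start of the reply.
   */
  static BlockOpResponseProto readAndCheck(DataInputStream in, String context)
      throws IOException {
    BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    // Throws InvalidBlockTokenException for ERROR_ACCESS_TOKEN,
    // a plain IOException for any other error status.
    DataTransferProtoUtil.checkBlockOpStatus(reply, context);
    return reply;
  }
}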
 
Example #3
Source File: DataTransferProtoUtil.java    From big-c with Apache License 2.0
public static void checkBlockOpStatus(
        BlockOpResponseProto response,
        String logInfo) throws IOException {
  if (response.getStatus() != Status.SUCCESS) {
    if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
        "Got access token error"
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    } else {
      throw new IOException(
        "Got error"
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    }
  }
}
 
Example #4
Source File: DFSClient.java    From hadoop with Apache License 2.0
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);

  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
        HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    String logInfo = "trying to read " + lb.getBlock() + " from datanode " + dn;
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);

    return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
 
Example #5
Source File: DFSTestUtil.java    From big-c with Apache License 2.0
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
 
Example #6
Source File: DFSTestUtil.java    From hadoop with Apache License 2.0
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
 
Example #7
Source File: Dispatcher.java    From big-c with Apache License 2.0
/** Receive a block copy response from the input stream */
private void receiveResponse(DataInputStream in) throws IOException {
  BlockOpResponseProto response =
      BlockOpResponseProto.parseFrom(vintPrefixed(in));
  while (response.getStatus() == Status.IN_PROGRESS) {
    // read intermediate responses
    response = BlockOpResponseProto.parseFrom(vintPrefixed(in));
  }
  String logInfo = "block move is failed";
  DataTransferProtoUtil.checkBlockOpStatus(response, logInfo);
}
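
To make the IN_PROGRESS polling contract concrete, this self-contained sketch simulates a datanode that emits one keep-alive before the terminal status, with a client loop shaped exactly like receiveResponse above. The in-memory streams are an assumption for the demo; on the wire, Dispatcher reads the same varint length prefix via vintPrefixed, which is compatible with the writeDelimitedTo framing used here.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

public class InProgressPolling {
  public static void main(String[] args) throws IOException {
    // "Datanode" side: a keep-alive followed by the terminal reply.
    ByteArrayOutputStream wire = new ByteArrayOutputStream();
    BlockOpResponseProto.newBuilder().setStatus(Status.IN_PROGRESS)
        .build().writeDelimitedTo(wire);
    BlockOpResponseProto.newBuilder().setStatus(Status.SUCCESS)
        .build().writeDelimitedTo(wire);

    // "Client" side: skip intermediate responses, keep only the final status.
    ByteArrayInputStream in = new ByteArrayInputStream(wire.toByteArray());
    BlockOpResponseProto response = BlockOpResponseProto.parseDelimitedFrom(in);
    while (response.getStatus() == Status.IN_PROGRESS) {
      response = BlockOpResponseProto.parseDelimitedFrom(in);
    }
    System.out.println("final status: " + response.getStatus()); // SUCCESS
  }
}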
 
Example #8
Source File: DataXceiver.java    From big-c with Apache License 2.0
private static void writeResponse(Status status, String message, OutputStream out)
throws IOException {
  BlockOpResponseProto.Builder response = BlockOpResponseProto.newBuilder()
    .setStatus(status);
  if (message != null) {
    response.setMessage(message);
  }
  response.build().writeDelimitedTo(out);
  out.flush();
}
 
Example #9
Source File: RemoteBlockReader2.java    From big-c with Apache License 2.0
static void checkSuccess(
    BlockOpResponseProto status, Peer peer,
    ExtendedBlock block, String file)
    throws IOException {
  String logInfo = "for OP_READ_BLOCK"
    + ", self=" + peer.getLocalAddressString()
    + ", remote=" + peer.getRemoteAddressString()
    + ", for file " + file
    + ", for pool " + block.getBlockPoolId()
    + " block " + block.getBlockId() + "_" + block.getGenerationStamp();
  DataTransferProtoUtil.checkBlockOpStatus(status, logInfo);
}
 
Example #10
Source File: DataXceiver.java    From big-c with Apache License 2.0
private void writeSuccessWithChecksumInfo(BlockSender blockSender,
    DataOutputStream out) throws IOException {

  ReadOpChecksumInfoProto ckInfo = ReadOpChecksumInfoProto.newBuilder()
    .setChecksum(DataTransferProtoUtil.toProto(blockSender.getChecksum()))
    .setChunkOffset(blockSender.getOffset())
    .build();
    
  BlockOpResponseProto response = BlockOpResponseProto.newBuilder()
    .setStatus(SUCCESS)
    .setReadOpChecksumInfo(ckInfo)
    .build();
  response.writeDelimitedTo(out);
  out.flush();
}
 
Example #11
Source File: DataXceiver.java    From big-c with Apache License 2.0
private void checkAccess(OutputStream out, final boolean reply, 
    final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> t,
    final Op op,
    final BlockTokenSecretManager.AccessMode mode) throws IOException {
  if (datanode.isBlockTokenEnabled) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Checking block access token for block '" + blk.getBlockId()
          + "' with mode '" + mode + "'");
    }
    try {
      datanode.blockPoolTokenSecretManager.checkAccess(t, null, blk, mode);
    } catch(InvalidToken e) {
      try {
        if (reply) {
          BlockOpResponseProto.Builder resp = BlockOpResponseProto.newBuilder()
            .setStatus(ERROR_ACCESS_TOKEN);
          if (mode == BlockTokenSecretManager.AccessMode.WRITE) {
            DatanodeRegistration dnR = 
              datanode.getDNRegistrationForBP(blk.getBlockPoolId());
            // NB: Unconditionally using the xfer addr w/o hostname
            resp.setFirstBadLink(dnR.getXferAddr());
          }
          resp.build().writeDelimitedTo(out);
          out.flush();
        }
        LOG.warn("Block token verification failed: op=" + op
            + ", remoteAddress=" + remoteAddress
            + ", message=" + e.getLocalizedMessage());
        throw e;
      } finally {
        IOUtils.closeStream(out);
      }
    }
  }
}
 
Example #12
Source File: TestBlockReplacement.java    From hadoop with Apache License 2.0
private boolean replaceBlock(
    ExtendedBlock block,
    DatanodeInfo source,
    DatanodeInfo sourceProxy,
    DatanodeInfo destination,
    StorageType targetStorageType) throws IOException, SocketException {
  Socket sock = new Socket();
  try {
    sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()),
        HdfsServerConstants.READ_TIMEOUT);
    sock.setKeepAlive(true);
    // sendRequest
    DataOutputStream out = new DataOutputStream(sock.getOutputStream());
    new Sender(out).replaceBlock(block, targetStorageType,
        BlockTokenSecretManager.DUMMY_TOKEN, source.getDatanodeUuid(),
        sourceProxy);
    out.flush();
    // receiveResponse
    DataInputStream reply = new DataInputStream(sock.getInputStream());

    BlockOpResponseProto proto =
        BlockOpResponseProto.parseDelimitedFrom(reply);
    while (proto.getStatus() == Status.IN_PROGRESS) {
      proto = BlockOpResponseProto.parseDelimitedFrom(reply);
    }
    return proto.getStatus() == Status.SUCCESS;
  } finally {
    sock.close();
  }
}
 
Example #13
Source File: TestDataTransferProtocol.java    From hadoop with Apache License 2.0
private void sendResponse(Status status, String firstBadLink,
    String message,
    DataOutputStream out)
throws IOException {
  Builder builder = BlockOpResponseProto.newBuilder().setStatus(status);
  if (firstBadLink != null) {
    builder.setFirstBadLink(firstBadLink);
  }
  if (message != null) {
    builder.setMessage(message);
  }
  builder.build()
    .writeDelimitedTo(out);
}
 
Example #14
Source File: DataXceiver.java    From hadoop with Apache License 2.0
private void checkAccess(OutputStream out, final boolean reply, 
    final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> t,
    final Op op,
    final BlockTokenSecretManager.AccessMode mode) throws IOException {
  if (datanode.isBlockTokenEnabled) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Checking block access token for block '" + blk.getBlockId()
          + "' with mode '" + mode + "'");
    }
    try {
      datanode.blockPoolTokenSecretManager.checkAccess(t, null, blk, mode);
    } catch(InvalidToken e) {
      try {
        if (reply) {
          BlockOpResponseProto.Builder resp = BlockOpResponseProto.newBuilder()
            .setStatus(ERROR_ACCESS_TOKEN);
          if (mode == BlockTokenSecretManager.AccessMode.WRITE) {
            DatanodeRegistration dnR = 
              datanode.getDNRegistrationForBP(blk.getBlockPoolId());
            // NB: Unconditionally using the xfer addr w/o hostname
            resp.setFirstBadLink(dnR.getXferAddr());
          }
          resp.build().writeDelimitedTo(out);
          out.flush();
        }
        LOG.warn("Block token verification failed: op=" + op
            + ", remoteAddress=" + remoteAddress
            + ", message=" + e.getLocalizedMessage());
        throw e;
      } finally {
        IOUtils.closeStream(out);
      }
    }
  }
}
 
Example #15
Source File: DataXceiver.java    From hadoop with Apache License 2.0
private void writeSuccessWithChecksumInfo(BlockSender blockSender,
    DataOutputStream out) throws IOException {

  ReadOpChecksumInfoProto ckInfo = ReadOpChecksumInfoProto.newBuilder()
    .setChecksum(DataTransferProtoUtil.toProto(blockSender.getChecksum()))
    .setChunkOffset(blockSender.getOffset())
    .build();
    
  BlockOpResponseProto response = BlockOpResponseProto.newBuilder()
    .setStatus(SUCCESS)
    .setReadOpChecksumInfo(ckInfo)
    .build();
  response.writeDelimitedTo(out);
  out.flush();
}
 
Example #16
Source File: DataXceiver.java    From hadoop with Apache License 2.0
private static void writeResponse(Status status, String message, OutputStream out)
throws IOException {
  BlockOpResponseProto.Builder response = BlockOpResponseProto.newBuilder()
    .setStatus(status);
  if (message != null) {
    response.setMessage(message);
  }
  response.build().writeDelimitedTo(out);
  out.flush();
}
 
Example #17
Source File: TestDataTransferProtocol.java    From big-c with Apache License 2.0
private void sendResponse(Status status, String firstBadLink,
    String message,
    DataOutputStream out)
throws IOException {
  Builder builder = BlockOpResponseProto.newBuilder().setStatus(status);
  if (firstBadLink != null) {
    builder.setFirstBadLink(firstBadLink);
  }
  if (message != null) {
    builder.setMessage(message);
  }
  builder.build()
    .writeDelimitedTo(out);
}
 
Example #18
Source File: Dispatcher.java    From hadoop with Apache License 2.0
/** Receive a block copy response from the input stream */
private void receiveResponse(DataInputStream in) throws IOException {
  BlockOpResponseProto response =
      BlockOpResponseProto.parseFrom(vintPrefixed(in));
  while (response.getStatus() == Status.IN_PROGRESS) {
    // read intermediate responses
    response = BlockOpResponseProto.parseFrom(vintPrefixed(in));
  }
  String logInfo = "block move is failed";
  DataTransferProtoUtil.checkBlockOpStatus(response, logInfo);
}
 
Example #19
Source File: TestBlockReplacement.java    From big-c with Apache License 2.0
private boolean replaceBlock(
    ExtendedBlock block,
    DatanodeInfo source,
    DatanodeInfo sourceProxy,
    DatanodeInfo destination,
    StorageType targetStorageType) throws IOException, SocketException {
  Socket sock = new Socket();
  try {
    sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()),
        HdfsServerConstants.READ_TIMEOUT);
    sock.setKeepAlive(true);
    // sendRequest
    DataOutputStream out = new DataOutputStream(sock.getOutputStream());
    new Sender(out).replaceBlock(block, targetStorageType,
        BlockTokenSecretManager.DUMMY_TOKEN, source.getDatanodeUuid(),
        sourceProxy);
    out.flush();
    // receiveResponse
    DataInputStream reply = new DataInputStream(sock.getInputStream());

    BlockOpResponseProto proto =
        BlockOpResponseProto.parseDelimitedFrom(reply);
    while (proto.getStatus() == Status.IN_PROGRESS) {
      proto = BlockOpResponseProto.parseDelimitedFrom(reply);
    }
    return proto.getStatus() == Status.SUCCESS;
  } finally {
    sock.close();
  }
}
 
Example #20
Source File: RemoteBlockReader2.java    From hadoop with Apache License 2.0 5 votes vote down vote up
static void checkSuccess(
    BlockOpResponseProto status, Peer peer,
    ExtendedBlock block, String file)
    throws IOException {
  String logInfo = "for OP_READ_BLOCK"
    + ", self=" + peer.getLocalAddressString()
    + ", remote=" + peer.getRemoteAddressString()
    + ", for file " + file
    + ", for pool " + block.getBlockPoolId()
    + " block " + block.getBlockId() + "_" + block.getGenerationStamp();
  DataTransferProtoUtil.checkBlockOpStatus(status, logInfo);
}
 
Example #21
Source File: TestTransferRbw.java    From big-c with Apache License 2.0
@Test
public void testTransferRbw() throws Exception {
  final HdfsConfiguration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf
      ).numDataNodes(REPLICATION).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem fs = cluster.getFileSystem();

    //create a file, write some data and leave it open. 
    final Path p = new Path("/foo");
    final int size = (1 << 16) + RAN.nextInt(1 << 16);
    LOG.info("size = " + size);
    final FSDataOutputStream out = fs.create(p, REPLICATION);
    final byte[] bytes = new byte[1024];
    for(int remaining = size; remaining > 0; ) {
      RAN.nextBytes(bytes);
      final int len = bytes.length < remaining? bytes.length: remaining;
      out.write(bytes, 0, len);
      out.hflush();
      remaining -= len;
    }

    //get the RBW
    final ReplicaBeingWritten oldrbw;
    final DataNode newnode;
    final DatanodeInfo newnodeinfo;
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    {
      final DataNode oldnode = cluster.getDataNodes().get(0);
      oldrbw = getRbw(oldnode, bpid);
      LOG.info("oldrbw = " + oldrbw);
      
      //add a datanode
      cluster.startDataNodes(conf, 1, true, null, null);
      newnode = cluster.getDataNodes().get(REPLICATION);
      
      final DatanodeInfo oldnodeinfo;
      {
        final DatanodeInfo[] datatnodeinfos = cluster.getNameNodeRpc(
            ).getDatanodeReport(DatanodeReportType.LIVE);
        Assert.assertEquals(2, datatnodeinfos.length);
        int i = 0;
        for(DatanodeRegistration dnReg = newnode.getDNRegistrationForBP(bpid);
            i < datatnodeinfos.length && !datatnodeinfos[i].equals(dnReg); i++);
        Assert.assertTrue(i < datatnodeinfos.length);
        newnodeinfo = datatnodeinfos[i];
        oldnodeinfo = datatnodeinfos[1 - i];
      }
      
      //transfer RBW
      final ExtendedBlock b = new ExtendedBlock(bpid, oldrbw.getBlockId(), oldrbw.getBytesAcked(),
          oldrbw.getGenerationStamp());
      final BlockOpResponseProto s = DFSTestUtil.transferRbw(
          b, DFSClientAdapter.getDFSClient(fs), oldnodeinfo, newnodeinfo);
      Assert.assertEquals(Status.SUCCESS, s.getStatus());
    }

    //check new rbw
    final ReplicaBeingWritten newrbw = getRbw(newnode, bpid);
    LOG.info("newrbw = " + newrbw);
    Assert.assertEquals(oldrbw.getBlockId(), newrbw.getBlockId());
    Assert.assertEquals(oldrbw.getGenerationStamp(), newrbw.getGenerationStamp());
    Assert.assertEquals(oldrbw.getVisibleLength(), newrbw.getVisibleLength());

    LOG.info("DONE");
  } finally {
    cluster.shutdown();
  }
}
 
Example #22
Source File: RemoteBlockReader.java    From big-c with Apache License 2.0
/**
 * Create a new BlockReader specifically to satisfy a read.
 * This method also sends the OP_READ_BLOCK request.
 *
 * @param file  File location
 * @param block  The block object
 * @param blockToken  The block token for security
 * @param startOffset  The read offset, relative to block head
 * @param len  The number of bytes to read
 * @param bufferSize  The IO buffer size (not the client buffer size)
 * @param verifyChecksum  Whether to verify checksum
 * @param clientName  Client name
 * @return New BlockReader instance, or null on error.
 */
public static RemoteBlockReader newBlockReader(String file,
                                   ExtendedBlock block, 
                                   Token<BlockTokenIdentifier> blockToken,
                                   long startOffset, long len,
                                   int bufferSize, boolean verifyChecksum,
                                   String clientName, Peer peer,
                                   DatanodeID datanodeID,
                                   PeerCache peerCache,
                                   CachingStrategy cachingStrategy)
                                     throws IOException {
  // in and out will be closed when sock is closed (by the caller)
  final DataOutputStream out =
      new DataOutputStream(new BufferedOutputStream(peer.getOutputStream()));
  new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
      verifyChecksum, cachingStrategy);
  
  //
  // Get bytes in block, set streams
  //

  DataInputStream in = new DataInputStream(
      new BufferedInputStream(peer.getInputStream(), bufferSize));
  
  BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
      PBHelper.vintPrefixed(in));
  RemoteBlockReader2.checkSuccess(status, peer, block, file);
  ReadOpChecksumInfoProto checksumInfo =
    status.getReadOpChecksumInfo();
  DataChecksum checksum = DataTransferProtoUtil.fromProto(
      checksumInfo.getChecksum());
  //Warning when we get CHECKSUM_NULL?
  
  // Read the first chunk offset.
  long firstChunkOffset = checksumInfo.getChunkOffset();
  
  if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
      firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
    throw new IOException("BlockReader: error in first chunk offset (" +
                          firstChunkOffset + ") startOffset is " + 
                          startOffset + " for file " + file);
  }

  return new RemoteBlockReader(file, block.getBlockPoolId(), block.getBlockId(),
      in, checksum, verifyChecksum, startOffset, firstChunkOffset, len,
      peer, datanodeID, peerCache);
}
 
Example #23
Source File: DataXceiver.java    From big-c with Apache License 2.0
@Override
public void blockChecksum(final ExtendedBlock block,
    final Token<BlockTokenIdentifier> blockToken) throws IOException {
  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  checkAccess(out, true, block, blockToken,
      Op.BLOCK_CHECKSUM, BlockTokenSecretManager.AccessMode.READ);
  // client side now can specify a range of the block for checksum
  long requestLength = block.getNumBytes();
  Preconditions.checkArgument(requestLength >= 0);
  long visibleLength = datanode.data.getReplicaVisibleLength(block);
  boolean partialBlk = requestLength < visibleLength;

  updateCurrentThreadName("Reading metadata for block " + block);
  final LengthInputStream metadataIn = datanode.data
      .getMetaDataInputStream(block);
  
  final DataInputStream checksumIn = new DataInputStream(
      new BufferedInputStream(metadataIn, HdfsConstants.IO_FILE_BUFFER_SIZE));
  updateCurrentThreadName("Getting checksum for block " + block);
  try {
    //read metadata file
    final BlockMetadataHeader header = BlockMetadataHeader
        .readHeader(checksumIn);
    final DataChecksum checksum = header.getChecksum();
    final int csize = checksum.getChecksumSize();
    final int bytesPerCRC = checksum.getBytesPerChecksum();
    final long crcPerBlock = csize <= 0 ? 0 : 
      (metadataIn.getLength() - BlockMetadataHeader.getHeaderSize()) / csize;

    final MD5Hash md5 = partialBlk && crcPerBlock > 0 ? 
        calcPartialBlockChecksum(block, requestLength, checksum, checksumIn)
          : MD5Hash.digest(checksumIn);
    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
          + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
    }

    //write reply
    BlockOpResponseProto.newBuilder()
      .setStatus(SUCCESS)
      .setChecksumResponse(OpBlockChecksumResponseProto.newBuilder()             
        .setBytesPerCrc(bytesPerCRC)
        .setCrcPerBlock(crcPerBlock)
        .setMd5(ByteString.copyFrom(md5.getDigest()))
        .setCrcType(PBHelper.convert(checksum.getChecksumType())))
      .build()
      .writeDelimitedTo(out);
    out.flush();
  } catch (IOException ioe) {
    LOG.info("blockChecksum " + block + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(metadataIn);
  }

  //update metrics
  datanode.metrics.addBlockChecksumOp(elapsed());
}
 
Example #24
Source File: BlockReaderFactory.java    From big-c with Apache License 2.0
/**
 * Request file descriptors from a DomainPeer.
 *
 * @param peer   The peer to use for communication.
 * @param slot   If non-null, the shared memory slot to associate with the 
 *               new ShortCircuitReplica.
 * 
 * @return  A ShortCircuitReplica object if we could communicate with the
 *          datanode; null, otherwise. 
 * @throws  IOException If we encountered an I/O exception while communicating
 *          with the datanode.
 */
private ShortCircuitReplicaInfo requestFileDescriptors(DomainPeer peer,
        Slot slot) throws IOException {
  ShortCircuitCache cache = clientContext.getShortCircuitCache();
  final DataOutputStream out =
      new DataOutputStream(new BufferedOutputStream(peer.getOutputStream()));
  SlotId slotId = slot == null ? null : slot.getSlotId();
  new Sender(out).requestShortCircuitFds(block, token, slotId, 1,
      failureInjector.getSupportsReceiptVerification());
  DataInputStream in = new DataInputStream(peer.getInputStream());
  BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(
      PBHelper.vintPrefixed(in));
  DomainSocket sock = peer.getDomainSocket();
  failureInjector.injectRequestFileDescriptorsFailure();
  switch (resp.getStatus()) {
  case SUCCESS:
    byte buf[] = new byte[1];
    FileInputStream fis[] = new FileInputStream[2];
    sock.recvFileInputStreams(fis, buf, 0, buf.length);
    ShortCircuitReplica replica = null;
    try {
      ExtendedBlockId key =
          new ExtendedBlockId(block.getBlockId(), block.getBlockPoolId());
      if (buf[0] == USE_RECEIPT_VERIFICATION.getNumber()) {
        LOG.trace("Sending receipt verification byte for slot " + slot);
        sock.getOutputStream().write(0);
      }
      replica = new ShortCircuitReplica(key, fis[0], fis[1], cache,
          Time.monotonicNow(), slot);
      return new ShortCircuitReplicaInfo(replica);
    } catch (IOException e) {
      // This indicates an error reading from disk, or a format error.  Since
      // it's not a socket communication problem, we return null rather than
      // throwing an exception.
      LOG.warn(this + ": error creating ShortCircuitReplica.", e);
      return null;
    } finally {
      if (replica == null) {
        IOUtils.cleanup(DFSClient.LOG, fis[0], fis[1]);
      }
    }
  case ERROR_UNSUPPORTED:
    if (!resp.hasShortCircuitAccessVersion()) {
      LOG.warn("short-circuit read access is disabled for " +
          "DataNode " + datanode + ".  reason: " + resp.getMessage());
      clientContext.getDomainSocketFactory()
          .disableShortCircuitForPath(pathInfo.getPath());
    } else {
      LOG.warn("short-circuit read access for the file " +
          fileName + " is disabled for DataNode " + datanode +
          ".  reason: " + resp.getMessage());
    }
    return null;
  case ERROR_ACCESS_TOKEN:
    String msg = "access control error while " +
        "attempting to set up short-circuit access to " +
        fileName + resp.getMessage();
    if (LOG.isDebugEnabled()) {
      LOG.debug(this + ":" + msg);
    }
    return new ShortCircuitReplicaInfo(new InvalidToken(msg));
  default:
    LOG.warn(this + ": unknown response code " + resp.getStatus() +
        " while attempting to set up short-circuit access. " +
        resp.getMessage());
    clientContext.getDomainSocketFactory()
        .disableShortCircuitForPath(pathInfo.getPath());
    return null;
  }
}
 
Example #25
Source File: RemoteBlockReader2.java    From big-c with Apache License 2.0
/**
 * Create a new BlockReader specifically to satisfy a read.
 * This method also sends the OP_READ_BLOCK request.
 *
 * @param file  File location
 * @param block  The block object
 * @param blockToken  The block token for security
 * @param startOffset  The read offset, relative to block head
 * @param len  The number of bytes to read
 * @param verifyChecksum  Whether to verify checksum
 * @param clientName  Client name
 * @param peer  The Peer to use
 * @param datanodeID  The DatanodeID this peer is connected to
 * @return New BlockReader instance, or null on error.
 */
public static BlockReader newBlockReader(String file,
                                   ExtendedBlock block,
                                   Token<BlockTokenIdentifier> blockToken,
                                   long startOffset, long len,
                                   boolean verifyChecksum,
                                   String clientName,
                                   Peer peer, DatanodeID datanodeID,
                                   PeerCache peerCache,
                                   CachingStrategy cachingStrategy) throws IOException {
  // in and out will be closed when sock is closed (by the caller)
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
        peer.getOutputStream()));
  new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
      verifyChecksum, cachingStrategy);

  //
  // Get bytes in block
  //
  DataInputStream in = new DataInputStream(peer.getInputStream());

  BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
      PBHelper.vintPrefixed(in));
  checkSuccess(status, peer, block, file);
  ReadOpChecksumInfoProto checksumInfo =
    status.getReadOpChecksumInfo();
  DataChecksum checksum = DataTransferProtoUtil.fromProto(
      checksumInfo.getChecksum());
  //Warning when we get CHECKSUM_NULL?

  // Read the first chunk offset.
  long firstChunkOffset = checksumInfo.getChunkOffset();

  if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
      firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
    throw new IOException("BlockReader: error in first chunk offset (" +
                          firstChunkOffset + ") startOffset is " +
                          startOffset + " for file " + file);
  }

  return new RemoteBlockReader2(file, block.getBlockPoolId(), block.getBlockId(),
      checksum, verifyChecksum, startOffset, firstChunkOffset, len, peer,
      datanodeID, peerCache);
}
 
Example #26
Source File: RemoteBlockReader.java    From hadoop with Apache License 2.0
/**
 * Create a new BlockReader specifically to satisfy a read.
 * This method also sends the OP_READ_BLOCK request.
 *
 * @param file  File location
 * @param block  The block object
 * @param blockToken  The block token for security
 * @param startOffset  The read offset, relative to block head
 * @param len  The number of bytes to read
 * @param bufferSize  The IO buffer size (not the client buffer size)
 * @param verifyChecksum  Whether to verify checksum
 * @param clientName  Client name
 * @return New BlockReader instance, or null on error.
 */
public static RemoteBlockReader newBlockReader(String file,
                                   ExtendedBlock block, 
                                   Token<BlockTokenIdentifier> blockToken,
                                   long startOffset, long len,
                                   int bufferSize, boolean verifyChecksum,
                                   String clientName, Peer peer,
                                   DatanodeID datanodeID,
                                   PeerCache peerCache,
                                   CachingStrategy cachingStrategy)
                                     throws IOException {
  // in and out will be closed when sock is closed (by the caller)
  final DataOutputStream out =
      new DataOutputStream(new BufferedOutputStream(peer.getOutputStream()));
  new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
      verifyChecksum, cachingStrategy);
  
  //
  // Get bytes in block, set streams
  //

  DataInputStream in = new DataInputStream(
      new BufferedInputStream(peer.getInputStream(), bufferSize));
  
  BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
      PBHelper.vintPrefixed(in));
  RemoteBlockReader2.checkSuccess(status, peer, block, file);
  ReadOpChecksumInfoProto checksumInfo =
    status.getReadOpChecksumInfo();
  DataChecksum checksum = DataTransferProtoUtil.fromProto(
      checksumInfo.getChecksum());
  //Warning when we get CHECKSUM_NULL?
  
  // Read the first chunk offset.
  long firstChunkOffset = checksumInfo.getChunkOffset();
  
  if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
      firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
    throw new IOException("BlockReader: error in first chunk offset (" +
                          firstChunkOffset + ") startOffset is " + 
                          startOffset + " for file " + file);
  }

  return new RemoteBlockReader(file, block.getBlockPoolId(), block.getBlockId(),
      in, checksum, verifyChecksum, startOffset, firstChunkOffset, len,
      peer, datanodeID, peerCache);
}
 
Example #27
Source File: TestTransferRbw.java    From hadoop with Apache License 2.0 4 votes vote down vote up
@Test
public void testTransferRbw() throws Exception {
  final HdfsConfiguration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf
      ).numDataNodes(REPLICATION).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem fs = cluster.getFileSystem();

    //create a file, write some data and leave it open. 
    final Path p = new Path("/foo");
    final int size = (1 << 16) + RAN.nextInt(1 << 16);
    LOG.info("size = " + size);
    final FSDataOutputStream out = fs.create(p, REPLICATION);
    final byte[] bytes = new byte[1024];
    for(int remaining = size; remaining > 0; ) {
      RAN.nextBytes(bytes);
      final int len = bytes.length < remaining? bytes.length: remaining;
      out.write(bytes, 0, len);
      out.hflush();
      remaining -= len;
    }

    //get the RBW
    final ReplicaBeingWritten oldrbw;
    final DataNode newnode;
    final DatanodeInfo newnodeinfo;
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    {
      final DataNode oldnode = cluster.getDataNodes().get(0);
      oldrbw = getRbw(oldnode, bpid);
      LOG.info("oldrbw = " + oldrbw);
      
      //add a datanode
      cluster.startDataNodes(conf, 1, true, null, null);
      newnode = cluster.getDataNodes().get(REPLICATION);
      
      final DatanodeInfo oldnodeinfo;
      {
        final DatanodeInfo[] datatnodeinfos = cluster.getNameNodeRpc(
            ).getDatanodeReport(DatanodeReportType.LIVE);
        Assert.assertEquals(2, datatnodeinfos.length);
        int i = 0;
        for(DatanodeRegistration dnReg = newnode.getDNRegistrationForBP(bpid);
            i < datatnodeinfos.length && !datatnodeinfos[i].equals(dnReg); i++);
        Assert.assertTrue(i < datatnodeinfos.length);
        newnodeinfo = datatnodeinfos[i];
        oldnodeinfo = datatnodeinfos[1 - i];
      }
      
      //transfer RBW
      final ExtendedBlock b = new ExtendedBlock(bpid, oldrbw.getBlockId(), oldrbw.getBytesAcked(),
          oldrbw.getGenerationStamp());
      final BlockOpResponseProto s = DFSTestUtil.transferRbw(
          b, DFSClientAdapter.getDFSClient(fs), oldnodeinfo, newnodeinfo);
      Assert.assertEquals(Status.SUCCESS, s.getStatus());
    }

    //check new rbw
    final ReplicaBeingWritten newrbw = getRbw(newnode, bpid);
    LOG.info("newrbw = " + newrbw);
    Assert.assertEquals(oldrbw.getBlockId(), newrbw.getBlockId());
    Assert.assertEquals(oldrbw.getGenerationStamp(), newrbw.getGenerationStamp());
    Assert.assertEquals(oldrbw.getVisibleLength(), newrbw.getVisibleLength());

    LOG.info("DONE");
  } finally {
    cluster.shutdown();
  }
}
 
Example #28
Source File: DataXceiver.java    From hadoop with Apache License 2.0 4 votes vote down vote up
@Override
public void blockChecksum(final ExtendedBlock block,
    final Token<BlockTokenIdentifier> blockToken) throws IOException {
  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  checkAccess(out, true, block, blockToken,
      Op.BLOCK_CHECKSUM, BlockTokenSecretManager.AccessMode.READ);
  // client side now can specify a range of the block for checksum
  long requestLength = block.getNumBytes();
  Preconditions.checkArgument(requestLength >= 0);
  long visibleLength = datanode.data.getReplicaVisibleLength(block);
  boolean partialBlk = requestLength < visibleLength;

  updateCurrentThreadName("Reading metadata for block " + block);
  final LengthInputStream metadataIn = datanode.data
      .getMetaDataInputStream(block);
  
  final DataInputStream checksumIn = new DataInputStream(
      new BufferedInputStream(metadataIn, HdfsConstants.IO_FILE_BUFFER_SIZE));
  updateCurrentThreadName("Getting checksum for block " + block);
  try {
    //read metadata file
    final BlockMetadataHeader header = BlockMetadataHeader
        .readHeader(checksumIn);
    final DataChecksum checksum = header.getChecksum();
    final int csize = checksum.getChecksumSize();
    final int bytesPerCRC = checksum.getBytesPerChecksum();
    final long crcPerBlock = csize <= 0 ? 0 : 
      (metadataIn.getLength() - BlockMetadataHeader.getHeaderSize()) / csize;

    final MD5Hash md5 = partialBlk && crcPerBlock > 0 ? 
        calcPartialBlockChecksum(block, requestLength, checksum, checksumIn)
          : MD5Hash.digest(checksumIn);
    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
          + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
    }

    //write reply
    BlockOpResponseProto.newBuilder()
      .setStatus(SUCCESS)
      .setChecksumResponse(OpBlockChecksumResponseProto.newBuilder()             
        .setBytesPerCrc(bytesPerCRC)
        .setCrcPerBlock(crcPerBlock)
        .setMd5(ByteString.copyFrom(md5.getDigest()))
        .setCrcType(PBHelper.convert(checksum.getChecksumType())))
      .build()
      .writeDelimitedTo(out);
    out.flush();
  } catch (IOException ioe) {
    LOG.info("blockChecksum " + block + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(metadataIn);
  }

  //update metrics
  datanode.metrics.addBlockChecksumOp(elapsed());
}
 
Example #29
Source File: BlockReaderFactory.java    From hadoop with Apache License 2.0
/**
 * Request file descriptors from a DomainPeer.
 *
 * @param peer   The peer to use for communication.
 * @param slot   If non-null, the shared memory slot to associate with the 
 *               new ShortCircuitReplica.
 * 
 * @return  A ShortCircuitReplica object if we could communicate with the
 *          datanode; null, otherwise. 
 * @throws  IOException If we encountered an I/O exception while communicating
 *          with the datanode.
 */
private ShortCircuitReplicaInfo requestFileDescriptors(DomainPeer peer,
        Slot slot) throws IOException {
  ShortCircuitCache cache = clientContext.getShortCircuitCache();
  final DataOutputStream out =
      new DataOutputStream(new BufferedOutputStream(peer.getOutputStream()));
  SlotId slotId = slot == null ? null : slot.getSlotId();
  new Sender(out).requestShortCircuitFds(block, token, slotId, 1,
      failureInjector.getSupportsReceiptVerification());
  DataInputStream in = new DataInputStream(peer.getInputStream());
  BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(
      PBHelper.vintPrefixed(in));
  DomainSocket sock = peer.getDomainSocket();
  failureInjector.injectRequestFileDescriptorsFailure();
  switch (resp.getStatus()) {
  case SUCCESS:
    byte buf[] = new byte[1];
    FileInputStream fis[] = new FileInputStream[2];
    sock.recvFileInputStreams(fis, buf, 0, buf.length);
    ShortCircuitReplica replica = null;
    try {
      ExtendedBlockId key =
          new ExtendedBlockId(block.getBlockId(), block.getBlockPoolId());
      if (buf[0] == USE_RECEIPT_VERIFICATION.getNumber()) {
        LOG.trace("Sending receipt verification byte for slot " + slot);
        sock.getOutputStream().write(0);
      }
      replica = new ShortCircuitReplica(key, fis[0], fis[1], cache,
          Time.monotonicNow(), slot);
      return new ShortCircuitReplicaInfo(replica);
    } catch (IOException e) {
      // This indicates an error reading from disk, or a format error.  Since
      // it's not a socket communication problem, we return null rather than
      // throwing an exception.
      LOG.warn(this + ": error creating ShortCircuitReplica.", e);
      return null;
    } finally {
      if (replica == null) {
        IOUtils.cleanup(DFSClient.LOG, fis[0], fis[1]);
      }
    }
  case ERROR_UNSUPPORTED:
    if (!resp.hasShortCircuitAccessVersion()) {
      LOG.warn("short-circuit read access is disabled for " +
          "DataNode " + datanode + ".  reason: " + resp.getMessage());
      clientContext.getDomainSocketFactory()
          .disableShortCircuitForPath(pathInfo.getPath());
    } else {
      LOG.warn("short-circuit read access for the file " +
          fileName + " is disabled for DataNode " + datanode +
          ".  reason: " + resp.getMessage());
    }
    return null;
  case ERROR_ACCESS_TOKEN:
    String msg = "access control error while " +
        "attempting to set up short-circuit access to " +
        fileName + resp.getMessage();
    if (LOG.isDebugEnabled()) {
      LOG.debug(this + ":" + msg);
    }
    return new ShortCircuitReplicaInfo(new InvalidToken(msg));
  default:
    LOG.warn(this + ": unknown response code " + resp.getStatus() +
        " while attempting to set up short-circuit access. " +
        resp.getMessage());
    clientContext.getDomainSocketFactory()
        .disableShortCircuitForPath(pathInfo.getPath());
    return null;
  }
}
 
Example #30
Source File: RemoteBlockReader2.java    From hadoop with Apache License 2.0
/**
 * Create a new BlockReader specifically to satisfy a read.
 * This method also sends the OP_READ_BLOCK request.
 *
 * @param file  File location
 * @param block  The block object
 * @param blockToken  The block token for security
 * @param startOffset  The read offset, relative to block head
 * @param len  The number of bytes to read
 * @param verifyChecksum  Whether to verify checksum
 * @param clientName  Client name
 * @param peer  The Peer to use
 * @param datanodeID  The DatanodeID this peer is connected to
 * @return New BlockReader instance, or null on error.
 */
public static BlockReader newBlockReader(String file,
                                   ExtendedBlock block,
                                   Token<BlockTokenIdentifier> blockToken,
                                   long startOffset, long len,
                                   boolean verifyChecksum,
                                   String clientName,
                                   Peer peer, DatanodeID datanodeID,
                                   PeerCache peerCache,
                                   CachingStrategy cachingStrategy) throws IOException {
  // in and out will be closed when sock is closed (by the caller)
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
        peer.getOutputStream()));
  new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
      verifyChecksum, cachingStrategy);

  //
  // Get bytes in block
  //
  DataInputStream in = new DataInputStream(peer.getInputStream());

  BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
      PBHelper.vintPrefixed(in));
  checkSuccess(status, peer, block, file);
  ReadOpChecksumInfoProto checksumInfo =
    status.getReadOpChecksumInfo();
  DataChecksum checksum = DataTransferProtoUtil.fromProto(
      checksumInfo.getChecksum());
  //Warning when we get CHECKSUM_NULL?

  // Read the first chunk offset.
  long firstChunkOffset = checksumInfo.getChunkOffset();

  if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
      firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
    throw new IOException("BlockReader: error in first chunk offset (" +
                          firstChunkOffset + ") startOffset is " +
                          startOffset + " for file " + file);
  }

  return new RemoteBlockReader2(file, block.getBlockPoolId(), block.getBlockId(),
      checksum, verifyChecksum, startOffset, firstChunkOffset, len, peer,
      datanodeID, peerCache);
}