org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status Java Examples

The following examples show how to use org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status. Each example is taken from an open-source project; its source file, project, and license are noted in the heading above the code.
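Before the individual examples, here is a minimal sketch of the most common pattern: reading a BlockOpResponseProto from a datanode reply stream and branching on its Status. The helper name expectSuccess and the surrounding stream handling are illustrative assumptions, not code from the projects below.

import java.io.DataInputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

// Hypothetical helper: read one delimited response and fail unless it is SUCCESS.
static void expectSuccess(DataInputStream in) throws IOException {
  BlockOpResponseProto response = BlockOpResponseProto.parseDelimitedFrom(in);
  Status status = response.getStatus();
  if (status != Status.SUCCESS) {
    // ERROR_ACCESS_TOKEN usually means the block token was rejected.
    throw new IOException("Datanode returned " + status
        + (response.hasMessage() ? ": " + response.getMessage() : ""));
  }
}
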
Example #1
Source File: FanOutOneBlockAsyncDFSOutput.java    From hbase with Apache License 2.0
@Override
protected void channelRead0(ChannelHandlerContext ctx, PipelineAckProto ack) throws Exception {
  Status reply = getStatus(ack);
  if (reply != Status.SUCCESS) {
    failed(ctx.channel(), () -> new IOException("Bad response " + reply + " for block " +
      block + " from datanode " + ctx.channel().remoteAddress()));
    return;
  }
  if (PipelineAck.isRestartOOBStatus(reply)) {
    failed(ctx.channel(), () -> new IOException("Restart response " + reply + " for block " +
      block + " from datanode " + ctx.channel().remoteAddress()));
    return;
  }
  if (ack.getSeqno() == HEART_BEAT_SEQNO) {
    return;
  }
  completed(ctx.channel());
}
 
Example #2
Source File: DataXceiver.java    From big-c with Apache License 2.0
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes) throws IOException {
  checkAccess(socketOut, true, blk, blockToken,
      Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
  previousOpClientName = clientName;
  updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);

  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  try {
    datanode.transferReplicaForPipelineRecovery(blk, targets,
        targetStorageTypes, clientName);
    writeResponse(Status.SUCCESS, null, out);
  } catch (IOException ioe) {
    LOG.info("transferBlock " + blk + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
  }
}
 
Example #3
Source File: PipelineAck.java    From big-c with Apache License 2.0
/**
 * Returns the OOB status if this ack contains one. 
 * @return null if it is not an OOB ack.
 */
public Status getOOBStatus() {
  // Normal data transfer acks will have a valid sequence number, so
  // this will return right away in most cases.
  if (getSeqno() != UNKOWN_SEQNO) {
    return null;
  }
  for (Status s : proto.getReplyList()) {
    // The following check is valid because protobuf guarantees to
    // preserve the ordering of enum elements.
    if (s.getNumber() >= OOB_START && s.getNumber() <= OOB_END) {
      return s;
    }
  }
  return null;
}
 
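Since getOOBStatus() above returns null for normal acks, a caller typically checks for an out-of-band status before matching the ack against an outstanding packet. The following is a hedged sketch of that receive step; processOneAck, handleOOB and handleDataAck are hypothetical names, not Hadoop APIs.

// Sketch only, assuming the PipelineAck and Status imports used elsewhere
// on this page; the handler methods are placeholders.
void processOneAck(java.io.InputStream ackIn) throws java.io.IOException {
  PipelineAck ack = new PipelineAck();
  ack.readFields(ackIn);              // parse one delimited ack from the stream
  Status oob = ack.getOOBStatus();
  if (oob != null) {
    handleOOB(oob);                   // out-of-band, e.g. OOB_RESTART; no seqno to match
  } else {
    handleDataAck(ack.getSeqno(), ack.isSuccess());  // normal pipeline ack
  }
}
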
Example #4
Source File: PipelineAck.java    From big-c with Apache License 2.0
/**
 * Constructor
 * @param seqno sequence number
 * @param replies an array of replies
 * @param downstreamAckTimeNanos ack RTT in nanoseconds, 0 if no next DN in pipeline
 */
public PipelineAck(long seqno, int[] replies,
                   long downstreamAckTimeNanos) {
  ArrayList<Status> statusList = Lists.newArrayList();
  ArrayList<Integer> flagList = Lists.newArrayList();
  for (int r : replies) {
    statusList.add(StatusFormat.getStatus(r));
    flagList.add(r);
  }
  proto = PipelineAckProto.newBuilder()
    .setSeqno(seqno)
    .addAllReply(statusList)
    .addAllFlag(flagList)
    .setDownstreamAckTimeNanos(downstreamAckTimeNanos)
    .build();
}
 
Example #5
Source File: DataTransferProtoUtil.java    From big-c with Apache License 2.0
public static void checkBlockOpStatus(
        BlockOpResponseProto response,
        String logInfo) throws IOException {
  if (response.getStatus() != Status.SUCCESS) {
    if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
        "Got access token error"
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    } else {
      throw new IOException(
        "Got error"
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    }
  }
}
 
Example #6
Source File: TestClientBlockVerification.java    From hadoop with Apache License 2.0
/**
 * Test various unaligned reads to make sure that we properly
 * account even when we don't start or end on a checksum boundary
 */
@Test
public void testUnalignedReads() throws Exception {
  int startOffsets[] = new int[] { 0, 3, 129 };
  int lengths[] = new int[] { 30, 300, 512, 513, 1025 };
  for (int startOffset : startOffsets) {
    for (int length : lengths) {
      DFSClient.LOG.info("Testing startOffset = " + startOffset + " and " +
                         " len=" + length);
      RemoteBlockReader2 reader = (RemoteBlockReader2)spy(
          util.getBlockReader(testBlock, startOffset, length));
      util.readAndCheckEOS(reader, length, true);
      verify(reader).sendReadResult(Status.CHECKSUM_OK);
      reader.close();
    }
  }
}
 
Example #7
Source File: TestDataTransferProtocol.java    From hadoop with Apache License 2.0
private void writeZeroLengthPacket(ExtendedBlock block, String description)
throws IOException {
  PacketHeader hdr = new PacketHeader(
    8,                   // size of packet
    block.getNumBytes(), // OffsetInBlock
    100,                 // sequencenumber
    true,                // lastPacketInBlock
    0,                   // chunk length
    false);               // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);           // zero checksum

  //ok finally write a block with 0 len
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new int[] {
      PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.SUCCESS)
  }).write(recvOut);
  sendRecvData(description, false);
}
 
Example #8
Source File: TestDataTransferProtocol.java    From big-c with Apache License 2.0
private void testWrite(ExtendedBlock block, BlockConstructionStage stage, long newGS,
    String description, Boolean eofExcepted) throws IOException {
  sendBuf.reset();
  recvBuf.reset();
  writeBlock(block, stage, newGS, DEFAULT_CHECKSUM);
  if (eofExcepted) {
    sendResponse(Status.ERROR, null, null, recvOut);
    sendRecvData(description, true);
  } else if (stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
    //ok finally write a block with 0 len
    sendResponse(Status.SUCCESS, "", null, recvOut);
    sendRecvData(description, false);
  } else {
    writeZeroLengthPacket(block, description);
  }
}
 
Example #9
Source File: RemoteBlockReader.java    From big-c with Apache License 2.0
@Override
public synchronized int read(byte[] buf, int off, int len) 
                             throws IOException {
  
  // This has to be set here, *before* the skip, since we can
  // hit EOS during the skip, in the case that our entire read
  // is smaller than the checksum chunk.
  boolean eosBefore = eos;

  //for the first read, skip the extra bytes at the front.
  if (lastChunkLen < 0 && startOffset > firstChunkOffset && len > 0) {
    // Skip these bytes. But don't call this.skip()!
    int toSkip = (int)(startOffset - firstChunkOffset);
    if ( skipBuf == null ) {
      skipBuf = new byte[bytesPerChecksum];
    }
    if ( super.read(skipBuf, 0, toSkip) != toSkip ) {
      // should never happen
      throw new IOException("Could not skip required number of bytes");
    }
  }
  
  int nRead = super.read(buf, off, len);

  // if eos was set in the previous read, send a status code to the DN
  if (eos && !eosBefore && nRead >= 0) {
    if (needChecksum()) {
      sendReadResult(peer, Status.CHECKSUM_OK);
    } else {
      sendReadResult(peer, Status.SUCCESS);
    }
  }
  return nRead;
}
 
Example #10
Source File: DataXceiver.java    From big-c with Apache License 2.0
private static void writeResponse(Status status, String message, OutputStream out)
throws IOException {
  BlockOpResponseProto.Builder response = BlockOpResponseProto.newBuilder()
    .setStatus(status);
  if (message != null) {
    response.setMessage(message);
  }
  response.build().writeDelimitedTo(out);
  out.flush();
}
 
Example #11
Source File: BlockReceiver.java    From big-c with Apache License 2.0
/**
 * Send an OOB response. If all acks have been sent already for the block
 * and the responder is about to close, the delivery is not guaranteed.
 * This is because the other end can close the connection independently.
 * An OOB coming from downstream will be automatically relayed upstream
 * by the responder. This method is used only by originating datanode.
 *
 * @param ackStatus the type of ack to be sent
 */
void sendOOBResponse(final Status ackStatus) throws IOException,
    InterruptedException {
  if (!running) {
    LOG.info("Cannot send OOB response " + ackStatus + 
        ". Responder not running.");
    return;
  }

  synchronized(this) {
    if (sending) {
      wait(PipelineAck.getOOBTimeout(ackStatus));
      // Didn't get my turn in time. Give up.
      if (sending) {
        throw new IOException("Could not send OOB reponse in time: "
            + ackStatus);
      }
    }
    sending = true;
  }

  LOG.info("Sending an out of band ack of type " + ackStatus);
  try {
    sendAckUpstreamUnprotected(null, PipelineAck.UNKOWN_SEQNO, 0L, 0L,
        PipelineAck.combineHeader(datanode.getECN(), ackStatus));
  } finally {
    // Let others send acks. Unless there are multiple OOB send
    // calls, there can be only one waiter, the responder thread.
    // In any case, only one needs to be notified.
    synchronized(this) {
      sending = false;
      notify();
    }
  }
}
 
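In Hadoop itself, the usual trigger for sendOOBResponse is a datanode restart, which warns clients to recover the write pipeline rather than treat it as failed. Below is a hedged sketch of such a caller; the blockReceiver variable and the surrounding shutdown logic are assumed for illustration.

// Sketch only: blockReceiver is an active BlockReceiver for an open write
// pipeline; Status.OOB_RESTART is the restart notification value.
try {
  blockReceiver.sendOOBResponse(Status.OOB_RESTART);
} catch (InterruptedException ie) {
  Thread.currentThread().interrupt();  // preserve the interrupt and give up
} catch (IOException ioe) {
  // Delivery is best-effort; the client may already have closed the connection.
}
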
Example #12
Source File: TestClientBlockVerification.java    From big-c with Apache License 2.0
/**
 * Test that if we ask for a half block, and read it all, we *do*
 * send CHECKSUM_OK. The DN takes care of knowing whether it was
 * the whole block or not.
 */
@Test
public void testCompletePartialRead() throws Exception {
  // Ask for half the file
  RemoteBlockReader2 reader = (RemoteBlockReader2)spy(
      util.getBlockReader(testBlock, 0, FILE_SIZE_K * 1024 / 2));
  // And read half the file
  util.readAndCheckEOS(reader, FILE_SIZE_K * 1024 / 2, true);
  verify(reader).sendReadResult(Status.CHECKSUM_OK);
  reader.close();
}
 
Example #13
Source File: TestDataTransferProtocol.java    From big-c with Apache License 2.0
private void sendResponse(Status status, String firstBadLink,
    String message,
    DataOutputStream out)
throws IOException {
  Builder builder = BlockOpResponseProto.newBuilder().setStatus(status);
  if (firstBadLink != null) {
    builder.setFirstBadLink(firstBadLink);
  }
  if (message != null) {
    builder.setMessage(message);
  }
  builder.build()
    .writeDelimitedTo(out);
}
 
Example #14
Source File: TestDataTransferProtocol.java    From big-c with Apache License 2.0
@Test
public void TestPipeLineAckCompatibility() throws IOException {
  DataTransferProtos.PipelineAckProto proto = DataTransferProtos
      .PipelineAckProto.newBuilder()
      .setSeqno(0)
      .addReply(Status.CHECKSUM_OK)
      .build();

  DataTransferProtos.PipelineAckProto newProto = DataTransferProtos
      .PipelineAckProto.newBuilder().mergeFrom(proto)
      .addFlag(PipelineAck.combineHeader(PipelineAck.ECN.SUPPORTED,
                                         Status.CHECKSUM_OK))
      .build();

  ByteArrayOutputStream oldAckBytes = new ByteArrayOutputStream();
  proto.writeDelimitedTo(oldAckBytes);
  PipelineAck oldAck = new PipelineAck();
  oldAck.readFields(new ByteArrayInputStream(oldAckBytes.toByteArray()));
  assertEquals(
      PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.CHECKSUM_OK),
      oldAck.getHeaderFlag(0));

  PipelineAck newAck = new PipelineAck();
  ByteArrayOutputStream newAckBytes = new ByteArrayOutputStream();
  newProto.writeDelimitedTo(newAckBytes);
  newAck.readFields(new ByteArrayInputStream(newAckBytes.toByteArray()));
  assertEquals(PipelineAck.combineHeader(PipelineAck.ECN.SUPPORTED,
                                         Status.CHECKSUM_OK),
               newAck.getHeaderFlag(0));
}
 
Example #15
Source File: TestBlockReplacement.java    From big-c with Apache License 2.0
private boolean replaceBlock(
    ExtendedBlock block,
    DatanodeInfo source,
    DatanodeInfo sourceProxy,
    DatanodeInfo destination,
    StorageType targetStorageType) throws IOException, SocketException {
  Socket sock = new Socket();
  try {
    sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()),
        HdfsServerConstants.READ_TIMEOUT);
    sock.setKeepAlive(true);
    // sendRequest
    DataOutputStream out = new DataOutputStream(sock.getOutputStream());
    new Sender(out).replaceBlock(block, targetStorageType,
        BlockTokenSecretManager.DUMMY_TOKEN, source.getDatanodeUuid(),
        sourceProxy);
    out.flush();
    // receiveResponse
    DataInputStream reply = new DataInputStream(sock.getInputStream());

    BlockOpResponseProto proto =
        BlockOpResponseProto.parseDelimitedFrom(reply);
    while (proto.getStatus() == Status.IN_PROGRESS) {
      proto = BlockOpResponseProto.parseDelimitedFrom(reply);
    }
    return proto.getStatus() == Status.SUCCESS;
  } finally {
    sock.close();
  }
}
 
Example #16
Source File: FanOutOneBlockAsyncDFSOutputHelper.java    From hbase with Apache License 2.0
static Status getStatus(PipelineAckProto ack) {
  List<Integer> flagList = ack.getFlagList();
  Integer headerFlag;
  if (flagList.isEmpty()) {
    Status reply = ack.getReply(0);
    headerFlag = PipelineAck.combineHeader(ECN.DISABLED, reply);
  } else {
    headerFlag = flagList.get(0);
  }
  return PipelineAck.getStatusFromHeader(headerFlag);
}
 
Example #17
Source File: TestClientBlockVerification.java    From big-c with Apache License 2.0
/**
 * Test that if we do an incomplete read, we don't send CHECKSUM_OK
 */
@Test
public void testIncompleteRead() throws Exception {
  RemoteBlockReader2 reader = (RemoteBlockReader2)spy(
      util.getBlockReader(testBlock, 0, FILE_SIZE_K * 1024));
  util.readAndCheckEOS(reader, FILE_SIZE_K / 2 * 1024, false);

  // We asked the blockreader for the whole file, and only read
  // half of it, so no CHECKSUM_OK
  verify(reader, never()).sendReadResult(Status.CHECKSUM_OK);
  reader.close();
}
 
Example #18
Source File: PipelineAck.java    From big-c with Apache License 2.0
/**
 * Get the timeout to be used for transmitting the OOB type
 * @return the timeout in milliseconds
 */
public static long getOOBTimeout(Status status) throws IOException {
  int index = status.getNumber() - OOB_START;
  if (index >= 0 && index < NUM_OOB_TYPES) {
    return OOB_TIMEOUT[index];
  } 
  // Not an OOB.
  throw new IOException("Not an OOB status: " + status);
}
 
Example #19
Source File: RemoteBlockReader.java    From big-c with Apache License 2.0
/**
 * When the reader reaches end of the read, it sends a status response
 * (e.g. CHECKSUM_OK) to the DN. Failure to do so could lead to the DN
 * closing our connection (which we will re-open), but won't affect
 * data correctness.
 */
void sendReadResult(Peer peer, Status statusCode) {
  assert !sentStatusCode : "already sent status code to " + peer;
  try {
    RemoteBlockReader2.writeReadResult(peer.getOutputStream(), statusCode);
    sentStatusCode = true;
  } catch (IOException e) {
    // It's ok not to be able to send this. But something is probably wrong.
    LOG.info("Could not send read status (" + statusCode + ") to datanode " +
             peer.getRemoteAddressString() + ": " + e.getMessage());
  }
}
 
Example #20
Source File: PipelineAck.java    From big-c with Apache License 2.0
/**
 * Check whether all replies in this ack are SUCCESS
 * @return true if all statuses are SUCCESS
 */
public boolean isSuccess() {
  for (Status s : proto.getReplyList()) {
    if (s != Status.SUCCESS) {
      return false;
    }
  }
  return true;
}
 
Example #21
Source File: RemoteBlockReader2.java    From big-c with Apache License 2.0
/**
 * Serialize the actual read result on the wire.
 */
static void writeReadResult(OutputStream out, Status statusCode)
    throws IOException {
  
  ClientReadStatusProto.newBuilder()
    .setStatus(statusCode)
    .build()
    .writeDelimitedTo(out);

  out.flush();
}
 
Example #22
Source File: RemoteBlockReader2.java    From big-c with Apache License 2.0
/**
 * When the reader reaches end of the read, it sends a status response
 * (e.g. CHECKSUM_OK) to the DN. Failure to do so could lead to the DN
 * closing our connection (which we will re-open), but won't affect
 * data correctness.
 */
void sendReadResult(Status statusCode) {
  assert !sentStatusCode : "already sent status code to " + peer;
  try {
    writeReadResult(peer.getOutputStream(), statusCode);
    sentStatusCode = true;
  } catch (IOException e) {
    // It's ok not to be able to send this. But something is probably wrong.
    LOG.info("Could not send read status (" + statusCode + ") to datanode " +
             peer.getRemoteAddressString() + ": " + e.getMessage());
  }
}