Java Code Examples for org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader#PKT_MAX_HEADER_LEN

The following examples show how to use org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader#PKT_MAX_HEADER_LEN, the upper bound on the length of a serialized packet header. Each example notes the source file and project it was taken from.
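
Before the individual examples, the following minimal sketch (not Hadoop code) illustrates the buffer layout they all rely on: PKT_MAX_HEADER_LEN bytes are reserved at the front of a packet buffer for the serialized header, followed by the per-chunk checksums and then the chunk data. The class name and the literal 33 are illustrative assumptions; the real constant is computed at runtime from the maximum serialized size of the header protobuf.

// Minimal sketch of the packet buffer layout, with an assumed header bound of 33 bytes.
public class PacketLayoutSketch {
  // Stand-in for PacketHeader.PKT_MAX_HEADER_LEN (assumed value, see lead-in above).
  static final int PKT_MAX_HEADER_LEN = 33;

  public static void main(String[] args) {
    int chunksPerPkt = 126;  // maximum chunks per packet
    int checksumSize = 4;    // e.g. one CRC32C checksum per chunk
    int chunkSize = 512;     // data bytes per chunk

    int checksumStart = PKT_MAX_HEADER_LEN;                      // header region ends here
    int dataStart = checksumStart + chunksPerPkt * checksumSize; // checksums precede data
    int bufLen = dataStart + chunksPerPkt * chunkSize;           // total buffer length

    System.out.println("header   region: [0, " + checksumStart + ")");
    System.out.println("checksum region: [" + checksumStart + ", " + dataStart + ")");
    System.out.println("data     region: [" + dataStart + ", " + bufLen + ")");
  }
}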
Example 1
Source File: DFSOutputStream.java    From hadoop with Apache License 2.0
/** Use {@link ByteArrayManager} to create buffer for non-heartbeat packets.*/
private DFSPacket createPacket(int packetSize, int chunksPerPkt, long offsetInBlock,
    long seqno, boolean lastPacketInBlock) throws InterruptedIOException {
  final byte[] buf;
  final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;

  try {
    buf = byteArrayManager.newByteArray(bufferSize);
  } catch (InterruptedException ie) {
    final InterruptedIOException iioe = new InterruptedIOException(
        "seqno=" + seqno);
    iioe.initCause(ie);
    throw iioe;
  }

  return new DFSPacket(buf, chunksPerPkt, offsetInBlock, seqno,
                       getChecksumSize(), lastPacketInBlock);
}
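
Note that the packetSize argument here counts only the checksum and data regions (it is computed that way in computePacketChunkSize, shown in Example 4 below), so the maximum header length is added on top when sizing the buffer.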
 
Example 2
Source File: DFSPacket.java    From hadoop with Apache License 2.0
/**
 * Create a new packet.
 *
 * @param buf the buffer storing data and checksums
 * @param chunksPerPkt maximum number of chunks per packet.
 * @param offsetInBlock offset in bytes into the HDFS block.
 * @param seqno the sequence number of this packet
 * @param checksumSize the size of checksum
 * @param lastPacketInBlock if this is the last packet
 */
DFSPacket(byte[] buf, int chunksPerPkt, long offsetInBlock, long seqno,
                 int checksumSize, boolean lastPacketInBlock) {
  this.lastPacketInBlock = lastPacketInBlock;
  this.numChunks = 0;
  this.offsetInBlock = offsetInBlock;
  this.seqno = seqno;

  this.buf = buf;

  checksumStart = PacketHeader.PKT_MAX_HEADER_LEN;
  checksumPos = checksumStart;
  dataStart = checksumStart + (chunksPerPkt * checksumSize);
  dataPos = dataStart;
  maxChunks = chunksPerPkt;
}
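
Note that the header region is only reserved by this constructor, never written. When the packet is later serialized (by DFSPacket#writeTo in the same file), the actual header is written immediately before checksumStart, so a header shorter than PKT_MAX_HEADER_LEN simply leaves a few unused bytes at the very front of the buffer.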
 
Example 3
Source File: TestDFSPacket.java    From hadoop with Apache License 2.0
@Test
public void testPacket() throws Exception {
  Random r = new Random(12345L);
  byte[] data = new byte[chunkSize];
  r.nextBytes(data);
  byte[] checksum = new byte[checksumSize];
  r.nextBytes(checksum);

  DataOutputBuffer os = new DataOutputBuffer(data.length * 2);

  byte[] packetBuf = new byte[data.length * 2];
  DFSPacket p = new DFSPacket(packetBuf, maxChunksPerPacket,
                              0, 0, checksumSize, false);
  p.setSyncBlock(true);
  p.writeData(data, 0, data.length);
  p.writeChecksum(checksum, 0, checksum.length);
  p.writeTo(os);

  //we have set syncBlock to true, so the header has the maximum length
  int headerLen = PacketHeader.PKT_MAX_HEADER_LEN;
  byte[] readBuf = os.getData();

  assertArrayRegionsEqual(readBuf, headerLen, checksum, 0, checksum.length);
  assertArrayRegionsEqual(readBuf, headerLen + checksum.length, data, 0, data.length);

}
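
The comment in the test is worth unpacking: PacketHeaderProto is defined with fixed-length protobuf fields, so the only variable-size part of the header is the optional syncBlock flag. Setting syncBlock to true therefore pushes the serialized header to exactly PKT_MAX_HEADER_LEN bytes, which is what allows the test to locate the checksum and data regions at a fixed offset.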
 
Example 4
Source File: DFSOutputStream.java    From hadoop with Apache License 2.0
private void computePacketChunkSize(int psize, int csize) {
  final int bodySize = psize - PacketHeader.PKT_MAX_HEADER_LEN;
  final int chunkSize = csize + getChecksumSize();
  chunksPerPacket = Math.max(bodySize/chunkSize, 1);
  packetSize = chunkSize*chunksPerPacket;
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("computePacketChunkSize: src=" + src +
              ", chunkSize=" + chunkSize +
              ", chunksPerPacket=" + chunksPerPacket +
              ", packetSize=" + packetSize);
  }
}
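
As a worked example of this arithmetic, the sketch below plugs in typical client defaults. The values are assumptions for illustration: a 64 KB write packet (dfs.client-write-packet-size), 512-byte checksum chunks, 4-byte CRC32C checksums, and a 33-byte maximum header length.

// Worked example of computePacketChunkSize, using assumed typical defaults.
public class ChunkMath {
  public static void main(String[] args) {
    final int pktMaxHeaderLen = 33;   // assumed value of PacketHeader.PKT_MAX_HEADER_LEN
    final int psize = 64 * 1024;      // assumed dfs.client-write-packet-size default
    final int csize = 512;            // bytes of data per checksum chunk
    final int checksumSize = 4;       // e.g. CRC32C

    final int bodySize = psize - pktMaxHeaderLen;                  // 65503
    final int chunkSize = csize + checksumSize;                    // 516
    final int chunksPerPacket = Math.max(bodySize / chunkSize, 1); // 126
    final int packetSize = chunkSize * chunksPerPacket;            // 65016

    // Prints: chunksPerPacket=126, packetSize=65016
    System.out.println("chunksPerPacket=" + chunksPerPacket
        + ", packetSize=" + packetSize);
  }
}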
 
Example 5
Source File: DFSOutputStream.java    From hadoop with Apache License 2.0
/**
 * For heartbeat packets, create buffer directly by new byte[]
 * since heartbeats should not be blocked.
 */
private DFSPacket createHeartbeatPacket() throws InterruptedIOException {
  final byte[] buf = new byte[PacketHeader.PKT_MAX_HEADER_LEN];
  return new DFSPacket(buf, 0, 0, DFSPacket.HEART_BEAT_SEQNO,
                       getChecksumSize(), false);
}
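
Because a heartbeat packet carries no checksums and no data, a buffer of exactly PKT_MAX_HEADER_LEN bytes suffices: with chunksPerPkt set to 0, checksumStart, dataStart, and the end of the buffer all coincide. DFSPacket.HEART_BEAT_SEQNO is a sentinel sequence number (-1 in the Hadoop sources) that marks the packet as a heartbeat.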
 
Example 6
Source File: BlockSender.java    From hadoop with Apache License 2.0
private long doSendBlock(DataOutputStream out, OutputStream baseStream,
      DataTransferThrottler throttler) throws IOException {
  if (out == null) {
    throw new IOException( "out stream is null" );
  }
  initialOffset = offset;
  long totalRead = 0;
  OutputStream streamForSendChunks = out;
  
  lastCacheDropOffset = initialOffset;

  if (isLongRead() && blockInFd != null) {
    // Advise that this file descriptor will be accessed sequentially.
    NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
        block.getBlockName(), blockInFd, 0, 0,
        NativeIO.POSIX.POSIX_FADV_SEQUENTIAL);
  }
  
  // Trigger readahead of beginning of file if configured.
  manageOsCache();

  final long startTime = ClientTraceLog.isDebugEnabled() ? System.nanoTime() : 0;
  try {
    int maxChunksPerPacket;
    int pktBufSize = PacketHeader.PKT_MAX_HEADER_LEN;
    boolean transferTo = transferToAllowed && !verifyChecksum
        && baseStream instanceof SocketOutputStream
        && blockIn instanceof FileInputStream;
    if (transferTo) {
      FileChannel fileChannel = ((FileInputStream)blockIn).getChannel();
      blockInPosition = fileChannel.position();
      streamForSendChunks = baseStream;
      maxChunksPerPacket = numberOfChunks(TRANSFERTO_BUFFER_SIZE);
      
      // Smaller packet size to only hold checksum when doing transferTo
      pktBufSize += checksumSize * maxChunksPerPacket;
    } else {
      maxChunksPerPacket = Math.max(1,
          numberOfChunks(HdfsConstants.IO_FILE_BUFFER_SIZE));
      // Packet size includes both checksum and data
      pktBufSize += (chunkSize + checksumSize) * maxChunksPerPacket;
    }

    ByteBuffer pktBuf = ByteBuffer.allocate(pktBufSize);

    while (endOffset > offset && !Thread.currentThread().isInterrupted()) {
      manageOsCache();
      long len = sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks,
          transferTo, throttler);
      offset += len;
      totalRead += len + (numberOfChunks(len) * checksumSize);
      seqno++;
    }
    // If this thread was interrupted, then it did not send the full block.
    if (!Thread.currentThread().isInterrupted()) {
      try {
        // send an empty packet to mark the end of the block
        sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks, transferTo,
            throttler);
        out.flush();
      } catch (IOException e) { //socket error
        throw ioeToSocketException(e);
      }

      sentEntireByteRange = true;
    }
  } finally {
    if ((clientTraceFmt != null) && ClientTraceLog.isDebugEnabled()) {
      final long endTime = System.nanoTime();
      ClientTraceLog.debug(String.format(clientTraceFmt, totalRead,
          initialOffset, endTime - startTime));
    }
    close();
  }
  return totalRead;
}
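
The two sizing branches above mirror how the data travels. On the transferTo path, only the header and checksums pass through pktBuf, because the chunk data is sent zero-copy from the block file's FileChannel to the socket, so the per-chunk buffer cost is just checksumSize. On the fallback path the buffer must also hold the data, so the cost grows to chunkSize + checksumSize per chunk. In both branches PKT_MAX_HEADER_LEN is the fixed overhead reserved for the header.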
 