org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader Java Examples

The following examples show how to use org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader. Each example is taken from an open source project; the source file, project, and license are noted above the code.
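Before the examples, here is a minimal, self-contained sketch of the round trip most of them rely on: writing a BlockMetadataHeader to a metadata file and reading it back. The file name and the CRC32C/512-byte checksum parameters are illustrative assumptions, not values taken from the examples below, and it assumes the constructor and the writeHeader/readHeader helpers are publicly accessible, as they are in the test code shown here.

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;

import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
import org.apache.hadoop.util.DataChecksum;

public class BlockMetadataHeaderRoundTrip {
  public static void main(String[] args) throws Exception {
    // Illustrative file name; any writable path works.
    File metaFile = new File("example.meta");

    // Build a version-1 header with an illustrative CRC32C checksum
    // over 512-byte chunks.
    DataChecksum checksum =
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);
    BlockMetadataHeader header = new BlockMetadataHeader((short) 1, checksum);

    // Write the header at the start of the metadata file.
    try (DataOutputStream out =
             new DataOutputStream(new FileOutputStream(metaFile))) {
      BlockMetadataHeader.writeHeader(out, header);
    }

    // Read it back and inspect the version and checksum parameters.
    try (DataInputStream in =
             new DataInputStream(new FileInputStream(metaFile))) {
      BlockMetadataHeader readBack = BlockMetadataHeader.readHeader(in);
      System.out.println("version = " + readBack.getVersion());
      System.out.println("bytesPerChecksum = "
          + readBack.getChecksum().getBytesPerChecksum());
    }
  }
}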
Example #1
Source File: ShortCircuitReplica.java    From hadoop with Apache License 2.0
public ShortCircuitReplica(ExtendedBlockId key,
    FileInputStream dataStream, FileInputStream metaStream,
    ShortCircuitCache cache, long creationTimeMs, Slot slot) throws IOException {
  this.key = key;
  this.dataStream = dataStream;
  this.metaStream = metaStream;
  this.metaHeader =
        BlockMetadataHeader.preadHeader(metaStream.getChannel());
  if (metaHeader.getVersion() != 1) {
    throw new IOException("invalid metadata header version " +
        metaHeader.getVersion() + ".  Can only handle version 1.");
  }
  this.cache = cache;
  this.creationTimeMs = creationTimeMs;
  this.slot = slot;
}
 
Example #2
Source File: TestShortCircuitCache.java    From hadoop with Apache License 2.0
public TestFileDescriptorPair() throws IOException {
  fis = new FileInputStream[2];
  for (int i = 0; i < 2; i++) {
    String name = dir.getDir() + "/file" + i;
    FileOutputStream fos = new FileOutputStream(name);
    if (i == 0) {
      // write 'data' file
      fos.write(1);
    } else {
      // write 'metadata' file
      BlockMetadataHeader header =
          new BlockMetadataHeader((short)1,
              DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 4));
      DataOutputStream dos = new DataOutputStream(fos);
      BlockMetadataHeader.writeHeader(dos, header);
      dos.close();
    }
    fos.close();
    fis[i] = new FileInputStream(name);
  }
}
 
Example #3
Source File: ShortCircuitReplica.java    From big-c with Apache License 2.0
public ShortCircuitReplica(ExtendedBlockId key,
    FileInputStream dataStream, FileInputStream metaStream,
    ShortCircuitCache cache, long creationTimeMs, Slot slot) throws IOException {
  this.key = key;
  this.dataStream = dataStream;
  this.metaStream = metaStream;
  this.metaHeader =
        BlockMetadataHeader.preadHeader(metaStream.getChannel());
  if (metaHeader.getVersion() != 1) {
    throw new IOException("invalid metadata header version " +
        metaHeader.getVersion() + ".  Can only handle version 1.");
  }
  this.cache = cache;
  this.creationTimeMs = creationTimeMs;
  this.slot = slot;
}
 
Example #4
Source File: TestShortCircuitCache.java    From big-c with Apache License 2.0
public TestFileDescriptorPair() throws IOException {
  fis = new FileInputStream[2];
  for (int i = 0; i < 2; i++) {
    String name = dir.getDir() + "/file" + i;
    FileOutputStream fos = new FileOutputStream(name);
    if (i == 0) {
      // write 'data' file
      fos.write(1);
    } else {
      // write 'metadata' file
      BlockMetadataHeader header =
          new BlockMetadataHeader((short)1,
              DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 4));
      DataOutputStream dos = new DataOutputStream(fos);
      BlockMetadataHeader.writeHeader(dos, header);
      dos.close();
    }
    fos.close();
    fis[i] = new FileInputStream(name);
  }
}
 
Example #5
Source File: BlockReaderLocal.java    From hadoop with Apache License 2.0
private BlockReaderLocal(Builder builder) {
  this.replica = builder.replica;
  this.dataIn = replica.getDataStream().getChannel();
  this.dataPos = builder.dataPos;
  this.checksumIn = replica.getMetaStream().getChannel();
  BlockMetadataHeader header = builder.replica.getMetaHeader();
  this.checksum = header.getChecksum();
  this.verifyChecksum = builder.verifyChecksum &&
      (this.checksum.getChecksumType().id != DataChecksum.CHECKSUM_NULL);
  this.filename = builder.filename;
  this.block = builder.block;
  this.bytesPerChecksum = checksum.getBytesPerChecksum();
  this.checksumSize = checksum.getChecksumSize();

  this.maxAllocatedChunks = (bytesPerChecksum == 0) ? 0 :
      ((builder.bufferSize + bytesPerChecksum - 1) / bytesPerChecksum);
  // Calculate the effective maximum readahead.
  // We can't do more readahead than there is space in the buffer.
  int maxReadaheadChunks = (bytesPerChecksum == 0) ? 0 :
      ((Math.min(builder.bufferSize, builder.maxReadahead) +
          bytesPerChecksum - 1) / bytesPerChecksum);
  if (maxReadaheadChunks == 0) {
    this.zeroReadaheadRequested = true;
    maxReadaheadChunks = 1;
  } else {
    this.zeroReadaheadRequested = false;
  }
  this.maxReadaheadLength = maxReadaheadChunks * bytesPerChecksum;
  this.storageType = builder.storageType;
}
 
Example #6
Source File: BlockReaderLocal.java    From big-c with Apache License 2.0
private BlockReaderLocal(Builder builder) {
  this.replica = builder.replica;
  this.dataIn = replica.getDataStream().getChannel();
  this.dataPos = builder.dataPos;
  this.checksumIn = replica.getMetaStream().getChannel();
  BlockMetadataHeader header = builder.replica.getMetaHeader();
  this.checksum = header.getChecksum();
  this.verifyChecksum = builder.verifyChecksum &&
      (this.checksum.getChecksumType().id != DataChecksum.CHECKSUM_NULL);
  this.filename = builder.filename;
  this.block = builder.block;
  this.bytesPerChecksum = checksum.getBytesPerChecksum();
  this.checksumSize = checksum.getChecksumSize();

  this.maxAllocatedChunks = (bytesPerChecksum == 0) ? 0 :
      ((builder.bufferSize + bytesPerChecksum - 1) / bytesPerChecksum);
  // Calculate the effective maximum readahead.
  // We can't do more readahead than there is space in the buffer.
  int maxReadaheadChunks = (bytesPerChecksum == 0) ? 0 :
      ((Math.min(builder.bufferSize, builder.maxReadahead) +
          bytesPerChecksum - 1) / bytesPerChecksum);
  if (maxReadaheadChunks == 0) {
    this.zeroReadaheadRequested = true;
    maxReadaheadChunks = 1;
  } else {
    this.zeroReadaheadRequested = false;
  }
  this.maxReadaheadLength = maxReadaheadChunks * bytesPerChecksum;
  this.storageType = builder.storageType;
}
 
Example #7
Source File: ShortCircuitReplica.java    From hadoop with Apache License 2.0
public BlockMetadataHeader getMetaHeader() {
  return metaHeader;
}
 
Example #8
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
static private void truncateBlock(File blockFile, File metaFile,
    long oldlen, long newlen) throws IOException {
  LOG.info("truncateBlock: blockFile=" + blockFile
      + ", metaFile=" + metaFile
      + ", oldlen=" + oldlen
      + ", newlen=" + newlen);

  if (newlen == oldlen) {
    return;
  }
  if (newlen > oldlen) {
    throw new IOException("Cannot truncate block to from oldlen (=" + oldlen
        + ") to newlen (=" + newlen + ")");
  }

  DataChecksum dcs = BlockMetadataHeader.readHeader(metaFile).getChecksum(); 
  int checksumsize = dcs.getChecksumSize();
  int bpc = dcs.getBytesPerChecksum();
  long n = (newlen - 1)/bpc + 1;
  long newmetalen = BlockMetadataHeader.getHeaderSize() + n*checksumsize;
  long lastchunkoffset = (n - 1)*bpc;
  int lastchunksize = (int)(newlen - lastchunkoffset); 
  byte[] b = new byte[Math.max(lastchunksize, checksumsize)]; 

  RandomAccessFile blockRAF = new RandomAccessFile(blockFile, "rw");
  try {
    //truncate blockFile 
    blockRAF.setLength(newlen);
 
    //read last chunk
    blockRAF.seek(lastchunkoffset);
    blockRAF.readFully(b, 0, lastchunksize);
  } finally {
    blockRAF.close();
  }

  //compute checksum
  dcs.update(b, 0, lastchunksize);
  dcs.writeValue(b, 0, false);

  //update metaFile 
  RandomAccessFile metaRAF = new RandomAccessFile(metaFile, "rw");
  try {
    metaRAF.setLength(newmetalen);
    metaRAF.seek(newmetalen - checksumsize);
    metaRAF.write(b, 0, checksumsize);
  } finally {
    metaRAF.close();
  }
}
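
To make the meta-file arithmetic in truncateBlock() above concrete, here is a small stand-alone sketch of the same calculation with illustrative values. The 512-byte chunk size and 4-byte checksum size are common HDFS defaults assumed for the illustration, not values read from any configuration, and the 7-byte header size reflects a 2-byte version plus a 5-byte DataChecksum header.

public class TruncateMetaLengthDemo {
  public static void main(String[] args) {
    long newlen = 1000;    // desired block length in bytes (illustrative)
    int bpc = 512;         // bytes per checksum chunk (assumed default)
    int checksumsize = 4;  // CRC32/CRC32C checksum size in bytes
    int headerSize = 7;    // BlockMetadataHeader.getHeaderSize(): 2-byte
                           // version + 5-byte DataChecksum header

    long n = (newlen - 1) / bpc + 1;                      // 2 chunks
    long newmetalen = headerSize + n * checksumsize;      // 7 + 2*4 = 15
    long lastchunkoffset = (n - 1) * bpc;                 // 512
    int lastchunksize = (int) (newlen - lastchunkoffset); // 488

    System.out.println("chunks=" + n + ", newMetaLen=" + newmetalen
        + ", lastChunkOffset=" + lastchunkoffset
        + ", lastChunkSize=" + lastchunksize);
  }
}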
 
Example #9
Source File: TestScrLazyPersistFiles.java    From hadoop with Apache License 2.0
private void doShortCircuitReadAfterEvictionTest() throws IOException,
    InterruptedException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
  Path path2 = new Path("/" + METHOD_NAME + ".02.dat");

  final int SEED = 0xFADED;
  makeRandomTestFile(path1, BLOCK_SIZE, true, SEED);

  // Verify short-circuit read from RAM_DISK.
  ensureFileReplicasOnStorageType(path1, RAM_DISK);
  File metaFile = cluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  assertTrue(metaFile.length() <= BlockMetadataHeader.getHeaderSize());
  assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));

  // Sleep for a short time to allow the lazy writer thread to do its job.
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);

  // Verify short-circuit read from RAM_DISK once again.
  ensureFileReplicasOnStorageType(path1, RAM_DISK);
  metaFile = cluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  assertTrue(metaFile.length() <= BlockMetadataHeader.getHeaderSize());
  assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));

  // Create another file with a replica on RAM_DISK, which evicts the first.
  makeRandomTestFile(path2, BLOCK_SIZE, true, SEED);
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
  triggerBlockReport();

  // Verify short-circuit read still works from DEFAULT storage.  This time,
  // we'll have a checksum written during lazy persistence.
  ensureFileReplicasOnStorageType(path1, DEFAULT);
  metaFile = cluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  assertTrue(metaFile.length() > BlockMetadataHeader.getHeaderSize());
  assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));

  // In the implementation of legacy short-circuit reads, any failure is
  // trapped silently, reverts back to a remote read, and also disables all
  // subsequent legacy short-circuit reads in the ClientContext.  If the test
  // uses legacy, then assert that it didn't get disabled.
  ClientContext clientContext = client.getClientContext();
  if (clientContext.getUseLegacyBlockReaderLocal()) {
    Assert.assertFalse(clientContext.getDisableLegacyBlockReaderLocal());
  }
}
 
Example #10
Source File: ShortCircuitReplica.java    From big-c with Apache License 2.0
public BlockMetadataHeader getMetaHeader() {
  return metaHeader;
}
 
Example #11
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
static private void truncateBlock(File blockFile, File metaFile,
    long oldlen, long newlen) throws IOException {
  LOG.info("truncateBlock: blockFile=" + blockFile
      + ", metaFile=" + metaFile
      + ", oldlen=" + oldlen
      + ", newlen=" + newlen);

  if (newlen == oldlen) {
    return;
  }
  if (newlen > oldlen) {
    throw new IOException("Cannot truncate block to from oldlen (=" + oldlen
        + ") to newlen (=" + newlen + ")");
  }

  DataChecksum dcs = BlockMetadataHeader.readHeader(metaFile).getChecksum(); 
  int checksumsize = dcs.getChecksumSize();
  int bpc = dcs.getBytesPerChecksum();
  long n = (newlen - 1)/bpc + 1;
  long newmetalen = BlockMetadataHeader.getHeaderSize() + n*checksumsize;
  long lastchunkoffset = (n - 1)*bpc;
  int lastchunksize = (int)(newlen - lastchunkoffset); 
  byte[] b = new byte[Math.max(lastchunksize, checksumsize)]; 

  RandomAccessFile blockRAF = new RandomAccessFile(blockFile, "rw");
  try {
    //truncate blockFile 
    blockRAF.setLength(newlen);
 
    //read last chunk
    blockRAF.seek(lastchunkoffset);
    blockRAF.readFully(b, 0, lastchunksize);
  } finally {
    blockRAF.close();
  }

  //compute checksum
  dcs.update(b, 0, lastchunksize);
  dcs.writeValue(b, 0, false);

  //update metaFile 
  RandomAccessFile metaRAF = new RandomAccessFile(metaFile, "rw");
  try {
    metaRAF.setLength(newmetalen);
    metaRAF.seek(newmetalen - checksumsize);
    metaRAF.write(b, 0, checksumsize);
  } finally {
    metaRAF.close();
  }
}
 
Example #12
Source File: TestScrLazyPersistFiles.java    From big-c with Apache License 2.0
private void doShortCircuitReadAfterEvictionTest() throws IOException,
    InterruptedException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
  Path path2 = new Path("/" + METHOD_NAME + ".02.dat");

  final int SEED = 0xFADED;
  makeRandomTestFile(path1, BLOCK_SIZE, true, SEED);

  // Verify short-circuit read from RAM_DISK.
  ensureFileReplicasOnStorageType(path1, RAM_DISK);
  File metaFile = cluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  assertTrue(metaFile.length() <= BlockMetadataHeader.getHeaderSize());
  assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));

  // Sleep for a short time to allow the lazy writer thread to do its job.
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);

  // Verify short-circuit read from RAM_DISK once again.
  ensureFileReplicasOnStorageType(path1, RAM_DISK);
  metaFile = cluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  assertTrue(metaFile.length() <= BlockMetadataHeader.getHeaderSize());
  assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));

  // Create another file with a replica on RAM_DISK, which evicts the first.
  makeRandomTestFile(path2, BLOCK_SIZE, true, SEED);
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
  triggerBlockReport();

  // Verify short-circuit read still works from DEFAULT storage.  This time,
  // we'll have a checksum written during lazy persistence.
  ensureFileReplicasOnStorageType(path1, DEFAULT);
  metaFile = cluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  assertTrue(metaFile.length() > BlockMetadataHeader.getHeaderSize());
  assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));

  // In the implementation of legacy short-circuit reads, any failure is
  // trapped silently, reverts back to a remote read, and also disables all
  // subsequent legacy short-circuit reads in the ClientContext.  If the test
  // uses legacy, then assert that it didn't get disabled.
  ClientContext clientContext = client.getClientContext();
  if (clientContext.getUseLegacyBlockReaderLocal()) {
    Assert.assertFalse(clientContext.getDisableLegacyBlockReaderLocal());
  }
}
 
Example #13
Source File: BlockPoolSlice.java    From lucene-solr with Apache License 2.0
/**
 * Find out the number of bytes in the block that match its crc.
 *
 * This algorithm assumes that data corruption caused by unexpected
 * datanode shutdown occurs only in the last crc chunk. So it checks
 * only the last chunk.
 *
 * @param blockFile the block file
 * @param genStamp generation stamp of the block
 * @return the number of valid bytes
 */
private long validateIntegrityAndSetLength(File blockFile, long genStamp) {
  try {
    final File metaFile = FsDatasetUtil.getMetaFile(blockFile, genStamp);
    long blockFileLen = blockFile.length();
    long metaFileLen = metaFile.length();
    int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
    if (!blockFile.exists() || blockFileLen == 0 ||
        !metaFile.exists() || metaFileLen < crcHeaderLen) {
      return 0;
    }
    try (DataInputStream checksumIn = new DataInputStream(
        new BufferedInputStream(
            fileIoProvider.getFileInputStream(volume, metaFile),
            ioFileBufferSize))) {
      // read and handle the common header here. For now just a version
      final DataChecksum checksum = BlockMetadataHeader.readDataChecksum(
          checksumIn, metaFile);
      int bytesPerChecksum = checksum.getBytesPerChecksum();
      int checksumSize = checksum.getChecksumSize();
      long numChunks = Math.min(
          (blockFileLen + bytesPerChecksum - 1) / bytesPerChecksum,
          (metaFileLen - crcHeaderLen) / checksumSize);
      if (numChunks == 0) {
        return 0;
      }
      try (InputStream blockIn = fileIoProvider.getFileInputStream(
          volume, blockFile);
           ReplicaInputStreams ris = new ReplicaInputStreams(blockIn,
               checksumIn, volume.obtainReference(), fileIoProvider)) {
        ris.skipChecksumFully((numChunks - 1) * checksumSize);
        long lastChunkStartPos = (numChunks - 1) * bytesPerChecksum;
        ris.skipDataFully(lastChunkStartPos);
        int lastChunkSize = (int) Math.min(
            bytesPerChecksum, blockFileLen - lastChunkStartPos);
        byte[] buf = new byte[lastChunkSize + checksumSize];
        ris.readChecksumFully(buf, lastChunkSize, checksumSize);
        ris.readDataFully(buf, 0, lastChunkSize);
        checksum.update(buf, 0, lastChunkSize);
        long validFileLength;
        if (checksum.compare(buf, lastChunkSize)) { // last chunk matches crc
          validFileLength = lastChunkStartPos + lastChunkSize;
        } else { // last chunk is corrupt
          validFileLength = lastChunkStartPos;
        }
        // truncate if extra bytes are present without CRC
        if (blockFile.length() > validFileLength) {
          try (RandomAccessFile blockRAF =
                   fileIoProvider.getRandomAccessFile(
                       volume, blockFile, "rw")) {
            // truncate blockFile
            blockRAF.setLength(validFileLength);
          }
        }
        return validFileLength;
      }
    }
  } catch (IOException e) {
    FsDatasetImpl.LOG.warn("Getting exception while validating integrity " +
        "and setting length for blockFile", e);
    return 0;
  }
}
 
Example #14
Source File: BlockReaderLocal.java    From RDFS with Apache License 2.0
/**
 * The only way this object can be instantiated.
 */
public static BlockReaderLocal newBlockReader(Configuration conf,
  String file, int namespaceid, Block blk, DatanodeInfo node, 
  long startOffset, long length,
  DFSClientMetrics metrics, boolean verifyChecksum,
  boolean clearOsBuffer) throws IOException {
  // check in cache first
  BlockPathInfo pathinfo = cache.get(blk);

  if (pathinfo == null) {
    // cache the connection to the local data for eternity.
    if (datanode == null) {
      datanode = DFSClient.createClientDNProtocolProxy(node, conf, 0);
    }
    // make RPC to local datanode to find local pathnames of blocks
    if (datanode.isMethodSupported("getBlockPathInfo", int.class, Block.class)) {
      pathinfo = datanode.getProxy().getBlockPathInfo(namespaceid, blk);
    } else {
      pathinfo = datanode.getProxy().getBlockPathInfo(blk);
    }
    if (pathinfo != null) {
      cache.put(blk, pathinfo);
    }
  }
  
  // check to see if the file exists. It may so happen that the
  // HDFS file has been deleted and this block-lookup is occurring
  // on behalf of a new HDFS file. This time, the block file could
  // be residing in a different portion of the fs.data.dir directory.
  // In this case, we remove this entry from the cache. The next
  // call to this method will repopulate the cache.
  try {

    // get a local file system
    File blkfile = new File(pathinfo.getBlockPath());
    FileInputStream dataIn = new FileInputStream(blkfile);
    
    if (LOG.isDebugEnabled()) {
      LOG.debug("New BlockReaderLocal for file " +
                blkfile + " of size " + blkfile.length() +
                " startOffset " + startOffset +
                " length " + length);
    }

    if (verifyChecksum) {
    
      // get the metadata file
      File metafile = new File(pathinfo.getMetaPath());
      FileInputStream checksumIn = new FileInputStream(metafile);
  
      // read and handle the common header here. For now just a version
      BlockMetadataHeader header = BlockMetadataHeader.readHeader(new DataInputStream(checksumIn), new PureJavaCrc32());
      short version = header.getVersion();
    
      if (version != FSDataset.METADATA_VERSION) {
        LOG.warn("Wrong version (" + version + ") for metadata file for "
            + blk + " ignoring ...");
      }
      DataChecksum checksum = header.getChecksum();

      return new BlockReaderLocal(conf, file, blk, startOffset, length,
          pathinfo, metrics, checksum, verifyChecksum, dataIn, checksumIn,
          clearOsBuffer);
    }
    else {
      return new BlockReaderLocal(conf, file, blk, startOffset, length,
          pathinfo, metrics, dataIn, clearOsBuffer);
    }
    
  } catch (FileNotFoundException e) {
    cache.remove(blk);    // remove from cache
    DFSClient.LOG.warn("BlockReaderLoca: Removing " + blk +
                       " from cache because local file " +
                       pathinfo.getBlockPath() + 
                       " could not be opened.");
    throw e;
  }
}