org.apache.hadoop.util.DataChecksum Java Examples

The following examples show how to use org.apache.hadoop.util.DataChecksum. Each example is taken from an open-source project; the source file, project, and license are noted above each listing.
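
Before looking at the project code, here is a minimal, self-contained sketch of the basic DataChecksum lifecycle (create, update, serialize, verify). It is not taken from any of the projects below; the class name, the 512-byte chunk size, and the sample data are illustrative only.

import java.util.Arrays;

import org.apache.hadoop.util.DataChecksum;

public class DataChecksumQuickStart {
  public static void main(String[] args) {
    // One CRC per 512-byte chunk, matching the usual io.bytes.per.checksum default.
    DataChecksum sum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);

    byte[] chunk = new byte[512];
    Arrays.fill(chunk, (byte) 'x');
    sum.update(chunk, 0, chunk.length);

    // Serialize the CRC for this chunk and reset the accumulator for the next one.
    byte[] crc = new byte[sum.getChecksumSize()];
    sum.writeValue(crc, 0, true);

    // On the read path, recompute over the same bytes and compare with the stored CRC.
    sum.update(chunk, 0, chunk.length);
    System.out.println("checksum matches: " + sum.compare(crc, 0));
  }
}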
Example #1
Source File: BlockXCodingMerger.java    From RDFS with Apache License 2.0
public BlockXCodingMerger(Block block, int namespaceId,
		DataInputStream[] childInputStreams, long offsetInBlock,
		long length, String[] childAddrs, String myAddr,
		DataTransferThrottler throttler,
		int mergerLevel) throws IOException {
	super();
	this.block = block;
	this.namespaceId = namespaceId;
	this.childInputStreams = childInputStreams;
	this.offsetInBlock = offsetInBlock;
	this.length = length;
	this.childAddrs = childAddrs;
	this.myAddr = myAddr;
	this.throttler = throttler;
	this.mergerLevel = mergerLevel;
	Configuration conf = new Configuration();
	this.packetSize = conf.getInt("raid.blockreconstruct.packetsize", 4096);
	this.bytesPerChecksum = conf.getInt("io.bytes.per.checksum", 512);
	this.checksum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32,
			bytesPerChecksum, new PureJavaCrc32());
	this.checksumSize = checksum.getChecksumSize();
}
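
Note that this example comes from the older RDFS fork: it selects the algorithm through the int constant DataChecksum.CHECKSUM_CRC32 and supplies a PureJavaCrc32 instance explicitly. Most of the Apache Hadoop and Tez examples below pick the algorithm through the newer DataChecksum.Type enum instead.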
 
Example #2
Source File: BlockMetadataHeader.java    From hadoop with Apache License 2.0
/**
 * Read the header without changing the position of the FileChannel.
 *
 * @param fc The FileChannel to read.
 * @return the Metadata Header.
 * @throws IOException on error.
 */
public static BlockMetadataHeader preadHeader(FileChannel fc)
    throws IOException {
  final byte arr[] = new byte[getHeaderSize()];
  ByteBuffer buf = ByteBuffer.wrap(arr);

  while (buf.hasRemaining()) {
    if (fc.read(buf, 0) <= 0) {
      throw new EOFException("unexpected EOF while reading " +
          "metadata file header");
    }
  }
  short version = (short)((arr[0] << 8) | (arr[1] & 0xff));
  DataChecksum dataChecksum = DataChecksum.newDataChecksum(arr, 2);
  return new BlockMetadataHeader(version, dataChecksum);
}
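
preadHeader relies on the on-disk layout of the metadata header: a 2-byte version followed by the serialized DataChecksum header (one type byte plus a 4-byte bytesPerChecksum), which is why DataChecksum.newDataChecksum(arr, 2) starts parsing at offset 2. The following standalone round-trip sketch is illustrative only (the class name and the version value 1 are made up):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.util.DataChecksum;

public class ChecksumHeaderRoundTrip {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    out.writeShort(1);  // stand-in for the metadata version field
    DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512).writeHeader(out);
    out.flush();

    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
    short version = in.readShort();
    DataChecksum parsed = DataChecksum.newDataChecksum(in);
    System.out.println("version=" + version + ", checksum=" + parsed);
  }
}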
 
Example #3
Source File: TestShortCircuitCache.java    From big-c with Apache License 2.0
public TestFileDescriptorPair() throws IOException {
  fis = new FileInputStream[2];
  for (int i = 0; i < 2; i++) {
    String name = dir.getDir() + "/file" + i;
    FileOutputStream fos = new FileOutputStream(name);
    if (i == 0) {
      // write 'data' file
      fos.write(1);
    } else {
      // write 'metadata' file
      BlockMetadataHeader header =
          new BlockMetadataHeader((short)1,
              DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 4));
      DataOutputStream dos = new DataOutputStream(fos);
      BlockMetadataHeader.writeHeader(dos, header);
      dos.close();
    }
    fos.close();
    fis[i] = new FileInputStream(name);
  }
}
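
The 'metadata' file above is built around a Type.NULL checksum, which tracks nothing per chunk. A small illustrative check (the class name is made up) of why such a file holds little more than its header:

import org.apache.hadoop.util.DataChecksum;

public class NullChecksumSize {
  public static void main(String[] args) {
    DataChecksum none = DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 4);
    System.out.println(none.getChecksumSize());     // 0: no CRC bytes per chunk
    System.out.println(none.getBytesPerChecksum()); // 4, as requested
  }
}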
 
Example #4
Source File: FanOutOneBlockAsyncDFSOutput.java    From hbase with Apache License 2.0
FanOutOneBlockAsyncDFSOutput(Configuration conf, DistributedFileSystem dfs,
    DFSClient client, ClientProtocol namenode, String clientName, String src, long fileId,
    LocatedBlock locatedBlock, Encryptor encryptor, List<Channel> datanodeList,
    DataChecksum summer, ByteBufAllocator alloc) {
  this.conf = conf;
  this.dfs = dfs;
  this.client = client;
  this.namenode = namenode;
  this.fileId = fileId;
  this.clientName = clientName;
  this.src = src;
  this.block = locatedBlock.getBlock();
  this.locations = locatedBlock.getLocations();
  this.encryptor = encryptor;
  this.datanodeList = datanodeList;
  this.summer = summer;
  this.maxDataLen = MAX_DATA_LEN - (MAX_DATA_LEN % summer.getBytesPerChecksum());
  this.alloc = alloc;
  this.buf = alloc.directBuffer(sendBufSizePRedictor.initialSize());
  this.state = State.STREAMING;
  setupReceiver(conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT));
}
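
The maxDataLen line rounds the packet cap down to a whole number of checksum chunks. For example, with summer.getBytesPerChecksum() == 512 and a hypothetical cap of 100,000 bytes, maxDataLen would be 99,840 (195 full chunks), so every chunk in a flushed packet can carry a complete CRC.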
 
Example #5
Source File: ChecksumFileSystem.java    From hadoop with Apache License 2.0
public ChecksumFSOutputSummer(ChecksumFileSystem fs, 
                      Path file, 
                      boolean overwrite,
                      int bufferSize,
                      short replication,
                      long blockSize,
                      Progressable progress,
                      FsPermission permission)
  throws IOException {
  super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
      fs.getBytesPerSum()));
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, permission, overwrite,
                                     bufferSize, replication, blockSize,
                                     progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file),
                                           permission, true, sumBufferSize,
                                           replication, blockSize, null);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
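
The two writes at the end establish the .crc sidecar layout: the CHECKSUM_VERSION magic, the bytesPerSum value, and then (as data is written) one 4-byte CRC32 per bytesPerSum bytes. A back-of-the-envelope size estimate, assuming the header is the 4-byte magic plus a 4-byte int (the class name and numbers are illustrative):

public class CrcSidecarSizeEstimate {
  public static void main(String[] args) {
    long dataLen = 10_000;   // hypothetical file length
    int bytesPerSum = 512;
    long chunks = (dataLen + bytesPerSum - 1) / bytesPerSum;  // 20 chunks
    long header = 4 + 4;                                      // magic + bytesPerSum
    System.out.println(header + chunks * 4L);                 // 88 bytes in the .crc file
  }
}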
 
Example #6
Source File: RemoteBlockReader2.java    From hadoop with Apache License 2.0
protected RemoteBlockReader2(String file, String bpid, long blockId,
    DataChecksum checksum, boolean verifyChecksum,
    long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
    DatanodeID datanodeID, PeerCache peerCache) {
  this.isLocal = DFSClient.isLocalAddress(NetUtils.
      createSocketAddr(datanodeID.getXferAddr()));
  // Path is used only for printing block and file information in debug
  this.peer = peer;
  this.datanodeID = datanodeID;
  this.in = peer.getInputStreamChannel();
  this.checksum = checksum;
  this.verifyChecksum = verifyChecksum;
  this.startOffset = Math.max( startOffset, 0 );
  this.filename = file;
  this.peerCache = peerCache;
  this.blockId = blockId;

  // The total number of bytes that we need to transfer from the DN is
  // the amount that the user wants (bytesToRead), plus the padding at
  // the beginning in order to chunk-align. Note that the DN may elect
  // to send more than this amount if the read starts/ends mid-chunk.
  this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
  bytesPerChecksum = this.checksum.getBytesPerChecksum();
  checksumSize = this.checksum.getChecksumSize();
}
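
The padding term (startOffset - firstChunkOffset) accounts for chunk alignment. For example, with 512-byte chunks, a read starting at block offset 1,000 actually begins at chunk boundary 512, so the reader must pull and discard 488 extra bytes before the requested range; bytesNeededToFinish is then bytesToRead + 488.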
 
Example #7
Source File: FSEditLogOp.java    From big-c with Apache License 2.0
/**
 * Construct the reader
 * @param in The stream to read from.
 * @param logVersion The version of the data coming from the stream.
 */
public Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
  this.logVersion = logVersion;
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITS_CHESKUM, logVersion)) {
    this.checksum = DataChecksum.newCrc32();
  } else {
    this.checksum = null;
  }
  // It is possible that the logVersion is actually a future layoutversion
  // during the rolling upgrade (e.g., the NN gets upgraded first). We
  // assume future layout will also support length of editlog op.
  this.supportEditLogLength = NameNodeLayoutVersion.supports(
      NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)
      || logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;

  if (this.checksum != null) {
    this.in = new DataInputStream(
        new CheckedInputStream(in, this.checksum));
  } else {
    this.in = in;
  }
  this.limiter = limiter;
  this.cache = new OpInstanceCache();
  this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
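
DataChecksum.newCrc32() returns a plain java.util.zip.Checksum, which is why the reader can hand it straight to a standard CheckedInputStream. A standalone sketch of the same pattern (class name and payload are made up):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.CheckedInputStream;
import java.util.zip.Checksum;

import org.apache.hadoop.util.DataChecksum;

public class EditLogStyleChecksum {
  public static void main(String[] args) throws IOException {
    Checksum crc = DataChecksum.newCrc32();
    byte[] payload = "edit-log-op".getBytes(StandardCharsets.UTF_8);
    try (CheckedInputStream in =
        new CheckedInputStream(new ByteArrayInputStream(payload), crc)) {
      while (in.read() != -1) {
        // every byte read also feeds the checksum
      }
    }
    System.out.printf("crc32=0x%08x%n", crc.getValue());
  }
}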
 
Example #8
Source File: DFSOutputStream.java    From hadoop with Apache License 2.0
/** Construct a new output stream for append. */
private DFSOutputStream(DFSClient dfsClient, String src,
    EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock,
    HdfsFileStatus stat, DataChecksum checksum) throws IOException {
  this(dfsClient, src, progress, stat, checksum);
  initialFileSize = stat.getLen(); // length of file when opened
  this.shouldSyncBlock = flags.contains(CreateFlag.SYNC_BLOCK);

  boolean toNewBlock = flags.contains(CreateFlag.NEW_BLOCK);

  // The last partial block of the file has to be filled.
  if (!toNewBlock && lastBlock != null) {
    // indicate that we are appending to an existing block
    bytesCurBlock = lastBlock.getBlockSize();
    streamer = new DataStreamer(lastBlock, stat, bytesPerChecksum);
  } else {
    computePacketChunkSize(dfsClient.getConf().writePacketSize,
        bytesPerChecksum);
    streamer = new DataStreamer(stat,
        lastBlock != null ? lastBlock.getBlock() : null);
  }
  this.fileEncryptionInfo = stat.getFileEncryptionInfo();
}
 
Example #9
Source File: RemoteBlockReader2.java    From big-c with Apache License 2.0
protected RemoteBlockReader2(String file, String bpid, long blockId,
    DataChecksum checksum, boolean verifyChecksum,
    long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
    DatanodeID datanodeID, PeerCache peerCache) {
  this.isLocal = DFSClient.isLocalAddress(NetUtils.
      createSocketAddr(datanodeID.getXferAddr()));
  // Path is used only for printing block and file information in debug
  this.peer = peer;
  this.datanodeID = datanodeID;
  this.in = peer.getInputStreamChannel();
  this.checksum = checksum;
  this.verifyChecksum = verifyChecksum;
  this.startOffset = Math.max( startOffset, 0 );
  this.filename = file;
  this.peerCache = peerCache;
  this.blockId = blockId;

  // The total number of bytes that we need to transfer from the DN is
  // the amount that the user wants (bytesToRead), plus the padding at
  // the beginning in order to chunk-align. Note that the DN may elect
  // to send more than this amount if the read starts/ends mid-chunk.
  this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
  bytesPerChecksum = this.checksum.getBytesPerChecksum();
  checksumSize = this.checksum.getChecksumSize();
}
 
Example #10
Source File: IFileInputStream.java    From tez with Apache License 2.0
/**
 * Create a checksum input stream that reads and verifies checksummed data from the given stream.
 * @param in The input stream to be verified for checksum.
 * @param len The length of the input stream including checksum bytes.
 * @param readAhead Whether to attempt readAhead for this stream
 * @param readAheadLength Number of bytes to readAhead if it is enabled
 */
public IFileInputStream(InputStream in, long len, boolean readAhead, int readAheadLength) {
  this.in = in;
  sum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
      Integer.MAX_VALUE);
  checksumSize = sum.getChecksumSize();
  buffer = new byte[4096];
  offset = 0;
  length = len;
  dataLength = length - checksumSize;

  readahead = readAhead;
  readaheadLength = readAheadLength;

  if (readahead) {
    this.inFd = getFileDescriptorIfAvail(in);
    doReadahead();
  } else {
    this.inFd = null;
  }
}
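
Passing Integer.MAX_VALUE as the bytes-per-checksum makes the whole stream a single chunk, so exactly one 4-byte CRC32 trails the data; that is why dataLength is computed as len minus getChecksumSize().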
 
Example #11
Source File: TestBlockRecovery.java    From hadoop with Apache License 2.0
/**
 * BlockRecoveryFI_11. a replica's recovery id does not match new GS.
 *
 * @throws IOException in case of an error
 */
@Test
public void testNotMatchedReplicaID() throws IOException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(
      StorageType.DEFAULT, block, false).getReplica();
  ReplicaOutputStreams streams = null;
  try {
    streams = replicaInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    streams.getChecksumOut().write('a');
    dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1));
    try {
      dn.syncBlock(rBlock, initBlockRecords(dn));
      fail("Sync should fail");
    } catch (IOException e) {
      // assert, rather than just compute, that this is the expected failure
      GenericTestUtils.assertExceptionContains("Cannot recover ", e);
    }
    DatanodeProtocol namenode = dn.getActiveNamenodeForBP(POOL_ID);
    verify(namenode, never()).commitBlockSynchronization(
        any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
        anyBoolean(), any(DatanodeID[].class), any(String[].class));
  } finally {
    streams.close();
  }
}
 
Example #12
Source File: DataTransferProtoUtil.java    From hadoop with Apache License 2.0
public static DataChecksum fromProto(ChecksumProto proto) {
  if (proto == null) return null;

  int bytesPerChecksum = proto.getBytesPerChecksum();
  DataChecksum.Type type = PBHelper.convert(proto.getType());
  return DataChecksum.newDataChecksum(type, bytesPerChecksum);
}
 
Example #13
Source File: TestBlockRecovery.java    From big-c with Apache License 2.0
/**
 * BlockRecoveryFI_11. a replica's recovery id does not match new GS.
 *
 * @throws IOException in case of an error
 */
@Test
public void testNotMatchedReplicaID() throws IOException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(
      StorageType.DEFAULT, block, false).getReplica();
  ReplicaOutputStreams streams = null;
  try {
    streams = replicaInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    streams.getChecksumOut().write('a');
    dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1));
    try {
      dn.syncBlock(rBlock, initBlockRecords(dn));
      fail("Sync should fail");
    } catch (IOException e) {
      // assert, rather than just compute, that this is the expected failure
      GenericTestUtils.assertExceptionContains("Cannot recover ", e);
    }
    DatanodeProtocol namenode = dn.getActiveNamenodeForBP(POOL_ID);
    verify(namenode, never()).commitBlockSynchronization(
        any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
        anyBoolean(), any(DatanodeID[].class), any(String[].class));
  } finally {
    streams.close();
  }
}
 
Example #14
Source File: Options.java    From big-c with Apache License 2.0
/**
 * A helper method for processing user input and default value to 
 * create a combined checksum option. This is a bit complicated because
 * bytesPerChecksum is kept for backward compatibility.
 *
 * @param defaultOpt Default checksum option
 * @param userOpt User-specified checksum option. Ignored if null.
 * @param userBytesPerChecksum User-specified bytesPerChecksum
 *                Ignored if < 0.
 */
public static ChecksumOpt processChecksumOpt(ChecksumOpt defaultOpt, 
    ChecksumOpt userOpt, int userBytesPerChecksum) {
  final boolean useDefaultType;
  final DataChecksum.Type type;
  if (userOpt != null 
      && userOpt.getChecksumType() != DataChecksum.Type.DEFAULT) {
    useDefaultType = false;
    type = userOpt.getChecksumType();
  } else {
    useDefaultType = true;
    type = defaultOpt.getChecksumType();
  }

  //  bytesPerChecksum - order of preference
  //    user specified value in bytesPerChecksum
  //    user specified value in checksumOpt
  //    default.
  if (userBytesPerChecksum > 0) {
    return new ChecksumOpt(type, userBytesPerChecksum);
  } else if (userOpt != null && userOpt.getBytesPerChecksum() > 0) {
    return !useDefaultType? userOpt
        : new ChecksumOpt(type, userOpt.getBytesPerChecksum());
  } else {
    return useDefaultType? defaultOpt
        : new ChecksumOpt(type, defaultOpt.getBytesPerChecksum());
  }
}
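
The precedence is easiest to see with concrete values. The sketch below assumes the method is reachable as the static helper shown above; the class name and numbers are illustrative:

import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.util.DataChecksum;

public class ChecksumOptPrecedence {
  public static void main(String[] args) {
    ChecksumOpt defaults = new ChecksumOpt(DataChecksum.Type.CRC32, 512);
    ChecksumOpt user = new ChecksumOpt(DataChecksum.Type.CRC32C, 4096);

    // An explicit userBytesPerChecksum (> 0) wins over both the user option
    // and the default, while the user-chosen type is kept.
    ChecksumOpt combined = ChecksumOpt.processChecksumOpt(defaults, user, 1024);
    System.out.println(combined.getChecksumType() + " / "
        + combined.getBytesPerChecksum());   // CRC32C / 1024
  }
}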
 
Example #15
Source File: BlockMetadataHeader.java    From hadoop with Apache License 2.0
/**
 * Read the checksum header from the meta input stream.
 * @return the data checksum obtained from the header.
 */
public static DataChecksum readDataChecksum(final DataInputStream metaIn,
    final Object name) throws IOException {
  // read and handle the common header here. For now just a version
  final BlockMetadataHeader header = readHeader(metaIn);
  if (header.getVersion() != VERSION) {
    LOG.warn("Unexpected meta-file version for " + name
        + ": version in file is " + header.getVersion()
        + " but expected version is " + VERSION);
  }
  return header.getChecksum();
}
 
Example #16
Source File: DataXceiver.java    From big-c with Apache License 2.0
private MD5Hash calcPartialBlockChecksum(ExtendedBlock block,
    long requestLength, DataChecksum checksum, DataInputStream checksumIn)
    throws IOException {
  final int bytesPerCRC = checksum.getBytesPerChecksum();
  final int csize = checksum.getChecksumSize();
  final byte[] buffer = new byte[4*1024];
  MessageDigest digester = MD5Hash.getDigester();

  long remaining = requestLength / bytesPerCRC * csize;
  for (int toDigest = 0; remaining > 0; remaining -= toDigest) {
    toDigest = checksumIn.read(buffer, 0,
        (int) Math.min(remaining, buffer.length));
    if (toDigest < 0) {
      break;
    }
    digester.update(buffer, 0, toDigest);
  }
  
  int partialLength = (int) (requestLength % bytesPerCRC);
  if (partialLength > 0) {
    byte[] buf = new byte[partialLength];
    final InputStream blockIn = datanode.data.getBlockInputStream(block,
        requestLength - partialLength);
    try {
      // Get the CRC of the partialLength.
      IOUtils.readFully(blockIn, buf, 0, partialLength);
    } finally {
      IOUtils.closeStream(blockIn);
    }
    checksum.update(buf, 0, partialLength);
    byte[] partialCrc = new byte[csize];
    checksum.writeValue(partialCrc, 0, true);
    digester.update(partialCrc);
  }
  return new MD5Hash(digester.digest());
}
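
A quick sanity check of the arithmetic: for requestLength = 1,000, bytesPerCRC = 512 and csize = 4, the loop digests 4 bytes of stored CRCs (one full chunk), and partialLength = 488, whose CRC is recomputed from the block data on the fly and appended to the MD5 digest.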
 
Example #17
Source File: DataTransferProtoUtil.java    From big-c with Apache License 2.0
public static ChecksumProto toProto(DataChecksum checksum) {
  ChecksumTypeProto type = PBHelper.convert(checksum.getChecksumType());
  // ChecksumType#valueOf never returns null
  return ChecksumProto.newBuilder()
    .setBytesPerChecksum(checksum.getBytesPerChecksum())
    .setType(type)
    .build();
}
 
Example #18
Source File: IFileOutputStream.java    From hadoop-gpu with Apache License 2.0
/**
 * Create a checksum output stream that writes
 * the bytes to the given stream.
 * @param out
 */
public IFileOutputStream(OutputStream out) {
  super(out);
  sum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32,
      Integer.MAX_VALUE);
  barray = new byte[sum.getChecksumSize()];
}
 
Example #19
Source File: ReplicaOutputStreams.java    From big-c with Apache License 2.0
/**
 * Create an object with a data output stream, a checksum output stream
 * and a checksum.
 */
public ReplicaOutputStreams(OutputStream dataOut, OutputStream checksumOut,
    DataChecksum checksum, boolean isTransientStorage) {
  this.dataOut = dataOut;
  this.checksumOut = checksumOut;
  this.checksum = checksum;
  this.isTransientStorage = isTransientStorage;
}
 
Example #20
Source File: SimulatedFSDataset.java    From hadoop with Apache License 2.0
@Override
synchronized public ReplicaOutputStreams createStreams(boolean isCreate, 
    DataChecksum requestedChecksum) throws IOException {
  if (finalized) {
    throw new IOException("Trying to write to a finalized replica "
        + theBlock);
  } else {
    SimulatedOutputStream crcStream = new SimulatedOutputStream();
    return new ReplicaOutputStreams(oStream, crcStream, requestedChecksum,
        volume.isTransientStorage());
  }
}
 
Example #21
Source File: TestSimulatedFSDataset.java    From hadoop with Apache License 2.0
int addSomeBlocks(SimulatedFSDataset fsdataset, int startingBlockId)
    throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
    ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0); 
    // we pass the expected len as zero; fsdataset should use the size of the
    // actual data written
    ReplicaInPipelineInterface bInfo = fsdataset.createRbw(
        StorageType.DEFAULT, b, false).getReplica();
    ReplicaOutputStreams out = bInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    try {
      OutputStream dataOut  = out.getDataOut();
      assertEquals(0, fsdataset.getLength(b));
      for (int j=1; j <= blockIdToLen(i); ++j) {
        dataOut.write(j);
        assertEquals(j, bInfo.getBytesOnDisk()); // correct length even as we write
        bytesAdded++;
      }
    } finally {
      out.close();
    }
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;  
}
 
Example #22
Source File: TestCopyMapper.java    From hadoop with Apache License 2.0
private static void createSourceDataWithDifferentChecksumType()
    throws Exception {
  mkdirs(SOURCE_PATH + "/1");
  mkdirs(SOURCE_PATH + "/2");
  mkdirs(SOURCE_PATH + "/2/3/4");
  mkdirs(SOURCE_PATH + "/2/3");
  mkdirs(SOURCE_PATH + "/5");
  touchFile(SOURCE_PATH + "/5/6", new ChecksumOpt(DataChecksum.Type.CRC32,
      512));
  mkdirs(SOURCE_PATH + "/7");
  mkdirs(SOURCE_PATH + "/7/8");
  touchFile(SOURCE_PATH + "/7/8/9", new ChecksumOpt(DataChecksum.Type.CRC32C,
      512));
}
 
Example #23
Source File: IFileOutputStream.java    From tez with Apache License 2.0
/**
 * Create a checksum output stream that writes
 * the bytes to the given stream.
 * @param out
 */
public IFileOutputStream(OutputStream out) {
  super(out);
  sum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
      Integer.MAX_VALUE);
  barray = new byte[sum.getChecksumSize()];
  buffer = new byte[4096];
  offset = 0;
}
 
Example #24
Source File: DFSOutputStream.java    From big-c with Apache License 2.0
private DFSOutputStream(DFSClient dfsClient, String src, Progressable progress,
    HdfsFileStatus stat, DataChecksum checksum) throws IOException {
  super(getChecksum4Compute(checksum, stat));
  this.dfsClient = dfsClient;
  this.src = src;
  this.fileId = stat.getFileId();
  this.blockSize = stat.getBlockSize();
  this.blockReplication = stat.getReplication();
  this.fileEncryptionInfo = stat.getFileEncryptionInfo();
  this.progress = progress;
  this.cachingStrategy = new AtomicReference<CachingStrategy>(
      dfsClient.getDefaultWriteCachingStrategy());
  if ((progress != null) && DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug(
        "Set non-null progress callback on DFSOutputStream " + src);
  }
  
  this.bytesPerChecksum = checksum.getBytesPerChecksum();
  if (bytesPerChecksum <= 0) {
    throw new HadoopIllegalArgumentException(
        "Invalid value: bytesPerChecksum = " + bytesPerChecksum + " <= 0");
  }
  if (blockSize % bytesPerChecksum != 0) {
    throw new HadoopIllegalArgumentException("Invalid values: "
        + DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY + " (=" + bytesPerChecksum
        + ") must divide block size (=" + blockSize + ").");
  }
  this.checksum4WriteBlock = checksum;

  this.dfsclientSlowLogThresholdMs =
    dfsClient.getConf().dfsclientSlowIoWarningThresholdMs;
  this.byteArrayManager = dfsClient.getClientContext().getByteArrayManager();
}
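
The divisibility check reflects how HDFS frames blocks: a block must consist of whole checksum chunks. With the default 128 MB block size and bytesPerChecksum = 512 that is exactly 262,144 chunks, so the constructor accepts it, whereas a block size of 1,000,000 bytes would be rejected because 1,000,000 % 512 != 0.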
 
Example #25
Source File: FsServerDefaults.java    From hadoop with Apache License 2.0
public FsServerDefaults(long blockSize, int bytesPerChecksum,
    int writePacketSize, short replication, int fileBufferSize,
    boolean encryptDataTransfer, long trashInterval,
    DataChecksum.Type checksumType) {
  this.blockSize = blockSize;
  this.bytesPerChecksum = bytesPerChecksum;
  this.writePacketSize = writePacketSize;
  this.replication = replication;
  this.fileBufferSize = fileBufferSize;
  this.encryptDataTransfer = encryptDataTransfer;
  this.trashInterval = trashInterval;
  this.checksumType = checksumType;
}
 
Example #26
Source File: FsServerDefaults.java    From hadoop with Apache License 2.0
@Override
@InterfaceAudience.Private
public void readFields(DataInput in) throws IOException {
  blockSize = in.readLong();
  bytesPerChecksum = in.readInt();
  writePacketSize = in.readInt();
  replication = in.readShort();
  fileBufferSize = in.readInt();
  checksumType = WritableUtils.readEnum(in, DataChecksum.Type.class);
}
 
Example #27
Source File: DFSOutputStream.java    From big-c with Apache License 2.0
/** 
 * @return the object for computing checksum.
 *         The type is NULL if checksum is not computed.
 */
private static DataChecksum getChecksum4Compute(DataChecksum checksum,
    HdfsFileStatus stat) {
  if (isLazyPersist(stat) && stat.getReplication() == 1) {
    // do not compute checksum for writing to single replica to memory
    return DataChecksum.newDataChecksum(Type.NULL,
        checksum.getBytesPerChecksum());
  }
  return checksum;
}
 
Example #28
Source File: ChecksumFs.java    From big-c with Apache License 2.0
public ChecksumFSOutputSummer(final ChecksumFs fs, final Path file, 
  final EnumSet<CreateFlag> createFlag,
  final FsPermission absolutePermission, final int bufferSize,
  final short replication, final long blockSize, 
  final Progressable progress, final ChecksumOpt checksumOpt,
  final boolean createParent) throws IOException {
  super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
      fs.getBytesPerSum()));

  // checksumOpt is passed down to the raw fs. Unless the raw fs implements
  // checksums internally, checksumOpt will be ignored.
  // If the raw fs does checksum internally, we will end up with
  // two layers of checksumming, i.e. checksumming the checksum file.
  this.datas = fs.getRawFs().createInternal(file, createFlag,
      absolutePermission, bufferSize, replication, blockSize, progress,
       checksumOpt,  createParent);
  
  // Now create the checksum file; adjust the buffer size
  int bytesPerSum = fs.getBytesPerSum();
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFs().createInternal(fs.getChecksumFile(file),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      absolutePermission, sumBufferSize, replication, blockSize, progress,
      checksumOpt, createParent);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
 
Example #29
Source File: ChecksumFs.java    From hadoop with Apache License 2.0
public ChecksumFSOutputSummer(final ChecksumFs fs, final Path file, 
  final EnumSet<CreateFlag> createFlag,
  final FsPermission absolutePermission, final int bufferSize,
  final short replication, final long blockSize, 
  final Progressable progress, final ChecksumOpt checksumOpt,
  final boolean createParent) throws IOException {
  super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
      fs.getBytesPerSum()));

  // checksumOpt is passed down to the raw fs. Unless the raw fs implements
  // checksums internally, checksumOpt will be ignored.
  // If the raw fs does checksum internally, we will end up with
  // two layers of checksumming, i.e. checksumming the checksum file.
  this.datas = fs.getRawFs().createInternal(file, createFlag,
      absolutePermission, bufferSize, replication, blockSize, progress,
       checksumOpt,  createParent);
  
  // Now create the checksum file; adjust the buffer size
  int bytesPerSum = fs.getBytesPerSum();
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFs().createInternal(fs.getChecksumFile(file),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      absolutePermission, sumBufferSize, replication, blockSize, progress,
      checksumOpt, createParent);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
 
Example #30
Source File: MD5MD5CRC32FileChecksum.java    From hadoop with Apache License 2.0
public static DataChecksum.Type getCrcTypeFromAlgorithmName(String algorithm)
    throws IOException {
  if (algorithm.endsWith(DataChecksum.Type.CRC32.name())) {
    return DataChecksum.Type.CRC32;
  } else if (algorithm.endsWith(DataChecksum.Type.CRC32C.name())) {
    return DataChecksum.Type.CRC32C;
  }

  throw new IOException("Unknown checksum type in " + algorithm);
}
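
This helper relies only on the naming convention of the composite MD5-over-CRC file checksums: the algorithm name ends with the CRC type it was built from, so a name ending in "CRC32C" maps to DataChecksum.Type.CRC32C and anything else raises an IOException.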