Java Code Examples for org.apache.hadoop.fs.Options.ChecksumOpt

The following examples show how to use org.apache.hadoop.fs.Options.ChecksumOpt. These examples are extracted from open source projects; the source project, source file, and license are noted above each example.
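ChecksumOpt pairs a checksum type (DataChecksum.Type) with a bytes-per-checksum value, and the create-style methods in the examples below accept it to control how a file's checksums are computed. As a minimal, hedged sketch of the basic pattern (the path, buffer size, and checksum values are illustrative assumptions, not taken from the examples):

import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.DataChecksum;

public class ChecksumOptUsage {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Request CRC32C checksums, one checksum per 512 bytes of data.
    ChecksumOpt checksumOpt = new ChecksumOpt(DataChecksum.Type.CRC32C, 512);

    Path file = new Path("/tmp/checksum-opt-demo");   // illustrative path
    FSDataOutputStream out = fs.create(file,
        FsPermission.getFileDefault(),
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        4096,                                // buffer size
        fs.getDefaultReplication(file),
        fs.getDefaultBlockSize(file),
        null,                                // no Progressable
        checksumOpt);
    try {
      out.writeUTF("hello");
    } finally {
      out.close();
    }
  }
}

Passing null instead of a ChecksumOpt leaves the file system's configured checksum defaults in effect; Example 10 below shows how HDFS fills in the missing pieces.
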
Example 1
Source Project: hadoop   Source File: DFSClient.java    License: Apache License 2.0
/**
 * Same as {@link #create(String, FsPermission, EnumSet, boolean, short, long,
 * Progressable, int, ChecksumOpt)} with the addition of favoredNodes, which is
 * a hint to where the namenode should place the file blocks.
 * The favored nodes hint is not persisted in HDFS, so it may be honored
 * only at creation time; HDFS may later move the blocks away from the
 * favored nodes during balancing or replication. A value of null means
 * no favored nodes for this create.
 */
public DFSOutputStream create(String src, 
                           FsPermission permission,
                           EnumSet<CreateFlag> flag, 
                           boolean createParent,
                           short replication,
                           long blockSize,
                           Progressable progress,
                           int buffersize,
                           ChecksumOpt checksumOpt,
                           InetSocketAddress[] favoredNodes) throws IOException {
  checkOpen();
  if (permission == null) {
    permission = FsPermission.getFileDefault();
  }
  FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
  if (LOG.isDebugEnabled()) {
    LOG.debug(src + ": masked=" + masked);
  }
  final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
      src, masked, flag, createParent, replication, blockSize, progress,
      buffersize, dfsClientConf.createChecksum(checksumOpt),
      getFavoredNodesStr(favoredNodes));
  beginFileLease(result.getFileId(), result);
  return result;
}
 
Example 2
Source Project: hadoop   Source File: DFSClient.java    License: Apache License 2.0
/**
 * Same as {@link #create(String, FsPermission, EnumSet, short, long,
 * Progressable, int, ChecksumOpt)} except that the permission
 * is absolute (i.e. it has already been masked with the umask).
 */
public DFSOutputStream primitiveCreate(String src, 
                           FsPermission absPermission,
                           EnumSet<CreateFlag> flag,
                           boolean createParent,
                           short replication,
                           long blockSize,
                           Progressable progress,
                           int buffersize,
                           ChecksumOpt checksumOpt)
    throws IOException, UnresolvedLinkException {
  checkOpen();
  CreateFlag.validate(flag);
  DFSOutputStream result = primitiveAppend(src, flag, buffersize, progress);
  if (result == null) {
    DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
    result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
        flag, createParent, replication, blockSize, progress, buffersize,
        checksum, null);
  }
  beginFileLease(result.getFileId(), result);
  return result;
}
 
Example 3
Source Project: hadoop   Source File: FileSystem.java    License: Apache License 2.0
@Deprecated
protected FSDataOutputStream primitiveCreate(Path f,
   FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
   short replication, long blockSize, Progressable progress,
   ChecksumOpt checksumOpt) throws IOException {

  boolean pathExists = exists(f);
  CreateFlag.validate(f, pathExists, flag);
  
  // The default impl assumes that neither permissions nor bytesPerChecksum
  // matter here, hence calling the regular create is good enough.
  // FSs that implement permissions should override this.

  if (pathExists && flag.contains(CreateFlag.APPEND)) {
    return append(f, bufferSize, progress);
  }
  
  return this.create(f, absolutePermission,
      flag.contains(CreateFlag.OVERWRITE), bufferSize, replication,
      blockSize, progress);
}
 
Example 4
Source Project: hadoop   Source File: ViewFs.java    License: Apache License 2.0
@Override
public FSDataOutputStream createInternal(final Path f,
    final EnumSet<CreateFlag> flag, final FsPermission absolutePermission,
    final int bufferSize, final short replication, final long blockSize,
    final Progressable progress, final ChecksumOpt checksumOpt,
    final boolean createParent) throws AccessControlException,
    FileAlreadyExistsException, FileNotFoundException,
    ParentNotDirectoryException, UnsupportedFileSystemException,
    UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res;
  try {
    res = fsState.resolve(getUriPath(f), false);
  } catch (FileNotFoundException e) {
    if (createParent) {
      throw readOnlyMountTable("create", f);
    } else {
      throw e;
    }
  }
  assert(res.remainingPath != null);
  return res.targetFileSystem.createInternal(res.remainingPath, flag,
      absolutePermission, bufferSize, replication,
      blockSize, progress, checksumOpt,
      createParent);
}
 
Example 5
Source Project: big-c   Source File: DFSClient.java    License: Apache License 2.0
/**
 * Same as {@link #create(String, FsPermission, EnumSet, boolean, short, long,
 * Progressable, int, ChecksumOpt)} with the addition of favoredNodes, which is
 * a hint to where the namenode should place the file blocks.
 * The favored nodes hint is not persisted in HDFS, so it may be honored
 * only at creation time; HDFS may later move the blocks away from the
 * favored nodes during balancing or replication. A value of null means
 * no favored nodes for this create.
 */
public DFSOutputStream create(String src, 
                           FsPermission permission,
                           EnumSet<CreateFlag> flag, 
                           boolean createParent,
                           short replication,
                           long blockSize,
                           Progressable progress,
                           int buffersize,
                           ChecksumOpt checksumOpt,
                           InetSocketAddress[] favoredNodes) throws IOException {
  checkOpen();
  if (permission == null) {
    permission = FsPermission.getFileDefault();
  }
  FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
  if (LOG.isDebugEnabled()) {
    LOG.debug(src + ": masked=" + masked);
  }
  final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
      src, masked, flag, createParent, replication, blockSize, progress,
      buffersize, dfsClientConf.createChecksum(checksumOpt),
      getFavoredNodesStr(favoredNodes));
  beginFileLease(result.getFileId(), result);
  return result;
}
 
Example 6
Source Project: big-c   Source File: DFSClient.java    License: Apache License 2.0
/**
 * Same as {@link #create(String, FsPermission, EnumSet, short, long,
 * Progressable, int, ChecksumOpt)} except that the permission
 * is absolute (i.e. it has already been masked with the umask).
 */
public DFSOutputStream primitiveCreate(String src, 
                           FsPermission absPermission,
                           EnumSet<CreateFlag> flag,
                           boolean createParent,
                           short replication,
                           long blockSize,
                           Progressable progress,
                           int buffersize,
                           ChecksumOpt checksumOpt)
    throws IOException, UnresolvedLinkException {
  checkOpen();
  CreateFlag.validate(flag);
  DFSOutputStream result = primitiveAppend(src, flag, buffersize, progress);
  if (result == null) {
    DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
    result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
        flag, createParent, replication, blockSize, progress, buffersize,
        checksum, null);
  }
  beginFileLease(result.getFileId(), result);
  return result;
}
 
Example 7
Source Project: big-c   Source File: FileSystem.java    License: Apache License 2.0
@Deprecated
protected FSDataOutputStream primitiveCreate(Path f,
   FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
   short replication, long blockSize, Progressable progress,
   ChecksumOpt checksumOpt) throws IOException {

  boolean pathExists = exists(f);
  CreateFlag.validate(f, pathExists, flag);
  
  // The default impl assumes that neither permissions nor bytesPerChecksum
  // matter here, hence calling the regular create is good enough.
  // FSs that implement permissions should override this.

  if (pathExists && flag.contains(CreateFlag.APPEND)) {
    return append(f, bufferSize, progress);
  }
  
  return this.create(f, absolutePermission,
      flag.contains(CreateFlag.OVERWRITE), bufferSize, replication,
      blockSize, progress);
}
 
Example 8
Source Project: big-c   Source File: ViewFs.java    License: Apache License 2.0
@Override
public FSDataOutputStream createInternal(final Path f,
    final EnumSet<CreateFlag> flag, final FsPermission absolutePermission,
    final int bufferSize, final short replication, final long blockSize,
    final Progressable progress, final ChecksumOpt checksumOpt,
    final boolean createParent) throws AccessControlException,
    FileAlreadyExistsException, FileNotFoundException,
    ParentNotDirectoryException, UnsupportedFileSystemException,
    UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res;
  try {
    res = fsState.resolve(getUriPath(f), false);
  } catch (FileNotFoundException e) {
    if (createParent) {
      throw readOnlyMountTable("create", f);
    } else {
      throw e;
    }
  }
  assert(res.remainingPath != null);
  return res.targetFileSystem.createInternal(res.remainingPath, flag,
      absolutePermission, bufferSize, replication,
      blockSize, progress, checksumOpt,
      createParent);
}
 
Example 9
Source Project: hadoop   Source File: Hdfs.java    License: Apache License 2.0
@Override
public HdfsDataOutputStream createInternal(Path f,
    EnumSet<CreateFlag> createFlag, FsPermission absolutePermission,
    int bufferSize, short replication, long blockSize, Progressable progress,
    ChecksumOpt checksumOpt, boolean createParent) throws IOException {

  final DFSOutputStream dfsos = dfs.primitiveCreate(getUriPath(f),
    absolutePermission, createFlag, createParent, replication, blockSize,
    progress, bufferSize, checksumOpt);
  return dfs.createWrappedOutputStream(dfsos, statistics,
      dfsos.getInitialLen());
}
 
Example 10
Source Project: hadoop   Source File: DFSClient.java    License: Apache License 2.0
private DataChecksum createChecksum(ChecksumOpt userOpt) {
  // Fill in any missing field with the default.
  ChecksumOpt myOpt = ChecksumOpt.processChecksumOpt(
      defaultChecksumOpt, userOpt);
  DataChecksum dataChecksum = DataChecksum.newDataChecksum(
      myOpt.getChecksumType(),
      myOpt.getBytesPerChecksum());
  if (dataChecksum == null) {
    throw new HadoopIllegalArgumentException("Invalid checksum type: userOpt="
        + userOpt + ", default=" + defaultChecksumOpt
        + ", effective=null");
  }
  return dataChecksum;
}
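
The merge that createChecksum relies on can be exercised on its own. A small, hedged sketch (the option values are illustrative; the two-argument ChecksumOpt constructor is the one used in Example 14, and the null-handling mirrors the null that Example 13 may legitimately return):

import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.util.DataChecksum;

public class ProcessChecksumOptSketch {
  public static void main(String[] args) {
    // Cluster-wide default: CRC32C with 512 bytes per checksum.
    ChecksumOpt defaults = new ChecksumOpt(DataChecksum.Type.CRC32C, 512);

    // Caller explicitly asks for CRC32 with 512 bytes per checksum.
    ChecksumOpt userOpt = new ChecksumOpt(DataChecksum.Type.CRC32, 512);

    // Fields the caller specifies win; anything left unspecified falls back
    // to the defaults. Here the fully specified userOpt gives CRC32 / 512.
    ChecksumOpt effective = ChecksumOpt.processChecksumOpt(defaults, userOpt);
    System.out.println(effective.getChecksumType() + " / "
        + effective.getBytesPerChecksum());

    // A null user option yields the defaults.
    ChecksumOpt fromDefaults = ChecksumOpt.processChecksumOpt(defaults, null);
    System.out.println(fromDefaults.getChecksumType() + " / "
        + fromDefaults.getBytesPerChecksum());
  }
}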
 
Example 11
Source Project: hadoop   Source File: DFSClient.java    License: Apache License 2.0
/**
 * Call {@link #create(String, FsPermission, EnumSet, boolean, short, 
 * long, Progressable, int, ChecksumOpt)} with <code>createParent</code>
 *  set to true.
 */
public DFSOutputStream create(String src, 
                           FsPermission permission,
                           EnumSet<CreateFlag> flag, 
                           short replication,
                           long blockSize,
                           Progressable progress,
                           int buffersize,
                           ChecksumOpt checksumOpt)
    throws IOException {
  return create(src, permission, flag, true,
      replication, blockSize, progress, buffersize, checksumOpt, null);
}
 
Example 12
Source Project: hadoop   Source File: DistributedFileSystem.java    License: Apache License 2.0
@Override
protected HdfsDataOutputStream primitiveCreate(Path f,
  FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
  short replication, long blockSize, Progressable progress,
  ChecksumOpt checksumOpt) throws IOException {
  statistics.incrementWriteOps(1);
  final DFSOutputStream dfsos = dfs.primitiveCreate(
    getPathName(fixRelativePart(f)),
    absolutePermission, flag, true, replication, blockSize,
    progress, bufferSize, checksumOpt);
  return dfs.createWrappedOutputStream(dfsos, statistics);
}
 
Example 13
Source Project: hadoop   Source File: RetriableFileCopyCommand.java    License: Apache License 2.0
/**
 * @return the checksum option of the source checksum if the checksum type
 *         should be preserved, or null otherwise
 */
private ChecksumOpt getChecksumOpt(EnumSet<FileAttribute> fileAttributes,
    FileChecksum sourceChecksum) {
  if (fileAttributes.contains(FileAttribute.CHECKSUMTYPE)
      && sourceChecksum != null) {
    return sourceChecksum.getChecksumOpt();
  }
  return null;
}
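
The returned option is typically handed back to a create call on the target file system so that the copy keeps the source's checksum type. A hedged sketch of that general pattern, independent of DistCp's actual wiring (the helper name, permissions, and buffer size are illustrative assumptions):

import java.util.EnumSet;

import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;

public class PreserveChecksumTypeSketch {
  public static void copyPreservingChecksumType(FileSystem srcFs, Path src,
      FileSystem dstFs, Path dst) throws Exception {
    // May be null if the source file system does not expose file checksums.
    FileChecksum sourceChecksum = srcFs.getFileChecksum(src);
    ChecksumOpt checksumOpt =
        (sourceChecksum != null) ? sourceChecksum.getChecksumOpt() : null;

    FSDataOutputStream out = dstFs.create(dst,
        FsPermission.getFileDefault(),
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        4096,                                 // buffer size
        dstFs.getDefaultReplication(dst),
        dstFs.getDefaultBlockSize(dst),
        null,                                 // no progress callback
        checksumOpt);                         // null keeps the target defaults
    FSDataInputStream in = srcFs.open(src);
    // copyBytes with close=true closes both streams when it finishes.
    IOUtils.copyBytes(in, out, 4096, true);
  }
}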
 
Example 14
Source Project: hadoop   Source File: TestCopyMapper.java    License: Apache License 2.0
private static void createSourceDataWithDifferentChecksumType()
    throws Exception {
  mkdirs(SOURCE_PATH + "/1");
  mkdirs(SOURCE_PATH + "/2");
  mkdirs(SOURCE_PATH + "/2/3/4");
  mkdirs(SOURCE_PATH + "/2/3");
  mkdirs(SOURCE_PATH + "/5");
  touchFile(SOURCE_PATH + "/5/6", new ChecksumOpt(DataChecksum.Type.CRC32,
      512));
  mkdirs(SOURCE_PATH + "/7");
  mkdirs(SOURCE_PATH + "/7/8");
  touchFile(SOURCE_PATH + "/7/8/9", new ChecksumOpt(DataChecksum.Type.CRC32C,
      512));
}
 
Example 15
Source Project: hadoop   Source File: TestCopyMapper.java    License: Apache License 2.0
private static void touchFile(String path, boolean createMultipleBlocks,
    ChecksumOpt checksumOpt) throws Exception {
  FileSystem fs;
  DataOutputStream outputStream = null;
  try {
    fs = cluster.getFileSystem();
    final Path qualifiedPath = new Path(path).makeQualified(fs.getUri(),
        fs.getWorkingDirectory());
    final long blockSize = createMultipleBlocks ? NON_DEFAULT_BLOCK_SIZE : fs
        .getDefaultBlockSize(qualifiedPath) * 2;
    FsPermission permission = FsPermission.getFileDefault().applyUMask(
        FsPermission.getUMask(fs.getConf()));
    outputStream = fs.create(qualifiedPath, permission,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), 0,
        (short) (fs.getDefaultReplication(qualifiedPath) * 2), blockSize,
        null, checksumOpt);
    byte[] bytes = new byte[DEFAULT_FILE_SIZE];
    outputStream.write(bytes);
    long fileSize = DEFAULT_FILE_SIZE;
    if (createMultipleBlocks) {
      while (fileSize < 2*blockSize) {
        outputStream.write(bytes);
        outputStream.flush();
        fileSize += DEFAULT_FILE_SIZE;
      }
    }
    pathList.add(qualifiedPath);
    ++nFiles;

    FileStatus fileStatus = fs.getFileStatus(qualifiedPath);
    System.out.println(fileStatus.getBlockSize());
    System.out.println(fileStatus.getReplication());
  }
  finally {
    IOUtils.cleanup(null, outputStream);
  }
}
 
Example 16
Source Project: hadoop   Source File: FilterFileSystem.java    License: Apache License 2.0
@Override
public FSDataOutputStream create(Path f,
      FsPermission permission,
      EnumSet<CreateFlag> flags,
      int bufferSize,
      short replication,
      long blockSize,
      Progressable progress,
      ChecksumOpt checksumOpt) throws IOException {
  return fs.create(f, permission,
    flags, bufferSize, replication, blockSize, progress, checksumOpt);
}
 
Example 17
Source Project: hadoop   Source File: FilterFileSystem.java    License: Apache License 2.0
@Override
protected FSDataOutputStream primitiveCreate(Path f,
    FsPermission absolutePermission, EnumSet<CreateFlag> flag,
    int bufferSize, short replication, long blockSize,
    Progressable progress, ChecksumOpt checksumOpt)
    throws IOException {
  return fs.primitiveCreate(f, absolutePermission, flag,
      bufferSize, replication, blockSize, progress, checksumOpt);
}
 
Example 18
Source Project: hadoop   Source File: FilterFs.java    License: Apache License 2.0
@Override
public FSDataOutputStream createInternal(Path f,
  EnumSet<CreateFlag> flag, FsPermission absolutePermission, int bufferSize,
  short replication, long blockSize, Progressable progress,
  ChecksumOpt checksumOpt, boolean createParent) 
    throws IOException, UnresolvedLinkException {
  checkPath(f);
  return myFs.createInternal(f, flag, absolutePermission, bufferSize,
      replication, blockSize, progress, checksumOpt, createParent);
}
 
Example 19
Source Project: hadoop   Source File: ChecksumFs.java    License: Apache License 2.0
public ChecksumFSOutputSummer(final ChecksumFs fs, final Path file, 
  final EnumSet<CreateFlag> createFlag,
  final FsPermission absolutePermission, final int bufferSize,
  final short replication, final long blockSize, 
  final Progressable progress, final ChecksumOpt checksumOpt,
  final boolean createParent) throws IOException {
  super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
      fs.getBytesPerSum()));

  // checksumOpt is passed down to the raw fs. Unless the raw fs implements
  // checksums internally, checksumOpt will be ignored. If the raw fs does
  // checksum internally, we will end up with two layers of checksumming,
  // i.e. checksumming the checksum file.
  this.datas = fs.getRawFs().createInternal(file, createFlag,
      absolutePermission, bufferSize, replication, blockSize, progress,
       checksumOpt,  createParent);
  
  // Now create the checksum file; adjust the buffer size.
  int bytesPerSum = fs.getBytesPerSum();
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFs().createInternal(fs.getChecksumFile(file),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      absolutePermission, sumBufferSize, replication, blockSize, progress,
      checksumOpt, createParent);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
 
Example 20
Source Project: hadoop   Source File: ChecksumFs.java    License: Apache License 2.0
@Override
public FSDataOutputStream createInternal(Path f,
    EnumSet<CreateFlag> createFlag, FsPermission absolutePermission,
    int bufferSize, short replication, long blockSize, Progressable progress,
    ChecksumOpt checksumOpt, boolean createParent) throws IOException {
  final FSDataOutputStream out = new FSDataOutputStream(
      new ChecksumFSOutputSummer(this, f, createFlag, absolutePermission,
          bufferSize, replication, blockSize, progress,
          checksumOpt,  createParent), null);
  return out;
}
 
Example 21
Source Project: hadoop   Source File: DelegateToFileSystem.java    License: Apache License 2.0
@Override
@SuppressWarnings("deprecation") // call to primitiveCreate
public FSDataOutputStream createInternal(Path f,
    EnumSet<CreateFlag> flag, FsPermission absolutePermission, int bufferSize,
    short replication, long blockSize, Progressable progress,
    ChecksumOpt checksumOpt, boolean createParent) throws IOException {
  checkPath(f);
  
  // The default impl assumes that permissions do not matter here,
  // so calling the regular create is good enough.
  // FSs that implement permissions should override this.

  if (!createParent) { // parent must exist.
    // since this.create makes parent dirs automatically
    // we must throw exception if parent does not exist.
    final FileStatus stat = getFileStatus(f.getParent());
    if (stat == null) {
      throw new FileNotFoundException("Missing parent:" + f);
    }
    if (!stat.isDirectory()) {
        throw new ParentNotDirectoryException("parent is not a dir:" + f);
    }
    // parent does exist - go ahead with create of file.
  }
  return fsImpl.primitiveCreate(f, absolutePermission, flag,
      bufferSize, replication, blockSize, progress, checksumOpt);
}
 
Example 22
Source Project: hadoop   Source File: ChRootedFs.java    License: Apache License 2.0
@Override
public FSDataOutputStream createInternal(final Path f,
    final EnumSet<CreateFlag> flag, final FsPermission absolutePermission,
    final int bufferSize, final short replication, final long blockSize,
    final Progressable progress, final ChecksumOpt checksumOpt,
    final boolean createParent) throws IOException, UnresolvedLinkException {
  return myFs.createInternal(fullPath(f), flag,
      absolutePermission, bufferSize,
      replication, blockSize, progress, checksumOpt, createParent);
}
 
Example 23
Source Project: hadoop   Source File: ViewFs.java    License: Apache License 2.0
@Override
public FSDataOutputStream createInternal(final Path f,
    final EnumSet<CreateFlag> flag, final FsPermission absolutePermission,
    final int bufferSize, final short replication, final long blockSize,
    final Progressable progress, final ChecksumOpt checksumOpt,
    final boolean createParent) throws AccessControlException,
    FileAlreadyExistsException, FileNotFoundException,
    ParentNotDirectoryException, UnsupportedFileSystemException,
    UnresolvedLinkException, IOException {
  throw readOnlyMountTable("create", f);
}
 
Example 24
Source Project: hadoop   Source File: AbstractFileSystem.java    License: Apache License 2.0
/**
 * The specification of this method matches that of
 * {@link #create(Path, EnumSet, Options.CreateOpts...)} except that the opts
 * have been declared explicitly.
 */
public abstract FSDataOutputStream createInternal(Path f,
    EnumSet<CreateFlag> flag, FsPermission absolutePermission,
    int bufferSize, short replication, long blockSize, Progressable progress,
    ChecksumOpt checksumOpt, boolean createParent)
    throws AccessControlException, FileAlreadyExistsException,
    FileNotFoundException, ParentNotDirectoryException,
    UnsupportedFileSystemException, UnresolvedLinkException, IOException;
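
Callers normally reach createInternal through the opts-based create referenced in the javadoc above. A hedged sketch of passing a ChecksumOpt through that varargs form via FileContext (assuming the Options.CreateOpts.checksumParam factory; the path is illustrative):

import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.DataChecksum;

public class CreateOptsChecksumSketch {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext(new Configuration());

    // The ChecksumOpt travels as one of the CreateOpts; the file context
    // resolves the opts and ends up calling createInternal with explicit
    // parameters like the ones declared in the abstract method above.
    FSDataOutputStream out = fc.create(
        new Path("/tmp/create-opts-demo"),          // illustrative path
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        CreateOpts.checksumParam(
            new ChecksumOpt(DataChecksum.Type.CRC32C, 512)),
        CreateOpts.createParent());
    out.close();
  }
}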
 
Example 25
Source Project: hadoop   Source File: TestFilterFileSystem.java    License: Apache License 2.0
public FSDataOutputStream create(Path f,
    FsPermission permission,
    EnumSet<CreateFlag> flags,
    int bufferSize,
    short replication,
    long blockSize,
    Progressable progress,
    ChecksumOpt checksumOpt) throws IOException {
  return null;
}
 
Example 26
Source Project: hadoop   Source File: TestAfsCheckPath.java    License: Apache License 2.0
@Override
public FSDataOutputStream createInternal(Path f, EnumSet<CreateFlag> flag,
    FsPermission absolutePermission, int bufferSize, short replication,
    long blockSize, Progressable progress, ChecksumOpt checksumOpt,
    boolean createParent) throws IOException {
  // deliberately empty
  return null;
}
 
Example 27
Source Project: dremio-oss   Source File: HadoopFileSystemWrapper.java    License: Apache License 2.0
@Override
public FSDataOutputStream create(Path f, FsPermission permission, EnumSet<CreateFlag> flags, int bufferSize,
    short replication, long blockSize, Progressable progress, ChecksumOpt checksumOpt) throws IOException {
  try (WaitRecorder recorder = OperatorStats.getWaitRecorder(operatorStats)) {
    return newFSDataOutputStreamWrapper(underlyingFs.create(f, permission, flags, bufferSize, replication,
        blockSize, progress, checksumOpt));
  } catch(FSError e) {
    throw propagateFSError(e);
  }
}
 
Example 28
Source Project: big-c   Source File: Hdfs.java    License: Apache License 2.0
@Override
public HdfsDataOutputStream createInternal(Path f,
    EnumSet<CreateFlag> createFlag, FsPermission absolutePermission,
    int bufferSize, short replication, long blockSize, Progressable progress,
    ChecksumOpt checksumOpt, boolean createParent) throws IOException {

  final DFSOutputStream dfsos = dfs.primitiveCreate(getUriPath(f),
    absolutePermission, createFlag, createParent, replication, blockSize,
    progress, bufferSize, checksumOpt);
  return dfs.createWrappedOutputStream(dfsos, statistics,
      dfsos.getInitialLen());
}
 
Example 29
Source Project: big-c   Source File: DFSClient.java    License: Apache License 2.0
private DataChecksum createChecksum(ChecksumOpt userOpt) {
  // Fill in any missing field with the default.
  ChecksumOpt myOpt = ChecksumOpt.processChecksumOpt(
      defaultChecksumOpt, userOpt);
  DataChecksum dataChecksum = DataChecksum.newDataChecksum(
      myOpt.getChecksumType(),
      myOpt.getBytesPerChecksum());
  if (dataChecksum == null) {
    throw new HadoopIllegalArgumentException("Invalid checksum type: userOpt="
        + userOpt + ", default=" + defaultChecksumOpt
        + ", effective=null");
  }
  return dataChecksum;
}
 
Example 30
Source Project: big-c   Source File: DFSClient.java    License: Apache License 2.0
/**
 * Call {@link #create(String, FsPermission, EnumSet, boolean, short, 
 * long, Progressable, int, ChecksumOpt)} with <code>createParent</code>
 *  set to true.
 */
public DFSOutputStream create(String src, 
                           FsPermission permission,
                           EnumSet<CreateFlag> flag, 
                           short replication,
                           long blockSize,
                           Progressable progress,
                           int buffersize,
                           ChecksumOpt checksumOpt)
    throws IOException {
  return create(src, permission, flag, true,
      replication, blockSize, progress, buffersize, checksumOpt, null);
}