Java Code Examples for org.apache.hadoop.hdfs.protocol.HdfsFileStatus#getFileEncryptionInfo()
The following examples show how to use
org.apache.hadoop.hdfs.protocol.HdfsFileStatus#getFileEncryptionInfo().
Each example is taken from an open-source project; the source file and license are noted above the snippet.
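Before the project examples, a minimal sketch of the call in isolation may help: getFileEncryptionInfo() returns the FileEncryptionInfo the NameNode attached to the file's status, or null when the file does not lie in an encryption zone. The probe class below is an illustrative assumption, not taken from any of the examples.

import java.io.IOException;

import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

public class EncryptionInfoProbe {
  /**
   * Illustrative helper (not from the examples below): reports whether the
   * file at src carries encryption metadata, i.e. lies in an encryption zone.
   */
  static boolean isEncrypted(DFSClient client, String src) throws IOException {
    HdfsFileStatus stat = client.getFileInfo(src);
    if (stat == null) {
      return false; // no such file
    }
    FileEncryptionInfo feInfo = stat.getFileEncryptionInfo();
    return feInfo != null;
  }
}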
Example 1
Source File: DFSOutputStream.java From hadoop with Apache License 2.0
/** Construct a new output stream for append. */
private DFSOutputStream(DFSClient dfsClient, String src,
    EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock,
    HdfsFileStatus stat, DataChecksum checksum) throws IOException {
  this(dfsClient, src, progress, stat, checksum);
  initialFileSize = stat.getLen(); // length of file when opened
  this.shouldSyncBlock = flags.contains(CreateFlag.SYNC_BLOCK);

  boolean toNewBlock = flags.contains(CreateFlag.NEW_BLOCK);

  // The last partial block of the file has to be filled.
  if (!toNewBlock && lastBlock != null) {
    // indicate that we are appending to an existing block
    bytesCurBlock = lastBlock.getBlockSize();
    streamer = new DataStreamer(lastBlock, stat, bytesPerChecksum);
  } else {
    computePacketChunkSize(dfsClient.getConf().writePacketSize,
        bytesPerChecksum);
    streamer = new DataStreamer(stat,
        lastBlock != null ? lastBlock.getBlock() : null);
  }
  this.fileEncryptionInfo = stat.getFileEncryptionInfo();
}
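The append path simply copies the encryption metadata off the HdfsFileStatus the NameNode returned for the file being reopened, storing it in the stream's fileEncryptionInfo field so the client can later wrap the stream for transparent encryption with the file's existing keys.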
Example 2
Source File: DFSOutputStream.java From big-c with Apache License 2.0
(Code identical to Example 1: big-c carries the same DFSOutputStream append constructor.)
Example 3
Source File: DFSOutputStream.java From hadoop with Apache License 2.0
private DFSOutputStream(DFSClient dfsClient, String src, Progressable progress,
    HdfsFileStatus stat, DataChecksum checksum) throws IOException {
  super(getChecksum4Compute(checksum, stat));
  this.dfsClient = dfsClient;
  this.src = src;
  this.fileId = stat.getFileId();
  this.blockSize = stat.getBlockSize();
  this.blockReplication = stat.getReplication();
  this.fileEncryptionInfo = stat.getFileEncryptionInfo();
  this.progress = progress;
  this.cachingStrategy = new AtomicReference<CachingStrategy>(
      dfsClient.getDefaultWriteCachingStrategy());
  if ((progress != null) && DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug(
        "Set non-null progress callback on DFSOutputStream " + src);
  }

  this.bytesPerChecksum = checksum.getBytesPerChecksum();
  if (bytesPerChecksum <= 0) {
    throw new HadoopIllegalArgumentException(
        "Invalid value: bytesPerChecksum = " + bytesPerChecksum + " <= 0");
  }
  if (blockSize % bytesPerChecksum != 0) {
    throw new HadoopIllegalArgumentException("Invalid values: "
        + DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY + " (=" + bytesPerChecksum
        + ") must divide block size (=" + blockSize + ").");
  }
  this.checksum4WriteBlock = checksum;

  this.dfsclientSlowLogThresholdMs =
      dfsClient.getConf().dfsclientSlowIoWarningThresholdMs;
  this.byteArrayManager = dfsClient.getClientContext().getByteArrayManager();
}
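This is the base constructor that the append variant in Example 1 delegates to. It caches getFileEncryptionInfo() in a field alongside block size and replication, so the encryption metadata stays available for the lifetime of the stream without further NameNode round trips.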
Example 4
Source File: PBHelper.java From hadoop with Apache License 2.0
public static HdfsFileStatusProto convert(HdfsFileStatus fs) {
  if (fs == null)
    return null;
  FileType fType = FileType.IS_FILE;
  if (fs.isDir()) {
    fType = FileType.IS_DIR;
  } else if (fs.isSymlink()) {
    fType = FileType.IS_SYMLINK;
  }

  HdfsFileStatusProto.Builder builder = HdfsFileStatusProto.newBuilder().
      setLength(fs.getLen()).
      setFileType(fType).
      setBlockReplication(fs.getReplication()).
      setBlocksize(fs.getBlockSize()).
      setModificationTime(fs.getModificationTime()).
      setAccessTime(fs.getAccessTime()).
      setPermission(PBHelper.convert(fs.getPermission())).
      setOwner(fs.getOwner()).
      setGroup(fs.getGroup()).
      setFileId(fs.getFileId()).
      setChildrenNum(fs.getChildrenNum()).
      setPath(ByteString.copyFrom(fs.getLocalNameInBytes())).
      setStoragePolicy(fs.getStoragePolicy());
  if (fs.isSymlink()) {
    builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
  }
  if (fs.getFileEncryptionInfo() != null) {
    builder.setFileEncryptionInfo(convert(fs.getFileEncryptionInfo()));
  }
  if (fs instanceof HdfsLocatedFileStatus) {
    final HdfsLocatedFileStatus lfs = (HdfsLocatedFileStatus) fs;
    LocatedBlocks locations = lfs.getBlockLocations();
    if (locations != null) {
      builder.setLocations(PBHelper.convert(locations));
    }
  }
  return builder.build();
}
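In this protobuf conversion the encryption info is treated as an optional field: it is copied into the HdfsFileStatusProto only when getFileEncryptionInfo() returns non-null, so statuses for unencrypted files carry no encryption bytes on the wire.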
Example 5
Source File: DFSOutputStream.java From big-c with Apache License 2.0
(Code identical to Example 3: the same base DFSOutputStream constructor, unchanged in big-c.)
Example 6
Source File: PBHelper.java From big-c with Apache License 2.0
(Code identical to Example 4: the same PBHelper#convert method, unchanged in big-c.)
Example 7
Source File: FanOutOneBlockAsyncDFSOutputSaslHelper.java From hbase with Apache License 2.0
static Encryptor createEncryptor(Configuration conf, HdfsFileStatus stat,
    DFSClient client) throws IOException {
  FileEncryptionInfo feInfo = stat.getFileEncryptionInfo();
  if (feInfo == null) {
    return null;
  }
  return TRANSPARENT_CRYPTO_HELPER.createEncryptor(conf, feInfo, client);
}
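Here HBase uses getFileEncryptionInfo() as a gate: the helper returns null when the file has no encryption info, which lets the caller skip client-side encryption entirely for files outside encryption zones and only set up an Encryptor when the NameNode actually reports encryption metadata.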