Java Code Examples for org.apache.hadoop.fs.FileStatus#getGroup()

The following examples show how to use org.apache.hadoop.fs.FileStatus#getGroup(). You can vote up the examples you like or vote down the ones you don't, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: DistCpV1.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Copies selected attributes (owner, group, permission, times) from the
 * source status onto the destination path, limited to the attributes in
 * the given set and skipping filesystem calls when values already match.
 */
private static void updateDestStatus(FileStatus src, FileStatus dst,
    EnumSet<FileAttribute> preseved, FileSystem destFileSys
    ) throws IOException {
  final boolean ownerDiffers = preseved.contains(FileAttribute.USER)
      && !src.getOwner().equals(dst.getOwner());
  final boolean groupDiffers = preseved.contains(FileAttribute.GROUP)
      && !src.getGroup().equals(dst.getGroup());
  // Issue a single chown only when at least one of the two needs updating;
  // the unchanged field is passed as null, exactly as the original did.
  if (ownerDiffers || groupDiffers) {
    destFileSys.setOwner(dst.getPath(),
        ownerDiffers ? src.getOwner() : null,
        groupDiffers ? src.getGroup() : null);
  }
  if (preseved.contains(FileAttribute.PERMISSION)
      && !src.getPermission().equals(dst.getPermission())) {
    destFileSys.setPermission(dst.getPath(), src.getPermission());
  }
  if (preseved.contains(FileAttribute.TIMES)) {
    destFileSys.setTimes(dst.getPath(), src.getModificationTime(),
        src.getAccessTime());
  }
}
 
Example 2
Source File: TarArchiveInputStreamDataWriterTest.java    From incubator-gobblin with Apache License 2.0 6 votes vote down vote up
/**
 * Finds the gzip-compressed test file {@code filePath} on the classpath and
 * returns it as a {@link FileAwareInputStream}, decompressed through an
 * {@link UnGzipConverter}.
 */
private FileAwareInputStream getCompressedInputStream(final String filePath, final String newFileName) throws Exception {
  UnGzipConverter unGzipConverter = new UnGzipConverter();
  FileSystem localFs = FileSystem.getLocal(new Configuration());

  String resourcePath = getClass().getClassLoader().getResource(filePath).getFile();
  FileStatus tempDirStatus = localFs.getFileStatus(testTempPath);

  // Destination gets the temp dir's owner/group with fully-open permissions.
  OwnerAndPermission ownerAndPermission = new OwnerAndPermission(
      tempDirStatus.getOwner(), tempDirStatus.getGroup(),
      new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
  CopyableFile copyableFile = CopyableFileUtils.getTestCopyableFile(
      filePath, new Path(testTempPath, newFileName).toString(), newFileName, ownerAndPermission);

  FileAwareInputStream inputStreamRecord = FileAwareInputStream.builder()
      .file(copyableFile)
      .inputStream(localFs.open(new Path(resourcePath)))
      .build();

  Iterable<FileAwareInputStream> converted =
      unGzipConverter.convertRecord("outputSchema", inputStreamRecord, new WorkUnitState());

  return Iterables.getFirst(converted, null);
}
 
Example 3
Source File: DistCp.java    From hadoop-gpu with Apache License 2.0 6 votes vote down vote up
/**
 * Brings the destination file's owner, group, and permission in line with
 * the source, restricted to the attributes listed in {@code preseved}; no
 * filesystem call is made for an attribute whose values already agree.
 */
private static void updatePermissions(FileStatus src, FileStatus dst,
    EnumSet<FileAttribute> preseved, FileSystem destFileSys
    ) throws IOException {
  String newOwner = null;
  String newGroup = null;
  if (preseved.contains(FileAttribute.USER)) {
    String srcOwner = src.getOwner();
    if (!srcOwner.equals(dst.getOwner())) {
      newOwner = srcOwner;
    }
  }
  if (preseved.contains(FileAttribute.GROUP)) {
    String srcGroup = src.getGroup();
    if (!srcGroup.equals(dst.getGroup())) {
      newGroup = srcGroup;
    }
  }
  // Single chown covers both fields; a field left null is not being changed.
  if (newOwner != null || newGroup != null) {
    destFileSys.setOwner(dst.getPath(), newOwner, newGroup);
  }
  if (preseved.contains(FileAttribute.PERMISSION)
      && !src.getPermission().equals(dst.getPermission())) {
    destFileSys.setPermission(dst.getPath(), src.getPermission());
  }
}
 
Example 4
Source File: NTFSLocalFileSystem.java    From elasticsearch-hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Returns the status of {@code f}, substituting fixed permissions when the
 * path is the Hive scratch directory (work-around for Hive 0.14 on Windows).
 */
@Override
public FileStatus getFileStatus(Path f) throws IOException {
    // The RawFS underneath computes permissions dynamically, so we copy the
    // status into a plain FileStatus whose permission field we control.
    FileStatus status = super.getFileStatus(f);
    if (!SCRATCH_DIR.equals(f.toString())) {
        return status;
    }
    System.out.println("Faking scratch dir permissions on Windows...");
    // Rebuild the status verbatim except for the permission field; mutating
    // the returned status directly does not work because the RawFS
    // implementation resolves permissions dynamically on lookup.
    return new FileStatus(status.getLen(), status.isDir(), status.getReplication(),
            status.getBlockSize(), status.getModificationTime(), status.getAccessTime(),
            SCRATCH_DIR_PERMS, status.getOwner(), status.getGroup(), status.getPath());
}
 
Example 5
Source File: TestSecureIOUtils.java    From big-c with Apache License 2.0 6 votes vote down vote up
/**
 * Creates three small on-disk test files under {@code target/} containing
 * the bytes of {@code "hello"}, then records the owner and group of the
 * first file for later assertions.
 *
 * <p>Fix over the original: the {@link FileOutputStream} was closed
 * manually, leaking the descriptor if {@code write()} threw; it is now
 * closed via try-with-resources.
 */
@BeforeClass
public static void makeTestFile() throws Exception {
  Configuration conf = new Configuration();
  fs = FileSystem.getLocal(conf).getRaw();
  testFilePathIs =
      new File((new Path("target", TestSecureIOUtils.class.getSimpleName()
          + "1")).toUri().getRawPath());
  testFilePathRaf =
      new File((new Path("target", TestSecureIOUtils.class.getSimpleName()
          + "2")).toUri().getRawPath());
  testFilePathFadis =
      new File((new Path("target", TestSecureIOUtils.class.getSimpleName()
          + "3")).toUri().getRawPath());
  for (File f : new File[] { testFilePathIs, testFilePathRaf,
      testFilePathFadis }) {
    // try-with-resources guarantees the stream closes even if write() fails.
    try (FileOutputStream fos = new FileOutputStream(f)) {
      fos.write("hello".getBytes("UTF-8"));
    }
  }

  FileStatus stat = fs.getFileStatus(
      new Path(testFilePathIs.toString()));
  // Owner and group are the same for all three files, so one stat suffices.
  realOwner = stat.getOwner();
  realGroup = stat.getGroup();
}
 
Example 6
Source File: TestSecureIOUtils.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Creates three small on-disk test files under {@code target/} containing
 * the bytes of {@code "hello"}, then records the owner and group of the
 * first file for later assertions.
 *
 * <p>Fix over the original: the {@link FileOutputStream} was closed
 * manually, leaking the descriptor if {@code write()} threw; it is now
 * closed via try-with-resources.
 */
@BeforeClass
public static void makeTestFile() throws Exception {
  Configuration conf = new Configuration();
  fs = FileSystem.getLocal(conf).getRaw();
  testFilePathIs =
      new File((new Path("target", TestSecureIOUtils.class.getSimpleName()
          + "1")).toUri().getRawPath());
  testFilePathRaf =
      new File((new Path("target", TestSecureIOUtils.class.getSimpleName()
          + "2")).toUri().getRawPath());
  testFilePathFadis =
      new File((new Path("target", TestSecureIOUtils.class.getSimpleName()
          + "3")).toUri().getRawPath());
  for (File f : new File[] { testFilePathIs, testFilePathRaf,
      testFilePathFadis }) {
    // try-with-resources guarantees the stream closes even if write() fails.
    try (FileOutputStream fos = new FileOutputStream(f)) {
      fos.write("hello".getBytes("UTF-8"));
    }
  }

  FileStatus stat = fs.getFileStatus(
      new Path(testFilePathIs.toString()));
  // Owner and group are the same for all three files, so one stat suffices.
  realOwner = stat.getOwner();
  realGroup = stat.getGroup();
}
 
Example 7
Source File: Records.java    From Bats with Apache License 2.0 6 votes vote down vote up
/**
 * Builds a file-information record for the given workspace entry, copying
 * name, location, type, size, ownership, permission, and timestamps from
 * the supplied {@link FileStatus}.
 */
public File(String schemaName, WorkspaceSchemaFactory.WorkspaceSchema wsSchema, FileStatus fileStatus) {
  this.SCHEMA_NAME = schemaName;
  this.ROOT_SCHEMA_NAME = wsSchema.getSchemaPath().get(0);
  this.WORKSPACE_NAME = wsSchema.getName();
  this.FILE_NAME = fileStatus.getPath().getName();
  // Path relative to the workspace's default location; scheme and authority
  // are stripped from both sides so plain paths are being relativized.
  this.RELATIVE_PATH = Path.getPathWithoutSchemeAndAuthority(new Path(wsSchema.getDefaultLocation())).toUri()
    .relativize(Path.getPathWithoutSchemeAndAuthority(fileStatus.getPath()).toUri()).getPath();
  this.IS_DIRECTORY = fileStatus.isDirectory();
  this.IS_FILE = fileStatus.isFile();
  this.LENGTH = fileStatus.getLen();
  this.OWNER = fileStatus.getOwner();
  this.GROUP = fileStatus.getGroup();
  this.PERMISSION = fileStatus.getPermission().toString();
  // Timestamps pass through getTimestampWithReplacedZone — presumably a
  // time-zone normalization; confirm against that helper's definition.
  this.ACCESS_TIME = getTimestampWithReplacedZone(fileStatus.getAccessTime());
  this.MODIFICATION_TIME = getTimestampWithReplacedZone(fileStatus.getModificationTime());
}
 
Example 8
Source File: RemoteNodeFileSystem.java    From dremio-oss with Apache License 2.0 5 votes vote down vote up
/**
 * Converts a Hadoop {@link FileStatus} instance into a protobuf
 * {@link DFS.FileStatus}.
 *
 * @param status the Hadoop status instance to convert
 * @return a protobuf status instance
 * @throws IOException if resolving the symlink target fails
 */
static DFS.FileStatus toProtoFileStatus(FileStatus status) throws IOException {
  // Always-present scalar attributes are chained in one go; protobuf
  // builder setters return the builder itself, so no reassignment needed.
  final DFS.FileStatus.Builder result = DFS.FileStatus.newBuilder()
      .setLength(status.getLen())
      .setIsDirectory(status.isDirectory())
      .setBlockReplication(status.getReplication())
      .setBlockSize(status.getBlockSize())
      .setModificationTime(status.getModificationTime())
      .setAccessTime(status.getAccessTime());

  // Optional attributes: protobuf setters reject null, so each is guarded.
  if (status.getPath() != null) {
    result.setPath(status.getPath().toUri().getPath());
  }
  if (status.getPermission() != null) {
    result.setPermission(status.getPermission().toExtendedShort());
  }
  if (status.getOwner() != null) {
    result.setOwner(status.getOwner());
  }
  if (status.getGroup() != null) {
    result.setGroup(status.getGroup());
  }
  if (status.isSymlink()) {
    result.setSymlink(status.getSymlink().toString());
  }

  return result.build();
}
 
Example 9
Source File: ContainerFileSystem.java    From dremio-oss with Apache License 2.0 5 votes vote down vote up
/**
 * Transform remote file status to local.
 */
/**
 * Transform remote file status to local: every attribute is carried over
 * unchanged except the path, which is rewritten for the given container.
 */
private static FileStatus transform(FileStatus input, String containerName) {
  final Path localPath = transform(input.getPath(), containerName);
  return new FileStatus(
      input.getLen(), input.isDirectory(), input.getReplication(),
      input.getBlockSize(), input.getModificationTime(), input.getAccessTime(),
      input.getPermission(), input.getOwner(), input.getGroup(),
      localPath);
}
 
Example 10
Source File: FileAwareInputStreamDataWriterTest.java    From incubator-gobblin with Apache License 2.0 5 votes vote down vote up
/**
 * Writes one split of an in-memory stream through
 * {@link FileAwareInputStreamDataWriter} with file splitting enabled and
 * verifies the committed output holds exactly the first split's bytes.
 *
 * <p>Fix over the original: the {@link FileInputStream} used for the final
 * assertion was never closed; it is now managed by try-with-resources.
 */
@Test
public void testBlockWrite() throws Exception {
  String streamString = "testContents";

  FileStatus status = fs.getFileStatus(testTempPath);
  OwnerAndPermission ownerAndPermission =
      new OwnerAndPermission(status.getOwner(), status.getGroup(), new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
  CopyableFile cf = CopyableFileUtils.getTestCopyableFile(ownerAndPermission);

  CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(new TestCopyableDataset(new Path("/source")));

  WorkUnitState state = TestUtils.createTestWorkUnitState();
  state.setProp(ConfigurationKeys.WRITER_STAGING_DIR, new Path(testTempPath, "staging").toString());
  state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, new Path(testTempPath, "output").toString());
  state.setProp(ConfigurationKeys.WRITER_FILE_PATH, RandomStringUtils.randomAlphabetic(5));
  state.setProp(DistcpFileSplitter.SPLIT_ENABLED, true);
  CopySource.serializeCopyEntity(state, cf);
  CopySource.serializeCopyableDataset(state, metadata);

  FileAwareInputStreamDataWriter dataWriter = new FileAwareInputStreamDataWriter(state, 1, 0);

  // First split covers bytes [0, splitLen) of the stream.
  long splitLen = 4;
  int splits = (int) (streamString.length() / splitLen + 1);
  DistcpFileSplitter.Split split = new DistcpFileSplitter.Split(0, splitLen, 0, splits,
      String.format("%s.__PART%d__", cf.getDestination().getName(), 0));
  FSDataInputStream dataInputStream = StreamUtils.convertStream(IOUtils.toInputStream(streamString));
  dataInputStream.seek(split.getLowPosition());
  FileAwareInputStream fileAwareInputStream = FileAwareInputStream.builder().file(cf)
      .inputStream(dataInputStream)
      .split(Optional.of(split))
      .build();
  dataWriter.write(fileAwareInputStream);
  dataWriter.commit();
  Path writtenFilePath = new Path(new Path(state.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR),
      cf.getDatasetAndPartition(metadata).identifier()), cf.getDestination());
  try (FileInputStream written = new FileInputStream(writtenFilePath.toString())) {
    Assert.assertEquals(IOUtils.toString(written),
        streamString.substring(0, (int) splitLen));
  }
}
 
Example 11
Source File: FileAwareInputStreamDataWriterTest.java    From incubator-gobblin with Apache License 2.0 5 votes vote down vote up
/**
 * Writes a payload through the data writer with the {@code insecure_shift}
 * encryption algorithm configured and verifies (a) the algorithm name is
 * appended to the destination file name and (b) the written bytes are the
 * shifted form of the plaintext.
 *
 * <p>Fix over the original: the {@link FileInputStream} used for the final
 * assertion was never closed; it is now managed by try-with-resources.
 */
@Test
public void testWriteWithEncryption() throws Exception {
  byte[] streamString = "testEncryptedContents".getBytes("UTF-8");
  // Expected output of the insecure_shift algorithm: each byte + 1 (mod 256).
  byte[] expectedContents = new byte[streamString.length];
  for (int i = 0; i < streamString.length; i++) {
    expectedContents[i] = (byte)((streamString[i] + 1) % 256);
  }

  FileStatus status = fs.getFileStatus(testTempPath);
  OwnerAndPermission ownerAndPermission =
      new OwnerAndPermission(status.getOwner(), status.getGroup(), new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
  CopyableFile cf = CopyableFileUtils.getTestCopyableFile(ownerAndPermission);

  CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(new TestCopyableDataset(new Path("/source")));

  WorkUnitState state = TestUtils.createTestWorkUnitState();
  state.setProp(ConfigurationKeys.WRITER_STAGING_DIR, new Path(testTempPath, "staging").toString());
  state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, new Path(testTempPath, "output").toString());
  state.setProp(ConfigurationKeys.WRITER_FILE_PATH, RandomStringUtils.randomAlphabetic(5));
  state.setProp("writer.encrypt." + EncryptionConfigParser.ENCRYPTION_ALGORITHM_KEY, "insecure_shift");

  CopySource.serializeCopyEntity(state, cf);
  CopySource.serializeCopyableDataset(state, metadata);

  FileAwareInputStreamDataWriter dataWriter = new FileAwareInputStreamDataWriter(state, 1, 0);

  FileAwareInputStream fileAwareInputStream = FileAwareInputStream.builder().file(cf)
      .inputStream(StreamUtils.convertStream(new ByteArrayInputStream(streamString))).build();
  dataWriter.write(fileAwareInputStream);
  dataWriter.commit();

  Path writtenFilePath = new Path(new Path(state.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR),
      cf.getDatasetAndPartition(metadata).identifier()), cf.getDestination());
  Assert.assertTrue(writtenFilePath.getName().endsWith("insecure_shift"),
      "Expected encryption name to be appended to destination");
  try (FileInputStream written = new FileInputStream(writtenFilePath.toString())) {
    Assert.assertEquals(IOUtils.toByteArray(written), expectedContents);
  }
}
 
Example 12
Source File: DistCp.java    From RDFS with Apache License 2.0 5 votes vote down vote up
/**
 * Copies the attributes selected in {@code preserved} (owner, group,
 * permission, modification/access times) from the source status onto the
 * destination path, issuing filesystem calls only for values that differ.
 */
private static void updateDestStatus(FileStatus src, FileStatus dst,
    EnumSet<FileAttribute> preserved, FileSystem destFileSys
    ) throws IOException {
  String owner = null;
  String group = null;
  if (preserved.contains(FileAttribute.USER)
      && !src.getOwner().equals(dst.getOwner())) {
    owner = src.getOwner();
  }
  if (preserved.contains(FileAttribute.GROUP)
      && !src.getGroup().equals(dst.getGroup())) {
    group = src.getGroup();
  }
  // A single chown covers both fields; a field left null is not being changed.
  if (owner != null || group != null) {
    destFileSys.setOwner(dst.getPath(), owner, group);
  }
  if (preserved.contains(FileAttribute.PERMISSION)
      && !src.getPermission().equals(dst.getPermission())) {
    destFileSys.setPermission(dst.getPath(), src.getPermission());
  }
  if (preserved.contains(FileAttribute.TIMES)) {
    try {
      destFileSys.setTimes(dst.getPath(), src.getModificationTime(), src.getAccessTime());
    } catch (IOException exc) {
      if (!dst.isDir()) { //hadoop 0.20 doesn't allow setTimes on dirs
        throw exc;
      }
    }
  }
}
 
Example 13
Source File: InLineFileSystem.java    From hudi with Apache License 2.0 5 votes vote down vote up
/**
 * Returns a status for the inline path: the length comes from the inline
 * segment and the path is the inline path itself, while every other
 * attribute (type, replication, permission, ownership, times) is inherited
 * from the enclosing outer file.
 */
@Override
public FileStatus getFileStatus(Path inlinePath) throws IOException {
  final Path outerPath = InLineFSUtils.getOuterfilePathFromInlinePath(inlinePath);
  final FileStatus outerStatus = outerPath.getFileSystem(conf).getFileStatus(outerPath);
  return new FileStatus(InLineFSUtils.length(inlinePath), outerStatus.isDirectory(),
      outerStatus.getReplication(), outerStatus.getBlockSize(),
      outerStatus.getModificationTime(), outerStatus.getAccessTime(),
      outerStatus.getPermission(), outerStatus.getOwner(),
      outerStatus.getGroup(), inlinePath);
}
 
Example 14
Source File: FileStatusEntity.java    From Eagle with Apache License 2.0 5 votes vote down vote up
/**
 * Snapshot of a Hadoop {@link FileStatus}: copies length, type, replication,
 * block size, timestamps, permission, owner, group, and (when present) the
 * symlink target into this entity's fields.
 */
public FileStatusEntity(FileStatus status) throws IOException {
    // NOTE(review): the path is deliberately not captured here — confirm
    // whether callers set it separately before removing this line.
    //this.path = status.getPath();
    this.length = status.getLen();
    this.isdir = status.isDirectory();
    this.block_replication = status.getReplication();
    this.blocksize = status.getBlockSize();
    this.modification_time = status.getModificationTime();
    this.access_time = status.getAccessTime();
    this.permission = status.getPermission();
    this.owner = status.getOwner();
    this.group = status.getGroup();
    // getSymlink() throws for non-symlinks, hence the guard.
    if(status.isSymlink()) {
        this.symlink = status.getSymlink();
    }
}
 
Example 15
Source File: FileAwareInputStreamDataWriterTest.java    From incubator-gobblin with Apache License 2.0 5 votes vote down vote up
/**
 * Writes a small in-memory stream through
 * {@link FileAwareInputStreamDataWriter} and verifies the committed output
 * file contains exactly the original contents.
 *
 * <p>Fix over the original: the {@link FileInputStream} used for the final
 * assertion was never closed; it is now managed by try-with-resources.
 */
@Test
public void testWrite() throws Exception {
  String streamString = "testContents";

  FileStatus status = fs.getFileStatus(testTempPath);
  OwnerAndPermission ownerAndPermission =
      new OwnerAndPermission(status.getOwner(), status.getGroup(), new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
  CopyableFile cf = CopyableFileUtils.getTestCopyableFile(ownerAndPermission);

  CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(new TestCopyableDataset(new Path("/source")));

  WorkUnitState state = TestUtils.createTestWorkUnitState();
  state.setProp(ConfigurationKeys.WRITER_STAGING_DIR, new Path(testTempPath, "staging").toString());
  state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, new Path(testTempPath, "output").toString());
  state.setProp(ConfigurationKeys.WRITER_FILE_PATH, RandomStringUtils.randomAlphabetic(5));
  CopySource.serializeCopyEntity(state, cf);
  CopySource.serializeCopyableDataset(state, metadata);

  FileAwareInputStreamDataWriter dataWriter = new FileAwareInputStreamDataWriter(state, 1, 0);

  FileAwareInputStream fileAwareInputStream = FileAwareInputStream.builder().file(cf)
      .inputStream(StreamUtils.convertStream(IOUtils.toInputStream(streamString))).build();
  dataWriter.write(fileAwareInputStream);
  dataWriter.commit();
  Path writtenFilePath = new Path(new Path(state.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR),
      cf.getDatasetAndPartition(metadata).identifier()), cf.getDestination());
  try (FileInputStream written = new FileInputStream(writtenFilePath.toString())) {
    Assert.assertEquals(IOUtils.toString(written), streamString);
  }
}
 
Example 16
Source File: FileAwareInputStreamDataWriterTest.java    From incubator-gobblin with Apache License 2.0 4 votes vote down vote up
/**
 * Round-trips a payload through GPG asymmetric encryption: writes with the
 * {@code gpg} algorithm using a public key, then verifies the on-disk bytes
 * differ from the plaintext and that decrypting with the matching private
 * key recovers the original contents.
 *
 * <p>Fix over the original: both {@link FileInputStream}s opened on the
 * written file were never closed; they are now managed by
 * try-with-resources.
 */
@Test
public void testWriteWithGPGAsymmetricEncryption() throws Exception {
  byte[] streamString = "testEncryptedContents".getBytes("UTF-8");

  FileStatus status = fs.getFileStatus(testTempPath);
  OwnerAndPermission ownerAndPermission =
      new OwnerAndPermission(status.getOwner(), status.getGroup(), new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
  CopyableFile cf = CopyableFileUtils.getTestCopyableFile(ownerAndPermission);

  CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(new TestCopyableDataset(new Path("/source")));

  WorkUnitState state = TestUtils.createTestWorkUnitState();
  state.setProp(ConfigurationKeys.WRITER_STAGING_DIR, new Path(testTempPath, "staging").toString());
  state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, new Path(testTempPath, "output").toString());
  state.setProp(ConfigurationKeys.WRITER_FILE_PATH, RandomStringUtils.randomAlphabetic(5));
  state.setProp("writer.encrypt." + EncryptionConfigParser.ENCRYPTION_ALGORITHM_KEY, "gpg");

  // Stage the public key on disk where the writer's keystore config expects it.
  File publicKeyFile = new File(testTempPath.toString(), "public.key");

  FileUtils.copyInputStreamToFile(GPGFileEncryptor.class.getResourceAsStream(GPGFileEncryptorTest.PUBLIC_KEY),
      publicKeyFile);

  state.setProp("writer.encrypt." + EncryptionConfigParser.ENCRYPTION_KEYSTORE_PATH_KEY, publicKeyFile.getAbsolutePath());
  state.setProp("writer.encrypt." + EncryptionConfigParser.ENCRYPTION_KEYSTORE_PASSWORD_KEY,
      GPGFileEncryptorTest.PASSPHRASE);
  state.setProp("writer.encrypt." + EncryptionConfigParser.ENCRYPTION_KEY_NAME,
      GPGFileEncryptorTest.KEY_ID);

  CopySource.serializeCopyEntity(state, cf);
  CopySource.serializeCopyableDataset(state, metadata);

  FileAwareInputStreamDataWriter dataWriter = new FileAwareInputStreamDataWriter(state, 1, 0);

  FileAwareInputStream fileAwareInputStream = FileAwareInputStream.builder().file(cf)
      .inputStream(StreamUtils.convertStream(new ByteArrayInputStream(streamString))).build();
  dataWriter.write(fileAwareInputStream);
  dataWriter.commit();

  Path writtenFilePath = new Path(new Path(state.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR),
      cf.getDatasetAndPartition(metadata).identifier()), cf.getDestination());
  Assert.assertTrue(writtenFilePath.getName().endsWith("gpg"),
      "Expected encryption name to be appended to destination");

  byte[] encryptedContent;
  try (FileInputStream encryptedIn = new FileInputStream(writtenFilePath.toString())) {
    encryptedContent = IOUtils.toByteArray(encryptedIn);
  }
  byte[] decryptedContent = new byte[streamString.length];
  try (FileInputStream cipherIn = new FileInputStream(writtenFilePath.toString())) {
    IOUtils.readFully(GPGFileDecryptor.decryptFile(cipherIn,
        GPGFileEncryptor.class.getResourceAsStream(GPGFileEncryptorTest.PRIVATE_KEY),
        GPGFileEncryptorTest.PASSPHRASE), decryptedContent);
  }

  // encrypted string should not be the same as the plaintext
  Assert.assertNotEquals(encryptedContent, streamString);

  // decrypted string should be the same as the plaintext
  Assert.assertEquals(decryptedContent, streamString);
}
 
Example 17
Source File: DistCpUtils.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Preserve attribute on file matching that of the file status being sent
 * as argument. Barring the block size, all the other attributes are preserved
 * by this function
 *
 * @param targetFS - File system
 * @param path - Path that needs to preserve original file status
 * @param srcFileStatus - Original file status
 * @param attributes - Attribute set that needs to be preserved
 * @param preserveRawXattrs if true, raw.* xattrs should be preserved
 * @throws IOException - Exception if any (particularly relating to group/owner
 *                       change or any transient error)
 */
public static void preserve(FileSystem targetFS, Path path,
                            CopyListingFileStatus srcFileStatus,
                            EnumSet<FileAttribute> attributes,
                            boolean preserveRawXattrs) throws IOException {

  FileStatus targetFileStatus = targetFS.getFileStatus(path);
  // Start from the target's current owner/group; either may be overwritten
  // below, and a single chown at the end applies both.
  String group = targetFileStatus.getGroup();
  String user = targetFileStatus.getOwner();
  boolean chown = false;

  // ACL preservation takes precedence over plain permission preservation.
  if (attributes.contains(FileAttribute.ACL)) {
    List<AclEntry> srcAcl = srcFileStatus.getAclEntries();
    List<AclEntry> targetAcl = getAcl(targetFS, targetFileStatus);
    if (!srcAcl.equals(targetAcl)) {
      targetFS.setAcl(path, srcAcl);
    }
    // setAcl doesn't preserve sticky bit, so also call setPermission if needed.
    if (srcFileStatus.getPermission().getStickyBit() !=
        targetFileStatus.getPermission().getStickyBit()) {
      targetFS.setPermission(path, srcFileStatus.getPermission());
    }
  } else if (attributes.contains(FileAttribute.PERMISSION) &&
    !srcFileStatus.getPermission().equals(targetFileStatus.getPermission())) {
    targetFS.setPermission(path, srcFileStatus.getPermission());
  }

  // raw.* xattrs are copied whenever preserveRawXattrs is set, even if
  // general xattr preservation was not requested.
  final boolean preserveXAttrs = attributes.contains(FileAttribute.XATTR);
  if (preserveXAttrs || preserveRawXattrs) {
    final String rawNS =
        StringUtils.toLowerCase(XAttr.NameSpace.RAW.name());
    Map<String, byte[]> srcXAttrs = srcFileStatus.getXAttrs();
    Map<String, byte[]> targetXAttrs = getXAttrs(targetFS, path);
    if (srcXAttrs != null && !srcXAttrs.equals(targetXAttrs)) {
      for (Entry<String, byte[]> entry : srcXAttrs.entrySet()) {
        String xattrName = entry.getKey();
        if (xattrName.startsWith(rawNS) || preserveXAttrs) {
          targetFS.setXAttr(path, xattrName, entry.getValue());
        }
      }
    }
  }

  // Replication only applies to files, never directories.
  if (attributes.contains(FileAttribute.REPLICATION) && !targetFileStatus.isDirectory() &&
      (srcFileStatus.getReplication() != targetFileStatus.getReplication())) {
    targetFS.setReplication(path, srcFileStatus.getReplication());
  }

  if (attributes.contains(FileAttribute.GROUP) &&
      !group.equals(srcFileStatus.getGroup())) {
    group = srcFileStatus.getGroup();
    chown = true;
  }

  if (attributes.contains(FileAttribute.USER) &&
      !user.equals(srcFileStatus.getOwner())) {
    user = srcFileStatus.getOwner();
    chown = true;
  }

  // Owner and group changes are batched into one setOwner call.
  if (chown) {
    targetFS.setOwner(path, user, group);
  }
  
  if (attributes.contains(FileAttribute.TIMES)) {
    targetFS.setTimes(path, 
        srcFileStatus.getModificationTime(), 
        srcFileStatus.getAccessTime());
  }
}
 
Example 18
Source File: TestDistCh.java    From RDFS with Apache License 2.0 4 votes vote down vote up
// Builds a permission status where each empty-string argument falls back to
// the corresponding attribute from filestatus; a non-empty permission string
// is parsed as octal.
ChPermissionStatus(FileStatus filestatus, String owner, String group, String permission) {
  super("".equals(owner)? filestatus.getOwner(): owner, 
      "".equals(group)? filestatus.getGroup(): group,
      "".equals(permission)? filestatus.getPermission(): new FsPermission(Short.parseShort(permission, 8)));
}
 
Example 19
Source File: TestDistCh.java    From hadoop with Apache License 2.0 4 votes vote down vote up
// Builds a permission status where each empty-string argument falls back to
// the corresponding attribute from filestatus; a non-empty permission string
// is parsed as octal.
ChPermissionStatus(FileStatus filestatus, String owner, String group, String permission) {
  super("".equals(owner)? filestatus.getOwner(): owner, 
      "".equals(group)? filestatus.getGroup(): group,
      "".equals(permission)? filestatus.getPermission(): new FsPermission(Short.parseShort(permission, 8)));
  // Records whether the caller supplied an explicit permission (null or ""
  // means the filestatus default was used).
  defaultPerm = permission == null || "".equals(permission);
}
 
Example 20
Source File: HiveConverterUtils.java    From incubator-gobblin with Apache License 2.0 4 votes vote down vote up
/**
 * Creates a staging directory with the same permissions as the source
 * directory, and optionally the same group.
 *
 * @param fs filesystem object
 * @param destination staging directory location
 * @param conversionEntity conversion entity used to get source directory permissions
 * @param workUnit workunit used to decide whether the group should be copied
 */
public static void createStagingDirectory(FileSystem fs, String destination, HiveProcessingEntity conversionEntity,
    WorkUnitState workUnit) {
  /*
   * Create staging data location with the same permissions as source data location
   *
   * Note that hive can also automatically create the non-existing directories but it does not
   * seem to create it with the desired permissions.
   * According to hive docs permissions for newly created directories/files can be controlled using uMask like,
   *
   * SET hive.warehouse.subdir.inherit.perms=false;
   * SET fs.permissions.umask-mode=022;
   * Upon testing, this did not work
   */
  Path destinationPath = new Path(destination);
  try {
    FsPermission permission;
    String group = null;
    // If the source table has a data location, mirror its permission and
    // group; otherwise fall back to the filesystem default permission.
    if (conversionEntity.getTable().getDataLocation() != null) {
      FileStatus sourceDataFileStatus = fs.getFileStatus(conversionEntity.getTable().getDataLocation());
      permission = sourceDataFileStatus.getPermission();
      group = sourceDataFileStatus.getGroup();
    } else {
      permission = FsPermission.getDefault();
    }

    if (!fs.mkdirs(destinationPath, permission)) {
      throw new RuntimeException(String.format("Failed to create path %s with permissions %s",
          destinationPath, permission));
    } else {
      // mkdirs applies the umask, so set the exact permission explicitly.
      fs.setPermission(destinationPath, permission);
      // Set the same group as source, unless the workunit opts out.
      if (group != null && !workUnit.getPropAsBoolean(HIVE_DATASET_DESTINATION_SKIP_SETGROUP, DEFAULT_HIVE_DATASET_DESTINATION_SKIP_SETGROUP)) {
        // Passing null for the owner leaves the directory's owner unchanged.
        fs.setOwner(destinationPath, null, group);
      }
      log.info(String.format("Created %s with permissions %s and group %s", destinationPath, permission, group));
    }
  } catch (IOException e) {
    // Rethrows the IOException wrapped as an unchecked exception.
    Throwables.propagate(e);
  }
}