Java Code Examples for org.apache.hadoop.fs.permission.FsAction#READ_WRITE

The following examples show how to use org.apache.hadoop.fs.permission.FsAction#READ_WRITE. Each example is taken from an open-source project; the source file and project are noted above each snippet.
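
Before the examples, a quick orientation may help: FsAction.READ_WRITE is the enum constant for the read and write bits without execute ("rw-"), and the examples below pass it to the three-argument FsPermission constructor to set the user, group, and other permission classes separately. A minimal, self-contained sketch of that pattern (the class name is illustrative, not taken from any of the projects below):

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class ReadWritePermissionExample {
  public static void main(String[] args) {
    // Owner gets read+write, group and other get nothing.
    FsPermission ownerOnly =
        new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);

    System.out.println(ownerOnly);            // rw------- (symbolic form)
    System.out.println(ownerOnly.toShort());  // 384, i.e. 0600 in octal

    // FsAction constants behave like bit flags.
    System.out.println(FsAction.READ_WRITE.implies(FsAction.READ));  // true
    System.out.println(FsAction.READ_WRITE.implies(FsAction.ALL));   // false
  }
}
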
Example 1
Source File: BaseTestHttpFSWith.java    From hadoop with Apache License 2.0
private void testCreate(Path path, boolean override) throws Exception {
  FileSystem fs = getHttpFSFileSystem();
  FsPermission permission = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
  OutputStream os = fs.create(new Path(path.toUri().getPath()), permission, override, 1024,
                              (short) 2, 100 * 1024 * 1024, null);
  os.write(1);
  os.close();
  fs.close();

  fs = FileSystem.get(getProxiedFSConf());
  FileStatus status = fs.getFileStatus(path);
  if (!isLocalFS()) {
    Assert.assertEquals(status.getReplication(), 2);
    Assert.assertEquals(status.getBlockSize(), 100 * 1024 * 1024);
  }
  Assert.assertEquals(status.getPermission(), permission);
  InputStream is = fs.open(path);
  Assert.assertEquals(is.read(), 1);
  is.close();
  fs.close();
}
 
Example 2
Source File: TestHttpFSFileSystemLocalFileSystem.java    From hadoop with Apache License 2.0
@Override
protected void testSetPermission() throws Exception {
  if (Path.WINDOWS) {
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    Path path = new Path(getProxiedFSTestDir(), "foodir");
    fs.mkdirs(path);

    fs = getHttpFSFileSystem();
    FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    fs.setPermission(path, permission1);
    fs.close();

    fs = FileSystem.get(getProxiedFSConf());
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    FsPermission permission2 = status1.getPermission();
    Assert.assertEquals(permission2, permission1);

    // sticky bit not supported on Windows with local file system, so the
    // subclass skips that part of the test
  } else {
    super.testSetPermission();
  }
}
 
Example 3
Source File: TestBlobMetadata.java    From hadoop with Apache License 2.0
@SuppressWarnings("deprecation")
@Test
public void testPermissionMetadata() throws Exception {
  FsPermission justMe = new FsPermission(FsAction.READ_WRITE, FsAction.NONE,
      FsAction.NONE);
  Path selfishFile = new Path("/noOneElse");
  fs.create(selfishFile, justMe, true, 4096, fs.getDefaultReplication(),
      fs.getDefaultBlockSize(), null).close();
  HashMap<String, String> metadata = backingStore
      .getMetadata(AzureBlobStorageTestAccount.toMockUri(selfishFile));
  assertNotNull(metadata);
  String storedPermission = metadata.get("hdi_permission");
  assertEquals(getExpectedPermissionString("rw-------"), storedPermission);
  FileStatus retrievedStatus = fs.getFileStatus(selfishFile);
  assertNotNull(retrievedStatus);
  assertEquals(justMe, retrievedStatus.getPermission());
  assertEquals(getExpectedOwner(), retrievedStatus.getOwner());
  assertEquals(NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,
      retrievedStatus.getGroup());
}
 
Example 4
Source File: BaseTestHttpFSWith.java    From big-c with Apache License 2.0
private void testCreate(Path path, boolean override) throws Exception {
  FileSystem fs = getHttpFSFileSystem();
  FsPermission permission = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
  OutputStream os = fs.create(new Path(path.toUri().getPath()), permission, override, 1024,
                              (short) 2, 100 * 1024 * 1024, null);
  os.write(1);
  os.close();
  fs.close();

  fs = FileSystem.get(getProxiedFSConf());
  FileStatus status = fs.getFileStatus(path);
  if (!isLocalFS()) {
    Assert.assertEquals(status.getReplication(), 2);
    Assert.assertEquals(status.getBlockSize(), 100 * 1024 * 1024);
  }
  Assert.assertEquals(status.getPermission(), permission);
  InputStream is = fs.open(path);
  Assert.assertEquals(is.read(), 1);
  is.close();
  fs.close();
}
 
Example 5
Source File: TestHttpFSFileSystemLocalFileSystem.java    From big-c with Apache License 2.0
@Override
protected void testSetPermission() throws Exception {
  if (Path.WINDOWS) {
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    Path path = new Path(getProxiedFSTestDir(), "foodir");
    fs.mkdirs(path);

    fs = getHttpFSFileSystem();
    FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    fs.setPermission(path, permission1);
    fs.close();

    fs = FileSystem.get(getProxiedFSConf());
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    FsPermission permission2 = status1.getPermission();
    Assert.assertEquals(permission2, permission1);

    // sticky bit not supported on Windows with local file system, so the
    // subclass skips that part of the test
  } else {
    super.testSetPermission();
  }
}
 
Example 6
Source File: TestBlobMetadata.java    From big-c with Apache License 2.0
@SuppressWarnings("deprecation")
@Test
public void testPermissionMetadata() throws Exception {
  FsPermission justMe = new FsPermission(FsAction.READ_WRITE, FsAction.NONE,
      FsAction.NONE);
  Path selfishFile = new Path("/noOneElse");
  fs.create(selfishFile, justMe, true, 4096, fs.getDefaultReplication(),
      fs.getDefaultBlockSize(), null).close();
  HashMap<String, String> metadata = backingStore
      .getMetadata(AzureBlobStorageTestAccount.toMockUri(selfishFile));
  assertNotNull(metadata);
  String storedPermission = metadata.get("hdi_permission");
  assertEquals(getExpectedPermissionString("rw-------"), storedPermission);
  FileStatus retrievedStatus = fs.getFileStatus(selfishFile);
  assertNotNull(retrievedStatus);
  assertEquals(justMe, retrievedStatus.getPermission());
  assertEquals(getExpectedOwner(), retrievedStatus.getOwner());
  assertEquals(NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,
      retrievedStatus.getGroup());
}
 
Example 7
Source File: SentryPermissions.java    From incubator-sentry with Apache License 2.0
@Override
public List<AclEntry> getAcls(String authzObj) {
  Map<String, FsAction> groupPerms = getGroupPerms(authzObj);
  List<AclEntry> retList = new LinkedList<AclEntry>();
  for (Map.Entry<String, FsAction> groupPerm : groupPerms.entrySet()) {
    AclEntry.Builder builder = new AclEntry.Builder();
    builder.setName(groupPerm.getKey());
    builder.setType(AclEntryType.GROUP);
    builder.setScope(AclEntryScope.ACCESS);
    FsAction action = groupPerm.getValue();
    if (action == FsAction.READ || action == FsAction.WRITE
        || action == FsAction.READ_WRITE) {
      action = action.or(FsAction.EXECUTE);
    }
    builder.setPermission(action);
    retList.add(builder.build());
  }
  return retList;
}
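
Example 7 works because FsAction is a bit-flag enum: or() returns the union of two actions' bits, so widening a group's READ, WRITE, or READ_WRITE with EXECUTE yields READ_EXECUTE, WRITE_EXECUTE, or ALL. A small standalone illustration of that composition (not taken from the Sentry source):

import org.apache.hadoop.fs.permission.FsAction;

public class FsActionOrExample {
  public static void main(String[] args) {
    // or() unions the permission bits of the two actions.
    System.out.println(FsAction.READ.or(FsAction.EXECUTE));        // READ_EXECUTE ("r-x")
    System.out.println(FsAction.WRITE.or(FsAction.EXECUTE));       // WRITE_EXECUTE ("-wx")
    System.out.println(FsAction.READ_WRITE.or(FsAction.EXECUTE));  // ALL ("rwx")
  }
}
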
 
Example 8
Source File: TestSnapshot.java    From hadoop with Apache License 2.0
/**
 * @return A random FsPermission
 */
private FsPermission genRandomPermission() {
  // randomly select between "rwx" and "rw-"
  FsAction u = random.nextBoolean() ? FsAction.ALL : FsAction.READ_WRITE;
  FsAction g = random.nextBoolean() ? FsAction.ALL : FsAction.READ_WRITE;
  FsAction o = random.nextBoolean() ? FsAction.ALL : FsAction.READ_WRITE;
  return new FsPermission(u, g, o);
}
 
Example 9
Source File: TestBlobMetadata.java    From hadoop with Apache License 2.0
/**
 * Tests that WASB understands the old-style ASV metadata and changes it when
 * it gets the chance.
 */
@Test
public void testOldPermissionMetadata() throws Exception {
  Path selfishFile = new Path("/noOneElse");
  HashMap<String, String> metadata =
      new HashMap<String, String>();
  metadata.put("asv_permission",
      getExpectedPermissionString("rw-------"));
  backingStore.setContent(
      AzureBlobStorageTestAccount.toMockUri(selfishFile),
      new byte[] { },
      metadata, false, 0);
  FsPermission justMe = new FsPermission(
      FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
  FileStatus retrievedStatus = fs.getFileStatus(selfishFile);
  assertNotNull(retrievedStatus);
  assertEquals(justMe, retrievedStatus.getPermission());
  assertEquals(getExpectedOwner(), retrievedStatus.getOwner());
  assertEquals(NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,
      retrievedStatus.getGroup());
  FsPermission meAndYou = new FsPermission(
      FsAction.READ_WRITE, FsAction.READ_WRITE, FsAction.NONE);
  fs.setPermission(selfishFile, meAndYou);
  metadata =
      backingStore.getMetadata(
          AzureBlobStorageTestAccount.toMockUri(selfishFile));
  assertNotNull(metadata);
  String storedPermission = metadata.get("hdi_permission");
  assertEquals(getExpectedPermissionString("rw-rw----"),
      storedPermission);
  assertNull(metadata.get("asv_permission"));
}
 
Example 10
Source File: TestSnapshot.java    From big-c with Apache License 2.0
/**
 * @return A random FsPermission
 */
private FsPermission genRandomPermission() {
  // randomly select between "rwx" and "rw-"
  FsAction u = random.nextBoolean() ? FsAction.ALL : FsAction.READ_WRITE;
  FsAction g = random.nextBoolean() ? FsAction.ALL : FsAction.READ_WRITE;
  FsAction o = random.nextBoolean() ? FsAction.ALL : FsAction.READ_WRITE;
  return new FsPermission(u, g, o);
}
 
Example 11
Source File: TestBlobMetadata.java    From big-c with Apache License 2.0
/**
 * Tests that WASB understands the old-style ASV metadata and changes it when
 * it gets the chance.
 */
@Test
public void testOldPermissionMetadata() throws Exception {
  Path selfishFile = new Path("/noOneElse");
  HashMap<String, String> metadata =
      new HashMap<String, String>();
  metadata.put("asv_permission",
      getExpectedPermissionString("rw-------"));
  backingStore.setContent(
      AzureBlobStorageTestAccount.toMockUri(selfishFile),
      new byte[] { },
      metadata, false, 0);
  FsPermission justMe = new FsPermission(
      FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
  FileStatus retrievedStatus = fs.getFileStatus(selfishFile);
  assertNotNull(retrievedStatus);
  assertEquals(justMe, retrievedStatus.getPermission());
  assertEquals(getExpectedOwner(), retrievedStatus.getOwner());
  assertEquals(NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,
      retrievedStatus.getGroup());
  FsPermission meAndYou = new FsPermission(
      FsAction.READ_WRITE, FsAction.READ_WRITE, FsAction.NONE);
  fs.setPermission(selfishFile, meAndYou);
  metadata =
      backingStore.getMetadata(
          AzureBlobStorageTestAccount.toMockUri(selfishFile));
  assertNotNull(metadata);
  String storedPermission = metadata.get("hdi_permission");
  assertEquals(getExpectedPermissionString("rw-rw----"),
      storedPermission);
  assertNull(metadata.get("asv_permission"));
}
 
Example 12
Source File: FileAwareInputStreamDataWriterTest.java    From incubator-gobblin with Apache License 2.0
@Test
public void testCommit() throws IOException {

  String destinationExistingToken = "destination";
  String destinationAdditionalTokens = "path";
  String fileName = "file";

  // Assemble destination paths
  Path destination = new Path(new Path(new Path("/", destinationExistingToken), destinationAdditionalTokens), fileName);
  Path destinationWithoutLeadingSeparator = new Path(new Path(destinationExistingToken, destinationAdditionalTokens), fileName);

  // Create temp directory
  File tmpFile = Files.createTempDir();
  tmpFile.deleteOnExit();
  Path tmpPath = new Path(tmpFile.getAbsolutePath());

  // create origin file
  Path originFile = new Path(tmpPath, fileName);
  this.fs.createNewFile(originFile);

  // create staging dir
  Path stagingDir = new Path(tmpPath, "staging");
  this.fs.mkdirs(stagingDir);

  // create output dir
  Path outputDir = new Path(tmpPath, "output");
  this.fs.mkdirs(outputDir);

  // create copyable file
  FileStatus status = this.fs.getFileStatus(originFile);
  FsPermission readWrite = new FsPermission(FsAction.READ_WRITE, FsAction.READ_WRITE, FsAction.READ_WRITE);
  FsPermission dirReadWrite = new FsPermission(FsAction.ALL, FsAction.READ_WRITE, FsAction.READ_WRITE);
  OwnerAndPermission ownerAndPermission = new OwnerAndPermission(status.getOwner(), status.getGroup(), readWrite);
  List<OwnerAndPermission> ancestorOwnerAndPermissions = Lists.newArrayList();
  ancestorOwnerAndPermissions.add(ownerAndPermission);
  ancestorOwnerAndPermissions.add(ownerAndPermission);
  ancestorOwnerAndPermissions.add(ownerAndPermission);
  ancestorOwnerAndPermissions.add(ownerAndPermission);

  Properties properties = new Properties();
  properties.setProperty(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/publisher");

  CopyableFile cf = CopyableFile.fromOriginAndDestination(this.fs, status, destination,
      CopyConfiguration.builder(FileSystem.getLocal(new Configuration()), properties).publishDir(new Path("/target"))
          .preserve(PreserveAttributes.fromMnemonicString("")).build())
      .destinationOwnerAndPermission(ownerAndPermission)
      .ancestorsOwnerAndPermission(ancestorOwnerAndPermissions)
      .build();

  // create work unit state
  WorkUnitState state = TestUtils.createTestWorkUnitState();
  state.setProp(ConfigurationKeys.WRITER_STAGING_DIR, stagingDir.toUri().getPath());
  state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, outputDir.toUri().getPath());
  state.setProp(ConfigurationKeys.WRITER_FILE_PATH, RandomStringUtils.randomAlphabetic(5));
  CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(new TestCopyableDataset(new Path("/source")));
  CopySource.serializeCopyEntity(state, cf);
  CopySource.serializeCopyableDataset(state, metadata);

  // create writer
  FileAwareInputStreamDataWriter writer = new FileAwareInputStreamDataWriter(state, 1, 0);

  // create output of writer.write
  Path writtenFile = writer.getStagingFilePath(cf);
  this.fs.mkdirs(writtenFile.getParent());
  this.fs.createNewFile(writtenFile);

  // create existing directories in writer output
  Path outputRoot = FileAwareInputStreamDataWriter.getPartitionOutputRoot(outputDir, cf.getDatasetAndPartition(metadata));
  Path existingOutputPath = new Path(outputRoot, destinationExistingToken);
  this.fs.mkdirs(existingOutputPath);
  FileStatus fileStatus = this.fs.getFileStatus(existingOutputPath);
  FsPermission existingPathPermission = fileStatus.getPermission();

  // check initial state of the relevant directories
  Assert.assertTrue(this.fs.exists(existingOutputPath));
  Assert.assertEquals(this.fs.listStatus(existingOutputPath).length, 0);

  writer.actualProcessedCopyableFile = Optional.of(cf);

  // commit
  writer.commit();

  // check state of relevant paths after commit
  Path expectedOutputPath = new Path(outputRoot, destinationWithoutLeadingSeparator);
  Assert.assertTrue(this.fs.exists(expectedOutputPath));
  fileStatus = this.fs.getFileStatus(expectedOutputPath);
  Assert.assertEquals(fileStatus.getOwner(), ownerAndPermission.getOwner());
  Assert.assertEquals(fileStatus.getGroup(), ownerAndPermission.getGroup());
  Assert.assertEquals(fileStatus.getPermission(), readWrite);
  // parent should have permissions set correctly
  fileStatus = this.fs.getFileStatus(expectedOutputPath.getParent());
  Assert.assertEquals(fileStatus.getPermission(), dirReadWrite);
  // previously existing paths should not have permissions changed
  fileStatus = this.fs.getFileStatus(existingOutputPath);
  Assert.assertEquals(fileStatus.getPermission(), existingPathPermission);

  Assert.assertFalse(this.fs.exists(writer.stagingDir));
}