org.apache.hadoop.fs.permission.FsAction Java Examples

The following examples show how to use org.apache.hadoop.fs.permission.FsAction. Each example is drawn from an open-source project; the source file, originating project, and license are noted above the code.
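As a quick orientation: FsAction is an enum of the eight possible rwx permission combinations, and three FsAction values (user, group, other) make up an FsPermission. The following minimal, self-contained sketch (the class name is illustrative) demonstrates the operations the examples below rely on:

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class FsActionBasics {
  public static void main(String[] args) {
    // implies(): does the left action grant at least the right one?
    System.out.println(FsAction.READ_WRITE.implies(FsAction.READ)); // true
    System.out.println(FsAction.READ.implies(FsAction.WRITE));      // false

    // or() / and(): union and intersection of the rwx bits
    System.out.println(FsAction.READ.or(FsAction.WRITE));           // READ_WRITE
    System.out.println(FsAction.ALL.and(FsAction.READ_EXECUTE));    // READ_EXECUTE

    // Three FsActions (user, group, other) form an FsPermission
    FsPermission perm =
        new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE);
    System.out.println(perm);                                  // rwxr-x---
    System.out.println(Integer.toOctalString(perm.toShort())); // 750
  }
}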
Example #1
Source File: FSPermissionChecker.java    From big-c with Apache License 2.0
/**
 * Whether a cache pool can be accessed by the current context
 *
 * @param pool CachePool being accessed
 * @param access type of action being performed on the cache pool
 * @throws AccessControlException if pool cannot be accessed
 */
public void checkPermission(CachePool pool, FsAction access)
    throws AccessControlException {
  FsPermission mode = pool.getMode();
  if (isSuperUser()) {
    return;
  }
  if (getUser().equals(pool.getOwnerName())
      && mode.getUserAction().implies(access)) {
    return;
  }
  if (getGroups().contains(pool.getGroupName())
      && mode.getGroupAction().implies(access)) {
    return;
  }
  if (mode.getOtherAction().implies(access)) {
    return;
  }
  throw new AccessControlException("Permission denied while accessing pool "
      + pool.getPoolName() + ": user " + getUser() + " does not have "
      + access.toString() + " permissions.");
}
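The owner/group/other cascade above hinges on FsAction.implies(): an action implies another when its permission bits are a superset of the other's. As an illustrative sketch, a pool with mode rwxr----- lets its owner write but restricts group members to reads:

// Illustrative only: mirrors the cascade in checkPermission above.
FsPermission mode = new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE); // rwxr-----
mode.getUserAction().implies(FsAction.WRITE);  // true  -> the owner may modify the pool
mode.getGroupAction().implies(FsAction.WRITE); // false -> group members may only read
mode.getOtherAction().implies(FsAction.READ);  // false -> everyone else is denied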
 
Example #2
Source File: TestBlobMetadata.java    From hadoop with Apache License 2.0
@SuppressWarnings("deprecation")
@Test
public void testPermissionMetadata() throws Exception {
  FsPermission justMe = new FsPermission(FsAction.READ_WRITE, FsAction.NONE,
      FsAction.NONE);
  Path selfishFile = new Path("/noOneElse");
  fs.create(selfishFile, justMe, true, 4096, fs.getDefaultReplication(),
      fs.getDefaultBlockSize(), null).close();
  HashMap<String, String> metadata = backingStore
      .getMetadata(AzureBlobStorageTestAccount.toMockUri(selfishFile));
  assertNotNull(metadata);
  String storedPermission = metadata.get("hdi_permission");
  assertEquals(getExpectedPermissionString("rw-------"), storedPermission);
  FileStatus retrievedStatus = fs.getFileStatus(selfishFile);
  assertNotNull(retrievedStatus);
  assertEquals(justMe, retrievedStatus.getPermission());
  assertEquals(getExpectedOwner(), retrievedStatus.getOwner());
  assertEquals(NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,
      retrievedStatus.getGroup());
}
 
Example #3
Source File: HiveTableDeployer.java    From celos with Apache License 2.0
private Path createTempHdfsFileForInsertion(FixTable fixTable, TestRun testRun) throws Exception {

  Path pathToParent = new Path(testRun.getHdfsPrefix(), ".hive");
  Path pathTo = new Path(pathToParent, UUID.randomUUID().toString());
  FileSystem fileSystem = testRun.getCiContext().getFileSystem();
  fileSystem.mkdirs(pathTo.getParent());
  FSDataOutputStream outputStream = fileSystem.create(pathTo);

  CSVWriter writer = new CSVWriter(new OutputStreamWriter(outputStream), '\t', CSVWriter.NO_QUOTE_CHARACTER);

  for (FixTable.FixRow fixRow : fixTable.getRows()) {
    List<String> rowData = Lists.newArrayList();
    for (String colName : fixTable.getColumnNames()) {
      rowData.add(fixRow.getCells().get(colName));
    }
    String[] dataArray = rowData.toArray(new String[rowData.size()]);
    writer.writeNext(dataArray);
  }

  writer.close();

  // Make both the staging directory and the file world-accessible (rwxrwxrwx)
  fileSystem.setPermission(pathToParent, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
  fileSystem.setPermission(pathTo, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
  return pathTo;
}
 
Example #4
Source File: BaseTestHttpFSWith.java    From big-c with Apache License 2.0
private void testCreate(Path path, boolean override) throws Exception {
  FileSystem fs = getHttpFSFileSystem();
  FsPermission permission = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
  OutputStream os = fs.create(new Path(path.toUri().getPath()), permission, override, 1024,
                              (short) 2, 100 * 1024 * 1024, null);
  os.write(1);
  os.close();
  fs.close();

  fs = FileSystem.get(getProxiedFSConf());
  FileStatus status = fs.getFileStatus(path);
  if (!isLocalFS()) {
    Assert.assertEquals(status.getReplication(), 2);
    Assert.assertEquals(status.getBlockSize(), 100 * 1024 * 1024);
  }
  Assert.assertEquals(status.getPermission(), permission);
  InputStream is = fs.open(path);
  Assert.assertEquals(is.read(), 1);
  is.close();
  fs.close();
}
 
Example #5
Source File: TestHttpFSFileSystemLocalFileSystem.java    From hadoop with Apache License 2.0
@Override
protected void testSetPermission() throws Exception {
  if (Path.WINDOWS) {
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    Path path = new Path(getProxiedFSTestDir(), "foodir");
    fs.mkdirs(path);

    fs = getHttpFSFileSystem();
    FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    fs.setPermission(path, permission1);
    fs.close();

    fs = FileSystem.get(getProxiedFSConf());
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    FsPermission permission2 = status1.getPermission();
    Assert.assertEquals(permission2, permission1);

    // sticky bit not supported on Windows with local file system, so the
    // subclass skips that part of the test
  } else {
    super.testSetPermission();
  }
}
 
Example #6
Source File: TestFSPermissionChecker.java    From big-c with Apache License 2.0
private void assertPermissionDenied(UserGroupInformation user, String path,
    FsAction access) throws IOException {
  try {
    INodesInPath iip = dir.getINodesInPath(path, true);
    dir.getPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(iip,
      false, null, null, access, null, false);
    fail("expected AccessControlException for user + " + user + ", path = " +
      path + ", access = " + access);
  } catch (AccessControlException e) {
    assertTrue("Permission denied messages must carry the username",
            e.getMessage().contains(user.getUserName().toString()));
    assertTrue("Permission denied messages must carry the path parent",
            e.getMessage().contains(
                new Path(path).getParent().toUri().getPath()));
  }
}
 
Example #7
Source File: TestDFSPermission.java    From hadoop with Apache License 2.0
@Test
public void testAccessGroupMember() throws IOException, InterruptedException {
  FileSystem rootFs = FileSystem.get(conf);
  Path p2 = new Path("/p2");
  rootFs.mkdirs(p2);
  rootFs.setOwner(p2, UserGroupInformation.getCurrentUser().getShortUserName(), GROUP1_NAME);
  rootFs.setPermission(p2, new FsPermission((short) 0740));
  fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(conf);
    }
  });
  fs.access(p2, FsAction.READ);
  try {
    fs.access(p2, FsAction.EXECUTE);
    fail("The access call should have failed.");
  } catch (AccessControlException e) {
    assertTrue("Permission denied messages must carry the username",
            e.getMessage().contains(USER1_NAME));
    assertTrue("Permission denied messages must carry the path parent",
            e.getMessage().contains(
                p2.getParent().toUri().getPath()));
  }
}
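The octal mode 0740 used above is exactly the FsAction triple (ALL, READ, NONE), which is why the group member's READ check passes while the EXECUTE check throws. A short sketch of the equivalence (assert statements for illustration only):

// 0740 == rwxr----- : owner rwx, group r--, other ---
FsPermission octal = new FsPermission((short) 0740);
FsPermission triple = new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE);
assert octal.equals(triple);
assert octal.getGroupAction().implies(FsAction.READ);     // access(p2, READ) succeeds
assert !octal.getGroupAction().implies(FsAction.EXECUTE); // access(p2, EXECUTE) fails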
 
Example #8
Source File: HDFSRangerTest.java    From ranger with Apache License 2.0
void createFile(String baseDir, Integer index) throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file - the AccessControlEnforcer won't be invoked as we are the "superuser"
    String dirName = baseDir + (index != null ? String.valueOf(index) : "");
    String fileName = dirName + "/dummy-data";
    final Path file = new Path(fileName);
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Change permissions to read-only
    fileSystem.setPermission(file, new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE));
}
 
Example #9
Source File: FSDownload.java    From big-c with Apache License 2.0
/**
 * Recursively checks that the "other" permission bits of the given status
 * (and, for a directory, of everything beneath it) imply the required
 * dir/file actions.
 */
private static boolean checkPublicPermsForAll(FileSystem fs,
    FileStatus status, FsAction dir, FsAction file)
    throws IOException {
  FsPermission perms = status.getPermission();
  FsAction otherAction = perms.getOtherAction();
  if (status.isDirectory()) {
    if (!otherAction.implies(dir)) {
      return false;
    }
    
    for (FileStatus child : fs.listStatus(status.getPath())) {
      if (!checkPublicPermsForAll(fs, child, dir, file)) {
        return false;
      }
    }
    return true;
  }
  return (otherAction.implies(file));
}
 
Example #10
Source File: FSDirXAttrOp.java    From hadoop with Apache License 2.0
private static void checkXAttrChangeAccess(
    FSDirectory fsd, INodesInPath iip, XAttr xAttr,
    FSPermissionChecker pc)
    throws AccessControlException {
  if (fsd.isPermissionEnabled() && xAttr.getNameSpace() == XAttr.NameSpace
      .USER) {
    final INode inode = iip.getLastINode();
    if (inode != null &&
        inode.isDirectory() &&
        inode.getFsPermission().getStickyBit()) {
      if (!pc.isSuperUser()) {
        fsd.checkOwner(pc, iip);
      }
    } else {
      fsd.checkPathAccess(pc, iip, FsAction.WRITE);
    }
  }
}
 
Example #11
Source File: RangerHdfsAuthorizer.java    From ranger with Apache License 2.0
public void logHadoopEvent(String path, FsAction action, boolean accessGranted) {
	if(LOG.isDebugEnabled()) {
		LOG.debug("==> RangerHdfsAuditHandler.logHadoopEvent(" + path + ", " + action + ", " + accessGranted + ")");
	}

	if(auditEvent != null) {
		auditEvent.setResultReason(path);
		auditEvent.setAccessResult((short) (accessGranted ? 1 : 0));
		auditEvent.setAccessType(action == null ? null : action.toString());
		auditEvent.setAclEnforcer(hadoopModuleName);
		auditEvent.setPolicyId(-1);
	}

	if(LOG.isDebugEnabled()) {
		LOG.debug("<== RangerHdfsAuditHandler.logHadoopEvent(" + path + ", " + action + ", " + accessGranted + "): " + auditEvent);
	}
}
 
Example #12
Source File: RestorableHivePartitionDataset.java    From incubator-gobblin with Apache License 2.0
public void restore()
    throws IOException {
  State state = new State(this.state);
  this.datasetOwnerFs = ProxyUtils.getOwnerFs(state, this.datasetOwner);
  try (HiveProxyQueryExecutor queryExecutor = ProxyUtils
      .getQueryExecutor(state, this.datasetOwner, this.datasetToRestoreOwner, this.trashOwner)) {
    if (this.state.getPropAsBoolean(ComplianceConfigurationKeys.COMPLIANCE_JOB_SIMULATE,
        ComplianceConfigurationKeys.DEFAULT_COMPLIANCE_JOB_SIMULATE)) {
      log.info("Simulating restore of " + datasetURN() + " with " + this.datasetToRestore.datasetURN());
      return;
    }

    Path trashPartitionLocation = getTrashPartitionLocation();
    executeTrashTableQueries(queryExecutor);
    this.datasetOwnerFs.mkdirs(trashPartitionLocation.getParent());
    this.datasetOwnerFs.rename(getLocation(), trashPartitionLocation);
    FsPermission permission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.NONE);
    HadoopUtils
        .setPermissions(trashPartitionLocation.getParent(), this.datasetOwner, this.trashOwner, this.datasetOwnerFs,
            permission);
    log.info(
        "Moved dataset " + datasetURN() + " from " + getLocation() + " to trash location " + trashPartitionLocation);
    fsMove(this.datasetToRestore.getLocation(), getLocation());
    HadoopUtils.setPermissions(getLocation().getParent(), this.datasetOwner, this.trashOwner, this.datasetOwnerFs,
        permission);
    log.info("Moved data from backup " + this.datasetToRestore.getLocation() + " to location " + getLocation());
    executeDropPartitionQueries(queryExecutor);
  }
}
 
Example #13
Source File: TestSnapshot.java    From hadoop with Apache License 2.0
/**
 * @return A random FsPermission
 */
private FsPermission genRandomPermission() {
  // randomly select between "rwx" and "rw-"
  FsAction u = random.nextBoolean() ? FsAction.ALL : FsAction.READ_WRITE;
  FsAction g = random.nextBoolean() ? FsAction.ALL : FsAction.READ_WRITE;
  FsAction o = random.nextBoolean() ? FsAction.ALL : FsAction.READ_WRITE;
  return new FsPermission(u, g, o);
}
 
Example #14
Source File: FSDirMkdirOp.java    From big-c with Apache License 2.0
private static PermissionStatus addImplicitUwx(PermissionStatus parentPerm,
    PermissionStatus perm) {
  FsPermission p = parentPerm.getPermission();
  FsPermission ancestorPerm = new FsPermission(
      p.getUserAction().or(FsAction.WRITE_EXECUTE),
      p.getGroupAction(),
      p.getOtherAction());
  return new PermissionStatus(perm.getUserName(), perm.getGroupName(),
      ancestorPerm);
}
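addImplicitUwx gives the implicitly created ancestor directory the parent's user bits plus write and execute, leaving the group and other bits untouched. FsAction.or() is a union over the rwx bits, so a parent of rw-r--r-- (0644) yields an ancestor of rwxr--r-- (0744). For example (values shown as comments for illustration):

// or() is a bitwise union over the rwx flags
FsAction a = FsAction.READ.or(FsAction.WRITE_EXECUTE);       // ALL (r-- | -wx = rwx)
FsAction b = FsAction.READ_WRITE.or(FsAction.WRITE_EXECUTE); // ALL
FsAction c = FsAction.NONE.or(FsAction.WRITE_EXECUTE);       // WRITE_EXECUTE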
 
Example #15
Source File: RangerHdfsAuthorizer.java    From ranger with Apache License 2.0
public void start() {
	if(LOG.isDebugEnabled()) {
		LOG.debug("==> RangerHdfsAuthorizer.start()");
	}

	RangerHdfsPlugin plugin = new RangerHdfsPlugin(addlConfigFile);

	plugin.init();

	if (plugin.isOptimizeSubAccessAuthEnabled()) {
		LOG.info(RangerHadoopConstants.RANGER_OPTIMIZE_SUBACCESS_AUTHORIZATION_PROP + " is enabled");
	}

	access2ActionListMapper.put(FsAction.NONE,          new HashSet<String>());
	access2ActionListMapper.put(FsAction.ALL,           Sets.newHashSet(READ_ACCCESS_TYPE, WRITE_ACCCESS_TYPE, EXECUTE_ACCCESS_TYPE));
	access2ActionListMapper.put(FsAction.READ,          Sets.newHashSet(READ_ACCCESS_TYPE));
	access2ActionListMapper.put(FsAction.READ_WRITE,    Sets.newHashSet(READ_ACCCESS_TYPE, WRITE_ACCCESS_TYPE));
	access2ActionListMapper.put(FsAction.READ_EXECUTE,  Sets.newHashSet(READ_ACCCESS_TYPE, EXECUTE_ACCCESS_TYPE));
	access2ActionListMapper.put(FsAction.WRITE,         Sets.newHashSet(WRITE_ACCCESS_TYPE));
	access2ActionListMapper.put(FsAction.WRITE_EXECUTE, Sets.newHashSet(WRITE_ACCCESS_TYPE, EXECUTE_ACCCESS_TYPE));
	access2ActionListMapper.put(FsAction.EXECUTE,       Sets.newHashSet(EXECUTE_ACCCESS_TYPE));

	rangerPlugin = plugin;

	if(LOG.isDebugEnabled()) {
		LOG.debug("<== RangerHdfsAuthorizer.start()");
	}
}
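The map above is exhaustive: FsAction defines exactly eight constants, one per rwx combination, declared in bit order so that ordinal() matches the octal digit. A small loop makes this visible (expected output shown as a comment):

for (FsAction action : FsAction.values()) {
  // Prints "0 --- NONE", "1 --x EXECUTE", ... "7 rwx ALL"
  System.out.println(action.ordinal() + " " + action.SYMBOL + " " + action);
}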
 
Example #16
Source File: TestAclWithSnapshot.java    From hadoop with Apache License 2.0
/**
 * Asserts that permission is granted to the given fs/user for the given
 * directory.
 *
 * @param fs FileSystem to check
 * @param user UserGroupInformation owner of fs
 * @param pathToCheck Path directory to check
 * @throws Exception if there is an unexpected error
 */
private static void assertDirPermissionGranted(FileSystem fs,
    UserGroupInformation user, Path pathToCheck) throws Exception {
  try {
    fs.listStatus(pathToCheck);
    fs.access(pathToCheck, FsAction.READ);
  } catch (AccessControlException e) {
    fail("expected permission granted for user " + user + ", path = " +
      pathToCheck);
  }
}
 
Example #17
Source File: TestBlobMetadata.java    From big-c with Apache License 2.0
/**
 * Tests that WASB understands the old-style ASV metadata and changes it when
 * it gets the chance.
 */
@Test
public void testOldPermissionMetadata() throws Exception {
  Path selfishFile = new Path("/noOneElse");
  HashMap<String, String> metadata =
      new HashMap<String, String>();
  metadata.put("asv_permission",
      getExpectedPermissionString("rw-------"));
  backingStore.setContent(
      AzureBlobStorageTestAccount.toMockUri(selfishFile),
      new byte[] { },
      metadata, false, 0);
  FsPermission justMe = new FsPermission(
      FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
  FileStatus retrievedStatus = fs.getFileStatus(selfishFile);
  assertNotNull(retrievedStatus);
  assertEquals(justMe, retrievedStatus.getPermission());
  assertEquals(getExpectedOwner(), retrievedStatus.getOwner());
  assertEquals(NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,
      retrievedStatus.getGroup());
  FsPermission meAndYou = new FsPermission(
      FsAction.READ_WRITE, FsAction.READ_WRITE, FsAction.NONE);
  fs.setPermission(selfishFile, meAndYou);
  metadata =
      backingStore.getMetadata(
          AzureBlobStorageTestAccount.toMockUri(selfishFile));
  assertNotNull(metadata);
  String storedPermission = metadata.get("hdi_permission");
  assertEquals(getExpectedPermissionString("rw-rw----"),
      storedPermission);
  assertNull(metadata.get("asv_permission"));
}
 
Example #18
Source File: TestDistCpWithAcls.java    From big-c with Apache License 2.0
/**
 * Create a new AclEntry with scope, type, name and permission.
 *
 * @param scope AclEntryScope scope of the ACL entry
 * @param type AclEntryType ACL entry type
 * @param name String optional ACL entry name
 * @param permission FsAction set of permissions in the ACL entry
 * @return AclEntry new AclEntry
 */
private static AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
    String name, FsAction permission) {
  return new AclEntry.Builder()
    .setScope(scope)
    .setType(type)
    .setName(name)
    .setPermission(permission)
    .build();
}
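Typical usage of this helper looks like the following sketch (the entry names are hypothetical, mirroring how Hadoop's ACL tests build their specs):

// Hypothetical entries: a named-user access entry and a default group entry
AclEntry userEntry = aclEntry(AclEntryScope.ACCESS, AclEntryType.USER,
    "bruce", FsAction.READ_EXECUTE);
AclEntry groupEntry = aclEntry(AclEntryScope.DEFAULT, AclEntryType.GROUP,
    "analysts", FsAction.READ);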
 
Example #19
Source File: TestUnorderedPartitionedKVWriter.java    From tez with Apache License 2.0
private void checkPermissions(Path outputFile, Path indexFile) throws IOException {
  assertEquals("Incorrect output permissions (user)", FsAction.READ_WRITE,
      localFs.getFileStatus(outputFile).getPermission().getUserAction());
  assertEquals("Incorrect output permissions (group)", FsAction.READ,
      localFs.getFileStatus(outputFile).getPermission().getGroupAction());
  assertEquals("Incorrect index permissions (user)", FsAction.READ_WRITE,
      localFs.getFileStatus(indexFile).getPermission().getUserAction());
  assertEquals("Incorrect index permissions (group)", FsAction.READ,
      localFs.getFileStatus(indexFile).getPermission().getGroupAction());
}
 
Example #20
Source File: FSDirMkdirOp.java    From hadoop with Apache License 2.0
private static PermissionStatus addImplicitUwx(PermissionStatus parentPerm,
    PermissionStatus perm) {
  FsPermission p = parentPerm.getPermission();
  FsPermission ancestorPerm = new FsPermission(
      p.getUserAction().or(FsAction.WRITE_EXECUTE),
      p.getGroupAction(),
      p.getOtherAction());
  return new PermissionStatus(perm.getUserName(), perm.getGroupName(),
      ancestorPerm);
}
 
Example #21
Source File: FTPFileSystem.java    From big-c with Apache License 2.0
private FsPermission getPermissions(FTPFile ftpFile) {
  FsAction user, group, others;
  user = getFsAction(FTPFile.USER_ACCESS, ftpFile);
  group = getFsAction(FTPFile.GROUP_ACCESS, ftpFile);
  others = getFsAction(FTPFile.WORLD_ACCESS, ftpFile);
  return new FsPermission(user, group, others);
}
 
Example #22
Source File: FSDownload.java    From big-c with Apache License 2.0
/**
 * Returns true if all ancestors of the specified path have the 'execute'
 * permission set for all users (i.e. that other users can traverse
 * the directory hierarchy to the given path)
 */
@VisibleForTesting
static boolean ancestorsHaveExecutePermissions(FileSystem fs,
    Path path, LoadingCache<Path,Future<FileStatus>> statCache)
    throws IOException {
  Path current = path;
  while (current != null) {
    //the subdirs in the path should have execute permissions for others
    if (!checkPermissionOfOther(fs, current, FsAction.EXECUTE, statCache)) {
      return false;
    }
    current = current.getParent();
  }
  return true;
}
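For reference, a minimal standalone version of the same check, without the stat cache, could look like this (assumed helper name, using only public FileSystem calls):

static boolean othersCanTraverse(FileSystem fs, Path path) throws IOException {
  // Walk from the path up to the root; every component must grant
  // 'execute' to others for the hierarchy to be traversable.
  for (Path current = path; current != null; current = current.getParent()) {
    FsAction other = fs.getFileStatus(current).getPermission().getOtherAction();
    if (!other.implies(FsAction.EXECUTE)) {
      return false;
    }
  }
  return true;
}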
 
Example #23
Source File: FSDirAttrOp.java    From hadoop with Apache License 2.0
static HdfsFileStatus setStoragePolicy(
    FSDirectory fsd, BlockManager bm, String src, final String policyName)
    throws IOException {
  if (!fsd.isStoragePolicyEnabled()) {
    throw new IOException(
        "Failed to set storage policy since "
            + DFS_STORAGE_POLICY_ENABLED_KEY + " is set to false.");
  }
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  INodesInPath iip;
  fsd.writeLock();
  try {
    src = FSDirectory.resolvePath(src, pathComponents, fsd);
    iip = fsd.getINodesInPath4Write(src);

    if (fsd.isPermissionEnabled()) {
      fsd.checkPathAccess(pc, iip, FsAction.WRITE);
    }

    // get the corresponding policy and make sure the policy name is valid
    BlockStoragePolicy policy = bm.getStoragePolicy(policyName);
    if (policy == null) {
      throw new HadoopIllegalArgumentException(
          "Cannot find a block policy with the name " + policyName);
    }
    unprotectedSetStoragePolicy(fsd, bm, iip, policy.getId());
    fsd.getEditLog().logSetStoragePolicy(src, policy.getId());
  } finally {
    fsd.writeUnlock();
  }
  return fsd.getAuditFileInfo(iip);
}
 
Example #24
Source File: FSDirAttrOp.java    From hadoop with Apache License 2.0
static boolean setReplication(
    FSDirectory fsd, BlockManager bm, String src, final short replication)
    throws IOException {
  bm.verifyReplication(src, replication, null);
  final boolean isFile;
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  fsd.writeLock();
  try {
    src = fsd.resolvePath(pc, src, pathComponents);
    final INodesInPath iip = fsd.getINodesInPath4Write(src);
    if (fsd.isPermissionEnabled()) {
      fsd.checkPathAccess(pc, iip, FsAction.WRITE);
    }

    final short[] blockRepls = new short[2]; // 0: old, 1: new
    final Block[] blocks = unprotectedSetReplication(fsd, src, replication,
                                                     blockRepls);
    isFile = blocks != null;
    if (isFile) {
      fsd.getEditLog().logSetReplication(src, replication);
      bm.setReplication(blockRepls[0], blockRepls[1], src, blocks);
    }
  } finally {
    fsd.writeUnlock();
  }
  return isFile;
}
 
Example #25
Source File: FSPermissionChecker.java    From big-c with Apache License 2.0
/** Guarded by {@link FSNamesystem#readLock()} */
private void checkTraverse(INodeAttributes[] inodes, String path, int last
    ) throws AccessControlException {
  for(int j = 0; j <= last; j++) {
    check(inodes[j], path, FsAction.EXECUTE);
  }
}
 
Example #26
Source File: TestDistCacheEmulation.java    From hadoop with Apache License 2.0
/**
 * Validate setupGenerateDistCacheData by validating <li>permissions of the
 * distributed cache directory and <li>content of the generated sequence file.
 * This includes validation of dist cache file paths and their file sizes.
 */
private void doValidateSetupGenDC(
    RecordReader<LongWritable, BytesWritable> reader, FileSystem fs,
    long[] sortedFileSizes) throws IOException, InterruptedException {

  // Validate permissions of dist cache directory
  Path distCacheDir = dce.getDistributedCacheDir();
  assertEquals(
      "Wrong permissions for distributed cache dir " + distCacheDir,
      fs.getFileStatus(distCacheDir).getPermission().getOtherAction()
          .and(FsAction.EXECUTE), FsAction.EXECUTE);

  // Validate the content of the sequence file generated by
  // dce.setupGenerateDistCacheData().
  LongWritable key = new LongWritable();
  BytesWritable val = new BytesWritable();
  for (int i = 0; i < sortedFileSizes.length; i++) {
    assertTrue("Number of files written to the sequence file by "
        + "setupGenerateDistCacheData is less than the expected.",
        reader.nextKeyValue());
    key = reader.getCurrentKey();
    val = reader.getCurrentValue();
    long fileSize = key.get();
    String file = new String(val.getBytes(), 0, val.getLength());

    // Dist Cache files should be sorted based on file size.
    assertEquals("Dist cache file size is wrong.", sortedFileSizes[i],
        fileSize);

    // Validate dist cache file path.

    // parent dir of dist cache file
    Path parent = new Path(file).getParent().makeQualified(fs.getUri(),
        fs.getWorkingDirectory());
    // should exist in dist cache dir
    assertTrue("Public dist cache file path is wrong.",
        distCacheDir.equals(parent));
  }
}
 
Example #27
Source File: FSDirectory.java    From hadoop with Apache License 2.0
/**
 * Check whether current user have permissions to access the path. For more
 * details of the parameters, see
 * {@link FSPermissionChecker#checkPermission}.
 */
void checkPermission(FSPermissionChecker pc, INodesInPath iip,
    boolean doCheckOwner, FsAction ancestorAccess, FsAction parentAccess,
    FsAction access, FsAction subAccess)
  throws AccessControlException {
  checkPermission(pc, iip, doCheckOwner, ancestorAccess,
      parentAccess, access, subAccess, false);
}
 
Example #28
Source File: ViewFs.java    From hadoop with Apache License 2.0
@Override
public void access(Path path, FsAction mode) throws AccessControlException,
    FileNotFoundException, UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res =
    fsState.resolve(getUriPath(path), true);
  res.targetFileSystem.access(res.remainingPath, mode);
}
 
Example #29
Source File: RangerHdfsAuthorizerTest.java    From ranger with Apache License 2.0
/**
 * Checks that the <b>directory</b> access is <b>blocked</b> for the given user in the given groups.
 * Throws an exception, if not.
 */
public void checkDirAccessBlocked(FsAction access, String userName, String... groups)
        throws AccessControlException {
    try {
        checkDirAccess(access, userName, groups);
        Assert.fail("Access should be blocked for parent directory of " + path + " access=" + access
                + " for user=" + userName + " groups=" + Arrays.asList(groups));
    } catch (AccessControlException ace) {
        Assert.assertNotNull(ace);
    }
}
 
Example #30
Source File: DremioHadoopFileSystemWrapper.java    From dremio-oss with Apache License 2.0
private void checkAccessAllowed(org.apache.hadoop.fs.Path f, FsAction mode) throws IOException {
  if (!isMapRfs) {
    underlyingFs.access(f, mode);
    return;
  }

  // On MapR-FS, probe the path first before performing the access check
  openNonExistentFileInPath(f);
  underlyingFs.access(f, mode);
}