Java Code Examples for org.apache.hadoop.fs.permission.FsAction

The following examples show how to use org.apache.hadoop.fs.permission.FsAction. These examples are extracted from open source projects; the source project and file are noted above each example.
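FsAction is an enum of POSIX-style actions (NONE, EXECUTE, WRITE, WRITE_EXECUTE, READ, READ_EXECUTE, READ_WRITE, ALL) with set-style operations such as implies, and, and or. Before the project examples, here is a minimal self-contained sketch of how FsAction values combine and how a user/group/other triple of them forms an FsPermission; the class name FsActionDemo is made up for illustration.

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

// FsActionDemo is a hypothetical class name used only for this sketch.
public class FsActionDemo {
  public static void main(String[] args) {
    // READ_WRITE implies READ and WRITE, but not EXECUTE.
    System.out.println(FsAction.READ_WRITE.implies(FsAction.READ));    // true
    System.out.println(FsAction.READ_WRITE.implies(FsAction.EXECUTE)); // false

    // Actions combine like bit sets: READ or EXECUTE gives READ_EXECUTE.
    FsAction combined = FsAction.READ.or(FsAction.EXECUTE);
    System.out.println(combined.SYMBOL);                               // r-x

    // A user/group/other triple of FsAction values forms an FsPermission (mode 640).
    FsPermission perm = new FsPermission(FsAction.READ_WRITE, FsAction.READ, FsAction.NONE);
    System.out.println(perm);                                          // rw-r-----
  }
}
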
Example 1
Source Project: big-c   Source File: BaseTestHttpFSWith.java    License: Apache License 2.0
private void testCreate(Path path, boolean override) throws Exception {
  FileSystem fs = getHttpFSFileSystem();
  FsPermission permission = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
  OutputStream os = fs.create(new Path(path.toUri().getPath()), permission, override, 1024,
                              (short) 2, 100 * 1024 * 1024, null);
  os.write(1);
  os.close();
  fs.close();

  fs = FileSystem.get(getProxiedFSConf());
  FileStatus status = fs.getFileStatus(path);
  if (!isLocalFS()) {
    Assert.assertEquals(status.getReplication(), 2);
    Assert.assertEquals(status.getBlockSize(), 100 * 1024 * 1024);
  }
  Assert.assertEquals(status.getPermission(), permission);
  InputStream is = fs.open(path);
  Assert.assertEquals(is.read(), 1);
  is.close();
  fs.close();
}
 
Example 2
@Override
protected void testSetPermission() throws Exception {
  if (Path.WINDOWS) {
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    Path path = new Path(getProxiedFSTestDir(), "foodir");
    fs.mkdirs(path);

    fs = getHttpFSFileSystem();
    FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    fs.setPermission(path, permission1);
    fs.close();

    fs = FileSystem.get(getProxiedFSConf());
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    FsPermission permission2 = status1.getPermission();
    Assert.assertEquals(permission2, permission1);

    // sticky bit not supported on Windows with local file system, so the
    // subclass skips that part of the test
  } else {
    super.testSetPermission();
  }
}
 
Example 3
Source Project: big-c   Source File: TestFSPermissionChecker.java    License: Apache License 2.0
private void assertPermissionDenied(UserGroupInformation user, String path,
    FsAction access) throws IOException {
  try {
    INodesInPath iip = dir.getINodesInPath(path, true);
    dir.getPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(iip,
      false, null, null, access, null, false);
    fail("expected AccessControlException for user + " + user + ", path = " +
      path + ", access = " + access);
  } catch (AccessControlException e) {
    assertTrue("Permission denied messages must carry the username",
            e.getMessage().contains(user.getUserName().toString()));
    assertTrue("Permission denied messages must carry the path parent",
            e.getMessage().contains(
                new Path(path).getParent().toUri().getPath()));
  }
}
 
Example 4
Source Project: big-c   Source File: FSPermissionChecker.java    License: Apache License 2.0
/**
 * Whether a cache pool can be accessed by the current context
 *
 * @param pool CachePool being accessed
 * @param access type of action being performed on the cache pool
 * @throws AccessControlException if pool cannot be accessed
 */
public void checkPermission(CachePool pool, FsAction access)
    throws AccessControlException {
  FsPermission mode = pool.getMode();
  if (isSuperUser()) {
    return;
  }
  if (getUser().equals(pool.getOwnerName())
      && mode.getUserAction().implies(access)) {
    return;
  }
  if (getGroups().contains(pool.getGroupName())
      && mode.getGroupAction().implies(access)) {
    return;
  }
  if (mode.getOtherAction().implies(access)) {
    return;
  }
  throw new AccessControlException("Permission denied while accessing pool "
      + pool.getPoolName() + ": user " + getUser() + " does not have "
      + access.toString() + " permissions.");
}
 
Example 5
Source Project: hadoop   Source File: FSDirXAttrOp.java    License: Apache License 2.0
private static void checkXAttrChangeAccess(
    FSDirectory fsd, INodesInPath iip, XAttr xAttr,
    FSPermissionChecker pc)
    throws AccessControlException {
  if (fsd.isPermissionEnabled() && xAttr.getNameSpace() == XAttr.NameSpace
      .USER) {
    final INode inode = iip.getLastINode();
    if (inode != null &&
        inode.isDirectory() &&
        inode.getFsPermission().getStickyBit()) {
      if (!pc.isSuperUser()) {
        fsd.checkOwner(pc, iip);
      }
    } else {
      fsd.checkPathAccess(pc, iip, FsAction.WRITE);
    }
  }
}
 
Example 6
Source Project: ranger   Source File: HDFSRangerTest.java    License: Apache License 2.0
void createFile(String baseDir, Integer index) throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file - the AccessControlEnforcer won't be invoked as we are the "superuser"
    String dirName = baseDir + (index != null ? String.valueOf(index) : "");
    String fileName = dirName + "/dummy-data";
    final Path file = new Path(fileName);
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Change permissions to read-only
    fileSystem.setPermission(file, new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE));
}
 
Example 7
Source Project: hadoop   Source File: TestDFSPermission.java    License: Apache License 2.0
@Test
public void testAccessGroupMember() throws IOException, InterruptedException {
  FileSystem rootFs = FileSystem.get(conf);
  Path p2 = new Path("/p2");
  rootFs.mkdirs(p2);
  rootFs.setOwner(p2, UserGroupInformation.getCurrentUser().getShortUserName(), GROUP1_NAME);
  rootFs.setPermission(p2, new FsPermission((short) 0740));
  fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(conf);
    }
  });
  fs.access(p2, FsAction.READ);
  try {
    fs.access(p2, FsAction.EXECUTE);
    fail("The access call should have failed.");
  } catch (AccessControlException e) {
    assertTrue("Permission denied messages must carry the username",
            e.getMessage().contains(USER1_NAME));
    assertTrue("Permission denied messages must carry the path parent",
            e.getMessage().contains(
                p2.getParent().toUri().getPath()));
  }
}
 
Example 8
Source Project: big-c   Source File: FSDownload.java    License: Apache License 2.0
private static boolean checkPublicPermsForAll(FileSystem fs, 
      FileStatus status, FsAction dir, FsAction file) 
  throws IOException {
  FsPermission perms = status.getPermission();
  FsAction otherAction = perms.getOtherAction();
  if (status.isDirectory()) {
    if (!otherAction.implies(dir)) {
      return false;
    }
    
    for (FileStatus child : fs.listStatus(status.getPath())) {
      if(!checkPublicPermsForAll(fs, child, dir, file)) {
        return false;
      }
    }
    return true;
  }
  return (otherAction.implies(file));
}
 
Example 9
Source Project: celos   Source File: HiveTableDeployer.java    License: Apache License 2.0
private Path createTempHdfsFileForInsertion(FixTable fixTable, TestRun testRun) throws Exception {

    Path pathToParent = new Path(testRun.getHdfsPrefix(), ".hive");
    Path pathTo = new Path(pathToParent, UUID.randomUUID().toString());
    FileSystem fileSystem = testRun.getCiContext().getFileSystem();
    fileSystem.mkdirs(pathTo.getParent());
    FSDataOutputStream outputStream = fileSystem.create(pathTo);

    CSVWriter writer = new CSVWriter(new OutputStreamWriter(outputStream), '\t', CSVWriter.NO_QUOTE_CHARACTER);

    for (FixTable.FixRow fixRow : fixTable.getRows()) {
        List<String> rowData = Lists.newArrayList();
        for (String colName : fixTable.getColumnNames()) {
            rowData.add(fixRow.getCells().get(colName));
        }
        String[] dataArray = rowData.toArray(new String[rowData.size()]);
        writer.writeNext(dataArray);
    }

    writer.close();

    fileSystem.setPermission(pathToParent, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
    fileSystem.setPermission(pathTo, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
    return pathTo;
}
 
Example 10
Source Project: hadoop   Source File: TestBlobMetadata.java    License: Apache License 2.0
@SuppressWarnings("deprecation")
@Test
public void testPermissionMetadata() throws Exception {
  FsPermission justMe = new FsPermission(FsAction.READ_WRITE, FsAction.NONE,
      FsAction.NONE);
  Path selfishFile = new Path("/noOneElse");
  fs.create(selfishFile, justMe, true, 4096, fs.getDefaultReplication(),
      fs.getDefaultBlockSize(), null).close();
  HashMap<String, String> metadata = backingStore
      .getMetadata(AzureBlobStorageTestAccount.toMockUri(selfishFile));
  assertNotNull(metadata);
  String storedPermission = metadata.get("hdi_permission");
  assertEquals(getExpectedPermissionString("rw-------"), storedPermission);
  FileStatus retrievedStatus = fs.getFileStatus(selfishFile);
  assertNotNull(retrievedStatus);
  assertEquals(justMe, retrievedStatus.getPermission());
  assertEquals(getExpectedOwner(), retrievedStatus.getOwner());
  assertEquals(NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,
      retrievedStatus.getGroup());
}
 
Example 11
Source Project: ranger   Source File: RangerHdfsAuthorizer.java    License: Apache License 2.0
public void logHadoopEvent(String path, FsAction action, boolean accessGranted) {
	if(LOG.isDebugEnabled()) {
		LOG.debug("==> RangerHdfsAuditHandler.logHadoopEvent(" + path + ", " + action + ", " + accessGranted + ")");
	}

	if(auditEvent != null) {
		auditEvent.setResultReason(path);
		auditEvent.setAccessResult((short) (accessGranted ? 1 : 0));
		auditEvent.setAccessType(action == null ? null : action.toString());
		auditEvent.setAclEnforcer(hadoopModuleName);
		auditEvent.setPolicyId(-1);
	}

	if(LOG.isDebugEnabled()) {
		LOG.debug("<== RangerHdfsAuditHandler.logHadoopEvent(" + path + ", " + action + ", " + accessGranted + "): " + auditEvent);
	}
}
 
Example 12
Source Project: big-c   Source File: FSDirectory.java    License: Apache License 2.0
/**
 * Check whether current user have permissions to access the path. For more
 * details of the parameters, see
 * {@link FSPermissionChecker#checkPermission}.
 */
void checkPermission(FSPermissionChecker pc, INodesInPath iip,
    boolean doCheckOwner, FsAction ancestorAccess, FsAction parentAccess,
    FsAction access, FsAction subAccess)
  throws AccessControlException {
  checkPermission(pc, iip, doCheckOwner, ancestorAccess,
      parentAccess, access, subAccess, false);
}
 
Example 13
Source Project: Hi-WAY   Source File: Data.java    License: Apache License 2.0
private void mkHdfsDir(Path dir) throws IOException {
	if (dir == null || hdfs.isDirectory(dir))
		return;
	mkHdfsDir(dir.getParent());
	hdfs.mkdirs(dir);
	hdfs.setPermission(dir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
}
 
Example 14
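// Translates a comma-separated Sentry privilege string into the union of the
// mapped FsAction values; ACTION_MAPPING (a map keyed by upper-cased privilege
// name, defined in the enclosing class) supplies the FsAction for each privilege.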
static FsAction getFAction(String sentryPriv) {
  String[] strPrivs = sentryPriv.trim().split(",");
  FsAction retVal = FsAction.NONE;
  for (String strPriv : strPrivs) {
    retVal = retVal.or(ACTION_MAPPING.get(strPriv.toUpperCase()));
  }
  return retVal;
}
 
Example 15
Source Project: EasyML   Source File: HDFSIO.java    License: Apache License 2.0
/**
 * Make directory in the uri position
 * @param uri target position
 * @return whether success or not
 * @throws IOException
 */
public static boolean mkdirs(String uri) throws IOException {
	Path path = new Path(Constants.NAME_NODE + "/" + uri);
	System.out.println("[mkdirs]" + path.toString());

	FsPermission dirPerm = new FsPermission(FsAction.ALL,FsAction.ALL,FsAction.ALL);
	Boolean flag = fs.mkdirs(path);
	if( flag )
		fs.setPermission(path, new FsPermission(dirPerm));
	return flag;
}
 
Example 16
@Test
public void testWrite() throws Exception {
  String streamString = "testContents";

  FileStatus status = fs.getFileStatus(testTempPath);
  OwnerAndPermission ownerAndPermission =
      new OwnerAndPermission(status.getOwner(), status.getGroup(), new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
  CopyableFile cf = CopyableFileUtils.getTestCopyableFile(ownerAndPermission);

  CopyableDatasetMetadata metadata = new CopyableDatasetMetadata(new TestCopyableDataset(new Path("/source")));

  WorkUnitState state = TestUtils.createTestWorkUnitState();
  state.setProp(ConfigurationKeys.WRITER_STAGING_DIR, new Path(testTempPath, "staging").toString());
  state.setProp(ConfigurationKeys.WRITER_OUTPUT_DIR, new Path(testTempPath, "output").toString());
  state.setProp(ConfigurationKeys.WRITER_FILE_PATH, RandomStringUtils.randomAlphabetic(5));
  CopySource.serializeCopyEntity(state, cf);
  CopySource.serializeCopyableDataset(state, metadata);

  FileAwareInputStreamDataWriter dataWriter = new FileAwareInputStreamDataWriter(state, 1, 0);

  FileAwareInputStream fileAwareInputStream = FileAwareInputStream.builder().file(cf)
      .inputStream(StreamUtils.convertStream(IOUtils.toInputStream(streamString))).build();
  dataWriter.write(fileAwareInputStream);
  dataWriter.commit();
  Path writtenFilePath = new Path(new Path(state.getProp(ConfigurationKeys.WRITER_OUTPUT_DIR),
      cf.getDatasetAndPartition(metadata).identifier()), cf.getDestination());
  Assert.assertEquals(IOUtils.toString(new FileInputStream(writtenFilePath.toString())), streamString);
}
 
Example 17
Source Project: rya   Source File: MergeTool.java    License: Apache License 2.0
/**
 * Imports the child files that hold the table data into the parent instance as a temporary table.
 * @param childTableName the name of the child table to import into a temporary parent table.
 * @throws Exception
 */
public void importChildFilesToTempParentTable(final String childTableName) throws Exception {
    // Create a temporary table in the parent instance to import the child files to.  Then run the merge process on the parent table and temp child table.
    final String tempChildTable = childTableName + TEMP_SUFFIX;

    createTempTableIfNeeded(tempChildTable);

    final AccumuloRdfConfiguration parentAccumuloRdfConfiguration = new AccumuloRdfConfiguration(conf);
    parentAccumuloRdfConfiguration.setTablePrefix(childTablePrefix);
    final Connector parentConnector = AccumuloRyaUtils.setupConnector(parentAccumuloRdfConfiguration);
    final TableOperations parentTableOperations = parentConnector.tableOperations();

    final Path localWorkDir = CopyTool.getPath(localMergeFileImportDir, childTableName);
    final Path hdfsBaseWorkDir = CopyTool.getPath(baseImportDir, childTableName);

    CopyTool.copyLocalToHdfs(localWorkDir, hdfsBaseWorkDir, conf);

    final Path files = CopyTool.getPath(hdfsBaseWorkDir.toString(), "files");
    final Path failures = CopyTool.getPath(hdfsBaseWorkDir.toString(), "failures");
    final FileSystem fs = FileSystem.get(conf);
    // With HDFS permissions on, we need to make sure the Accumulo user can read/move the files
    fs.setPermission(hdfsBaseWorkDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
    if (fs.exists(failures)) {
        fs.delete(failures, true);
    }
    fs.mkdirs(failures);

    parentTableOperations.importDirectory(tempChildTable, files.toString(), failures.toString(), false);

    AccumuloRyaUtils.printTablePretty(tempChildTable, conf);
}
 
Example 18
Source Project: big-c   Source File: TestAclWithSnapshot.java    License: Apache License 2.0
/**
 * Asserts that permission is granted to the given fs/user for the given
 * directory.
 *
 * @param fs FileSystem to check
 * @param user UserGroupInformation owner of fs
 * @param pathToCheck Path directory to check
 * @throws Exception if there is an unexpected error
 */
private static void assertDirPermissionGranted(FileSystem fs,
    UserGroupInformation user, Path pathToCheck) throws Exception {
  try {
    fs.listStatus(pathToCheck);
    fs.access(pathToCheck, FsAction.READ);
  } catch (AccessControlException e) {
    fail("expected permission granted for user " + user + ", path = " +
      pathToCheck);
  }
}
 
Example 19
Source Project: big-c   Source File: TestCopyMapper.java    License: Apache License 2.0
private static void changeUserGroup(String user, String group)
        throws IOException {
  FileSystem fs = cluster.getFileSystem();
  FsPermission changedPermission = new FsPermission(
          FsAction.ALL, FsAction.ALL, FsAction.ALL
  );
  for (Path path : pathList)
    if (fs.isFile(path)) {
      fs.setOwner(path, user, group);
      fs.setPermission(path, changedPermission);
    }
}
 
Example 20
Source Project: big-c   Source File: TestAclCommands.java    License: Apache License 2.0
@Override
public FileStatus[] listStatus(Path f) throws IOException {
  FsPermission perm = new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE,
    FsAction.READ_EXECUTE);
  Path path = new Path("/foo");
  FileStatus stat = new FileStatus(1000, true, 3, 1000, 0, 0, perm, "owner",
    "group", path);
  return new FileStatus[] { stat };
}
 
Example 21
Source Project: big-c   Source File: FSDirStatAndListingOp.java    License: Apache License 2.0
static DirectoryListing getListingInt(FSDirectory fsd, final String srcArg,
    byte[] startAfter, boolean needLocation) throws IOException {
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory
      .getPathComponentsForReservedPath(srcArg);
  final String startAfterString = new String(startAfter, Charsets.UTF_8);
  final String src = fsd.resolvePath(pc, srcArg, pathComponents);
  final INodesInPath iip = fsd.getINodesInPath(src, true);

  // Get file name when startAfter is an INodePath
  if (FSDirectory.isReservedName(startAfterString)) {
    byte[][] startAfterComponents = FSDirectory
        .getPathComponentsForReservedPath(startAfterString);
    try {
      String tmp = FSDirectory.resolvePath(src, startAfterComponents, fsd);
      byte[][] regularPath = INode.getPathComponents(tmp);
      startAfter = regularPath[regularPath.length - 1];
    } catch (IOException e) {
      // Possibly the inode is deleted
      throw new DirectoryListingStartAfterNotFoundException(
          "Can't find startAfter " + startAfterString);
    }
  }

  boolean isSuperUser = true;
  if (fsd.isPermissionEnabled()) {
    if (iip.getLastINode() != null && iip.getLastINode().isDirectory()) {
      fsd.checkPathAccess(pc, iip, FsAction.READ_EXECUTE);
    } else {
      fsd.checkTraverse(pc, iip);
    }
    isSuperUser = pc.isSuperUser();
  }
  return getListing(fsd, iip, src, startAfter, needLocation, isSuperUser);
}
 
Example 22
Source Project: big-c   Source File: TestDistCpWithAcls.java    License: Apache License 2.0
/**
 * Create a new AclEntry with scope, type and permission (no name).
 *
 * @param scope AclEntryScope scope of the ACL entry
 * @param type AclEntryType ACL entry type
 * @param permission FsAction set of permissions in the ACL entry
 * @return AclEntry new AclEntry
 */
private static AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
    FsAction permission) {
  return new AclEntry.Builder()
    .setScope(scope)
    .setType(type)
    .setPermission(permission)
    .build();
}
 
Example 23
@BeforeClass
public static void setupTest() throws Exception {
  GCWatcher.init(0.60);
  JavaHome.checkJavaHome();
  LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
  File testDirectory = new File(TMPDIR, "blur-cluster-test").getAbsoluteFile();
  testDirectory.mkdirs();

  Path directory = new Path(testDirectory.getPath());
  FsPermission dirPermissions = localFS.getFileStatus(directory).getPermission();
  FsAction userAction = dirPermissions.getUserAction();
  FsAction groupAction = dirPermissions.getGroupAction();
  FsAction otherAction = dirPermissions.getOtherAction();

  // FsAction ordinals run NONE=0 .. ALL=7, matching the octal digits of a POSIX
  // mode, so concatenating the user/group/other ordinals yields a string like "755".
  StringBuilder builder = new StringBuilder();
  builder.append(userAction.ordinal());
  builder.append(groupAction.ordinal());
  builder.append(otherAction.ordinal());
  String dirPermissionNum = builder.toString();
  System.setProperty("dfs.datanode.data.dir.perm", dirPermissionNum);
  testDirectory.delete();
  miniCluster = new MiniCluster();
  miniCluster.startBlurCluster(new File(testDirectory, "cluster").getAbsolutePath(), 2, 3, true, false);

  TEST_ROOT_DIR = new Path(miniCluster.getFileSystemUri().toString() + "/blur_test");
  System.setProperty("hadoop.log.dir", "./target/BlurOutputFormatTest/hadoop_log");
  try {
    fileSystem = TEST_ROOT_DIR.getFileSystem(conf);
  } catch (IOException io) {
    throw new RuntimeException("problem getting local fs", io);
  }

  FileSystem.setDefaultUri(conf, miniCluster.getFileSystemUri());

  miniCluster.startMrMiniCluster();
  conf = miniCluster.getMRConfiguration();

  BufferStore.initNewBuffer(128, 128 * 128);
}
 
Example 24
Source Project: hadoop   Source File: DFSClient.java    License: Apache License 2.0
public void checkAccess(String src, FsAction mode) throws IOException {
  checkOpen();
  TraceScope scope = getPathTraceScope("checkAccess", src);
  try {
    namenode.checkAccess(src, mode);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        FileNotFoundException.class,
        UnresolvedPathException.class);
  } finally {
    scope.close();
  }
}
 
Example 25
Source Project: hadoop   Source File: FSDirStatAndListingOp.java    License: Apache License 2.0
static DirectoryListing getListingInt(FSDirectory fsd, final String srcArg,
    byte[] startAfter, boolean needLocation) throws IOException {
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory
      .getPathComponentsForReservedPath(srcArg);
  final String startAfterString = new String(startAfter, Charsets.UTF_8);
  final String src = fsd.resolvePath(pc, srcArg, pathComponents);
  final INodesInPath iip = fsd.getINodesInPath(src, true);

  // Get file name when startAfter is an INodePath
  if (FSDirectory.isReservedName(startAfterString)) {
    byte[][] startAfterComponents = FSDirectory
        .getPathComponentsForReservedPath(startAfterString);
    try {
      String tmp = FSDirectory.resolvePath(src, startAfterComponents, fsd);
      byte[][] regularPath = INode.getPathComponents(tmp);
      startAfter = regularPath[regularPath.length - 1];
    } catch (IOException e) {
      // Possibly the inode is deleted
      throw new DirectoryListingStartAfterNotFoundException(
          "Can't find startAfter " + startAfterString);
    }
  }

  boolean isSuperUser = true;
  if (fsd.isPermissionEnabled()) {
    if (iip.getLastINode() != null && iip.getLastINode().isDirectory()) {
      fsd.checkPathAccess(pc, iip, FsAction.READ_EXECUTE);
    } else {
      fsd.checkTraverse(pc, iip);
    }
    isSuperUser = pc.isSuperUser();
  }
  return getListing(fsd, iip, src, startAfter, needLocation, isSuperUser);
}
 
Example 26
Source Project: ranger   Source File: RangerHdfsAuthorizerTest.java    License: Apache License 2.0
/**
 * Checks that the <b>file</b> access is <b>blocked</b> for the given user in the given groups.
 * Throws an exception, if not.
 */
public void checkAccessBlocked(FsAction access, String userName, String... groups)
        throws AccessControlException {
    try {
        checkAccess(access, userName, groups);
        Assert.fail("Access should be blocked for " + path + " access=" + access + " for user=" + userName
                + " groups=" + Arrays.asList(groups));
    } catch (AccessControlException ace) {
        Assert.assertNotNull(ace);
    }
}
 
Example 27
Source Project: big-c   Source File: ClientDistributedCacheManager.java    License: Apache License 2.0
/**
 * Checks for a given path whether the Other permissions on it 
 * imply the permission in the passed FsAction
 * @param fs
 * @param path
 * @param action
 * @return true if the path in the uri is visible to all, false otherwise
 * @throws IOException
 */
private static boolean checkPermissionOfOther(FileSystem fs, Path path,
    FsAction action, Map<URI, FileStatus> statCache) throws IOException {
  FileStatus status = getFileStatus(fs, path.toUri(), statCache);
  FsPermission perms = status.getPermission();
  FsAction otherAction = perms.getOtherAction();
  if (otherAction.implies(action)) {
    return true;
  }
  return false;
}
 
Example 28
Source Project: hadoop   Source File: FSPermissionChecker.java    License: Apache License 2.0
@Override
public void checkPermission(String fsOwner, String supergroup,
    UserGroupInformation callerUgi, INodeAttributes[] inodeAttrs,
    INode[] inodes, byte[][] pathByNameArr, int snapshotId, String path,
    int ancestorIndex, boolean doCheckOwner, FsAction ancestorAccess,
    FsAction parentAccess, FsAction access, FsAction subAccess,
    boolean ignoreEmptyDir)
    throws AccessControlException {
  for(; ancestorIndex >= 0 && inodes[ancestorIndex] == null;
      ancestorIndex--);
  checkTraverse(inodeAttrs, path, ancestorIndex);

  final INodeAttributes last = inodeAttrs[inodeAttrs.length - 1];
  if (parentAccess != null && parentAccess.implies(FsAction.WRITE)
      && inodeAttrs.length > 1 && last != null) {
    checkStickyBit(inodeAttrs[inodeAttrs.length - 2], last);
  }
  if (ancestorAccess != null && inodeAttrs.length > 1) {
    check(inodeAttrs, path, ancestorIndex, ancestorAccess);
  }
  if (parentAccess != null && inodeAttrs.length > 1) {
    check(inodeAttrs, path, inodeAttrs.length - 2, parentAccess);
  }
  if (access != null) {
    check(last, path, access);
  }
  if (subAccess != null) {
    INode rawLast = inodes[inodeAttrs.length - 1];
    checkSubAccess(pathByNameArr, inodeAttrs.length - 1, rawLast,
        snapshotId, subAccess, ignoreEmptyDir);
  }
  if (doCheckOwner) {
    checkOwner(last);
  }
}
 
Example 29
Source Project: dremio-oss   Source File: HadoopFileSystemWrapper.java    License: Apache License 2.0
@Override
public void access(final Path path, final FsAction mode) throws AccessControlException, FileNotFoundException, IOException {
  try (WaitRecorder recorder = OperatorStats.getWaitRecorder(operatorStats)) {
    underlyingFs.access(path, mode);
  } catch(FSError e) {
    throw propagateFSError(e);
  }
}
 
Example 30
Source Project: big-c   Source File: ViewFileSystem.java    License: Apache License 2.0
@Override
public void access(Path path, FsAction mode) throws AccessControlException,
    FileNotFoundException, IOException {
  InodeTree.ResolveResult<FileSystem> res =
    fsState.resolve(getUriPath(path), true);
  res.targetFileSystem.access(res.remainingPath, mode);
}