org.apache.hadoop.security.AccessControlException Java Examples
The following examples show how to use org.apache.hadoop.security.AccessControlException. Each example notes the project and source file it was taken from.
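Before the project examples, here is a minimal, self-contained sketch of the typical client-side pattern: attempt an HDFS operation and catch AccessControlException when the caller lacks permission. This is an illustration only; the path /tmp/protected is hypothetical, and it assumes a reachable FileSystem plus Hadoop 2.6+ (where FileSystem#access was introduced).

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;

public class AccessControlExceptionDemo {
  public static void main(String[] args) throws IOException {
    // Reads core-site.xml/hdfs-site.xml from the classpath, if present.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/protected"); // hypothetical path, for illustration
    try {
      // Throws AccessControlException if permission checking is enabled
      // and the current user lacks WRITE access to the path.
      fs.access(path, FsAction.WRITE);
      System.out.println("Write access granted on " + path);
    } catch (AccessControlException e) {
      // Permission denied: handle, log, or rethrow, as the examples below do.
      System.err.println("Permission denied: " + e.getMessage());
    }
  }
}

Note the two sides of this exception in the examples that follow: server-side code (Examples #2, #7, #24) constructs and throws it, while client-side code (Examples #5, #6, #21) typically receives it wrapped in a RemoteException and recovers it with unwrapRemoteException.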
Example #1
Source File: ViewFsBaseTest.java From big-c with Apache License 2.0
@Test
public void testGetFileChecksum() throws AccessControlException,
    UnresolvedLinkException, IOException {
  // Stub the mount table so that the path resolves to "someFile" on the
  // mocked target filesystem.
  AbstractFileSystem mockAFS = Mockito.mock(AbstractFileSystem.class);
  InodeTree.ResolveResult<AbstractFileSystem> res =
      new InodeTree.ResolveResult<AbstractFileSystem>(null, mockAFS, null,
          new Path("someFile"));
  @SuppressWarnings("unchecked")
  InodeTree<AbstractFileSystem> fsState = Mockito.mock(InodeTree.class);
  Mockito.when(fsState.resolve(Mockito.anyString(), Mockito.anyBoolean()))
      .thenReturn(res);
  ViewFs vfs = Mockito.mock(ViewFs.class);
  vfs.fsState = fsState;
  Mockito.when(vfs.getFileChecksum(new Path("/tmp/someFile")))
      .thenCallRealMethod();
  vfs.getFileChecksum(new Path("/tmp/someFile"));
  // The call must be delegated to the resolved target filesystem.
  Mockito.verify(mockAFS).getFileChecksum(new Path("someFile"));
}
Example #2
Source File: DataNode.java From hadoop with Apache License 2.0
/** Check whether the current user is in the superuser group. */
private void checkSuperuserPrivilege() throws IOException, AccessControlException {
  if (!isPermissionEnabled) {
    return;
  }
  // Try to get the ugi in the RPC call.
  UserGroupInformation callerUgi = ipcServer.getRemoteUser();
  if (callerUgi == null) {
    // This is not from RPC.
    callerUgi = UserGroupInformation.getCurrentUser();
  }

  // Is this by the DN user itself?
  assert dnUserName != null;
  if (callerUgi.getShortUserName().equals(dnUserName)) {
    return;
  }

  // Is the user a member of the super group?
  List<String> groups = Arrays.asList(callerUgi.getGroupNames());
  if (groups.contains(supergroup)) {
    return;
  }
  // Not a superuser.
  throw new AccessControlException();
}
Example #3
Source File: XAttrPermissionFilter.java From hadoop with Apache License 2.0
static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr,
    boolean isRawPath) throws AccessControlException {
  final boolean isSuperUser = pc.isSuperUser();
  if (xAttr.getNameSpace() == XAttr.NameSpace.USER ||
      (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && isSuperUser)) {
    return;
  }
  if (xAttr.getNameSpace() == XAttr.NameSpace.RAW && isRawPath && isSuperUser) {
    return;
  }
  if (XAttrHelper.getPrefixName(xAttr)
      .equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) {
    if (xAttr.getValue() != null) {
      throw new AccessControlException("Attempt to set a value for '" +
          SECURITY_XATTR_UNREADABLE_BY_SUPERUSER +
          "'. Values are not allowed for this xattr.");
    }
    return;
  }
  throw new AccessControlException("User doesn't have permission for xattr: " +
      XAttrHelper.getPrefixName(xAttr));
}
Example #4
Source File: TestFailoverController.java From hadoop with Apache License 2.0
@Test
public void testFailoverWithoutPermission() throws Exception {
  DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
  Mockito.doThrow(new AccessControlException("Access denied"))
      .when(svc1.proxy).getServiceStatus();
  DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
  Mockito.doThrow(new AccessControlException("Access denied"))
      .when(svc2.proxy).getServiceStatus();
  svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());

  try {
    doFailover(svc1, svc2, false, false);
    fail("Can't failover when access is denied");
  } catch (FailoverFailedException ffe) {
    assertTrue(ffe.getCause().getMessage().contains("Access denied"));
  }
}
Example #5
Source File: DFSClient.java From hadoop with Apache License 2.0
/**
 * Set replication for an existing file.
 * @param src file name
 * @param replication replication to set the file to
 *
 * @see ClientProtocol#setReplication(String, short)
 */
public boolean setReplication(String src, short replication)
    throws IOException {
  TraceScope scope = getPathTraceScope("setReplication", src);
  try {
    return namenode.setReplication(src, replication);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        FileNotFoundException.class,
        SafeModeException.class,
        DSQuotaExceededException.class,
        UnresolvedPathException.class,
        SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
Example #6
Source File: DFSClient.java From big-c with Apache License 2.0
/**
 * Rename file or directory.
 * @see ClientProtocol#rename(String, String)
 * @deprecated Use {@link #rename(String, String, Options.Rename...)} instead.
 */
@Deprecated
public boolean rename(String src, String dst) throws IOException {
  checkOpen();
  TraceScope scope = getSrcDstTraceScope("rename", src, dst);
  try {
    return namenode.rename(src, dst);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        NSQuotaExceededException.class,
        DSQuotaExceededException.class,
        UnresolvedPathException.class,
        SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
Example #7
Source File: PermissionChecker.java From RDFS with Apache License 2.0
private void check(INode inode, FsAction access)
    throws AccessControlException {
  if (inode == null) {
    return;
  }
  FsPermission mode = inode.getFsPermission();

  if (user.equals(inode.getUserName())) { //user class
    if (mode.getUserAction().implies(access)) { return; }
  } else if (groups.contains(inode.getGroupName())) { //group class
    if (mode.getGroupAction().implies(access)) { return; }
  } else { //other class
    if (mode.getOtherAction().implies(access)) { return; }
  }
  throw new AccessControlException("Permission denied: user=" + user
      + ", access=" + access + ", inode=" + inode);
}
Example #8
Source File: ClientNamenodeProtocolTranslatorPB.java From hadoop with Apache License 2.0
@Override
public LocatedBlock addBlock(String src, String clientName,
    ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId,
    String[] favoredNodes)
    throws AccessControlException, FileNotFoundException,
    NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
    IOException {
  AddBlockRequestProto.Builder req = AddBlockRequestProto.newBuilder()
      .setSrc(src).setClientName(clientName).setFileId(fileId);
  if (previous != null)
    req.setPrevious(PBHelper.convert(previous));
  if (excludeNodes != null)
    req.addAllExcludeNodes(PBHelper.convert(excludeNodes));
  if (favoredNodes != null) {
    req.addAllFavoredNodes(Arrays.asList(favoredNodes));
  }
  try {
    return PBHelper.convert(rpcProxy.addBlock(null, req.build()).getBlock());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
Example #9
Source File: DFSClient.java From big-c with Apache License 2.0
/** Remove an xattr of a file or directory. */
public void removeXAttr(String src, String name) throws IOException {
  checkOpen();
  TraceScope scope = getPathTraceScope("removeXAttr", src);
  try {
    namenode.removeXAttr(src, XAttrHelper.buildXAttr(name));
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        FileNotFoundException.class,
        NSQuotaExceededException.class,
        SafeModeException.class,
        SnapshotAccessControlException.class,
        UnresolvedPathException.class);
  } finally {
    scope.close();
  }
}
Example #10
Source File: ViewFs.java From big-c with Apache License 2.0
@Override
public FileStatus[] listStatus(final Path f) throws AccessControlException,
    FileNotFoundException, UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res =
      fsState.resolve(getUriPath(f), true);

  FileStatus[] statusLst = res.targetFileSystem.listStatus(res.remainingPath);
  if (!res.isInternalDir()) {
    // We need to change the name in the FileStatus as described in
    // {@link #getFileStatus }
    ChRootedFs targetFs;
    targetFs = (ChRootedFs) res.targetFileSystem;
    int i = 0;
    for (FileStatus status : statusLst) {
      String suffix = targetFs.stripOutRoot(status.getPath());
      statusLst[i++] = new ViewFsFileStatus(status, this.makeQualified(
          suffix.length() == 0 ? f : new Path(res.resolvedPath, suffix)));
    }
  }
  return statusLst;
}
Example #11
Source File: ClientNamenodeProtocolTranslatorPB.java From hadoop with Apache License 2.0
@Override
public void rename2(String src, String dst, Rename... options)
    throws AccessControlException, DSQuotaExceededException,
    FileAlreadyExistsException, FileNotFoundException,
    NSQuotaExceededException, ParentNotDirectoryException, SafeModeException,
    UnresolvedLinkException, IOException {
  boolean overwrite = false;
  if (options != null) {
    for (Rename option : options) {
      if (option == Rename.OVERWRITE) {
        overwrite = true;
      }
    }
  }
  Rename2RequestProto req = Rename2RequestProto.newBuilder()
      .setSrc(src)
      .setDst(dst)
      .setOverwriteDest(overwrite)
      .build();
  try {
    rpcProxy.rename2(null, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
Example #12
Source File: FSDirAttrOp.java From hadoop with Apache License 2.0
static HdfsFileStatus setOwner(
    FSDirectory fsd, String src, String username, String group)
    throws IOException {
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  INodesInPath iip;
  fsd.writeLock();
  try {
    src = fsd.resolvePath(pc, src, pathComponents);
    iip = fsd.getINodesInPath4Write(src);
    fsd.checkOwner(pc, iip);
    if (!pc.isSuperUser()) {
      if (username != null && !pc.getUser().equals(username)) {
        throw new AccessControlException("Non-super user cannot change owner");
      }
      if (group != null && !pc.containsGroup(group)) {
        throw new AccessControlException("User does not belong to " + group);
      }
    }
    unprotectedSetOwner(fsd, src, username, group);
  } finally {
    fsd.writeUnlock();
  }
  fsd.getEditLog().logSetOwner(src, username, group);
  return fsd.getAuditFileInfo(iip);
}
Example #13
Source File: FSPermissionChecker.java From hadoop with Apache License 2.0
/**
 * Whether a cache pool can be accessed by the current context
 *
 * @param pool CachePool being accessed
 * @param access type of action being performed on the cache pool
 * @throws AccessControlException if pool cannot be accessed
 */
public void checkPermission(CachePool pool, FsAction access)
    throws AccessControlException {
  FsPermission mode = pool.getMode();
  if (isSuperUser()) {
    return;
  }
  if (getUser().equals(pool.getOwnerName())
      && mode.getUserAction().implies(access)) {
    return;
  }
  if (getGroups().contains(pool.getGroupName())
      && mode.getGroupAction().implies(access)) {
    return;
  }
  if (mode.getOtherAction().implies(access)) {
    return;
  }
  throw new AccessControlException("Permission denied while accessing pool "
      + pool.getPoolName() + ": user " + getUser() + " does not have "
      + access.toString() + " permissions.");
}
Example #14
Source File: CrailHDFS.java From crail with Apache License 2.0
@Override
public void renameInternal(Path src, Path dst)
    throws AccessControlException, FileAlreadyExistsException,
    FileNotFoundException, ParentNotDirectoryException,
    UnresolvedLinkException, IOException {
  try {
    CrailNode file = dfs.rename(src.toUri().getRawPath(),
        dst.toUri().getRawPath()).get();
    if (file != null) {
      file.syncDir();
    }
  } catch (Exception e) {
    throw new IOException(e);
  }
}
Example #15
Source File: TestEncryptionZones.java From hadoop with Apache License 2.0
/**
 * Test listing encryption zones as a non super user.
 */
@Test(timeout = 60000)
public void testListEncryptionZonesAsNonSuperUser() throws Exception {
  final UserGroupInformation user = UserGroupInformation
      .createUserForTesting("user", new String[] { "mygroup" });

  final Path testRoot = new Path("/tmp/TestEncryptionZones");
  final Path superPath = new Path(testRoot, "superuseronly");
  final Path allPath = new Path(testRoot, "accessall");

  fsWrapper.mkdir(superPath, new FsPermission((short) 0700), true);
  dfsAdmin.createEncryptionZone(superPath, TEST_KEY);

  fsWrapper.mkdir(allPath, new FsPermission((short) 0707), true);
  dfsAdmin.createEncryptionZone(allPath, TEST_KEY);

  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final HdfsAdmin userAdmin =
          new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
      try {
        userAdmin.listEncryptionZones();
      } catch (AccessControlException e) {
        assertExceptionContains("Superuser privilege is required", e);
      }
      return null;
    }
  });
}
Example #16
Source File: ViewFsBaseTest.java From big-c with Apache License 2.0
// Renaming onto a ViewFs internal (mount-table) directory must be rejected.
@Test(expected = AccessControlException.class)
public void testInternalRename2() throws IOException {
  Assert.assertTrue("linkTODir2 should be a dir",
      fcView.getFileStatus(new Path("/internalDir/linkToDir2")).isDirectory());
  fcView.rename(new Path("/internalDir/linkToDir2"),
      new Path("/internalDir/dir1"));
}
Example #17
Source File: NameNode.java From hadoop with Apache License 2.0
/**
 * Check that a request to change this node's HA state is valid.
 * In particular, verifies that, if auto failover is enabled, non-forced
 * requests from the HAAdmin CLI are rejected, and vice versa.
 *
 * @param req the request to check
 * @throws AccessControlException if the request is disallowed
 */
void checkHaStateChange(StateChangeRequestInfo req)
    throws AccessControlException {
  boolean autoHaEnabled = conf.getBoolean(DFS_HA_AUTO_FAILOVER_ENABLED_KEY,
      DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT);
  switch (req.getSource()) {
  case REQUEST_BY_USER:
    if (autoHaEnabled) {
      throw new AccessControlException(
          "Manual HA control for this NameNode is disallowed, because " +
          "automatic HA is enabled.");
    }
    break;
  case REQUEST_BY_USER_FORCED:
    if (autoHaEnabled) {
      LOG.warn("Allowing manual HA control from " +
          Server.getRemoteAddress() +
          " even though automatic HA is enabled, because the user " +
          "specified the force flag");
    }
    break;
  case REQUEST_BY_ZKFC:
    if (!autoHaEnabled) {
      throw new AccessControlException(
          "Request from ZK failover controller at " +
          Server.getRemoteAddress() + " denied since automatic HA " +
          "is not enabled");
    }
    break;
  }
}
Example #18
Source File: TestFsck.java From big-c with Apache License 2.0
public void removeBlocks(MiniDFSCluster cluster)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  for (int corruptIdx : blocksToCorrupt) {
    // Corrupt a block by deleting it
    ExtendedBlock block = dfsClient.getNamenode().getBlockLocations(
        name, blockSize * corruptIdx, Long.MAX_VALUE).get(0).getBlock();
    for (int i = 0; i < numDataNodes; i++) {
      File blockFile = cluster.getBlockFile(i, block);
      if (blockFile != null && blockFile.exists()) {
        assertTrue(blockFile.delete());
      }
    }
  }
}
Example #19
Source File: DFSClient.java From hadoop-gpu with Apache License 2.0
/**
 * Save namespace image.
 * See {@link ClientProtocol#saveNamespace()}
 * for more details.
 *
 * @see ClientProtocol#saveNamespace()
 */
void saveNamespace() throws AccessControlException, IOException {
  try {
    namenode.saveNamespace();
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class);
  }
}
Example #20
Source File: FSAclBaseTest.java From hadoop with Apache License 2.0
@Test
public void testModifyAclEntriesMustBeOwnerOrSuper() throws Exception {
  Path bruceDir = new Path(path, "bruce");
  Path bruceFile = new Path(bruceDir, "file");
  fs.mkdirs(bruceDir);
  fs.setOwner(bruceDir, "bruce", null);
  fsAsBruce.create(bruceFile).close();
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "diana", ALL));
  fsAsBruce.modifyAclEntries(bruceFile, aclSpec);
  fs.modifyAclEntries(bruceFile, aclSpec);
  fsAsSupergroupMember.modifyAclEntries(bruceFile, aclSpec);
  exception.expect(AccessControlException.class);
  fsAsDiana.modifyAclEntries(bruceFile, aclSpec);
}
Example #21
Source File: DFSClient.java From hadoop with Apache License 2.0
/** Get the xattrs of a file or directory with the given names. */
public Map<String, byte[]> getXAttrs(String src, List<String> names)
    throws IOException {
  checkOpen();
  TraceScope scope = getPathTraceScope("getXAttrs", src);
  try {
    return XAttrHelper.buildXAttrMap(namenode.getXAttrs(
        src, XAttrHelper.buildXAttrs(names)));
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        FileNotFoundException.class,
        UnresolvedPathException.class);
  } finally {
    scope.close();
  }
}
Example #22
Source File: ViewFs.java From big-c with Apache License 2.0
@Override
public RemoteIterator<FileStatus> listStatusIterator(final Path f)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  final InodeTree.ResolveResult<AbstractFileSystem> res =
      fsState.resolve(getUriPath(f), true);
  final RemoteIterator<FileStatus> fsIter =
      res.targetFileSystem.listStatusIterator(res.remainingPath);
  if (res.isInternalDir()) {
    return fsIter;
  }

  return new RemoteIterator<FileStatus>() {
    final RemoteIterator<FileStatus> myIter;
    final ChRootedFs targetFs;
    { // Init
      myIter = fsIter;
      targetFs = (ChRootedFs) res.targetFileSystem;
    }

    @Override
    public boolean hasNext() throws IOException {
      return myIter.hasNext();
    }

    @Override
    public FileStatus next() throws IOException {
      FileStatus status = myIter.next();
      String suffix = targetFs.stripOutRoot(status.getPath());
      return new ViewFsFileStatus(status, makeQualified(
          suffix.length() == 0 ? f : new Path(res.resolvedPath, suffix)));
    }
  };
}
Example #23
Source File: AclTestHelpers.java From big-c with Apache License 2.0
/**
 * Asserts that permission is granted to the given fs/user for the given file.
 *
 * @param fs FileSystem to check
 * @param user UserGroupInformation owner of fs
 * @param pathToCheck Path file to check
 * @throws Exception if there is an unexpected error
 */
public static void assertFilePermissionGranted(FileSystem fs,
    UserGroupInformation user, Path pathToCheck) throws Exception {
  try {
    DFSTestUtil.readFileBuffer(fs, pathToCheck);
  } catch (AccessControlException e) {
    fail("expected permission granted for user " + user + ", path = " +
        pathToCheck);
  }
}
Example #24
Source File: FSNamesystem.java From hadoop-gpu with Apache License 2.0
private void checkSuperuserPrivilege() throws AccessControlException {
  if (isPermissionEnabled) {
    PermissionChecker pc = new PermissionChecker(
        fsOwner.getUserName(), supergroup);
    if (!pc.isSuper) {
      throw new AccessControlException("Superuser privilege is required");
    }
  }
}
Example #25
Source File: AclTestHelpers.java From big-c with Apache License 2.0
/**
 * Asserts that permission is denied to the given fs/user for the given file.
 *
 * @param fs FileSystem to check
 * @param user UserGroupInformation owner of fs
 * @param pathToCheck Path file to check
 * @throws Exception if there is an unexpected error
 */
public static void assertFilePermissionDenied(FileSystem fs,
    UserGroupInformation user, Path pathToCheck) throws Exception {
  try {
    DFSTestUtil.readFileBuffer(fs, pathToCheck);
    fail("expected AccessControlException for user " + user + ", path = " +
        pathToCheck);
  } catch (AccessControlException e) {
    // expected
  }
}
Example #26
Source File: DFSClient.java From hadoop with Apache License 2.0
/**
 * Get a partial listing of the indicated directory
 *
 * Recommend to use HdfsFileStatus.EMPTY_NAME as startAfter
 * if the application wants to fetch a listing starting from
 * the first entry in the directory
 *
 * @see ClientProtocol#getListing(String, byte[], boolean)
 */
public DirectoryListing listPaths(String src, byte[] startAfter,
    boolean needLocation) throws IOException {
  checkOpen();
  TraceScope scope = getPathTraceScope("listPaths", src);
  try {
    return namenode.getListing(src, startAfter, needLocation);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        FileNotFoundException.class,
        UnresolvedPathException.class);
  } finally {
    scope.close();
  }
}
Example #27
Source File: TestDelegationToken.java From hadoop with Apache License 2.0
@Test
public void testDelegationTokenSecretManager() throws Exception {
  Token<DelegationTokenIdentifier> token = generateDelegationToken(
      "SomeUser", "JobTracker");
  // Fake renewer should not be able to renew
  try {
    dtSecretManager.renewToken(token, "FakeRenewer");
    Assert.fail("should have failed");
  } catch (AccessControlException ace) {
    // PASS
  }
  dtSecretManager.renewToken(token, "JobTracker");
  DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
  byte[] tokenId = token.getIdentifier();
  identifier.readFields(new DataInputStream(
      new ByteArrayInputStream(tokenId)));
  Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
  LOG.info("Sleep to expire the token");
  Thread.sleep(6000);
  // Token should be expired
  try {
    dtSecretManager.retrievePassword(identifier);
    // Should not come here
    Assert.fail("Token should have expired");
  } catch (InvalidToken e) {
    // Success
  }
  dtSecretManager.renewToken(token, "JobTracker");
  LOG.info("Sleep beyond the max lifetime");
  Thread.sleep(5000);
  try {
    dtSecretManager.renewToken(token, "JobTracker");
    Assert.fail("should have been expired");
  } catch (InvalidToken it) {
    // PASS
  }
}
Example #28
Source File: ZKFCProtocolClientSideTranslatorPB.java From hadoop with Apache License 2.0
@Override
public void gracefulFailover() throws IOException, AccessControlException {
  try {
    rpcProxy.gracefulFailover(NULL_CONTROLLER,
        GracefulFailoverRequestProto.getDefaultInstance());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
Example #29
Source File: ViewFs.java From hadoop with Apache License 2.0
@Override
public Path resolvePath(final Path f) throws FileNotFoundException,
    AccessControlException, UnresolvedLinkException, IOException {
  final InodeTree.ResolveResult<AbstractFileSystem> res;
  res = fsState.resolve(getUriPath(f), true);
  if (res.isInternalDir()) {
    return f;
  }
  return res.targetFileSystem.resolvePath(res.remainingPath);
}
Example #30
Source File: ViewFs.java From hadoop with Apache License 2.0
@Override
public FileStatus getFileLinkStatus(final Path f)
    throws AccessControlException, FileNotFoundException,
    UnsupportedFileSystemException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res =
      fsState.resolve(getUriPath(f), false); // do not follow mount link
  return res.targetFileSystem.getFileLinkStatus(res.remainingPath);
}