org.apache.hadoop.security.UserGroupInformation Java Examples
The following examples show how to use
org.apache.hadoop.security.UserGroupInformation.
Each example is taken from an open-source project; the source file, originating project, and license are noted above it.
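Before the individual examples, here is a condensed sketch of the two patterns that recur throughout them: logging the process in from a keytab, and running an action as a particular user with doAs. This is a minimal sketch, not taken from any of the projects below; the principal, keytab path, and directory are placeholder values, and it assumes a Kerberos-enabled cluster whose *-site.xml files are on the classpath.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class UgiQuickStart {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // Tell UGI which configuration to use; it reads
        // hadoop.security.authentication to decide between simple and kerberos.
        UserGroupInformation.setConfiguration(conf);

        // Pattern 1: log the whole process in from a keytab.
        // Principal and keytab path are placeholders.
        UserGroupInformation.loginUserFromKeytab(
            "service/host@EXAMPLE.COM", "/etc/security/keytabs/service.keytab");

        // Pattern 2: obtain a UGI and run an action with its credentials.
        // Here the login user is used, so the doAs is redundant, but the same
        // call works with proxy users or any other UGI.
        UserGroupInformation ugi = UserGroupInformation.getLoginUser();
        FileStatus[] statuses = ugi.doAs(
            (PrivilegedExceptionAction<FileStatus[]>) () ->
                FileSystem.get(conf).listStatus(new Path("/tmp")));

        for (FileStatus status : statuses) {
            System.out.println(status.getPath());
        }
    }
}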
Example #1
Source File: CreateDir.java From Transwarp-Sample-Code with MIT License
public static void main(String[] args) throws IOException {
    // Create an HDFS directory through the Java API
    String rootPath = "hdfs://nameservice1";
    Path p = new Path(rootPath + "/tmp/newDir3");
    Configuration conf = new Configuration();
    conf.addResource("core-site.xml");
    conf.addResource("hdfs-site.xml");
    conf.addResource("yarn-site.xml");
    // If Kerberos is not enabled, comment out the two lines below
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation.loginUserFromKeytab("hdfs@TDH", "E:\\星环\\hdfs.keytab");
    FileSystem fs = p.getFileSystem(conf);
    boolean b = fs.mkdirs(p);
    System.out.println(b);
    fs.close();
}
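Note that loginUserFromKeytab logs in the entire process: the keytab identity becomes the static login user, so the subsequent FileSystem call authenticates as hdfs@TDH without an explicit doAs block.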
Example #2
Source File: HadoopInputFormatBase.java From flink with Apache License 2.0
@Override
public HadoopInputSplit[] createInputSplits(int minNumSplits) throws IOException {
    configuration.setInt("mapreduce.input.fileinputformat.split.minsize", minNumSplits);

    JobContext jobContext = new JobContextImpl(configuration, new JobID());

    jobContext.getCredentials().addAll(this.credentials);
    Credentials currentUserCreds = getCredentialsFromUGI(UserGroupInformation.getCurrentUser());
    if (currentUserCreds != null) {
        jobContext.getCredentials().addAll(currentUserCreds);
    }

    List<org.apache.hadoop.mapreduce.InputSplit> splits;
    try {
        splits = this.mapreduceInputFormat.getSplits(jobContext);
    } catch (InterruptedException e) {
        throw new IOException("Could not get Splits.", e);
    }
    HadoopInputSplit[] hadoopInputSplits = new HadoopInputSplit[splits.size()];

    for (int i = 0; i < hadoopInputSplits.length; i++) {
        hadoopInputSplits[i] = new HadoopInputSplit(i, splits.get(i), jobContext);
    }
    return hadoopInputSplits;
}
Example #3
Source File: SentryWebServer.java From incubator-sentry with Apache License 2.0
private static void validateConf(Configuration conf) {
    String authHandlerName = conf.get(ServerConfig.SENTRY_WEB_SECURITY_TYPE);
    Preconditions.checkNotNull(authHandlerName, "Web authHandler should not be null.");
    String allowUsers = conf.get(ServerConfig.SENTRY_WEB_SECURITY_ALLOW_CONNECT_USERS);
    Preconditions.checkNotNull(allowUsers, "Allow connect user(s) should not be null.");
    if (ServerConfig.SENTRY_WEB_SECURITY_TYPE_KERBEROS.equalsIgnoreCase(authHandlerName)) {
        String principal = conf.get(ServerConfig.SENTRY_WEB_SECURITY_PRINCIPAL);
        Preconditions.checkNotNull(principal, "Kerberos principal should not be null.");
        Preconditions.checkArgument(principal.length() != 0, "Kerberos principal is not right.");
        String keytabFile = conf.get(ServerConfig.SENTRY_WEB_SECURITY_KEYTAB);
        Preconditions.checkNotNull(keytabFile, "Keytab File should not be null.");
        Preconditions.checkArgument(keytabFile.length() != 0, "Keytab File is not right.");
        try {
            UserGroupInformation.setConfiguration(conf);
            String hostPrincipal = SecurityUtil.getServerPrincipal(principal, ServerConfig.RPC_ADDRESS_DEFAULT);
            UserGroupInformation.loginUserFromKeytab(hostPrincipal, keytabFile);
        } catch (IOException ex) {
            throw new IllegalArgumentException("Can't use Kerberos authentication, principal ["
                + principal + "] keytab [" + keytabFile + "]", ex);
        }
        LOGGER.info("Using Kerberos authentication, principal [" + principal
            + "] keytab [" + keytabFile + "]");
    }
}
Example #4
Source File: CachePool.java From big-c with Apache License 2.0 (identical code appears in hadoop)
/**
 * Create a new cache pool based on a CachePoolInfo object and the defaults.
 * We will fill in information that was not supplied according to the
 * defaults.
 */
static CachePool createFromInfoAndDefaults(CachePoolInfo info)
    throws IOException {
    UserGroupInformation ugi = null;
    String ownerName = info.getOwnerName();
    if (ownerName == null) {
        ugi = NameNode.getRemoteUser();
        ownerName = ugi.getShortUserName();
    }
    String groupName = info.getGroupName();
    if (groupName == null) {
        if (ugi == null) {
            ugi = NameNode.getRemoteUser();
        }
        groupName = ugi.getPrimaryGroupName();
    }
    FsPermission mode = (info.getMode() == null) ?
        FsPermission.getCachePoolDefault() : info.getMode();
    long limit = info.getLimit() == null ?
        CachePoolInfo.DEFAULT_LIMIT : info.getLimit();
    long maxRelativeExpiry = info.getMaxRelativeExpiryMs() == null ?
        CachePoolInfo.DEFAULT_MAX_RELATIVE_EXPIRY :
        info.getMaxRelativeExpiryMs();
    return new CachePool(info.getPoolName(),
        ownerName, groupName, mode, limit, maxRelativeExpiry);
}
Example #5
Source File: HttpFSFileSystem.java From big-c with Apache License 2.0
public long renewDelegationToken(final Token<?> token) throws IOException {
    try {
        return UserGroupInformation.getCurrentUser().doAs(
            new PrivilegedExceptionAction<Long>() {
                @Override
                public Long run() throws Exception {
                    return authURL.renewDelegationToken(uri.toURL(), authToken);
                }
            }
        );
    } catch (Exception ex) {
        if (ex instanceof IOException) {
            throw (IOException) ex;
        } else {
            throw new IOException(ex);
        }
    }
}
Example #6
Source File: RangerSystemAccessControl.java From ranger with Apache License 2.0
/** HELPER FUNCTIONS **/

private RangerPrestoAccessRequest createAccessRequest(RangerPrestoResource resource,
    SystemSecurityContext context, PrestoAccessType accessType) {
    Set<String> userGroups = null;

    if (useUgi) {
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(context.getIdentity().getUser());

        String[] groups = ugi != null ? ugi.getGroupNames() : null;

        if (groups != null && groups.length > 0) {
            userGroups = new HashSet<>(Arrays.asList(groups));
        }
    } else {
        userGroups = context.getIdentity().getGroups();
    }

    RangerPrestoAccessRequest request = new RangerPrestoAccessRequest(
        resource,
        context.getIdentity().getUser(),
        userGroups,
        accessType
    );

    return request;
}
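When useUgi is set, ugi.getGroupNames() resolves the user's groups on the server side through Hadoop's configured group mapping service instead of trusting the groups carried in the Presto identity; that difference is the point of the switch.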
Example #7
Source File: ContainerManagementProtocolProxy.java From hadoop with Apache License 2.0
@Private
@VisibleForTesting
protected ContainerManagementProtocol newProxy(final YarnRPC rpc,
    String containerManagerBindAddr, ContainerId containerId, Token token)
    throws InvalidToken {

    if (token == null) {
        throw new InvalidToken("No NMToken sent for " + containerManagerBindAddr);
    }

    final InetSocketAddress cmAddr =
        NetUtils.createSocketAddr(containerManagerBindAddr);
    LOG.info("Opening proxy : " + containerManagerBindAddr);
    // the user in createRemoteUser in this context has to be ContainerID
    UserGroupInformation user =
        UserGroupInformation.createRemoteUser(containerId
            .getApplicationAttemptId().toString());

    org.apache.hadoop.security.token.Token<NMTokenIdentifier> nmToken =
        ConverterUtils.convertFromYarn(token, cmAddr);
    user.addToken(nmToken);

    return NMProxy.createNMProxy(conf, ContainerManagementProtocol.class, user,
        rpc, cmAddr);
}
Example #8
Source File: RMWebServices.java From big-c with Apache License 2.0
@POST @Path("/delegation-token") @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public Response postDelegationToken(DelegationToken tokenData, @Context HttpServletRequest hsr) throws AuthorizationException, IOException, InterruptedException, Exception { init(); UserGroupInformation callerUGI; try { callerUGI = createKerberosUserGroupInformation(hsr); } catch (YarnException ye) { return Response.status(Status.FORBIDDEN).entity(ye.getMessage()).build(); } return createDelegationToken(tokenData, hsr, callerUGI); }
Example #9
Source File: NativeAzureFileSystemBaseTest.java From big-c with Apache License 2.0
@Test
public void testSetPermissionOnFile() throws Exception {
    Path newFile = new Path("testPermission");
    OutputStream output = fs.create(newFile);
    output.write(13);
    output.close();
    FsPermission newPermission = new FsPermission((short) 0700);
    fs.setPermission(newFile, newPermission);
    FileStatus newStatus = fs.getFileStatus(newFile);
    assertNotNull(newStatus);
    assertEquals(newPermission, newStatus.getPermission());
    assertEquals("supergroup", newStatus.getGroup());
    assertEquals(UserGroupInformation.getCurrentUser().getShortUserName(),
        newStatus.getOwner());
    // Don't check the file length for page blobs. Only block blobs
    // provide the actual length of bytes written.
    if (!(this instanceof TestNativeAzureFSPageBlobLive)) {
        assertEquals(1, newStatus.getLen());
    }
}
Example #10
Source File: OFSPath.java From hadoop-ozone with Apache License 2.0
/**
 * Return trash root for the given path.
 * @return trash root for the given path.
 */
public Path getTrashRoot() {
    if (!this.isKey()) {
        throw new RuntimeException("Volume or bucket doesn't have trash root.");
    }
    try {
        String username = UserGroupInformation.getCurrentUser().getUserName();
        final Path pathRoot = new Path(
            OZONE_OFS_URI_SCHEME, authority, OZONE_URI_DELIMITER);
        final Path pathToVolume = new Path(pathRoot, volumeName);
        final Path pathToBucket = new Path(pathToVolume, bucketName);
        final Path pathToTrash = new Path(pathToBucket, TRASH_PREFIX);
        return new Path(pathToTrash, username);
    } catch (IOException ex) {
        throw new RuntimeException("getTrashRoot failed.", ex);
    }
}
Example #11
Source File: ImpersonationUtil.java From Bats with Apache License 2.0
/** Helper method to create DrillFileSystem */
private static DrillFileSystem createFileSystem(UserGroupInformation proxyUserUgi,
    final Configuration fsConf, final OperatorStats stats) {
    DrillFileSystem fs;
    try {
        fs = proxyUserUgi.doAs((PrivilegedExceptionAction<DrillFileSystem>) () -> {
            logger.trace("Creating DrillFileSystem for proxy user: " + UserGroupInformation.getCurrentUser());
            return new DrillFileSystem(fsConf, stats);
        });
    } catch (InterruptedException | IOException e) {
        final String errMsg = "Failed to create DrillFileSystem for proxy user: " + e.getMessage();
        logger.error(errMsg, e);
        throw new DrillRuntimeException(errMsg, e);
    }
    return fs;
}
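The proxyUserUgi passed in here is presumably built with UserGroupInformation.createProxyUser, so the DrillFileSystem is created under the impersonated end user's identity while the service's own credentials authenticate the underlying connection; this is the standard Hadoop impersonation pattern.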
Example #12
Source File: TestInfoServersACL.java From hbase with Apache License 2.0
@Test
public void testAuthorizedUser() throws Exception {
    UserGroupInformation admin = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
        USER_ADMIN_STR, KEYTAB_FILE.getAbsolutePath());
    admin.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            // Check the expected content is present in the http response
            String expectedContent = "Get Log Level";
            Pair<Integer, String> pair = getLogLevelPage();
            assertEquals(HttpURLConnection.HTTP_OK, pair.getFirst().intValue());
            assertTrue("expected=" + expectedContent + ", content=" + pair.getSecond(),
                pair.getSecond().contains(expectedContent));
            return null;
        }
    });
}
Example #13
Source File: AbstractHadoopProcessor.java From nifi with Apache License 2.0
protected UserGroupInformation getUserGroupInformation() {
    getLogger().trace("getting UGI instance");
    if (hdfsResources.get().getKerberosUser() != null) {
        // if there's a KerberosUser associated with this UGI, check the TGT and relogin if it is close to expiring
        KerberosUser kerberosUser = hdfsResources.get().getKerberosUser();
        getLogger().debug("kerberosUser is " + kerberosUser);
        try {
            getLogger().debug("checking TGT on kerberosUser " + kerberosUser);
            kerberosUser.checkTGTAndRelogin();
        } catch (LoginException e) {
            throw new ProcessException("Unable to relogin with kerberos credentials for " + kerberosUser.getPrincipal(), e);
        }
    } else {
        getLogger().debug("kerberosUser was null, will not refresh TGT with KerberosUser");
    }
    return hdfsResources.get().getUserGroupInformation();
}
Example #14
Source File: TestAppController.java From big-c with Apache License 2.0
/**
 * Test method 'taskCounters'. Should print a message about the error or set
 * the CountersPage class for rendering.
 */
@Test
public void testGetTaskCounters() {
    when(job.checkAccess(any(UserGroupInformation.class), any(JobACL.class)))
        .thenReturn(false);

    appController.taskCounters();
    verify(appController.response()).setContentType(MimeType.TEXT);
    assertEquals(
        "Access denied: User user does not have permission to view job job_01_01",
        appController.getData());

    when(job.checkAccess(any(UserGroupInformation.class), any(JobACL.class)))
        .thenReturn(true);

    appController.getProperty().remove(AMParams.TASK_ID);
    appController.taskCounters();
    assertEquals(
        "Access denied: User user does not have permission to view job job_01_01missing task ID",
        appController.getData());

    appController.getProperty().put(AMParams.TASK_ID, "task_01_01_m01_01");
    appController.taskCounters();
    assertEquals(CountersPage.class, appController.getClazz());
}
Example #15
Source File: HBaseRangerAuthorizationTest.java From ranger with Apache License 2.0
@Test
public void testReadTablesAsGroupIT() throws Exception {
    final Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "localhost");
    conf.set("hbase.zookeeper.property.clientPort", "" + port);
    conf.set("zookeeper.znode.parent", "/hbase-unsecure");

    String user = "IT";
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(user, new String[] {"IT"});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Connection conn = ConnectionFactory.createConnection(conf);
            Admin admin = conn.getAdmin();
            HTableDescriptor[] tableDescriptors = admin.listTables();
            for (HTableDescriptor desc : tableDescriptors) {
                LOG.info("Found table:[" + desc.getTableName().getNameAsString() + "]");
            }
            Assert.assertEquals(0, tableDescriptors.length);
            conn.close();
            return null;
        }
    });
}
Example #16
Source File: StreamingAppMasterService.java From attic-apex-core with Apache License 2.0
@Override
protected void serviceStop() throws Exception {
    super.serviceStop();
    if (UserGroupInformation.isSecurityEnabled()) {
        delegationTokenManager.stopThreads();
    }
    if (nmClient != null) {
        nmClient.stop();
    }
    if (amRmClient != null) {
        amRmClient.stop();
    }
    if (dnmgr != null) {
        dnmgr.teardown();
    }
}
Example #17
Source File: ApplicationHistoryManagerOnTimelineStore.java From hadoop with Apache License 2.0
@Override
public Map<ApplicationId, ApplicationReport> getAllApplications()
    throws YarnException, IOException {
    TimelineEntities entities = timelineDataManager.getEntities(
        ApplicationMetricsConstants.ENTITY_TYPE, null, null, null, null, null,
        null, Long.MAX_VALUE, EnumSet.allOf(Field.class),
        UserGroupInformation.getLoginUser());
    Map<ApplicationId, ApplicationReport> apps =
        new LinkedHashMap<ApplicationId, ApplicationReport>();
    if (entities != null && entities.getEntities() != null) {
        for (TimelineEntity entity : entities.getEntities()) {
            try {
                ApplicationReportExt app =
                    generateApplicationReport(entity, ApplicationReportField.ALL);
                apps.put(app.appReport.getApplicationId(), app.appReport);
            } catch (Exception e) {
                LOG.error("Error on generating application report for " +
                    entity.getEntityId(), e);
            }
        }
    }
    return apps;
}
Example #18
Source File: JobQueueClient.java From RDFS with Apache License 2.0
private void displayQueueAclsInfoForCurrentUser() throws IOException {
    QueueAclsInfo[] queueAclsInfoList = jc.getQueueAclsForCurrentUser();
    UserGroupInformation ugi = UserGroupInformation.readFrom(getConf());
    if (queueAclsInfoList.length > 0) {
        System.out.println("Queue acls for user : " + ugi.getUserName());
        System.out.println("\nQueue Operations");
        System.out.println("=====================");
        for (QueueAclsInfo queueInfo : queueAclsInfoList) {
            System.out.print(queueInfo.getQueueName() + " ");
            String[] ops = queueInfo.getOperations();
            int max = ops.length - 1;
            for (int j = 0; j < ops.length; j++) {
                System.out.print(ops[j].replaceFirst("acl-", ""));
                if (j < max) {
                    System.out.print(",");
                }
            }
            System.out.println();
        }
    } else {
        System.out.println("User " + ugi.getUserName()
            + " does not have access to any queue. \n");
    }
}
Example #19
Source File: FileChecksumServlets.java From big-c with Apache License 2.0
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response)
    throws ServletException, IOException {
    final ServletContext context = getServletContext();
    final Configuration conf = NameNodeHttpServer.getConfFromContext(context);
    final UserGroupInformation ugi = getUGI(request, conf);
    final NameNode namenode = NameNodeHttpServer.getNameNodeFromContext(context);
    final DatanodeID datanode = NamenodeJspHelper.getRandomDatanode(namenode);
    try {
        response.sendRedirect(
            createRedirectURL(ugi, datanode, request, namenode).toString());
    } catch (IOException e) {
        response.sendError(400, e.getMessage());
    }
}
Example #20
Source File: LaunchContainerRunnable.java From attic-apex-core with Apache License 2.0
public static ByteBuffer getTokens(UserGroupInformation ugi, Token<StramDelegationTokenIdentifier> delegationToken) {
    try {
        Collection<Token<? extends TokenIdentifier>> tokens = ugi.getCredentials().getAllTokens();
        Credentials credentials = new Credentials();
        for (Token<? extends TokenIdentifier> token : tokens) {
            if (!token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
                credentials.addToken(token.getService(), token);
                LOG.debug("Passing container token {}", token);
            }
        }
        credentials.addToken(delegationToken.getService(), delegationToken);
        DataOutputBuffer dataOutput = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dataOutput);
        byte[] tokenBytes = dataOutput.getData();
        ByteBuffer cTokenBuf = ByteBuffer.wrap(tokenBytes);
        return cTokenBuf.duplicate();
    } catch (IOException e) {
        throw new RuntimeException("Error generating delegation token", e);
    }
}
Example #21
Source File: AdminService.java From hadoop with Apache License 2.0
@Override
public void serviceInit(Configuration conf) throws Exception {
    if (rmContext.isHAEnabled()) {
        autoFailoverEnabled = HAUtil.isAutomaticFailoverEnabled(conf);
        if (autoFailoverEnabled) {
            if (HAUtil.isAutomaticFailoverEmbedded(conf)) {
                embeddedElector = createEmbeddedElectorService();
                addIfService(embeddedElector);
            }
        }
    }

    masterServiceBindAddress = conf.getSocketAddr(
        YarnConfiguration.RM_BIND_HOST,
        YarnConfiguration.RM_ADMIN_ADDRESS,
        YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS,
        YarnConfiguration.DEFAULT_RM_ADMIN_PORT);
    daemonUser = UserGroupInformation.getCurrentUser();
    authorizer = YarnAuthorizationProvider.getInstance(conf);
    authorizer.setAdmins(getAdminAclList(conf), UserGroupInformation
        .getCurrentUser());
    rmId = conf.get(YarnConfiguration.RM_HA_ID);
    super.serviceInit(conf);
}
Example #22
Source File: HiveWriter.java From nifi with Apache License 2.0
protected RecordWriter getRecordWriter(HiveEndPoint endPoint, UserGroupInformation ugi, HiveConf hiveConf)
    throws StreamingException, IOException, InterruptedException {
    if (ugi == null) {
        return new StrictJsonWriter(endPoint, hiveConf);
    } else {
        try {
            return ugi.doAs((PrivilegedExceptionAction<StrictJsonWriter>) () -> new StrictJsonWriter(endPoint, hiveConf));
        } catch (UndeclaredThrowableException e) {
            Throwable cause = e.getCause();
            if (cause instanceof StreamingException) {
                throw (StreamingException) cause;
            } else {
                throw e;
            }
        }
    }
}
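The UndeclaredThrowableException handling is needed because UserGroupInformation.doAs rethrows IOException, InterruptedException, and unchecked exceptions as-is but wraps any other checked exception thrown inside the action, such as StreamingException here, in UndeclaredThrowableException; unwrapping it preserves the method's declared contract.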
Example #23
Source File: NamenodeWebHdfsMethods.java From hadoop with Apache License 2.0
/** Handle HTTP POST request for the root. */
@POST
@Path("/")
@Consumes({"*/*"})
@Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
public Response postRoot(
    @Context final UserGroupInformation ugi,
    @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
        final DelegationParam delegation,
    @QueryParam(UserParam.NAME) @DefaultValue(UserParam.DEFAULT)
        final UserParam username,
    @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT)
        final DoAsParam doAsUser,
    @QueryParam(PostOpParam.NAME) @DefaultValue(PostOpParam.DEFAULT)
        final PostOpParam op,
    @QueryParam(ConcatSourcesParam.NAME) @DefaultValue(ConcatSourcesParam.DEFAULT)
        final ConcatSourcesParam concatSrcs,
    @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
        final BufferSizeParam bufferSize,
    @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
        final ExcludeDatanodesParam excludeDatanodes,
    @QueryParam(NewLengthParam.NAME) @DefaultValue(NewLengthParam.DEFAULT)
        final NewLengthParam newLength
    ) throws IOException, InterruptedException {
    return post(ugi, delegation, username, doAsUser, ROOT, op, concatSrcs,
        bufferSize, excludeDatanodes, newLength);
}
Example #24
Source File: RegistrySecurity.java From hadoop with Apache License 2.0
/**
 * Log details about the current Hadoop user at INFO.
 * Robust against IOEs when trying to get the current user.
 */
public void logCurrentHadoopUser() {
    try {
        UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
        LOG.info("Current user = {}", currentUser);
        UserGroupInformation realUser = currentUser.getRealUser();
        LOG.info("Real User = {}", realUser);
    } catch (IOException e) {
        LOG.warn("Failed to get current user", e);
    }
}
Example #25
Source File: HadoopUsersConfTestHelper.java From big-c with Apache License 2.0
public static void addUserConf(Configuration conf) {
    conf.set("hadoop.security.authentication", "simple");
    conf.set("hadoop.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts",
        HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
    conf.set("hadoop.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups",
        HadoopUsersConfTestHelper.getHadoopProxyUserGroups());

    for (String user : HadoopUsersConfTestHelper.getHadoopUsers()) {
        String[] groups = HadoopUsersConfTestHelper.getHadoopUserGroups(user);
        UserGroupInformation.createUserForTesting(user, groups);
    }
}
Example #26
Source File: TestJobAclsManager.java From hadoop with Apache License 2.0
@Test
public void testClusterNoAdmins() {
    Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
    Configuration conf = new Configuration();
    String jobOwner = "testuser";
    conf.set(JobACL.VIEW_JOB.getAclName(), "");
    conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
    String noAdminUser = "testuser2";

    JobACLsManager aclsManager = new JobACLsManager(conf);
    tmpJobACLs = aclsManager.constructJobACLs(conf);
    final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;

    UserGroupInformation callerUGI = UserGroupInformation.createUserForTesting(
        noAdminUser, new String[] {});
    // random user should not have access
    boolean val = aclsManager.checkAccess(callerUGI, JobACL.VIEW_JOB, jobOwner,
        jobACLs.get(JobACL.VIEW_JOB));
    assertFalse("random user should not have view access", val);
    val = aclsManager.checkAccess(callerUGI, JobACL.MODIFY_JOB, jobOwner,
        jobACLs.get(JobACL.MODIFY_JOB));
    assertFalse("random user should not have modify access", val);

    callerUGI = UserGroupInformation.createUserForTesting(jobOwner,
        new String[] {});
    // Owner should have access
    val = aclsManager.checkAccess(callerUGI, JobACL.VIEW_JOB, jobOwner,
        jobACLs.get(JobACL.VIEW_JOB));
    assertTrue("owner should have view access", val);
    val = aclsManager.checkAccess(callerUGI, JobACL.MODIFY_JOB, jobOwner,
        jobACLs.get(JobACL.MODIFY_JOB));
    assertTrue("owner should have modify access", val);
}
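createUserForTesting fabricates a UGI with the given username and groups without contacting any real authentication or group mapping service, which is what lets this test simulate both an arbitrary user and the job owner in-process.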
Example #27
Source File: BlurHiveOutputFormat.java From incubator-retired-blur with Apache License 2.0
private org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter getMrWorkingPathWriter(
    final Configuration configuration) throws IOException {
    PrivilegedExceptionAction<org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter> privilegedExceptionAction =
        new PrivilegedExceptionAction<org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter>() {
            @Override
            public org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter run() throws Exception {
                String workingPathStr = configuration.get(BlurConstants.BLUR_BULK_UPDATE_WORKING_PATH);
                Path workingPath = new Path(workingPathStr);
                Path tmpDir = new Path(workingPath, "tmp");
                FileSystem fileSystem = tmpDir.getFileSystem(configuration);
                String loadId = configuration.get(BlurSerDe.BLUR_MR_LOAD_ID);
                Path loadPath = new Path(tmpDir, loadId);
                final Writer writer = new SequenceFile.Writer(fileSystem, configuration,
                    new Path(loadPath, UUID.randomUUID().toString()), Text.class, BlurRecord.class);
                return new org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter() {

                    @Override
                    public void write(Writable w) throws IOException {
                        BlurRecord blurRecord = (BlurRecord) w;
                        String rowId = blurRecord.getRowId();
                        writer.append(new Text(rowId), blurRecord);
                    }

                    @Override
                    public void close(boolean abort) throws IOException {
                        writer.close();
                    }
                };
            }
        };
    UserGroupInformation userGroupInformation = getUGI(configuration);
    try {
        return userGroupInformation.doAs(privilegedExceptionAction);
    } catch (InterruptedException e) {
        throw new IOException(e);
    }
}
Example #28
Source File: TestSuperUserQuotaPermissions.java From hbase with Apache License 2.0
private <T> T doAsUser(UserGroupInformation ugi, Callable<T> task) throws Exception {
    return ugi.doAs(new PrivilegedExceptionAction<T>() {
        @Override
        public T run() throws Exception {
            return task.call();
        }
    });
}
Example #29
Source File: MiniRPCBenchmark.java From hadoop with Apache License 2.0
void connectToServerAndGetDelegationToken(
    final Configuration conf, final InetSocketAddress addr) throws IOException {
    MiniProtocol client = null;
    try {
        UserGroupInformation current = UserGroupInformation.getCurrentUser();
        UserGroupInformation proxyUserUgi =
            UserGroupInformation.createProxyUserForTesting(
                MINI_USER, current, GROUP_NAMES);

        try {
            client = proxyUserUgi.doAs(new PrivilegedExceptionAction<MiniProtocol>() {
                @Override
                public MiniProtocol run() throws IOException {
                    MiniProtocol p = RPC.getProxy(MiniProtocol.class,
                        MiniProtocol.versionID, addr, conf);
                    Token<TestDelegationTokenIdentifier> token;
                    token = p.getDelegationToken(new Text(RENEWER));
                    currentUgi = UserGroupInformation.createUserForTesting(MINI_USER,
                        GROUP_NAMES);
                    SecurityUtil.setTokenService(token, addr);
                    currentUgi.addToken(token);
                    return p;
                }
            });
        } catch (InterruptedException e) {
            Assert.fail(Arrays.toString(e.getStackTrace()));
        }
    } finally {
        RPC.stopProxy(client);
    }
}