Java Code Examples for org.apache.hadoop.security.UserGroupInformation.getCurrentUser()

The following are Java code examples showing how to use getCurrentUser() of the org.apache.hadoop.security.UserGroupInformation class. You can vote up the examples you find useful; your votes help our system surface more good examples.
+ Save this method
Example 1
Project: hadoop   File: TestLeafQueue.java   View Source Code Vote up 6 votes
@Test
public void testInheritedQueueAcls() throws IOException {
  UserGroupInformation submitter = UserGroupInformation.getCurrentUser();

  // Queues under test: a, b, c1 are leaves; c is a parent queue.
  LeafQueue queueA = stubLeafQueue((LeafQueue) queues.get(A));
  LeafQueue queueB = stubLeafQueue((LeafQueue) queues.get(B));
  ParentQueue queueC = (ParentQueue) queues.get(C);
  LeafQueue queueC1 = stubLeafQueue((LeafQueue) queues.get(C1));

  // Direct ACL checks: only queues a and b grant SUBMIT_APPLICATIONS.
  assertFalse(root.hasAccess(QueueACL.SUBMIT_APPLICATIONS, submitter));
  assertTrue(queueA.hasAccess(QueueACL.SUBMIT_APPLICATIONS, submitter));
  assertTrue(queueB.hasAccess(QueueACL.SUBMIT_APPLICATIONS, submitter));
  assertFalse(queueC.hasAccess(QueueACL.SUBMIT_APPLICATIONS, submitter));
  assertFalse(queueC1.hasAccess(QueueACL.SUBMIT_APPLICATIONS, submitter));

  // The per-user ACL info each queue reports must agree with the
  // direct hasAccess() results above.
  assertTrue(hasQueueACL(
      queueA.getQueueUserAclInfo(submitter), QueueACL.SUBMIT_APPLICATIONS));
  assertTrue(hasQueueACL(
      queueB.getQueueUserAclInfo(submitter), QueueACL.SUBMIT_APPLICATIONS));
  assertFalse(hasQueueACL(
      queueC.getQueueUserAclInfo(submitter), QueueACL.SUBMIT_APPLICATIONS));
  assertFalse(hasQueueACL(
      queueC1.getQueueUserAclInfo(submitter), QueueACL.SUBMIT_APPLICATIONS));
}
 
Example 2
Project: hadoop   File: MRClientService.java   View Source Code Vote up 6 votes
@SuppressWarnings("unchecked")
@Override
public KillTaskAttemptResponse killTaskAttempt(
    KillTaskAttemptRequest request) throws IOException {
  TaskAttemptId attemptId = request.getTaskAttemptId();
  UserGroupInformation caller = UserGroupInformation.getCurrentUser();

  // Audit trail: who requested the kill and from which remote address.
  // The same text is attached to the attempt as a diagnostic below.
  String diagnostic = "Kill task attempt " + attemptId
      + " received from " + caller + " at "
      + Server.getRemoteAddress();
  LOG.info(diagnostic);

  // Enforce the job-level modify ACL before touching any state.
  verifyAndGetAttempt(attemptId, JobACL.MODIFY_JOB);

  appContext.getEventHandler().handle(
      new TaskAttemptDiagnosticsUpdateEvent(attemptId, diagnostic));
  appContext.getEventHandler().handle(
      new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL));

  return recordFactory.newRecordInstance(KillTaskAttemptResponse.class);
}
 
Example 3
Project: hadoop   File: ClientRMService.java   View Source Code Vote up 6 votes
/**
 * Verifies that the RPC caller may submit to the given reservable queue,
 * logging an audit failure and throwing when access is denied.
 *
 * @param queueName the queue whose ACLs are checked
 * @param auditConstant the audit-log operation name recorded on failure
 * @return the short user name of the authorized caller
 * @throws YarnException if the caller's UGI cannot be resolved, or the
 *         caller lacks SUBMIT_APPLICATIONS on the queue
 */
private String checkReservationACLs(String queueName, String auditConstant)
    throws YarnException {
  UserGroupInformation callerUGI;
  try {
    callerUGI = UserGroupInformation.getCurrentUser();
  } catch (IOException ie) {
    RMAuditLogger.logFailure("UNKNOWN", auditConstant, queueName,
        "ClientRMService", "Error getting UGI");
    throw RPCUtil.getRemoteException(ie);
  }
  // Check if user has access on the managed queue
  if (!queueACLsManager.checkAccess(callerUGI, QueueACL.SUBMIT_APPLICATIONS,
      queueName)) {
    RMAuditLogger.logFailure(
        callerUGI.getShortUserName(),
        auditConstant,
        "User doesn't have permissions to "
            + QueueACL.SUBMIT_APPLICATIONS.toString(), "ClientRMService",
        AuditConstants.UNAUTHORIZED_USER);
    // BUGFIX: added the missing space before the queue name so the message
    // reads "... on queue default" rather than "... on queuedefault".
    throw RPCUtil.getRemoteException(new AccessControlException("User "
        + callerUGI.getShortUserName() + " cannot perform operation "
        + QueueACL.SUBMIT_APPLICATIONS.name() + " on queue " + queueName));
  }
  return callerUGI.getShortUserName();
}
 
Example 4
Project: hadoop   File: TestGridMixClasses.java   View Source Code Vote up 6 votes
@Test (timeout=30000)
public void testCompareGridmixJob() throws Exception {
  Configuration conf = new Configuration();
  Path outputRoot = new Path("target");

  // One mocked job description shared by all four GridmixJob instances.
  JobStory story = mock(JobStory.class);
  when(story.getName()).thenReturn("JobName");
  when(story.getJobConf()).thenReturn(new JobConf(conf));
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

  GridmixJob first = new LoadJob(conf, 1000L, story, outputRoot, ugi, 0);
  GridmixJob second = new LoadJob(conf, 1000L, story, outputRoot, ugi, 0);
  GridmixJob third = new LoadJob(conf, 1000L, story, outputRoot, ugi, 1);
  GridmixJob fourth = new LoadJob(conf, 1000L, story, outputRoot, ugi, 1);

  // Same constructor arguments -> equal and compareTo == 0.
  assertTrue(first.equals(second));
  assertEquals(0, first.compareTo(second));
  // Only one parameter (the sequence number) differs -> not equal.
  assertFalse(first.equals(third));
  // compare id and submissionMillis
  assertEquals(-1, first.compareTo(third));
  assertEquals(-1, first.compareTo(fourth));
}
 
Example 5
Project: hadoop   File: DFSZKFailoverController.java   View Source Code Vote up 5 votes
@Override
protected void checkRpcAdminAccess() throws IOException, AccessControlException {
  UserGroupInformation caller = UserGroupInformation.getCurrentUser();
  UserGroupInformation zkfcUser = UserGroupInformation.getLoginUser();

  // Callers on the admin ACL are allowed, as is the ZKFC's own login user.
  boolean allowed = adminAcl.isUserAllowed(caller)
      || caller.getShortUserName().equals(zkfcUser.getShortUserName());
  if (!allowed) {
    String msg = "Disallowed RPC access from " + caller + " at " +
        Server.getRemoteAddress() + ". Not listed in " + DFSConfigKeys.DFS_ADMIN;
    LOG.warn(msg);
    throw new AccessControlException(msg);
  }
  LOG.info("Allowed RPC access from " + caller + " at " + Server.getRemoteAddress());
}
 
Example 6
Project: hadoop-oss   File: MiniRPCBenchmark.java   View Source Code Vote up 5 votes
/**
 * Connects to the RPC server as a proxy user impersonating MINI_USER and
 * fetches a delegation token, storing it on {@code currentUgi} for later
 * authenticated calls.
 *
 * @param conf configuration used to construct the RPC proxy
 * @param addr address of the server to connect to
 * @throws IOException if building the proxy or fetching the token fails
 */
void connectToServerAndGetDelegationToken(
    final Configuration conf, final InetSocketAddress addr) throws IOException {
  MiniProtocol client = null;
  try {
    UserGroupInformation current = UserGroupInformation.getCurrentUser();
    // Test-only proxy UGI: MINI_USER impersonated by the real current user.
    UserGroupInformation proxyUserUgi = 
      UserGroupInformation.createProxyUserForTesting(
          MINI_USER, current, GROUP_NAMES);
    
    try {
      client =  proxyUserUgi.doAs(new PrivilegedExceptionAction<MiniProtocol>() {
        @Override
        public MiniProtocol run() throws IOException {
          MiniProtocol p = RPC.getProxy(MiniProtocol.class,
              MiniProtocol.versionID, addr, conf);
          Token<TestDelegationTokenIdentifier> token;
          token = p.getDelegationToken(new Text(RENEWER));
          // Attach the freshly issued token to a new test UGI so later
          // RPCs can authenticate with it.
          currentUgi = UserGroupInformation.createUserForTesting(MINI_USER, 
              GROUP_NAMES);
          SecurityUtil.setTokenService(token, addr);
          currentUgi.addToken(token);
          return p;
        }
      });
    } catch (InterruptedException e) {
      // NOTE(review): interrupt status is not restored here; acceptable in
      // a test helper, where we just fail with the stack trace for context.
      Assert.fail(Arrays.toString(e.getStackTrace()));
    }
  } finally {
    // Always release the proxy connection, even if token retrieval failed.
    RPC.stopProxy(client);
  }
}
 
Example 7
Project: hadoop   File: FairScheduler.java   View Source Code Vote up 5 votes
@Override
public List<QueueUserACLInfo> getQueueUserAclInfo() {
  // If the caller's UGI cannot be resolved, report no ACL info rather
  // than propagating the error.
  UserGroupInformation caller;
  try {
    caller = UserGroupInformation.getCurrentUser();
  } catch (IOException ioe) {
    return new ArrayList<QueueUserACLInfo>();
  }
  return queueMgr.getRootQueue().getQueueUserAclInfo(caller);
}
 
Example 8
Project: hadoop   File: HSAdminServer.java   View Source Code Vote up 5 votes
@Override
protected void serviceStart() throws Exception {
  // Use the login user when security is enabled, the current user otherwise.
  loginUGI = UserGroupInformation.isSecurityEnabled()
      ? UserGroupInformation.getLoginUser()
      : UserGroupInformation.getCurrentUser();
  clientRpcServer.start();
}
 
Example 9
Project: hadoop   File: HistoryClientService.java   View Source Code Vote up 5 votes
/**
 * Throws if the RPC caller does not hold the given ACL on the job.
 */
private void checkAccess(Job job, JobACL jobOperation)
    throws IOException {
  UserGroupInformation caller = UserGroupInformation.getCurrentUser();
  if (job.checkAccess(caller, jobOperation)) {
    return;
  }
  throw new IOException(new AccessControlException("User "
      + caller.getShortUserName() + " cannot perform operation "
      + jobOperation.name() + " on " + job.getID()));
}
 
Example 10
Project: hadoop   File: ClientRMService.java   View Source Code Vote up 5 votes
/**
 * Returns a report for every attempt of the requested application,
 * provided the caller holds VIEW_APP access on it.
 *
 * @param request carries the application id to look up
 * @return a response listing one report per application attempt
 * @throws ApplicationNotFoundException if the RM does not know the app
 * @throws YarnException if the caller's UGI cannot be resolved or the
 *         caller lacks VIEW_APP access
 * @throws IOException declared for the RPC contract
 */
@Override
public GetApplicationAttemptsResponse getApplicationAttempts(
    GetApplicationAttemptsRequest request) throws YarnException, IOException {
  ApplicationId appId = request.getApplicationId();
  UserGroupInformation callerUGI;
  try {
    callerUGI = UserGroupInformation.getCurrentUser();
  } catch (IOException ie) {
    LOG.info("Error getting UGI ", ie);
    throw RPCUtil.getRemoteException(ie);
  }
  RMApp application = this.rmContext.getRMApps().get(appId);
  if (application == null) {
    // If the RM doesn't have the application, throw
    // ApplicationNotFoundException and let client to handle.
    throw new ApplicationNotFoundException("Application with id '" + appId
        + "' doesn't exist in RM.");
  }
  if (!checkAccess(callerUGI, application.getUser(),
      ApplicationAccessType.VIEW_APP, application)) {
    // BUGFIX: corrected "privilage"/"aplication" misspellings in this
    // user-facing error message.
    throw new YarnException("User " + callerUGI.getShortUserName()
        + " does not have privilege to see this application " + appId);
  }
  // Build one report per attempt; the enhanced for replaces the explicit
  // entry-set iterator of the original.
  List<ApplicationAttemptReport> listAttempts =
      new ArrayList<ApplicationAttemptReport>();
  for (RMAppAttempt attempt : application.getAppAttempts().values()) {
    listAttempts.add(attempt.createApplicationAttemptReport());
  }
  return GetApplicationAttemptsResponse.newInstance(listAttempts);
}
 
Example 11
Project: hadoop   File: ClientProtocolService.java   View Source Code Vote up 5 votes
@Override
public ReleaseSharedCacheResourceResponse release(
    ReleaseSharedCacheResourceRequest request) throws YarnException,
    IOException {

  UserGroupInformation caller;
  try {
    caller = UserGroupInformation.getCurrentUser();
  } catch (IOException ie) {
    LOG.info("Error getting UGI ", ie);
    throw RPCUtil.getRemoteException(ie);
  }

  // Drop this (appId, user) reference to the cached resource.
  SharedCacheResourceReference reference =
      new SharedCacheResourceReference(request.getAppId(),
          caller.getShortUserName());
  boolean removed = this.store.removeResourceReference(
      request.getResourceKey(), reference, true);
  if (removed) {
    this.metrics.incCacheRelease();
  }

  return recordFactory
      .newRecordInstance(ReleaseSharedCacheResourceResponse.class);
}
 
Example 12
Project: hadoop   File: ClientRMService.java   View Source Code Vote up 4 votes
/**
 * Kills the given application on behalf of the RPC caller, writing an
 * audit record for every outcome (UGI failure, absent app, unauthorized
 * caller, and success).
 *
 * @param request carries the id of the application to kill
 * @return a response built with {@code true} once the app's final state is
 *         already stored, otherwise with the app's unmanaged-AM flag so
 *         unmanaged AMs do not retry
 * @throws YarnException if the caller's UGI cannot be resolved, the app is
 *         absent, or the caller lacks MODIFY_APP access
 */
@SuppressWarnings("unchecked")
@Override
public KillApplicationResponse forceKillApplication(
    KillApplicationRequest request) throws YarnException {

  ApplicationId applicationId = request.getApplicationId();

  UserGroupInformation callerUGI;
  try {
    callerUGI = UserGroupInformation.getCurrentUser();
  } catch (IOException ie) {
    LOG.info("Error getting UGI ", ie);
    RMAuditLogger.logFailure("UNKNOWN", AuditConstants.KILL_APP_REQUEST,
        "UNKNOWN", "ClientRMService" , "Error getting UGI",
        applicationId);
    throw RPCUtil.getRemoteException(ie);
  }

  // Unknown application: audit and reject.
  RMApp application = this.rmContext.getRMApps().get(applicationId);
  if (application == null) {
    RMAuditLogger.logFailure(callerUGI.getUserName(),
        AuditConstants.KILL_APP_REQUEST, "UNKNOWN", "ClientRMService",
        "Trying to kill an absent application", applicationId);
    throw new ApplicationNotFoundException("Trying to kill an absent"
        + " application " + applicationId);
  }

  // Killing requires the MODIFY_APP ACL on this application.
  if (!checkAccess(callerUGI, application.getUser(),
      ApplicationAccessType.MODIFY_APP, application)) {
    RMAuditLogger.logFailure(callerUGI.getShortUserName(),
        AuditConstants.KILL_APP_REQUEST,
        "User doesn't have permissions to "
            + ApplicationAccessType.MODIFY_APP.toString(), "ClientRMService",
        AuditConstants.UNAUTHORIZED_USER, applicationId);
    throw RPCUtil.getRemoteException(new AccessControlException("User "
        + callerUGI.getShortUserName() + " cannot perform operation "
        + ApplicationAccessType.MODIFY_APP.name() + " on " + applicationId));
  }

  // App already reached a stored final state: nothing to kill, report success.
  if (application.isAppFinalStateStored()) {
    RMAuditLogger.logSuccess(callerUGI.getShortUserName(),
        AuditConstants.KILL_APP_REQUEST, "ClientRMService", applicationId);
    return KillApplicationResponse.newInstance(true);
  }

  // Kill is asynchronous: dispatch the event and let the app FSM act on it.
  this.rmContext.getDispatcher().getEventHandler()
      .handle(new RMAppEvent(applicationId, RMAppEventType.KILL));

  // For UnmanagedAMs, return true so they don't retry
  return KillApplicationResponse.newInstance(
      application.getApplicationSubmissionContext().getUnmanagedAM());
}
 
Example 13
Project: hadoop-oss   File: InodeTree.java   View Source Code Vote up 4 votes
/**
 * Create Inode Tree from the specified mount-table specified in Config.
 * Each key under the mount-table prefix must be a link, a merge link, or
 * the home-directory setting; anything else aborts initialization.
 * @param config - the mount table keys are prefixed with 
 *       FsConstants.CONFIG_VIEWFS_PREFIX
 * @param viewName - the name of the mount table - if null use defaultMT name
 * @throws UnsupportedFileSystemException
 * @throws URISyntaxException
 * @throws FileAlreadyExistsException
 * @throws IOException if the mount table is empty or contains a bad entry
 */
protected InodeTree(final Configuration config, final String viewName)
    throws UnsupportedFileSystemException, URISyntaxException,
  FileAlreadyExistsException, IOException { 
  // Fall back to the default mount-table name when none was given.
  String vName = viewName;
  if (vName == null) {
    vName = Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE;
  }
  homedirPrefix = ConfigUtil.getHomeDirValue(config, vName);
  // Root of the tree is created for the current user.
  root = new INodeDir<T>("/", UserGroupInformation.getCurrentUser());
  root.InodeDirFs = getTargetFileSystem(root);
  root.isRoot = true;
  
  final String mtPrefix = Constants.CONFIG_VIEWFS_PREFIX + "." + 
                          vName + ".";
  final String linkPrefix = Constants.CONFIG_VIEWFS_LINK + ".";
  final String linkMergePrefix = Constants.CONFIG_VIEWFS_LINK_MERGE + ".";
  boolean gotMountTableEntry = false;
  final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  // Scan every config entry and build a link for each mount-table key.
  for (Entry<String, String> si : config) {
    final String key = si.getKey();
    if (key.startsWith(mtPrefix)) {
      gotMountTableEntry = true;
      boolean isMergeLink = false;
      String src = key.substring(mtPrefix.length());
      if (src.startsWith(linkPrefix)) {
        src = src.substring(linkPrefix.length());
      } else if (src.startsWith(linkMergePrefix)) { // A merge link
        isMergeLink = true;
        src = src.substring(linkMergePrefix.length());
      } else if (src.startsWith(Constants.CONFIG_VIEWFS_HOMEDIR)) {
        // ignore - we set home dir from config
        continue;
      } else {
        // Mount-table key that is neither link, merge link, nor homedir.
        throw new IOException(
        "ViewFs: Cannot initialize: Invalid entry in Mount table in config: "+ 
        src);
      }
      final String target = si.getValue(); // link or merge link
      createLink(src, target, isMergeLink, ugi); 
    }
  }
  // An entirely empty mount table is a configuration error.
  if (!gotMountTableEntry) {
    throw new IOException(
        "ViewFs: Cannot initialize: Empty Mount table in config for " +
           "viewfs://" + vName + "/");
  }
}
 
Example 14
Project: hadoop   File: InodeTree.java   View Source Code Vote up 4 votes
/**
 * Create Inode Tree from the specified mount-table specified in Config.
 * Each key under the mount-table prefix must be a link, a merge link, or
 * the home-directory setting; anything else aborts initialization.
 * @param config - the mount table keys are prefixed with 
 *       FsConstants.CONFIG_VIEWFS_PREFIX
 * @param viewName - the name of the mount table - if null use defaultMT name
 * @throws UnsupportedFileSystemException
 * @throws URISyntaxException
 * @throws FileAlreadyExistsException
 * @throws IOException if the mount table is empty or contains a bad entry
 */
protected InodeTree(final Configuration config, final String viewName)
    throws UnsupportedFileSystemException, URISyntaxException,
  FileAlreadyExistsException, IOException { 
  // Fall back to the default mount-table name when none was given.
  String vName = viewName;
  if (vName == null) {
    vName = Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE;
  }
  homedirPrefix = ConfigUtil.getHomeDirValue(config, vName);
  // Root of the tree is created for the current user.
  root = new INodeDir<T>("/", UserGroupInformation.getCurrentUser());
  root.InodeDirFs = getTargetFileSystem(root);
  root.isRoot = true;

  final String mtPrefix = Constants.CONFIG_VIEWFS_PREFIX + "." + 
                          vName + ".";
  final String linkPrefix = Constants.CONFIG_VIEWFS_LINK + ".";
  final String linkMergePrefix = Constants.CONFIG_VIEWFS_LINK_MERGE + ".";
  boolean gotMountTableEntry = false;
  final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  // Scan every config entry and build a link for each mount-table key.
  for (Entry<String, String> si : config) {
    final String key = si.getKey();
    if (key.startsWith(mtPrefix)) {
      gotMountTableEntry = true;
      boolean isMergeLink = false;
      String src = key.substring(mtPrefix.length());
      if (src.startsWith(linkPrefix)) {
        src = src.substring(linkPrefix.length());
      } else if (src.startsWith(linkMergePrefix)) { // A merge link
        isMergeLink = true;
        src = src.substring(linkMergePrefix.length());
      } else if (src.startsWith(Constants.CONFIG_VIEWFS_HOMEDIR)) {
        // ignore - we set home dir from config
        continue;
      } else {
        throw new IOException(
        "ViewFs: Cannot initialize: Invalid entry in Mount table in config: "+ 
        src);
      }
      final String target = si.getValue(); // link or merge link
      createLink(src, target, isMergeLink, ugi); 
    }
  }
  if (!gotMountTableEntry) {
    // BUGFIX: the original wrote `"..." + vName == null ? A : B`. Because
    // `+` binds tighter than `==`, that compared ("..."+vName) to null —
    // always false — so the "ViewFs: Cannot initialize..." prefix was
    // silently dropped from the exception message. Parenthesizing the
    // conditional restores the intended message.
    throw new IOException(
        "ViewFs: Cannot initialize: Empty Mount table in config for "
            + (vName == null ? "viewfs:///" : ("viewfs://" + vName + "/")));
  }
}
 
Example 15
Project: hadoop   File: UserProvider.java   View Source Code Vote up 4 votes
/**
 * Binds this provider to the caller's UGI and its credentials.
 * @throws IOException if the current user cannot be determined
 */
private UserProvider() throws IOException {
  this.user = UserGroupInformation.getCurrentUser();
  this.credentials = this.user.getCredentials();
}
 
Example 16
Project: hadoop   File: TestAggregatedLogFormat.java   View Source Code Vote up 4 votes
/**
 * Writes {@code length} filler bytes to a source file from a background
 * thread while concurrently aggregating that file into
 * {@code remoteAppLogFile}. The latch releases aggregation only after the
 * writer has produced the first third of the bytes, so the aggregator races
 * an in-progress writer.
 *
 * @param srcFilePath directory to create the source file in
 * @param fileName name of the source file
 * @param length total number of bytes the background thread writes
 * @param remoteAppLogFile destination of the aggregated log
 * @param srcFileRoot root directory handed to LogValue for aggregation
 * @param testContainerId container id used for the log key/value
 * @throws Exception on I/O failure or interruption while waiting
 */
private void writeSrcFileAndALog(Path srcFilePath, String fileName, final long length,
    Path remoteAppLogFile, Path srcFileRoot, ContainerId testContainerId)
    throws Exception {
  File dir = new File(srcFilePath.toString());
  if (!dir.exists()) {
    if (!dir.mkdirs()) {
      throw new IOException("Unable to create directory : " + dir);
    }
  }

  File outputFile = new File(new File(srcFilePath.toString()), fileName);
  FileOutputStream os = new FileOutputStream(outputFile);
  final OutputStreamWriter osw = new OutputStreamWriter(os, "UTF8");
  final int ch = filler;

  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  LogWriter logWriter = new LogWriter(conf, remoteAppLogFile, ugi);

  LogKey logKey = new LogKey(testContainerId);
  LogValue logValue =
      spy(new LogValue(Collections.singletonList(srcFileRoot.toString()),
          testContainerId, ugi.getShortUserName()));

  final CountDownLatch latch = new CountDownLatch(1);

  // Background writer: 1/3 of the bytes, release the latch, then the rest.
  Thread t = new Thread() {
    public void run() {
      try {
        for(int i=0; i < length/3; i++) {
            osw.write(ch);
        }

        latch.countDown();

        for(int i=0; i < (2*length)/3; i++) {
          osw.write(ch);
        }
        osw.close();
      } catch (IOException e) {
        // Best-effort test writer: surface the failure on stderr.
        e.printStackTrace();
      }
    }
  };
  t.start();

  //Wait till the osw is partially written
  //aggregation starts once the osw has completed 1/3rd of its work
  latch.await();

  //Aggregate The Logs
  logWriter.append(logKey, logValue);
  logWriter.close();
}
 
Example 17
Project: hadoop   File: KeyAuthorizationKeyProvider.java   View Source Code Vote up 4 votes
/**
 * Returns the UGI of the current caller.
 *
 * @return the current user's UserGroupInformation
 * @throws IOException if the current user cannot be determined
 */
private UserGroupInformation getUser() throws IOException {
  return UserGroupInformation.getCurrentUser();
}
 
Example 18
Project: hadoop   File: RPC.java   View Source Code Vote up 3 votes
/**
 * Get a protocol proxy that contains a proxy connection to a remote server
 * and a set of methods that are supported by the server, authenticated as
 * the current user.
 * 
 * @param protocol protocol class
 * @param clientVersion client version
 * @param addr remote address
 * @param conf configuration to use
 * @param factory socket factory
 * @return the protocol proxy
 * @throws IOException if the far end through a RemoteException
 */
public static <T> ProtocolProxy<T> getProtocolProxy(Class<T> protocol,
                              long clientVersion,
                              InetSocketAddress addr, Configuration conf,
                              SocketFactory factory) throws IOException {
  // Delegate to the UGI-taking overload on behalf of the current user.
  return getProtocolProxy(protocol, clientVersion, addr,
      UserGroupInformation.getCurrentUser(), conf, factory);
}
 
Example 19
Project: hadoop   File: ViewFileSystem.java   View Source Code Vote up 2 votes
/**
 * Zero-argument constructor with the signature required by
 * {@link FileSystem#createFileSystem(URI, Configuration)}.
 *
 * After this constructor runs, initialize() is called.
 * @throws IOException if the current user's UGI cannot be determined
 */
public ViewFileSystem() throws IOException {
  creationTime = Time.now();
  ugi = UserGroupInformation.getCurrentUser();
}
 
Example 20
Project: hadoop   File: JobClient.java   View Source Code Vote up 2 votes
/**
 * Build a job client that talks to the indicated job tracker, recording
 * the current user as the client identity.
 * 
 * @param jobTrackAddr the job tracker to connect to.
 * @param conf configuration.
 * @throws IOException if the cluster connection or UGI lookup fails
 */
public JobClient(InetSocketAddress jobTrackAddr, 
                 Configuration conf) throws IOException {
  this.cluster = new Cluster(jobTrackAddr, conf);
  this.clientUgi = UserGroupInformation.getCurrentUser();
}