Java Code Examples for org.apache.hadoop.security.UserGroupInformation.getLoginUser()

The following are Java code examples showing how to use getLoginUser() of the org.apache.hadoop.security.UserGroupInformation class. You can vote up the examples you like; your votes help us surface more high-quality examples.
+ Save this method
Example 1
Project: angel   File: UGITools.java   View Source Code Vote up 6 votes
/**
 * Builds a {@link UserGroupInformation} for the user named in the
 * {@code UGI_PROPERTY_NAME} configuration entry, falling back to the
 * process's current user when the property is absent.
 *
 * <p>UGI exposes no public mutator for the user name, so the private
 * {@code user}, {@code shortName} and {@code fullName} fields are
 * rewritten reflectively on the login user's backing {@code User}.
 */
public static UserGroupInformation getCurrentUser(Configuration conf) throws IOException,
    ClassNotFoundException, NoSuchFieldException, SecurityException, InstantiationException,
    IllegalAccessException {
  String[] ugiStrs = conf.getStrings(UGI_PROPERTY_NAME);
  if (ugiStrs == null) {
    LOG.info("UGI_PROPERTY_NAME is null ");
    return UserGroupInformation.getCurrentUser();
  }

  // Property value is "user:password"; only the user part is applied.
  String userName = ugiStrs[0].split(":")[0];

  Class<?> ugiClass = Class.forName("org.apache.hadoop.security.UserGroupInformation");
  Class<?> userClass = Class.forName("org.apache.hadoop.security.User");
  Field userField = ugiClass.getDeclaredField("user");
  Field shortNameField = userClass.getDeclaredField("shortName");
  Field fullNameField = userClass.getDeclaredField("fullName");
  userField.setAccessible(true);
  shortNameField.setAccessible(true);
  fullNameField.setAccessible(true);

  UserGroupInformation ugi = UserGroupInformation.getLoginUser();
  Object backingUser = userField.get(ugi);
  shortNameField.set(backingUser, userName);
  fullNameField.set(backingUser, userName);
  return ugi;
}
 
Example 2
Project: hadoop-oss   File: ReEncryptionClientProvider.java   View Source Code Vote up 6 votes
/**
 * Creates a client for the re-encryption service addressed by {@code uri}.
 * HTTPS endpoints get an initialized client-side SSL factory; SSL init
 * failures surface as IOException to match this constructor's contract.
 */
public ReEncryptionClientProvider(URI uri, Configuration conf) throws IOException {
  setConf(conf);
  renUrl = createServiceURL(ProviderUtils.unnestUri(uri));
  if ("https".equalsIgnoreCase(new URL(renUrl).getProtocol())) {
    sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
    try {
      sslFactory.init();
    } catch (GeneralSecurityException ex) {
      throw new IOException(ex);
    }
  }

  configurator = new TimeoutConnConfigurator(
      conf.getInt(TIMEOUT_ATTR, DEFAULT_TIMEOUT), sslFactory);
  authRetry = conf.getInt(AUTH_RETRY, DEFAULT_AUTH_RETRY);
  authToken = new DelegationTokenAuthenticatedURL.Token();

  // Pick the UGI that actually holds credentials: the real user behind a
  // proxy, the login user for token auth, otherwise the current user.
  UserGroupInformation current = UserGroupInformation.getCurrentUser();
  switch (current.getAuthenticationMethod()) {
    case PROXY:
      actualUgi = current.getRealUser();
      break;
    case TOKEN:
      actualUgi = UserGroupInformation.getLoginUser();
      break;
    default:
      actualUgi = UserGroupInformation.getCurrentUser();
      break;
  }
}
 
Example 3
Project: hadoop-oss   File: TestKMS.java   View Source Code Vote up 6 votes
/**
 * Logs {@code user} in from the shared keytab, runs {@code action} as
 * that user, and always logs back out afterwards.
 */
private <T> T doAs(String user, final PrivilegedExceptionAction<T> action)
    throws Exception {
  UserGroupInformation.loginUserFromKeytab(user, keytab.getAbsolutePath());
  final UserGroupInformation loginUgi = UserGroupInformation.getLoginUser();
  try {
    return loginUgi.doAs(action);
  } finally {
    // Make sure the keytab login never leaks into the next test case.
    loginUgi.logoutUserFromKeytab();
  }
}
 
Example 4
Project: hadoop   File: TestSleepJob.java   View Source Code Vote up 6 votes
/**
 * Verifies that when {@code SLEEPJOB_MAPTASK_ONLY} is set, every
 * generated gridmix sleep job is configured with zero reduce tasks.
 */
@Test  (timeout=600000)
public void testMapTasksOnlySleepJobs() throws Exception {
  Configuration configuration = GridmixTestUtils.mrvl.getConfig();

  DebugJobProducer jobProducer = new DebugJobProducer(5, configuration);
  configuration.setBoolean(SleepJob.SLEEPJOB_MAPTASK_ONLY, true);

  UserGroupInformation ugi = UserGroupInformation.getLoginUser();
  int seq = 1;
  for (JobStory story = jobProducer.getNextJob(); story != null;
       story = jobProducer.getNextJob()) {
    GridmixJob gridmixJob = JobCreator.SLEEPJOB.createGridmixJob(
        configuration, 0, story, new Path("ignored"), ugi, seq++);
    gridmixJob.buildSplits(null);
    Job job = gridmixJob.call();
    // Map-only jobs must report no reducers.
    assertEquals(0, job.getNumReduceTasks());
  }
  jobProducer.close();
  // Five produced stories advance seq from 1 to 6.
  assertEquals(6, seq);
}
 
Example 5
Project: hadoop   File: TestMiniMRWithDFSWithDistinctUsers.java   View Source Code Vote up 6 votes
/**
 * Brings up a 4-datanode mini DFS cluster plus a mini MR cluster,
 * creating per-user home directories and a sticky staging root.
 */
@Before
public void setUp() throws Exception {
  dfs = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();

  // Obtain the FileSystem handle as the DFS superuser.
  fs = DFS_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
      public FileSystem run() throws IOException {
        return dfs.getFileSystem();
      }
    });

  // Home directories for users
  mkdir(fs, "/user", "nobody", "nogroup", (short)01777);
  mkdir(fs, "/user/alice", "alice", "nogroup", (short)0755);
  mkdir(fs, "/user/bob", "bob", "nogroup", (short)0755);

  // Staging root: owned by the MR login user, world-writable + sticky bit.
  UserGroupInformation mrUgi = UserGroupInformation.getLoginUser();
  mkdir(fs, "/staging", mrUgi.getShortUserName(), "nogroup", (short)01777);

  JobConf mrConf = new JobConf();
  mrConf.set(JTConfig.JT_STAGING_AREA_ROOT, "/staging");

  mr = new MiniMRCluster(0, 0, 4, dfs.getFileSystem().getUri().toString(),
                         1, null, null, mrUgi, mrConf);
}
 
Example 6
Project: hadoop   File: HttpFSFileSystem.java   View Source Code Vote up 6 votes
/**
 * Called after a new FileSystem instance is constructed.
 *
 * @param name a uri whose authority section names the host, port, etc. for this FileSystem
 * @param conf the configuration
 */
@Override
public void initialize(URI name, Configuration conf) throws IOException {
  // The real user (when proxying) is the one holding the Kerberos
  // credentials SPNEGO needs; otherwise fall back to the login user.
  realUser = UserGroupInformation.getCurrentUser().getRealUser();
  if (realUser == null) {
    realUser = UserGroupInformation.getLoginUser();
  }

  super.initialize(name, conf);
  try {
    uri = new URI(name.getScheme() + "://" + name.getAuthority());
  } catch (URISyntaxException ex) {
    throw new IOException(ex);
  }

  // The authenticator implementation is pluggable; Kerberos/SPNEGO is
  // the default when "httpfs.authenticator.class" is not set.
  Class<? extends DelegationTokenAuthenticator> klass =
      getConf().getClass("httpfs.authenticator.class",
          KerberosDelegationTokenAuthenticator.class,
          DelegationTokenAuthenticator.class);
  DelegationTokenAuthenticator authenticator =
      ReflectionUtils.newInstance(klass, getConf());
  authURL = new DelegationTokenAuthenticatedURL(authenticator);
}
 
Example 7
Project: hadoop   File: Gridmix.java   View Source Code Vote up 6 votes
/**
 * Tool entry point: executes the gridmix run as the login user and
 * prints the run summary when it succeeds.
 *
 * @return the exit code returned by {@code runJob}
 */
public int run(final String[] argv) throws IOException, InterruptedException {
  final Configuration conf = getConf();
  UserGroupInformation.setConfiguration(conf);

  // The entire run executes under the login user's credentials.
  final int exitCode = UserGroupInformation.getLoginUser().doAs(
      new PrivilegedExceptionAction<Integer>() {
        public Integer run() throws Exception {
          return runJob(conf, argv);
        }
      });

  // print the gridmix summary if the run was successful
  if (exitCode == 0) {
    System.out.print("\n\n");
    System.out.println(summarizer.toString());
  }

  return exitCode;
}
 
Example 8
Project: hadoop-oss   File: Client.java   View Source Code Vote up 5 votes
/**
 * Decides whether this client should (re)authenticate to the server over
 * Kerberos: the auth method must be KERBEROS, the login user must hold
 * Kerberos credentials (keytab or TGT), and the caller must be the login
 * user itself (e.g. JT) or a superuser (like oozie) proxying for it.
 *
 * @return true when Kerberos (re)login is appropriate
 * @throws IOException if the login or current user cannot be determined
 */
private synchronized boolean shouldAuthenticateOverKrb() throws IOException {
  UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
  UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
  UserGroupInformation realUser = currentUser.getRealUser();
  // Return the condition directly instead of `if (cond) return true;
  // return false;` — same form as the sibling RpcClientImpl version.
  return authMethod == AuthMethod.KERBEROS && loginUser != null &&
      // Make sure user logged in using Kerberos either keytab or TGT
      loginUser.hasKerberosCredentials() &&
      // relogin only in case it is the login user (e.g. JT)
      // or superuser (like oozie).
      (loginUser.equals(currentUser) || loginUser.equals(realUser));
}
 
Example 9
Project: ditb   File: RpcClientImpl.java   View Source Code Vote up 5 votes
/**
 * Decides whether this client should (re)authenticate to the server
 * over Kerberos.
 *
 * @return true when the auth method is KERBEROS, the login user holds
 *         Kerberos credentials, and the caller is the login user or a
 *         superuser proxying for it
 */
private synchronized boolean shouldAuthenticateOverKrb() throws IOException {
  UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
  UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
  UserGroupInformation realUser = currentUser.getRealUser();

  // User must have logged in via Kerberos (keytab or TGT).
  boolean kerberosLogin = authMethod == AuthMethod.KERBEROS
      && loginUser != null
      && loginUser.hasKerberosCredentials();

  // Relogin only as the login user itself (e.g. JT) or as a superuser
  // (like oozie) acting on its behalf.
  boolean actingAsLoginUser = loginUser != null
      && (loginUser.equals(currentUser) || loginUser.equals(realUser));

  return kerberosLogin && actingAsLoginUser;
}
 
Example 10
Project: hadoop   File: ClientRMService.java   View Source Code Vote up 5 votes
/**
 * Determines the renewer for {@code token}: when the caller is the
 * login user, the renewer recorded in the token identifier is kept;
 * otherwise the caller renews under its own short name.
 */
private String getRenewerForToken(Token<RMDelegationTokenIdentifier> token)
    throws IOException {
  UserGroupInformation user = UserGroupInformation.getCurrentUser();
  UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
  // we can always renew our own tokens
  if (loginUser.getUserName().equals(user.getUserName())) {
    return token.decodeIdentifier().getRenewer().toString();
  }
  return user.getShortUserName();
}
 
Example 11
Project: hadoop   File: JobSubmissionFiles.java   View Source Code Vote up 5 votes
/**
 * Initializes the staging directory and returns the path. It also
 * keeps track of all necessary ownership and permissions
 * @param cluster
 * @param conf
 */
/**
 * Initializes the staging directory and returns the path. It also
 * keeps track of all necessary ownership and permissions
 * @param cluster
 * @param conf
 */
public static Path getStagingDir(Cluster cluster, Configuration conf) 
throws IOException,InterruptedException {
  Path stagingArea = cluster.getStagingAreaDir();
  FileSystem fs = stagingArea.getFileSystem(conf);
  String realUser = UserGroupInformation.getLoginUser().getShortUserName();
  String currentUser = UserGroupInformation.getCurrentUser().getShortUserName();

  if (!fs.exists(stagingArea)) {
    // First use: create the staging root with the expected permissions.
    fs.mkdirs(stagingArea, 
        new FsPermission(JOB_DIR_PERMISSION));
    return stagingArea;
  }

  FileStatus fsStatus = fs.getFileStatus(stagingArea);
  String owner = fsStatus.getOwner();
  // Ownership must match either the submitting or the real (login) user.
  if (!(owner.equals(currentUser) || owner.equals(realUser))) {
     throw new IOException("The ownership on the staging directory " +
                  stagingArea + " is not as expected. " +
                  "It is owned by " + owner + ". The directory must " +
                  "be owned by the submitter " + currentUser + " or " +
                  "by " + realUser);
  }
  if (!fsStatus.getPermission().equals(JOB_DIR_PERMISSION)) {
    LOG.info("Permissions on staging directory " + stagingArea + " are " +
      "incorrect: " + fsStatus.getPermission() + ". Fixing permissions " +
      "to correct value " + JOB_DIR_PERMISSION);
    fs.setPermission(stagingArea, JOB_DIR_PERMISSION);
  }
  return stagingArea;
}
 
Example 12
Project: hadoop   File: HSAdminServer.java   View Source Code Vote up 5 votes
/**
 * Starts the admin RPC server, first capturing the UGI used for admin
 * operations: the login (keytab) user when security is enabled, the
 * current process user otherwise.
 */
@Override
protected void serviceStart() throws Exception {
  loginUGI = UserGroupInformation.isSecurityEnabled()
      ? UserGroupInformation.getLoginUser()
      : UserGroupInformation.getCurrentUser();
  clientRpcServer.start();
}
 
Example 13
Project: dremio-oss   File: ImpersonationUtil.java   View Source Code Vote up 5 votes
/**
 * Return the {@link org.apache.hadoop.security.UserGroupInformation} of user who is running the SabotNode.
 *
 * @return SabotNode process user {@link org.apache.hadoop.security.UserGroupInformation}.
 * @throws RuntimeException wrapping any IOException from the UGI lookup
 */
public static UserGroupInformation getProcessUserUGI() {
  try {
    return UserGroupInformation.getLoginUser();
  } catch (IOException loginFailure) {
    // Without a process UGI impersonation cannot work; fail loudly.
    final String message = "Failed to get process user UserGroupInformation object.";
    logger.error(message, loginFailure);
    throw new RuntimeException(message, loginFailure);
  }
}
 
Example 14
Project: hadoop   File: DFSZKFailoverController.java   View Source Code Vote up 5 votes
/**
 * Rejects RPC admin calls from anyone who is neither in the admin ACL
 * nor the ZKFC process (login) user itself.
 *
 * @throws AccessControlException if the caller is not authorized
 */
@Override
protected void checkRpcAdminAccess() throws IOException, AccessControlException {
  UserGroupInformation caller = UserGroupInformation.getCurrentUser();
  UserGroupInformation zkfcUgi = UserGroupInformation.getLoginUser();
  boolean permitted = adminAcl.isUserAllowed(caller)
      || caller.getShortUserName().equals(zkfcUgi.getShortUserName());
  if (!permitted) {
    String msg = "Disallowed RPC access from " + caller + " at " +
        Server.getRemoteAddress() + ". Not listed in " + DFSConfigKeys.DFS_ADMIN; 
    LOG.warn(msg);
    throw new AccessControlException(msg);
  }
  LOG.info("Allowed RPC access from " + caller + " at " + Server.getRemoteAddress());
}
 
Example 15
Project: hadoop   File: GenerateData.java   View Source Code Vote up 5 votes
/**
 * Submits the data-generation job as the login user and returns it.
 * With compression emulation enabled the job is configured by
 * {@link CompressionEmulationUtil}; otherwise it is set up as a
 * map-only random-bytes generator.
 */
@Override
public Job call() throws IOException, InterruptedException,
                         ClassNotFoundException {
  UserGroupInformation ugi = UserGroupInformation.getLoginUser();
  ugi.doAs(new PrivilegedExceptionAction<Job>() {
    public Job run() throws IOException, ClassNotFoundException,
                            InterruptedException {
      // check if compression emulation is enabled
      if (CompressionEmulationUtil
          .isCompressionEmulationEnabled(job.getConfiguration())) {
        CompressionEmulationUtil.configure(job);
      } else {
        // Plain random-bytes generation: a map-only job emitting raw bytes.
        job.setMapperClass(GenDataMapper.class);
        job.setNumReduceTasks(0);
        job.setMapOutputKeyClass(NullWritable.class);
        job.setMapOutputValueClass(BytesWritable.class);
        job.setInputFormatClass(GenDataFormat.class);
        job.setOutputFormatClass(RawBytesOutputFormat.class);
        job.setJarByClass(GenerateData.class);
        try {
          FileInputFormat.addInputPath(job, new Path("ignored"));
        } catch (IOException e) {
          LOG.error("Error while adding input path ", e);
        }
      }
      job.submit();
      return job;
    }
  });
  return job;
}
 
Example 16
Project: hadoop   File: Statistics.java   View Source Code Vote up 5 votes
/**
 * Creates the statistics collector, building its {@link JobClient}
 * under the login user's credentials.
 */
public Statistics(
  final Configuration conf, int pollingInterval, CountDownLatch startFlag)
  throws IOException, InterruptedException {
    // The JobClient must be constructed as the login user.
    this.cluster = UserGroupInformation.getLoginUser().doAs(
        new PrivilegedExceptionAction<JobClient>() {
          public JobClient run() throws IOException {
            return new JobClient(new JobConf(conf));
          }
        });

  this.jtPollingInterval = pollingInterval;
  this.maxJobCompletedInInterval = conf.getInt(
    MAX_JOBS_COMPLETED_IN_POLL_INTERVAL_KEY, 1);
  this.startFlag = startFlag;
}
 
Example 17
Project: hadoop   File: Client.java   View Source Code Vote up 5 votes
/**
 * Decides whether this client should (re)authenticate to the server over
 * Kerberos: the auth method must be KERBEROS, the login user must hold
 * Kerberos credentials (keytab or TGT), and the caller must be the login
 * user itself (e.g. JT) or a superuser (like oozie) proxying for it.
 *
 * @return true when Kerberos (re)login is appropriate
 * @throws IOException if the login or current user cannot be determined
 */
private synchronized boolean shouldAuthenticateOverKrb() throws IOException {
  UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
  UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
  UserGroupInformation realUser = currentUser.getRealUser();
  // Return the condition directly instead of `if (cond) return true;
  // return false;` — same form as the sibling RpcClientImpl version.
  return authMethod == AuthMethod.KERBEROS && loginUser != null &&
      // Make sure user logged in using Kerberos either keytab or TGT
      loginUser.hasKerberosCredentials() &&
      // relogin only in case it is the login user (e.g. JT)
      // or superuser (like oozie).
      (loginUser.equals(currentUser) || loginUser.equals(realUser));
}
 
Example 18
Project: hadoop-oss   File: KMSClientProvider.java   View Source Code Vote up 4 votes
/**
 * Creates a KMS client bound to the service URL extracted from {@code uri}.
 *
 * <p>Sets up (in order): the service URL, an SSL factory for HTTPS
 * endpoints, connection timeout/retry settings, the encrypted-key cache,
 * and the UGI used for authenticated calls.
 *
 * @param uri  provider URI whose path names the KMS endpoint
 * @param conf configuration supplying timeouts, retries and cache sizing
 * @throws IOException if the URL is invalid or SSL initialization fails
 */
public KMSClientProvider(URI uri, Configuration conf) throws IOException {
  super(conf);
  kmsUrl = createServiceURL(extractKMSPath(uri));
  // HTTPS endpoints need a client-side SSL factory; init failures are
  // surfaced as IOException to honor this constructor's contract.
  if ("https".equalsIgnoreCase(new URL(kmsUrl).getProtocol())) {
    sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
    try {
      sslFactory.init();
    } catch (GeneralSecurityException ex) {
      throw new IOException(ex);
    }
  }
  int timeout = conf.getInt(TIMEOUT_ATTR, DEFAULT_TIMEOUT);
  authRetry = conf.getInt(AUTH_RETRY, DEFAULT_AUTH_RETRY);
  configurator = new TimeoutConnConfigurator(timeout, sslFactory);
  // Cache of pre-generated encrypted key versions; size, low watermark,
  // expiry and refill parallelism all come from configuration.
  encKeyVersionQueue =
      new ValueQueue<KeyProviderCryptoExtension.EncryptedKeyVersion>(
          conf.getInt(
              CommonConfigurationKeysPublic.KMS_CLIENT_ENC_KEY_CACHE_SIZE,
              CommonConfigurationKeysPublic.
                  KMS_CLIENT_ENC_KEY_CACHE_SIZE_DEFAULT),
          conf.getFloat(
              CommonConfigurationKeysPublic.
                  KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK,
              CommonConfigurationKeysPublic.
                  KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK_DEFAULT),
          conf.getInt(
              CommonConfigurationKeysPublic.
                  KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_MS,
              CommonConfigurationKeysPublic.
                  KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_DEFAULT),
          conf.getInt(
              CommonConfigurationKeysPublic.
                  KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS,
              CommonConfigurationKeysPublic.
                  KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT),
          new EncryptedQueueRefiller());
  authToken = new DelegationTokenAuthenticatedURL.Token();
  // Pick the UGI that actually holds credentials: the real user behind a
  // proxy, the login user for token auth, otherwise the current user.
  UserGroupInformation.AuthenticationMethod authMethod =
      UserGroupInformation.getCurrentUser().getAuthenticationMethod();
  if (authMethod == UserGroupInformation.AuthenticationMethod.PROXY) {
    actualUgi = UserGroupInformation.getCurrentUser().getRealUser();
  } else if (authMethod == UserGroupInformation.AuthenticationMethod.TOKEN) {
    actualUgi = UserGroupInformation.getLoginUser();
  } else {
    actualUgi =UserGroupInformation.getCurrentUser();
  }
}
 
Example 19
Project: hadoop-oss   File: KMSPREClientProvider.java   View Source Code Vote up 4 votes
/**
 * Creates a KMS proxy-re-encryption client bound to the service URL
 * extracted from {@code uri}.
 *
 * <p>Sets up (in order): the service URL, an SSL factory for HTTPS
 * endpoints, connection timeout/retry settings, the encrypted-key cache,
 * and the UGI used for authenticated calls.
 *
 * @param uri  provider URI whose path names the KMS endpoint
 * @param conf configuration supplying timeouts, retries and cache sizing
 * @throws IOException if the URL is invalid or SSL initialization fails
 */
public KMSPREClientProvider(URI uri, Configuration conf) throws IOException {
  super(conf);
  kmsUrl = createServiceURL(extractKMSPath(uri));
  // HTTPS endpoints need a client-side SSL factory; init failures are
  // surfaced as IOException to honor this constructor's contract.
  if ("https".equalsIgnoreCase(new URL(kmsUrl).getProtocol())) {
    sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
    try {
      sslFactory.init();
    } catch (GeneralSecurityException ex) {
      throw new IOException(ex);
    }
  }
  int timeout = conf.getInt(TIMEOUT_ATTR, DEFAULT_TIMEOUT);
  authRetry = conf.getInt(AUTH_RETRY, DEFAULT_AUTH_RETRY);
  configurator = new TimeoutConnConfigurator(timeout, sslFactory);
  // Cache of pre-generated encrypted key versions; size, low watermark,
  // expiry and refill parallelism all come from configuration.
  encKeyVersionQueue =
      new ValueQueue<EncryptedKeyVersion>(
          conf.getInt(
              CommonConfigurationKeysPublic.KMS_CLIENT_ENC_KEY_CACHE_SIZE,
              CommonConfigurationKeysPublic.
                  KMS_CLIENT_ENC_KEY_CACHE_SIZE_DEFAULT),
          conf.getFloat(
              CommonConfigurationKeysPublic.
                  KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK,
              CommonConfigurationKeysPublic.
                  KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK_DEFAULT),
          conf.getInt(
              CommonConfigurationKeysPublic.
                  KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_MS,
              CommonConfigurationKeysPublic.
                  KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_DEFAULT),
          conf.getInt(
              CommonConfigurationKeysPublic.
                  KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS,
              CommonConfigurationKeysPublic.
                  KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT),
          new EncryptedQueueRefiller());
  authToken = new DelegationTokenAuthenticatedURL.Token();
  // Pick the UGI that actually holds credentials: the real user behind a
  // proxy, the login user for token auth, otherwise the current user.
  UserGroupInformation.AuthenticationMethod authMethod =
      UserGroupInformation.getCurrentUser().getAuthenticationMethod();
  if (authMethod == UserGroupInformation.AuthenticationMethod.PROXY) {
    actualUgi = UserGroupInformation.getCurrentUser().getRealUser();
  } else if (authMethod == UserGroupInformation.AuthenticationMethod.TOKEN) {
    actualUgi = UserGroupInformation.getLoginUser();
  } else {
    actualUgi =UserGroupInformation.getCurrentUser();
  }
}
 
Example 20
Project: hadoop   File: SubmitterUserResolver.java   View Source Code Vote up 4 votes
/**
 * Resolves every simulated job to the user submitting gridmix, i.e. the
 * login user of this process.
 *
 * @throws IOException if the login user cannot be determined
 */
public SubmitterUserResolver() throws IOException {
  LOG.info(" Current user resolver is SubmitterUserResolver ");
  ugi = UserGroupInformation.getLoginUser();
}