Java Code Examples for org.apache.hadoop.security.UserGroupInformation#getCurrentUser()

The following examples show how to use org.apache.hadoop.security.UserGroupInformation#getCurrentUser(). They are drawn from open-source projects; the source file and originating project are noted above each example.
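Before the project-specific examples, here is a minimal, self-contained sketch of the call itself (an illustration written for this page, not taken from any project below). getCurrentUser() returns the UserGroupInformation for the current access-control context; without an explicit Kerberos or keytab login it falls back to the operating-system user.

import java.io.IOException;

import org.apache.hadoop.security.UserGroupInformation;

public class CurrentUserExample {
  public static void main(String[] args) throws IOException {
    // UGI for the current security context; with no explicit login this
    // is derived from the process's operating-system user.
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    System.out.println("user name:   " + ugi.getUserName());
    System.out.println("short name:  " + ugi.getShortUserName());
    System.out.println("auth method: " + ugi.getAuthenticationMethod());
    System.out.println("token count: " + ugi.getTokens().size());
  }
}
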
Example 1
Source File: Utils.java    From flink with Apache License 2.0
public static void setTokensFor(ContainerLaunchContext amContainer, List<Path> paths, Configuration conf) throws IOException {
	Credentials credentials = new Credentials();
	// for HDFS
	TokenCache.obtainTokensForNamenodes(credentials, paths.toArray(new Path[0]), conf);
	// for HBase
	obtainTokenForHBase(credentials, conf);
	// for user
	UserGroupInformation currUsr = UserGroupInformation.getCurrentUser();

	Collection<Token<? extends TokenIdentifier>> usrTok = currUsr.getTokens();
	for (Token<? extends TokenIdentifier> token : usrTok) {
		final Text id = new Text(token.getIdentifier());
		LOG.info("Adding user token " + id + " with " + token);
		credentials.addToken(id, token);
	}
	try (DataOutputBuffer dob = new DataOutputBuffer()) {
		credentials.writeTokenStorageToStream(dob);

		if (LOG.isDebugEnabled()) {
			LOG.debug("Wrote tokens. Credentials buffer length: " + dob.getLength());
		}

		ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
		amContainer.setTokens(securityTokens);
	}
}
 
Example 2
Source File: MRClientService.java    From big-c with Apache License 2.0
@SuppressWarnings("unchecked")
@Override
public FailTaskAttemptResponse failTaskAttempt(
    FailTaskAttemptRequest request) throws IOException {
  TaskAttemptId taskAttemptId = request.getTaskAttemptId();
  UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser();
  String message = "Fail task attempt " + taskAttemptId
      + " received from " + callerUGI + " at "
      + Server.getRemoteAddress();
  LOG.info(message);
  verifyAndGetAttempt(taskAttemptId, JobACL.MODIFY_JOB);
  appContext.getEventHandler().handle(
      new TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message));
  appContext.getEventHandler().handle(
      new TaskAttemptEvent(taskAttemptId, 
          TaskAttemptEventType.TA_FAILMSG));
  FailTaskAttemptResponse response = recordFactory.
    newRecordInstance(FailTaskAttemptResponse.class);
  return response;
}
 
Example 3
Source File: MRDelegationTokenRenewer.java    From hadoop with Apache License 2.0
protected MRClientProtocol instantiateHistoryProxy(final Configuration conf,
    final InetSocketAddress hsAddress) throws IOException {

  if (LOG.isDebugEnabled()) {
    LOG.debug("Connecting to MRHistoryServer at: " + hsAddress);
  }
  final YarnRPC rpc = YarnRPC.create(conf);
  UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
  return currentUser.doAs(new PrivilegedAction<MRClientProtocol>() {
    @Override
    public MRClientProtocol run() {
      return (MRClientProtocol) rpc.getProxy(HSClientProtocol.class,
          hsAddress, conf);
    }
  });
}
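Note that Example 3 uses the doAs overload taking a PrivilegedAction, whose run() cannot throw checked exceptions. UserGroupInformation also offers doAs(PrivilegedExceptionAction) for actions that do throw; a minimal sketch under that assumption (the FileSystem call is only illustrative):

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsSketch {
  static FileSystem openAsCurrentUser(final Configuration conf)
      throws IOException, InterruptedException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    // This overload propagates checked exceptions from run(),
    // surfacing them as IOException or InterruptedException.
    return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      @Override
      public FileSystem run() throws Exception {
        return FileSystem.get(conf);
      }
    });
  }
}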
 
Example 4
Source File: DatanodeProtocolClientSideTranslatorPB.java    From big-c with Apache License 2.0
public DatanodeProtocolClientSideTranslatorPB(InetSocketAddress nameNodeAddr,
    Configuration conf) throws IOException {
  RPC.setProtocolEngine(conf, DatanodeProtocolPB.class,
      ProtobufRpcEngine.class);
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  rpcProxy = createNamenode(nameNodeAddr, conf, ugi);
}
 
Example 5
Source File: JobContextImpl.java    From hadoop with Apache License 2.0
public JobContextImpl(Configuration conf, JobID jobId) {
  if (conf instanceof JobConf) {
    this.conf = (JobConf)conf;
  } else {
    this.conf = new JobConf(conf);
  }
  this.jobId = jobId;
  this.credentials = this.conf.getCredentials();
  try {
    this.ugi = UserGroupInformation.getCurrentUser();
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
 
Example 6
Source File: ViewFsBaseTest.java    From big-c with Apache License 2.0
@Test
public void testInternalGetAclStatus() throws IOException {
  final UserGroupInformation currentUser =
      UserGroupInformation.getCurrentUser();
  AclStatus aclStatus = fcView.getAclStatus(new Path("/internalDir"));
  assertEquals(aclStatus.getOwner(), currentUser.getUserName());
  assertEquals(aclStatus.getGroup(), currentUser.getGroupNames()[0]);
  assertEquals(aclStatus.getEntries(),
      AclUtil.getMinimalAcl(PERMISSION_555));
  assertFalse(aclStatus.isStickyBit());
}
 
Example 7
Source File: HSAdminServer.java    From hadoop with Apache License 2.0
@Override
protected void serviceStart() throws Exception {
  if (UserGroupInformation.isSecurityEnabled()) {
    loginUGI = UserGroupInformation.getLoginUser();
  } else {
    loginUGI = UserGroupInformation.getCurrentUser();
  }
  clientRpcServer.start();
}
 
Example 8
Source File: HBaseClient.java    From hbase-tools with Apache License 2.0
private static synchronized void login(Args args, Configuration conf) throws Exception {
    if (args.has(Args.OPTION_DEBUG)) {
        System.setProperty("sun.security.krb5.debug", "true");
        System.setProperty("sun.security.spnego.debug", "true");
    }

    System.setProperty("java.security.auth.login.config", createJaasConfigFile(args));
    System.setProperty("java.security.krb5.conf", kerberosConfigFile(args));

    Config krbConfig = Config.getInstance();
    final String realm;
    if (args.has(Args.OPTION_REALM)) {
        realm = (String) args.valueOf(Args.OPTION_REALM);
        System.setProperty("java.security.krb5.realm", realm);
        System.setProperty("java.security.krb5.kdc", krbConfig.getKDCList(realm));
        Config.refresh();
    } else {
        realm = krbConfig.getDefaultRealm();
    }

    updateConf(conf, realm);

    if (args.has(Args.OPTION_KEY_TAB, Args.OPTION_KEY_TAB_SHORT)) {
        UserGroupInformation.setConfiguration(conf);
        UserGroupInformation.loginUserFromKeytab(principal(args), (String) args.valueOf(Args.OPTION_KEY_TAB, Args.OPTION_KEY_TAB_SHORT));
    } else {
        loginWithPassword(args, conf);
    }

    UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
    System.out.println(currentUser + "\n");
}
 
Example 9
Source File: KerberosFactory.java    From Bats with Apache License 2.0
@Override
public UserGroupInformation createAndLoginUser(final Map<String, ?> properties) throws IOException {
  final Configuration conf = new SecurityConfiguration();
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
      UserGroupInformation.AuthenticationMethod.KERBEROS.toString());
  UserGroupInformation.setConfiguration(conf);

  final String keytab = (String) properties.get(DrillProperties.KEYTAB);
  final boolean assumeSubject = properties.containsKey(DrillProperties.KERBEROS_FROM_SUBJECT) &&
      Boolean.parseBoolean((String) properties.get(DrillProperties.KERBEROS_FROM_SUBJECT));
  try {
    final UserGroupInformation ugi;
    if (assumeSubject) {
      ugi = UserGroupInformation.getUGIFromSubject(Subject.getSubject(AccessController.getContext()));
      logger.debug("Assuming subject for {}.", ugi.getShortUserName());
    } else {
      if (keytab != null) {
        ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
            (String) properties.get(DrillProperties.USER), keytab);
        logger.debug("Logged in {} using keytab.", ugi.getShortUserName());
      } else {
        // includes Kerberos ticket login
        ugi = UserGroupInformation.getCurrentUser();
        logger.debug("Logged in {} using ticket.", ugi.getShortUserName());
      }
    }
    return ugi;
  } catch (final IOException e) {
    logger.debug("Login failed.", e);
    final Throwable cause = e.getCause();
    if (cause instanceof LoginException) {
      throw new SaslException("Failed to login.", cause);
    }
    throw new SaslException("Unexpected failure trying to login.", cause);
  }
}
 
Example 10
Source File: HistoryClientService.java    From hadoop with Apache License 2.0
private void checkAccess(Job job, JobACL jobOperation)
    throws IOException {

  UserGroupInformation callerUGI;
  callerUGI = UserGroupInformation.getCurrentUser();

  if (!job.checkAccess(callerUGI, jobOperation)) {
    throw new IOException(new AccessControlException("User "
        + callerUGI.getShortUserName() + " cannot perform operation "
        + jobOperation.name() + " on " + job.getID()));
  }
}
 
Example 11
Source File: HiveMetaStoreBridge.java    From incubator-atlas with Apache License 2.0
public static void main(String[] args) throws AtlasHookException {
    try {
        Configuration atlasConf = ApplicationProperties.get();
        String[] atlasEndpoint = atlasConf.getStringArray(ATLAS_ENDPOINT);
        if (atlasEndpoint == null || atlasEndpoint.length == 0) {
            atlasEndpoint = new String[] { DEFAULT_DGI_URL };
        }
        AtlasClient atlasClient;

        if (!AuthenticationUtil.isKerberosAuthenticationEnabled()) {
            String[] basicAuthUsernamePassword = AuthenticationUtil.getBasicAuthenticationInput();
            atlasClient = new AtlasClient(atlasEndpoint, basicAuthUsernamePassword);
        } else {
            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
            atlasClient = new AtlasClient(ugi, ugi.getShortUserName(), atlasEndpoint);
        }

        Options options = new Options();
        CommandLineParser parser = new BasicParser();
        CommandLine cmd = parser.parse(options, args);

        boolean failOnError = false;
        if (cmd.hasOption("failOnError")) {
            failOnError = true;
        }

        HiveMetaStoreBridge hiveMetaStoreBridge = new HiveMetaStoreBridge(atlasConf, new HiveConf(), atlasClient);
        hiveMetaStoreBridge.importHiveMetadata(failOnError);
    } catch (Exception e) {
        throw new AtlasHookException("HiveMetaStoreBridge.main() failed.", e);
    }
}
 
Example 12
Source File: YarnEntrypointUtils.java    From Flink-CEPplus with Apache License 2.0
public static void logYarnEnvironmentInformation(Map<String, String> env, Logger log) throws IOException {
	final String yarnClientUsername = env.get(YarnConfigKeys.ENV_HADOOP_USER_NAME);
	Preconditions.checkArgument(
		yarnClientUsername != null,
		"YARN client user name environment variable %s not set",
		YarnConfigKeys.ENV_HADOOP_USER_NAME);

	UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();

	log.info("YARN daemon is running as: {} Yarn client user obtainer: {}",
		currentUser.getShortUserName(), yarnClientUsername);
}
 
Example 13
Source File: TestVertexManager.java    From tez with Apache License 2.0
@Test(timeout = 5000)
public void testVertexManagerPluginCtorAccessUserPayload() throws IOException, TezException {
  byte[] randomUserPayload = {1,2,3};
  UserPayload userPayload = UserPayload.create(ByteBuffer.wrap(randomUserPayload));
  VertexManager vm =
      new VertexManager(
          VertexManagerPluginDescriptor.create(CheckUserPayloadVertexManagerPlugin.class
              .getName()).setUserPayload(userPayload), UserGroupInformation.getCurrentUser(),
          mockVertex, mockAppContext, mock(StateChangeNotifier.class));
}
 
Example 14
Source File: FileSystem.java    From hadoop with Apache License 2.0
Key(URI uri, Configuration conf, long unique) throws IOException {
  scheme = uri.getScheme()==null ?
      "" : StringUtils.toLowerCase(uri.getScheme());
  authority = uri.getAuthority()==null ?
      "" : StringUtils.toLowerCase(uri.getAuthority());
  this.unique = unique;
  
  this.ugi = UserGroupInformation.getCurrentUser();
}
 
Example 15
Source File: NamenodeWebHdfsMethods.java    From big-c with Apache License 2.0
private static StreamingOutput getListingStream(final NamenodeProtocols np, 
    final String p) throws IOException {
  // allows exceptions like FNF or ACE to prevent http response of 200 for
  // a failure since we can't (currently) return error responses in the
  // middle of a streaming operation
  final DirectoryListing firstDirList = getDirectoryListing(np, p,
      HdfsFileStatus.EMPTY_NAME);

  // must save ugi because the streaming object will be executed outside
  // the remote user's ugi
  final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  return new StreamingOutput() {
    @Override
    public void write(final OutputStream outstream) throws IOException {
      final PrintWriter out = new PrintWriter(new OutputStreamWriter(
          outstream, Charsets.UTF_8));
      out.println("{\"" + FileStatus.class.getSimpleName() + "es\":{\""
          + FileStatus.class.getSimpleName() + "\":[");

      try {
        // restore remote user's ugi
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
          @Override
          public Void run() throws IOException {
            long n = 0;
            for (DirectoryListing dirList = firstDirList; ;
                 dirList = getDirectoryListing(np, p, dirList.getLastName())
            ) {
              // send each segment of the directory listing
              for (HdfsFileStatus s : dirList.getPartialListing()) {
                if (n++ > 0) {
                  out.println(',');
                }
                out.print(JsonUtil.toJsonString(s, false));
              }
              // stop if last segment
              if (!dirList.hasMore()) {
                break;
              }
            }
            return null;
          }
        });
      } catch (InterruptedException e) {
        throw new IOException(e);
      }
      
      out.println();
      out.println("]}}");
      out.flush();
    }
  };
}
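The "must save ugi" comment above points at a general property: getCurrentUser() is context-sensitive, so inside doAs it returns the UGI the action was entered with. A small illustrative sketch, not from any of the projects above (createRemoteUser builds an unauthenticated UGI, often used in tests; the user name "alice" is made up):

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class CurrentUserContextSketch {
  public static void main(String[] args) throws Exception {
    UserGroupInformation alice = UserGroupInformation.createRemoteUser("alice");
    String seen = alice.doAs(new PrivilegedExceptionAction<String>() {
      @Override
      public String run() throws IOException {
        // Inside doAs, getCurrentUser() reflects the enclosing UGI.
        return UserGroupInformation.getCurrentUser().getUserName();
      }
    });
    System.out.println(seen); // prints "alice"
  }
}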
 
Example 16
Source File: TestAggregatedLogFormat.java    From hadoop with Apache License 2.0
@Test(timeout=10000)
public void testContainerLogsFileAccess() throws IOException {
  // This test will run only if NativeIO is enabled as SecureIOUtils 
  // require it to be enabled.
  Assume.assumeTrue(NativeIO.isAvailable());
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  UserGroupInformation.setConfiguration(conf);
  File workDir = new File(testWorkDir, "testContainerLogsFileAccess1");
  Path remoteAppLogFile =
      new Path(workDir.getAbsolutePath(), "aggregatedLogFile");
  Path srcFileRoot = new Path(workDir.getAbsolutePath(), "srcFiles");

  String data = "Log File content for container : ";
  // Create files for container1. The log aggregator will try to read log
  // files owned by an illegal user.
  ApplicationId applicationId = ApplicationId.newInstance(1, 1);
  ApplicationAttemptId applicationAttemptId =
      ApplicationAttemptId.newInstance(applicationId, 1);
  ContainerId testContainerId1 =
      ContainerId.newContainerId(applicationAttemptId, 1);
  Path appDir =
      new Path(srcFileRoot, testContainerId1.getApplicationAttemptId()
          .getApplicationId().toString());
  Path srcFilePath1 = new Path(appDir, testContainerId1.toString());
  String stdout = "stdout";
  String stderr = "stderr";
  writeSrcFile(srcFilePath1, stdout, data + testContainerId1.toString()
      + stdout);
  writeSrcFile(srcFilePath1, stderr, data + testContainerId1.toString()
      + stderr);

  UserGroupInformation ugi =
      UserGroupInformation.getCurrentUser();
  LogWriter logWriter = new LogWriter(conf, remoteAppLogFile, ugi);

  LogKey logKey = new LogKey(testContainerId1);
  String randomUser = "randomUser";
  LogValue logValue =
      spy(new LogValue(Collections.singletonList(srcFileRoot.toString()),
          testContainerId1, randomUser));
  
  // Simulate a situation where the first log file is owned by a different
  // user (probably a symlink) and the second one by the user itself. The
  // first file should not be aggregated, because its owner name is invalid.
  when(logValue.getUser()).thenReturn(randomUser).thenReturn(
      ugi.getShortUserName());
  logWriter.append(logKey, logValue);

  logWriter.close();
  
  BufferedReader in =
      new BufferedReader(new FileReader(new File(remoteAppLogFile
          .toUri().getRawPath())));
  String line;
  StringBuffer sb = new StringBuffer("");
  while ((line = in.readLine()) != null) {
    LOG.info(line);
    sb.append(line);
  }
  line = sb.toString();

  String expectedOwner = ugi.getShortUserName();
  if (Path.WINDOWS) {
    final String adminsGroupString = "Administrators";
    if (Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString)) {
      expectedOwner = adminsGroupString;
    }
  }

  // The stderr file should not be aggregated, so its log message
  // must not appear in the output.
  String stdoutFile1 =
      StringUtils.join(
          File.separator,
          Arrays.asList(new String[] {
              workDir.getAbsolutePath(), "srcFiles",
              testContainerId1.getApplicationAttemptId().getApplicationId()
                  .toString(), testContainerId1.toString(), stderr }));

  // The file: stdout is expected to be aggregated.
  String stdoutFile2 =
      StringUtils.join(
          File.separator,
          Arrays.asList(new String[] {
              workDir.getAbsolutePath(), "srcFiles",
              testContainerId1.getApplicationAttemptId().getApplicationId()
                  .toString(), testContainerId1.toString(), stdout }));
  String message2 =
      "Owner '" + expectedOwner + "' for path "
          + stdoutFile2 + " did not match expected owner '"
          + ugi.getShortUserName() + "'";
  
  Assert.assertFalse(line.contains(message2));
  Assert.assertFalse(line.contains(data + testContainerId1.toString()
      + stderr));
  Assert.assertTrue(line.contains(data + testContainerId1.toString()
      + stdout));
}
 
Example 17
Source File: TestAggregatedLogFormat.java    From hadoop with Apache License 2.0
private void testReadAcontainerLog(boolean logUploadedTime) throws Exception {
  Configuration conf = new Configuration();
  File workDir = new File(testWorkDir, "testReadAcontainerLogs1");
  Path remoteAppLogFile =
      new Path(workDir.getAbsolutePath(), "aggregatedLogFile");
  Path srcFileRoot = new Path(workDir.getAbsolutePath(), "srcFiles");
  ContainerId testContainerId = TestContainerId.newContainerId(1, 1, 1, 1);
  Path t =
      new Path(srcFileRoot, testContainerId.getApplicationAttemptId()
          .getApplicationId().toString());
  Path srcFilePath = new Path(t, testContainerId.toString());

  int numChars = 80000;

  // create a sub-folder under srcFilePath
  // and create file logs in this sub-folder.
  // We only aggregate top level files.
  // So, this log file should be ignored.
  Path subDir = new Path(srcFilePath, "subDir");
  fs.mkdirs(subDir);
  writeSrcFile(subDir, "logs", numChars);

  // create file stderr and stdout in containerLogDir
  writeSrcFile(srcFilePath, "stderr", numChars);
  writeSrcFile(srcFilePath, "stdout", numChars);

  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  LogWriter logWriter = new LogWriter(conf, remoteAppLogFile, ugi);

  LogKey logKey = new LogKey(testContainerId);
  LogValue logValue =
      new LogValue(Collections.singletonList(srcFileRoot.toString()),
          testContainerId, ugi.getShortUserName());

  // When we try to open a FileInputStream for stderr, it will throw an
  // IOException, so log aggregation for stderr is skipped.
  LogValue spyLogValue = spy(logValue);
  File errorFile = new File((new Path(srcFilePath, "stderr")).toString());
  doThrow(new IOException("Mock can not open FileInputStream")).when(
    spyLogValue).secureOpenFile(errorFile);

  logWriter.append(logKey, spyLogValue);
  logWriter.close();

  // make sure permission are correct on the file
  FileStatus fsStatus =  fs.getFileStatus(remoteAppLogFile);
  Assert.assertEquals("permissions on log aggregation file are wrong",  
    FsPermission.createImmutable((short) 0640), fsStatus.getPermission()); 

  LogReader logReader = new LogReader(conf, remoteAppLogFile);
  LogKey rLogKey = new LogKey();
  DataInputStream dis = logReader.next(rLogKey);
  Writer writer = new StringWriter();

  if (logUploadedTime) {
    LogReader.readAcontainerLogs(dis, writer, System.currentTimeMillis());
  } else {
    LogReader.readAcontainerLogs(dis, writer);
  }

  // We should only do the log aggregation for stdout.
  // Since we could not open the fileInputStream for stderr, this file is not
  // aggregated.
  String s = writer.toString();
  int expectedLength =
      "LogType:stdout".length()
          + (logUploadedTime ? ("\nLog Upload Time:" + Times.format(System
            .currentTimeMillis())).length() : 0)
          + ("\nLogLength:" + numChars).length()
          + "\nLog Contents:\n".length() + numChars + "\n".length()
          + "End of LogType:stdout\n".length();
  Assert.assertTrue("LogType not matched", s.contains("LogType:stdout"));
  Assert.assertTrue("log file:stderr should not be aggregated.", !s.contains("LogType:stderr"));
  Assert.assertTrue("log file:logs should not be aggregated.", !s.contains("LogType:logs"));
  Assert.assertTrue("LogLength not matched", s.contains("LogLength:" + numChars));
  Assert.assertTrue("Log Contents not matched", s.contains("Log Contents"));
  
  StringBuilder sb = new StringBuilder();
  for (int i = 0 ; i < numChars ; i++) {
    sb.append(filler);
  }
  String expectedContent = sb.toString();
  Assert.assertTrue("Log content incorrect", s.contains(expectedContent));
  
  Assert.assertEquals(expectedLength, s.length());
}
 
Example 18
Source File: RPC.java    From big-c with Apache License 2.0
/**
 * Get a protocol proxy that contains a proxy connection to a remote server
 * and a set of methods that are supported by the server
 * 
 * @param protocol protocol class
 * @param clientVersion client version
 * @param addr remote address
 * @param conf configuration to use
 * @param factory socket factory
 * @return the protocol proxy
 * @throws IOException if the far end threw a RemoteException
 */
public static <T> ProtocolProxy<T> getProtocolProxy(Class<T> protocol,
                              long clientVersion,
                              InetSocketAddress addr, Configuration conf,
                              SocketFactory factory) throws IOException {
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  return getProtocolProxy(protocol, clientVersion, addr, ugi, conf, factory);
}
 
Example 19
Source File: ViewFileSystem.java    From hadoop with Apache License 2.0
/**
 * This is the constructor with the signature needed by
 * {@link FileSystem#createFileSystem(URI, Configuration)}.
 * 
 * After this constructor is called, initialize() is called.
 * @throws IOException 
 */
public ViewFileSystem() throws IOException {
  ugi = UserGroupInformation.getCurrentUser();
  creationTime = Time.now();
}