Java Code Examples for org.apache.hadoop.security.authorize.ProxyUsers

The following examples show how to use org.apache.hadoop.security.authorize.ProxyUsers. These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source Project: hadoop   Source File: AdminService.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public RefreshSuperUserGroupsConfigurationResponse refreshSuperUserGroupsConfiguration(
    RefreshSuperUserGroupsConfigurationRequest request)
    throws YarnException, IOException {
  final String argName = "refreshSuperUserGroupsConfiguration";
  final UserGroupInformation user = checkAcls(argName);

  checkRMStatus(user.getShortUserName(), argName, "refresh super-user-groups.");

  // Merge the hadoop common settings from core-site.xml with the RM-specific
  // overrides from yarn-site.xml before refreshing the proxy-user registry.
  Configuration conf = getConfiguration(new Configuration(false),
      YarnConfiguration.CORE_SITE_CONFIGURATION_FILE,
      YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
  RMServerUtils.processRMProxyUsersConf(conf);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);

  RMAuditLogger.logSuccess(user.getShortUserName(), argName, "AdminService");
  return recordFactory
      .newRecordInstance(RefreshSuperUserGroupsConfigurationResponse.class);
}
 
Example 2
Source Project: hadoop   Source File: TestRMProxyUsersConf.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testProxyUserConfiguration() throws Exception {
  // Verifies that the RM loads the proxy-user configuration so that
  // BAR_USER is authorized to impersonate FOO_USER from ipAddress.
  MockRM rm = null;
  try {
    rm = new MockRM(conf);
    rm.start();
    // wait for web server starting
    // NOTE(review): a fixed 10s sleep is a flakiness/latency risk; a
    // readiness poll would be more robust.
    Thread.sleep(10000);
    UserGroupInformation proxyUser =
        UserGroupInformation.createProxyUser(
            BAR_USER.getShortUserName(), FOO_USER);
    try {
      // Expected to succeed without throwing: the proxy-user mapping should
      // have been loaded from the RM configuration on startup.
      ProxyUsers.getDefaultImpersonationProvider().authorize(proxyUser,
          ipAddress);
    } catch (AuthorizationException e) {
      // Exception is not expected
      Assert.fail();
    }
  } finally {
    // Always tear down the RM, even if the assertion failed.
    if (rm != null) {
      rm.stop();
      rm.close();
    }
  }
}
 
Example 3
Source Project: hadoop   Source File: TestClientAccessPrivilege.java    License: Apache License 2.0 6 votes vote down vote up
@BeforeClass
public static void setup() throws Exception {
  // One-time fixture: allow the current OS user to proxy from any group and
  // any host, then bring up a single-datanode mini DFS cluster.

  String currentUser = System.getProperty("user.name");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserGroupConfKey(currentUser), "*");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserIpConfKey(currentUser), "*");
  // Push the proxy-user settings into the static ProxyUsers registry.
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  nn = cluster.getNameNode();

  // Use ephemeral port in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  // Mocked security handler always reports the current OS user.
  securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(
      System.getProperty("user.name"));
}
 
Example 4
Source Project: hadoop   Source File: TestDelegationTokenForProxyUser.java    License: Apache License 2.0 6 votes vote down vote up
@BeforeClass
public static void setUp() throws Exception {
  // Fixture: short-lived delegation tokens plus a proxy-user mapping so
  // PROXY_USER can act on behalf of REAL_USER against a mini DFS cluster.
  config = new HdfsConfiguration();
  config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  // Short token max-lifetime (10s) and renew interval (5s) so expiry and
  // renewal paths can be exercised quickly in tests.
  config.setLong(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
  config.setLong(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
  // REAL_USER may impersonate members of "group1".
  config.setStrings(DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(REAL_USER),
      "group1");
  config.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  configureSuperUserIPAddresses(config, REAL_USER);
  // Port 0 => let the namenode pick any free port.
  FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
  cluster = new MiniDFSCluster.Builder(config).build();
  cluster.waitActive();
  // Load the proxy-user settings configured above into ProxyUsers.
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  ugi = UserGroupInformation.createRemoteUser(REAL_USER);
  proxyUgi = UserGroupInformation.createProxyUserForTesting(PROXY_USER, ugi,
      GROUP_NAMES);
}
 
Example 5
Source Project: hadoop   Source File: TestJspHelper.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds a mocked request whose remote address is either the client itself
 * or a proxy forwarding the client via the X-Forwarded-For header, then
 * returns the effective remote address computed by JspHelper.
 *
 * Fix: the original stubbed getRemoteAddr() to "1.2.3.4" up front, but both
 * branches below immediately re-stub it, so that stub was dead code and has
 * been removed.
 */
private String getRemoteAddr(String clientAddr, String proxyAddr, boolean trusted) {
  HttpServletRequest req = mock(HttpServletRequest.class);

  Configuration conf = new Configuration();
  if (proxyAddr == null) {
    // Direct connection: the client address is the remote address.
    when(req.getRemoteAddr()).thenReturn(clientAddr);
  } else {
    // Proxied connection: the proxy is the remote address and the real
    // client is carried in the X-Forwarded-For header.
    when(req.getRemoteAddr()).thenReturn(proxyAddr);
    when(req.getHeader("X-Forwarded-For")).thenReturn(clientAddr);
    if (trusted) {
      conf.set(ProxyServers.CONF_HADOOP_PROXYSERVERS, proxyAddr);
    }
  }
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  return JspHelper.getRemoteAddr(req);
}
 
Example 6
@Override
public void init(FilterConfig filterConfig) throws ServletException {
  // Initializes the base authentication filter, wires in an external
  // delegation-token secret manager if one was published on the servlet
  // context, records the SASL auth method matching the handler type, and
  // loads the proxy-user configuration.
  super.init(filterConfig);
  AuthenticationHandler handler = getAuthenticationHandler();
  AbstractDelegationTokenSecretManager dtSecretManager =
      (AbstractDelegationTokenSecretManager) filterConfig.getServletContext().
          getAttribute(DELEGATION_TOKEN_SECRET_MANAGER_ATTR);
  if (dtSecretManager != null && handler
      instanceof DelegationTokenAuthenticationHandler) {
    // Reuse the handler reference fetched above instead of calling
    // getAuthenticationHandler() a second time.
    DelegationTokenAuthenticationHandler dtHandler =
        (DelegationTokenAuthenticationHandler) handler;
    dtHandler.setExternalDelegationTokenSecretManager(dtSecretManager);
  }
  if (handler instanceof PseudoAuthenticationHandler ||
      handler instanceof PseudoDelegationTokenAuthenticationHandler) {
    setHandlerAuthMethod(SaslRpcServer.AuthMethod.SIMPLE);
  }
  if (handler instanceof KerberosAuthenticationHandler ||
      handler instanceof KerberosDelegationTokenAuthenticationHandler) {
    setHandlerAuthMethod(SaslRpcServer.AuthMethod.KERBEROS);
  }

  // proxyuser configuration
  Configuration conf = getProxyuserConfiguration(filterConfig);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf, PROXYUSER_PREFIX);
}
 
Example 7
Source Project: hadoop   Source File: Server.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Authorize proxy users to access this server
 * @throws WrappedRpcServerException - user is not allowed to proxy
 */
private void authorizeConnection() throws WrappedRpcServerException {
  try {
    // If auth method is TOKEN, the token was obtained by the
    // real user for the effective user, therefore not required to
    // authorize real user. doAs is allowed only for simple or kerberos
    // authentication
    if (user != null && user.getRealUser() != null
        && (authMethod != AuthMethod.TOKEN)) {
      ProxyUsers.authorize(user, this.getHostAddress());
    }
    // Service-level authorization for the requested protocol.
    authorize(user, protocolName, getHostInetAddress());
    if (LOG.isDebugEnabled()) {
      LOG.debug("Successfully authorized " + connectionContext);
    }
    rpcMetrics.incrAuthorizationSuccesses();
  } catch (AuthorizationException ae) {
    // Log, count the failure, and surface it as a fatal RPC error; the
    // original AuthorizationException is preserved as the cause.
    LOG.info("Connection from " + this
        + " for protocol " + connectionContext.getProtocol()
        + " is unauthorized for user " + user);
    rpcMetrics.incrAuthorizationFailures();
    throw new WrappedRpcServerException(
        RpcErrorCodeProto.FATAL_UNAUTHORIZED, ae);
  }
}
 
Example 8
Source Project: big-c   Source File: AdminService.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public RefreshSuperUserGroupsConfigurationResponse refreshSuperUserGroupsConfiguration(
    RefreshSuperUserGroupsConfigurationRequest request)
    throws YarnException, IOException {
  final String argName = "refreshSuperUserGroupsConfiguration";

  // Verify the caller may administer the RM and that the RM is active.
  UserGroupInformation user = checkAcls(argName);
  checkRMStatus(user.getShortUserName(), argName, "refresh super-user-groups.");

  // Accept hadoop common configs in core-site.xml as well as RM specific
  // configurations in yarn-site.xml
  Configuration conf = getConfiguration(new Configuration(false),
      YarnConfiguration.CORE_SITE_CONFIGURATION_FILE,
      YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
  RMServerUtils.processRMProxyUsersConf(conf);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);

  RMAuditLogger.logSuccess(user.getShortUserName(), argName, "AdminService");
  return recordFactory.newRecordInstance(
      RefreshSuperUserGroupsConfigurationResponse.class);
}
 
Example 9
Source Project: big-c   Source File: TestRMProxyUsersConf.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testProxyUserConfiguration() throws Exception {
  // Verifies that the RM loads the proxy-user configuration so that
  // BAR_USER is authorized to impersonate FOO_USER from ipAddress.
  MockRM rm = null;
  try {
    rm = new MockRM(conf);
    rm.start();
    // wait for web server starting
    // NOTE(review): a fixed 10s sleep is a flakiness/latency risk; a
    // readiness poll would be more robust.
    Thread.sleep(10000);
    UserGroupInformation proxyUser =
        UserGroupInformation.createProxyUser(
            BAR_USER.getShortUserName(), FOO_USER);
    try {
      // Expected to succeed without throwing: the proxy-user mapping should
      // have been loaded from the RM configuration on startup.
      ProxyUsers.getDefaultImpersonationProvider().authorize(proxyUser,
          ipAddress);
    } catch (AuthorizationException e) {
      // Exception is not expected
      Assert.fail();
    }
  } finally {
    // Always tear down the RM, even if the assertion failed.
    if (rm != null) {
      rm.stop();
      rm.close();
    }
  }
}
 
Example 10
Source Project: big-c   Source File: TestClientAccessPrivilege.java    License: Apache License 2.0 6 votes vote down vote up
@BeforeClass
public static void setup() throws Exception {
  // One-time fixture: allow the current OS user to proxy from any group and
  // any host, then bring up a single-datanode mini DFS cluster.

  String currentUser = System.getProperty("user.name");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserGroupConfKey(currentUser), "*");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserIpConfKey(currentUser), "*");
  // Push the proxy-user settings into the static ProxyUsers registry.
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  nn = cluster.getNameNode();

  // Use ephemeral port in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  // Mocked security handler always reports the current OS user.
  securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(
      System.getProperty("user.name"));
}
 
Example 11
Source Project: big-c   Source File: TestDelegationTokenForProxyUser.java    License: Apache License 2.0 6 votes vote down vote up
@BeforeClass
public static void setUp() throws Exception {
  // Fixture: short-lived delegation tokens plus a proxy-user mapping so
  // PROXY_USER can act on behalf of REAL_USER against a mini DFS cluster.
  config = new HdfsConfiguration();
  config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  // Short token max-lifetime (10s) and renew interval (5s) so expiry and
  // renewal paths can be exercised quickly in tests.
  config.setLong(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
  config.setLong(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
  // REAL_USER may impersonate members of "group1".
  config.setStrings(DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(REAL_USER),
      "group1");
  config.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  configureSuperUserIPAddresses(config, REAL_USER);
  // Port 0 => let the namenode pick any free port.
  FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
  cluster = new MiniDFSCluster.Builder(config).build();
  cluster.waitActive();
  // Load the proxy-user settings configured above into ProxyUsers.
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  ugi = UserGroupInformation.createRemoteUser(REAL_USER);
  proxyUgi = UserGroupInformation.createProxyUserForTesting(PROXY_USER, ugi,
      GROUP_NAMES);
}
 
Example 12
Source Project: big-c   Source File: TestJspHelper.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds a mocked request whose remote address is either the client itself
 * or a proxy forwarding the client via the X-Forwarded-For header, then
 * returns the effective remote address computed by JspHelper.
 *
 * Fix: the original stubbed getRemoteAddr() to "1.2.3.4" up front, but both
 * branches below immediately re-stub it, so that stub was dead code and has
 * been removed.
 */
private String getRemoteAddr(String clientAddr, String proxyAddr, boolean trusted) {
  HttpServletRequest req = mock(HttpServletRequest.class);

  Configuration conf = new Configuration();
  if (proxyAddr == null) {
    // Direct connection: the client address is the remote address.
    when(req.getRemoteAddr()).thenReturn(clientAddr);
  } else {
    // Proxied connection: the proxy is the remote address and the real
    // client is carried in the X-Forwarded-For header.
    when(req.getRemoteAddr()).thenReturn(proxyAddr);
    when(req.getHeader("X-Forwarded-For")).thenReturn(clientAddr);
    if (trusted) {
      conf.set(ProxyServers.CONF_HADOOP_PROXYSERVERS, proxyAddr);
    }
  }
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  return JspHelper.getRemoteAddr(req);
}
 
Example 13
Source Project: big-c   Source File: DelegationTokenAuthenticationFilter.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public void init(FilterConfig filterConfig) throws ServletException {
  // Initializes the base authentication filter, wires in an external
  // delegation-token secret manager if one was published on the servlet
  // context, records the SASL auth method matching the handler type, and
  // loads the proxy-user configuration.
  super.init(filterConfig);
  AuthenticationHandler handler = getAuthenticationHandler();
  AbstractDelegationTokenSecretManager dtSecretManager =
      (AbstractDelegationTokenSecretManager) filterConfig.getServletContext().
          getAttribute(DELEGATION_TOKEN_SECRET_MANAGER_ATTR);
  if (dtSecretManager != null && handler
      instanceof DelegationTokenAuthenticationHandler) {
    // Reuse the handler reference fetched above instead of calling
    // getAuthenticationHandler() a second time.
    DelegationTokenAuthenticationHandler dtHandler =
        (DelegationTokenAuthenticationHandler) handler;
    dtHandler.setExternalDelegationTokenSecretManager(dtSecretManager);
  }
  if (handler instanceof PseudoAuthenticationHandler ||
      handler instanceof PseudoDelegationTokenAuthenticationHandler) {
    setHandlerAuthMethod(SaslRpcServer.AuthMethod.SIMPLE);
  }
  if (handler instanceof KerberosAuthenticationHandler ||
      handler instanceof KerberosDelegationTokenAuthenticationHandler) {
    setHandlerAuthMethod(SaslRpcServer.AuthMethod.KERBEROS);
  }

  // proxyuser configuration
  Configuration conf = getProxyuserConfiguration(filterConfig);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf, PROXYUSER_PREFIX);
}
 
Example 14
Source Project: big-c   Source File: Server.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Authorize proxy users to access this server
 * @throws WrappedRpcServerException - user is not allowed to proxy
 */
private void authorizeConnection() throws WrappedRpcServerException {
  try {
    // If auth method is TOKEN, the token was obtained by the
    // real user for the effective user, therefore not required to
    // authorize real user. doAs is allowed only for simple or kerberos
    // authentication
    if (user != null && user.getRealUser() != null
        && (authMethod != AuthMethod.TOKEN)) {
      ProxyUsers.authorize(user, this.getHostAddress());
    }
    // Service-level authorization for the requested protocol.
    authorize(user, protocolName, getHostInetAddress());
    if (LOG.isDebugEnabled()) {
      LOG.debug("Successfully authorized " + connectionContext);
    }
    rpcMetrics.incrAuthorizationSuccesses();
  } catch (AuthorizationException ae) {
    // Log, count the failure, and surface it as a fatal RPC error; the
    // original AuthorizationException is preserved as the cause.
    LOG.info("Connection from " + this
        + " for protocol " + connectionContext.getProtocol()
        + " is unauthorized for user " + user);
    rpcMetrics.incrAuthorizationFailures();
    throw new WrappedRpcServerException(
        RpcErrorCodeProto.FATAL_UNAUTHORIZED, ae);
  }
}
 
Example 15
Source Project: hbase   Source File: ServerRpcConnection.java    License: Apache License 2.0 6 votes vote down vote up
private boolean authorizeConnection() throws IOException {
  // Returns true if the connection is authorized; on failure, responds to
  // the client with an access-denied error and returns false.
  try {
    // If auth method is DIGEST, the token was obtained by the
    // real user for the effective user, therefore not required to
    // authorize real user. doAs is allowed only for simple or kerberos
    // authentication
    if (ugi != null && ugi.getRealUser() != null
        && provider.supportsProtocolAuthentication()) {
      ProxyUsers.authorize(ugi, this.getHostAddress(), this.rpcServer.conf);
    }
    // Service-level authorization against the connection header's protocol.
    this.rpcServer.authorize(ugi, connectionHeader, getHostInetAddress());
    this.rpcServer.metrics.authorizationSuccess();
  } catch (AuthorizationException ae) {
    if (RpcServer.LOG.isDebugEnabled()) {
      RpcServer.LOG.debug("Connection authorization failed: " + ae.getMessage(), ae);
    }
    this.rpcServer.metrics.authorizationFailure();
    // Tell the client why it was rejected rather than just dropping it.
    doRespond(getErrorResponse(ae.getMessage(), new AccessDeniedException(ae)));
    return false;
  }
  return true;
}
 
Example 16
Source Project: hbase   Source File: RESTServlet.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Constructor with existing configuration
 * @param conf existing configuration
 * @param userProvider the login user provider
 * @throws IOException if the current login user cannot be obtained
 */
RESTServlet(final Configuration conf,
    final UserProvider userProvider) throws IOException {
  this.realUser = userProvider.getCurrent().getUGI();
  this.conf = conf;
  registerCustomFilter(conf);

  // Connection-cache housekeeping: defaults are a 10s cleanup interval and
  // a 10min max idle time.
  int cleanInterval = conf.getInt(CLEANUP_INTERVAL, 10 * 1000);
  int maxIdleTime = conf.getInt(MAX_IDLETIME, 10 * 60 * 1000);
  connectionCache = new ConnectionCache(
    conf, userProvider, cleanInterval, maxIdleTime);
  if (supportsProxyuser()) {
    // Load proxy-user (impersonation) settings into the static registry.
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  }

  metrics = new MetricsREST();

  // JVM pause monitoring feeds the REST metrics source.
  pauseMonitor = new JvmPauseMonitor(conf, metrics.getSource());
  pauseMonitor.start();
}
 
Example 17
Source Project: hadoop   Source File: TestNonExistentJob.java    License: Apache License 2.0 5 votes vote down vote up
protected void setUp() throws Exception {
  // Fixture: brings up a mini DFS cluster plus a mini MR cluster and then
  // refreshes the proxy-user registry from the job configuration.
  super.setUp();
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "/tmp");
  }
  int taskTrackers = 2;
  int dataNodes = 2;
  // NOTE(review): proxyUser, proxyGroup and the host list accumulated in sb
  // are built but never used below — the conf.set calls that consumed them
  // appear to have been removed. Candidates for deletion; confirm against
  // version history before removing.
  String proxyUser = System.getProperty("user.name");
  String proxyGroup = "g";
  StringBuilder sb = new StringBuilder();
  sb.append("127.0.0.1,localhost");
  for (InetAddress i : InetAddress.getAllByName(InetAddress.getLocalHost().getHostName())) {
    sb.append(",").append(i.getCanonicalHostName());
  }

  JobConf conf = new JobConf();
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(dataNodes)
      .build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  // Standard MR scratch directories with world-writable /tmp and /user and
  // an owner-only mapred system dir.
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  String nnURI = fileSystem.getUri().toString();
  int numDirs = 1;
  String[] racks = null;
  String[] hosts = null;
  mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
 
Example 18
Source Project: hadoop   Source File: HSAdminServer.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public void refreshSuperUserGroupsConfiguration() throws IOException {
  final String operation = "refreshSuperUserGroupsConfiguration";
  // ACL check first: only permitted admins may trigger the refresh.
  UserGroupInformation user = checkAcls(operation);
  // Rebuild the proxy-user registry from a freshly created configuration.
  ProxyUsers.refreshSuperUserGroupsConfiguration(createConf());
  HSAuditLogger.logSuccess(user.getShortUserName(), operation,
      HISTORY_ADMIN_SERVER);
}
 
Example 19
Source Project: hadoop   Source File: TestReaddir.java    License: Apache License 2.0 5 votes vote down vote up
@BeforeClass
public static void setup() throws Exception {
  // Fixture: let the current OS user proxy from any group/host, start a
  // mini DFS cluster, and run an in-process NFSv3 gateway against it.
  String currentUser = System.getProperty("user.name");
  config.set(
          DefaultImpersonationProvider.getTestProvider().
              getProxySuperuserGroupConfKey(currentUser), "*");
  config.set(
          DefaultImpersonationProvider.getTestProvider().
              getProxySuperuserIpConfKey(currentUser), "*");
  // Push the proxy-user settings into the static ProxyUsers registry.
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  nn = cluster.getNameNode();

  // Use ephemeral port in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);
  
  // Start nfs
  Nfs3 nfs3 = new Nfs3(config);
  nfs3.startServiceInternal(false);

  nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();

  // Mocked security handler always reports the current OS user.
  securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(
      System.getProperty("user.name"));
}
 
Example 20
Source Project: hadoop   Source File: TestAuditLogger.java    License: Apache License 2.0 5 votes vote down vote up
@Before
public void setup() {
  // Reset the dummy audit logger's static state so every test starts clean.
  DummyAuditLogger.initialized = false;
  DummyAuditLogger.logCount = 0;
  DummyAuditLogger.remoteAddr = null;

  // Refresh the proxy-user registry from a fresh HDFS configuration.
  ProxyUsers.refreshSuperUserGroupsConfiguration(new HdfsConfiguration());
}
 
Example 21
Source Project: big-c   Source File: TestNonExistentJob.java    License: Apache License 2.0 5 votes vote down vote up
protected void setUp() throws Exception {
  // Fixture: brings up a mini DFS cluster plus a mini MR cluster and then
  // refreshes the proxy-user registry from the job configuration.
  super.setUp();
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "/tmp");
  }
  int taskTrackers = 2;
  int dataNodes = 2;
  // NOTE(review): proxyUser, proxyGroup and the host list accumulated in sb
  // are built but never used below — the conf.set calls that consumed them
  // appear to have been removed. Candidates for deletion; confirm against
  // version history before removing.
  String proxyUser = System.getProperty("user.name");
  String proxyGroup = "g";
  StringBuilder sb = new StringBuilder();
  sb.append("127.0.0.1,localhost");
  for (InetAddress i : InetAddress.getAllByName(InetAddress.getLocalHost().getHostName())) {
    sb.append(",").append(i.getCanonicalHostName());
  }

  JobConf conf = new JobConf();
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(dataNodes)
      .build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  // Standard MR scratch directories with world-writable /tmp and /user and
  // an owner-only mapred system dir.
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  String nnURI = fileSystem.getUri().toString();
  int numDirs = 1;
  String[] racks = null;
  String[] hosts = null;
  mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
 
Example 22
Source Project: big-c   Source File: HSAdminServer.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public void refreshSuperUserGroupsConfiguration() throws IOException {
  final String operation = "refreshSuperUserGroupsConfiguration";
  // ACL check first: only permitted admins may trigger the refresh.
  UserGroupInformation user = checkAcls(operation);
  // Rebuild the proxy-user registry from a freshly created configuration.
  ProxyUsers.refreshSuperUserGroupsConfiguration(createConf());
  HSAuditLogger.logSuccess(user.getShortUserName(), operation,
      HISTORY_ADMIN_SERVER);
}
 
Example 23
Source Project: big-c   Source File: TestReaddir.java    License: Apache License 2.0 5 votes vote down vote up
@BeforeClass
public static void setup() throws Exception {
  // Fixture: let the current OS user proxy from any group/host, start a
  // mini DFS cluster, and run an in-process NFSv3 gateway against it.
  String currentUser = System.getProperty("user.name");
  config.set(
          DefaultImpersonationProvider.getTestProvider().
              getProxySuperuserGroupConfKey(currentUser), "*");
  config.set(
          DefaultImpersonationProvider.getTestProvider().
              getProxySuperuserIpConfKey(currentUser), "*");
  // Push the proxy-user settings into the static ProxyUsers registry.
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  nn = cluster.getNameNode();

  // Use ephemeral port in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);
  
  // Start nfs
  Nfs3 nfs3 = new Nfs3(config);
  nfs3.startServiceInternal(false);

  nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();

  // Mocked security handler always reports the current OS user.
  securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(
      System.getProperty("user.name"));
}
 
Example 24
Source Project: big-c   Source File: TestAuditLogger.java    License: Apache License 2.0 5 votes vote down vote up
@Before
public void setup() {
  // Reset the dummy audit logger's static state so every test starts clean.
  DummyAuditLogger.initialized = false;
  DummyAuditLogger.logCount = 0;
  DummyAuditLogger.remoteAddr = null;

  // Refresh the proxy-user registry from a fresh HDFS configuration.
  ProxyUsers.refreshSuperUserGroupsConfiguration(new HdfsConfiguration());
}
 
Example 25
Source Project: hbase   Source File: HBasePolicyProvider.java    License: Apache License 2.0 5 votes vote down vote up
public static void init(Configuration conf, ServiceAuthorizationManager authManager) {
  // set service-level authorization security policy
  System.setProperty("hadoop.policy.file", "hbase-policy.xml");
  boolean authorizationEnabled =
      conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false);
  if (authorizationEnabled) {
    // Reload the service ACLs, then the super/proxy-user mappings.
    authManager.refresh(conf, new HBasePolicyProvider());
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  }
}
 
Example 26
Source Project: hbase   Source File: RpcServer.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public synchronized void refreshAuthManager(Configuration conf, PolicyProvider pp) {
  // Ignore warnings that this should be accessed in a static way instead of via an instance;
  // it'll break if you go via static route.
  System.setProperty("hadoop.policy.file", "hbase-policy.xml");
  // Reload service-level ACLs first, then the super/proxy-user mappings.
  this.authManager.refresh(conf, pp);
  LOG.info("Refreshed hbase-policy.xml successfully");
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  LOG.info("Refreshed super and proxy users successfully");
}
 
Example 27
Source Project: hbase   Source File: RESTServletContainer.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * This container is used only if authentication and
 * impersonation is enabled. The remote request user is used
 * as a proxy user for impersonation in invoking any REST service.
 */
@Override
public void service(final HttpServletRequest request,
    final HttpServletResponse response) throws ServletException, IOException {
  final String doAsUserFromQuery = request.getParameter("doAs");
  RESTServlet servlet = RESTServlet.getInstance();
  if (doAsUserFromQuery != null) {
    Configuration conf = servlet.getConfiguration();
    if (!servlet.supportsProxyuser()) {
      throw new ServletException("Support for proxyuser is not configured");
    }
    // Authenticated remote user is attempting to do 'doAs' proxy user.
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(request.getRemoteUser());
    // create and attempt to authorize a proxy user (the client is attempting
    // to do proxy user)
    ugi = UserGroupInformation.createProxyUser(doAsUserFromQuery, ugi);
    // validate the proxy user authorization
    try {
      ProxyUsers.authorize(ugi, request.getRemoteAddr(), conf);
    } catch(AuthorizationException e) {
      // Fix: keep the AuthorizationException as the cause instead of
      // discarding it — the original wrapped only the message.
      throw new ServletException(e.getMessage(), e);
    }
    servlet.setEffectiveUser(doAsUserFromQuery);
  } else {
    String effectiveUser = request.getRemoteUser();
    servlet.setEffectiveUser(effectiveUser);
  }
  super.service(request, response);
}
 
Example 28
Source Project: knox   Source File: HadoopAuthFilter.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public void init(FilterConfig filterConfig) throws ServletException {
  // Load proxy-user (impersonation) rules before the base filter initializes.
  Configuration conf = getProxyuserConfiguration(filterConfig);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf, PROXYUSER_PREFIX);

  Collection<String> ignoredServices = null;

  // Look for GatewayConfig.PROXYUSER_SERVICES_IGNORE_DOAS value in the filter context, which was created
  // using the relevant topology file...
  String configValue = filterConfig.getInitParameter(GatewayConfig.PROXYUSER_SERVICES_IGNORE_DOAS);
  if (configValue != null) {
    configValue = configValue.trim();
    if (!configValue.isEmpty()) {
      // Comma-separated, whitespace-tolerant, lower-cased service names.
      ignoredServices = Arrays.asList(configValue.toLowerCase(Locale.ROOT).split("\\s*,\\s*"));
    }
  }

  // If not set in the topology, look for GatewayConfig.PROXYUSER_SERVICES_IGNORE_DOAS in the
  // gateway site context
  if (ignoredServices == null) {
    Object attributeValue = filterConfig.getServletContext().getAttribute(GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE);
    if (attributeValue instanceof GatewayConfig) {
      ignoredServices = ((GatewayConfig) attributeValue).getServicesToIgnoreDoAs();
    }
  }

  if (ignoredServices != null) {
    ignoreDoAs.addAll(ignoredServices);
  }

  super.init(filterConfig);
}
 
Example 29
Source Project: hadoop   Source File: ResourceManager.java    License: Apache License 2.0 4 votes vote down vote up
@Override
protected void serviceInit(Configuration conf) throws Exception {
  // RM bootstrap: wire the configuration provider, load core-site/yarn-site
  // through it, refresh group and proxy-user mappings, perform login, and
  // create the always-on services before delegating to the composite
  // service's init. The statement order below is significant.
  this.conf = conf;
  this.rmContext = new RMContextImpl();
  
  this.configurationProvider =
      ConfigurationProviderFactory.getConfigurationProvider(conf);
  this.configurationProvider.init(this.conf);
  rmContext.setConfigurationProvider(configurationProvider);

  // load core-site.xml
  InputStream coreSiteXMLInputStream =
      this.configurationProvider.getConfigurationInputStream(this.conf,
          YarnConfiguration.CORE_SITE_CONFIGURATION_FILE);
  if (coreSiteXMLInputStream != null) {
    this.conf.addResource(coreSiteXMLInputStream);
  }

  // Do refreshUserToGroupsMappings with loaded core-site.xml
  Groups.getUserToGroupsMappingServiceWithLoadedConfiguration(this.conf)
      .refresh();

  // Do refreshSuperUserGroupsConfiguration with loaded core-site.xml
  // Or use RM specific configurations to overwrite the common ones first
  // if they exist
  RMServerUtils.processRMProxyUsersConf(conf);
  ProxyUsers.refreshSuperUserGroupsConfiguration(this.conf);

  // load yarn-site.xml
  InputStream yarnSiteXMLInputStream =
      this.configurationProvider.getConfigurationInputStream(this.conf,
          YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
  if (yarnSiteXMLInputStream != null) {
    this.conf.addResource(yarnSiteXMLInputStream);
  }

  validateConfigs(this.conf);
  
  // Set HA configuration should be done before login
  this.rmContext.setHAEnabled(HAUtil.isHAEnabled(this.conf));
  if (this.rmContext.isHAEnabled()) {
    HAUtil.verifyAndSetConfiguration(this.conf);
  }
  
  // Set UGI and do login
  // If security is enabled, use login user
  // If security is not enabled, use current user
  this.rmLoginUGI = UserGroupInformation.getCurrentUser();
  try {
    doSecureLogin();
  } catch(IOException ie) {
    throw new YarnRuntimeException("Failed to login", ie);
  }

  // register the handlers for all AlwaysOn services using setupDispatcher().
  rmDispatcher = setupDispatcher();
  addIfService(rmDispatcher);
  rmContext.setDispatcher(rmDispatcher);

  adminService = createAdminService();
  addService(adminService);
  rmContext.setRMAdminService(adminService);
  
  rmContext.setYarnConfiguration(conf);
  
  createAndInitActiveServices();

  // Resolve the web app bind address after all config sources are loaded.
  webAppAddress = WebAppUtils.getWebAppBindURL(this.conf,
                    YarnConfiguration.RM_BIND_HOST,
                    WebAppUtils.getRMWebAppURLWithoutScheme(this.conf));

  super.serviceInit(this.conf);
}
 
Example 30
Source Project: hadoop   Source File: TestRMAdminService.java    License: Apache License 2.0 4 votes vote down vote up
@Test
public void
    testRefreshSuperUserGroupsWithFileSystemBasedConfigurationProvider()
    throws IOException, YarnException {
  // Verifies that refreshSuperUserGroupsConfiguration picks up proxy-user
  // settings from core-site.xml and that RM-specific yarn-site.xml entries
  // override them on a subsequent refresh.
  configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
      "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");

  //upload default configurations
  uploadDefaultConfiguration();

  try {
    rm = new MockRM(configuration);
    rm.init(configuration);
    rm.start();
  } catch(Exception ex) {
    fail("Should not get any exceptions");
  }

  Configuration coreConf = new Configuration(false);
  coreConf.set("hadoop.proxyuser.test.groups", "test_groups");
  coreConf.set("hadoop.proxyuser.test.hosts", "test_hosts");
  uploadConfiguration(coreConf, "core-site.xml");

  rm.adminService.refreshSuperUserGroupsConfiguration(
      RefreshSuperUserGroupsConfigurationRequest.newInstance());
  // assertEquals reports the actual size on failure, unlike the original
  // assertTrue(size() == 1) which only says "expected true".
  Assert.assertEquals(1, ProxyUsers.getDefaultImpersonationProvider()
      .getProxyGroups().get("hadoop.proxyuser.test.groups").size());
  Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyGroups()
      .get("hadoop.proxyuser.test.groups").contains("test_groups"));

  Assert.assertEquals(1, ProxyUsers.getDefaultImpersonationProvider()
      .getProxyHosts().get("hadoop.proxyuser.test.hosts").size());
  Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyHosts()
      .get("hadoop.proxyuser.test.hosts").contains("test_hosts"));

  Configuration yarnConf = new Configuration(false);
  yarnConf.set("yarn.resourcemanager.proxyuser.test.groups", "test_groups_1");
  yarnConf.set("yarn.resourcemanager.proxyuser.test.hosts", "test_hosts_1");
  uploadConfiguration(yarnConf, "yarn-site.xml");

  // RM specific configs will overwrite the common ones
  rm.adminService.refreshSuperUserGroupsConfiguration(
      RefreshSuperUserGroupsConfigurationRequest.newInstance());
  Assert.assertEquals(1, ProxyUsers.getDefaultImpersonationProvider()
      .getProxyGroups().get("hadoop.proxyuser.test.groups").size());
  Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyGroups()
      .get("hadoop.proxyuser.test.groups").contains("test_groups_1"));

  Assert.assertEquals(1, ProxyUsers.getDefaultImpersonationProvider()
      .getProxyHosts().get("hadoop.proxyuser.test.hosts").size());
  Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyHosts()
      .get("hadoop.proxyuser.test.hosts").contains("test_hosts_1"));
}