Java Code Examples for org.apache.hadoop.security.authorize.ProxyUsers#refreshSuperUserGroupsConfiguration()
The following examples show how to use
org.apache.hadoop.security.authorize.ProxyUsers#refreshSuperUserGroupsConfiguration().
Each example is drawn from an open-source project; the source file, project, and license are noted above it.
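Before the project examples, here is a minimal, self-contained sketch of the basic call pattern. This is not taken from any project below: the superuser name "alice" and the group/host values are placeholders. The hadoop.proxyuser.<user>.groups and hadoop.proxyuser.<user>.hosts keys are Hadoop's standard proxy-user properties; the examples further down also exercise the no-argument overload (re-reads the current default configuration) and the overload that accepts a custom property prefix.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authorize.ProxyUsers;

public class RefreshProxyUsersSketch {
  public static void main(String[] args) {
    // Proxy-user rules live under hadoop.proxyuser.<user>.groups and
    // hadoop.proxyuser.<user>.hosts. "alice" and the values below are
    // placeholders for this sketch.
    Configuration conf = new Configuration();
    conf.set("hadoop.proxyuser.alice.groups", "users");
    conf.set("hadoop.proxyuser.alice.hosts", "host1.example.com");

    // Load the rules into ProxyUsers' static authorization state; until
    // this call, impersonation checks still see the previous rules.
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  }
}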
Example 1
Source File: AdminService.java From hadoop with Apache License 2.0
@Override
public RefreshSuperUserGroupsConfigurationResponse refreshSuperUserGroupsConfiguration(
    RefreshSuperUserGroupsConfigurationRequest request)
    throws YarnException, IOException {
  String argName = "refreshSuperUserGroupsConfiguration";
  UserGroupInformation user = checkAcls(argName);

  checkRMStatus(user.getShortUserName(), argName, "refresh super-user-groups.");

  // Accept hadoop common configs in core-site.xml as well as RM specific
  // configurations in yarn-site.xml
  Configuration conf =
      getConfiguration(new Configuration(false),
          YarnConfiguration.CORE_SITE_CONFIGURATION_FILE,
          YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
  RMServerUtils.processRMProxyUsersConf(conf);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  RMAuditLogger.logSuccess(user.getShortUserName(), argName, "AdminService");

  return recordFactory.newRecordInstance(
      RefreshSuperUserGroupsConfigurationResponse.class);
}
Example 2
Source File: TestJspHelper.java From big-c with Apache License 2.0
private String getRemoteAddr(String clientAddr, String proxyAddr,
    boolean trusted) {
  HttpServletRequest req = mock(HttpServletRequest.class);
  when(req.getRemoteAddr()).thenReturn("1.2.3.4");

  Configuration conf = new Configuration();
  if (proxyAddr == null) {
    when(req.getRemoteAddr()).thenReturn(clientAddr);
  } else {
    when(req.getRemoteAddr()).thenReturn(proxyAddr);
    when(req.getHeader("X-Forwarded-For")).thenReturn(clientAddr);
    if (trusted) {
      conf.set(ProxyServers.CONF_HADOOP_PROXYSERVERS, proxyAddr);
    }
  }
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  return JspHelper.getRemoteAddr(req);
}
Example 3
Source File: RESTServlet.java From hbase with Apache License 2.0
/**
 * Constructor with existing configuration
 * @param conf existing configuration
 * @param userProvider the login user provider
 * @throws IOException
 */
RESTServlet(final Configuration conf, final UserProvider userProvider)
    throws IOException {
  this.realUser = userProvider.getCurrent().getUGI();
  this.conf = conf;
  registerCustomFilter(conf);

  int cleanInterval = conf.getInt(CLEANUP_INTERVAL, 10 * 1000);
  int maxIdleTime = conf.getInt(MAX_IDLETIME, 10 * 60 * 1000);
  connectionCache = new ConnectionCache(
      conf, userProvider, cleanInterval, maxIdleTime);
  if (supportsProxyuser()) {
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  }

  metrics = new MetricsREST();

  pauseMonitor = new JvmPauseMonitor(conf, metrics.getSource());
  pauseMonitor.start();
}
Example 4
Source File: AdminService.java From big-c with Apache License 2.0
@Override
public RefreshSuperUserGroupsConfigurationResponse refreshSuperUserGroupsConfiguration(
    RefreshSuperUserGroupsConfigurationRequest request)
    throws YarnException, IOException {
  String argName = "refreshSuperUserGroupsConfiguration";
  UserGroupInformation user = checkAcls(argName);

  checkRMStatus(user.getShortUserName(), argName, "refresh super-user-groups.");

  // Accept hadoop common configs in core-site.xml as well as RM specific
  // configurations in yarn-site.xml
  Configuration conf =
      getConfiguration(new Configuration(false),
          YarnConfiguration.CORE_SITE_CONFIGURATION_FILE,
          YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
  RMServerUtils.processRMProxyUsersConf(conf);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  RMAuditLogger.logSuccess(user.getShortUserName(), argName, "AdminService");

  return recordFactory.newRecordInstance(
      RefreshSuperUserGroupsConfigurationResponse.class);
}
Example 5
Source File: DelegationTokenAuthenticationFilter.java From hadoop with Apache License 2.0
@Override
public void init(FilterConfig filterConfig) throws ServletException {
  super.init(filterConfig);
  AuthenticationHandler handler = getAuthenticationHandler();
  AbstractDelegationTokenSecretManager dtSecretManager =
      (AbstractDelegationTokenSecretManager) filterConfig.getServletContext().
          getAttribute(DELEGATION_TOKEN_SECRET_MANAGER_ATTR);
  if (dtSecretManager != null &&
      handler instanceof DelegationTokenAuthenticationHandler) {
    DelegationTokenAuthenticationHandler dtHandler =
        (DelegationTokenAuthenticationHandler) getAuthenticationHandler();
    dtHandler.setExternalDelegationTokenSecretManager(dtSecretManager);
  }
  if (handler instanceof PseudoAuthenticationHandler ||
      handler instanceof PseudoDelegationTokenAuthenticationHandler) {
    setHandlerAuthMethod(SaslRpcServer.AuthMethod.SIMPLE);
  }
  if (handler instanceof KerberosAuthenticationHandler ||
      handler instanceof KerberosDelegationTokenAuthenticationHandler) {
    setHandlerAuthMethod(SaslRpcServer.AuthMethod.KERBEROS);
  }

  // proxyuser configuration
  Configuration conf = getProxyuserConfiguration(filterConfig);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf, PROXYUSER_PREFIX);
}
Example 6
Source File: TestClientAccessPrivilege.java From big-c with Apache License 2.0
@BeforeClass
public static void setup() throws Exception {
  String currentUser = System.getProperty("user.name");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserGroupConfKey(currentUser), "*");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserIpConfKey(currentUser), "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);

  cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  nn = cluster.getNameNode();

  // Use ephemeral port in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(
      System.getProperty("user.name"));
}
Example 7
Source File: TestClientAccessPrivilege.java From hadoop with Apache License 2.0
@BeforeClass
public static void setup() throws Exception {
  String currentUser = System.getProperty("user.name");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserGroupConfKey(currentUser), "*");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserIpConfKey(currentUser), "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);

  cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  nn = cluster.getNameNode();

  // Use ephemeral port in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(
      System.getProperty("user.name"));
}
Example 8
Source File: TestNonExistentJob.java From big-c with Apache License 2.0
protected void setUp() throws Exception {
  super.setUp();
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "/tmp");
  }
  int taskTrackers = 2;
  int dataNodes = 2;
  String proxyUser = System.getProperty("user.name");
  String proxyGroup = "g";
  StringBuilder sb = new StringBuilder();
  sb.append("127.0.0.1,localhost");
  for (InetAddress i : InetAddress.getAllByName(
      InetAddress.getLocalHost().getHostName())) {
    sb.append(",").append(i.getCanonicalHostName());
  }

  JobConf conf = new JobConf();
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(dataNodes)
      .build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(
      new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
      new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
      new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  String nnURI = fileSystem.getUri().toString();
  int numDirs = 1;
  String[] racks = null;
  String[] hosts = null;
  mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks,
      hosts, null, conf);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
Example 9
Source File: HSAdminServer.java From big-c with Apache License 2.0
@Override
public void refreshSuperUserGroupsConfiguration() throws IOException {
  UserGroupInformation user = checkAcls("refreshSuperUserGroupsConfiguration");

  ProxyUsers.refreshSuperUserGroupsConfiguration(createConf());

  HSAuditLogger.logSuccess(user.getShortUserName(),
      "refreshSuperUserGroupsConfiguration", HISTORY_ADMIN_SERVER);
}
Example 10
Source File: TestAuditLogger.java From hadoop with Apache License 2.0
@Before
public void setup() {
  DummyAuditLogger.initialized = false;
  DummyAuditLogger.logCount = 0;
  DummyAuditLogger.remoteAddr = null;

  Configuration conf = new HdfsConfiguration();
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
Example 11
Source File: HadoopAuthFilter.java From knox with Apache License 2.0
@Override
public void init(FilterConfig filterConfig) throws ServletException {
  Configuration conf = getProxyuserConfiguration(filterConfig);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf, PROXYUSER_PREFIX);

  Collection<String> ignoredServices = null;

  // Look for GatewayConfig.PROXYUSER_SERVICES_IGNORE_DOAS value in the
  // filter context, which was created using the relevant topology file...
  String configValue =
      filterConfig.getInitParameter(GatewayConfig.PROXYUSER_SERVICES_IGNORE_DOAS);
  if (configValue != null) {
    configValue = configValue.trim();
    if (!configValue.isEmpty()) {
      ignoredServices =
          Arrays.asList(configValue.toLowerCase(Locale.ROOT).split("\\s*,\\s*"));
    }
  }

  // If not set in the topology, look for
  // GatewayConfig.PROXYUSER_SERVICES_IGNORE_DOAS in the gateway site context
  if (ignoredServices == null) {
    Object attributeValue = filterConfig.getServletContext()
        .getAttribute(GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE);
    if (attributeValue instanceof GatewayConfig) {
      ignoredServices = ((GatewayConfig) attributeValue).getServicesToIgnoreDoAs();
    }
  }

  if (ignoredServices != null) {
    ignoreDoAs.addAll(ignoredServices);
  }

  super.init(filterConfig);
}
Example 12
Source File: TestReaddir.java From big-c with Apache License 2.0
@BeforeClass
public static void setup() throws Exception {
  String currentUser = System.getProperty("user.name");
  config.set(
      DefaultImpersonationProvider.getTestProvider()
          .getProxySuperuserGroupConfKey(currentUser), "*");
  config.set(
      DefaultImpersonationProvider.getTestProvider()
          .getProxySuperuserIpConfKey(currentUser), "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);

  cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  nn = cluster.getNameNode();

  // Use ephemeral port in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  // Start nfs
  Nfs3 nfs3 = new Nfs3(config);
  nfs3.startServiceInternal(false);

  nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();

  securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(
      System.getProperty("user.name"));
}
Example 13
Source File: HBasePolicyProvider.java From hbase with Apache License 2.0
public static void init(Configuration conf,
    ServiceAuthorizationManager authManager) {
  // set service-level authorization security policy
  System.setProperty("hadoop.policy.file", "hbase-policy.xml");
  if (conf.getBoolean(
      ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    authManager.refresh(conf, new HBasePolicyProvider());
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  }
}
Example 14
Source File: TestNonExistentJob.java From hadoop with Apache License 2.0
protected void setUp() throws Exception {
  super.setUp();
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "/tmp");
  }
  int taskTrackers = 2;
  int dataNodes = 2;
  String proxyUser = System.getProperty("user.name");
  String proxyGroup = "g";
  StringBuilder sb = new StringBuilder();
  sb.append("127.0.0.1,localhost");
  for (InetAddress i : InetAddress.getAllByName(
      InetAddress.getLocalHost().getHostName())) {
    sb.append(",").append(i.getCanonicalHostName());
  }

  JobConf conf = new JobConf();
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(dataNodes)
      .build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(
      new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
      new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
      new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  String nnURI = fileSystem.getUri().toString();
  int numDirs = 1;
  String[] racks = null;
  String[] hosts = null;
  mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks,
      hosts, null, conf);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
Example 15
Source File: TestRpcProgramNfs3.java From big-c with Apache License 2.0
@BeforeClass
public static void setup() throws Exception {
  String currentUser = System.getProperty("user.name");

  config.set("fs.permissions.umask-mode", "u=rwx,g=,o=");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserGroupConfKey(currentUser), "*");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserIpConfKey(currentUser), "*");
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  config.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);

  cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  nn = cluster.getNameNode();
  dfsAdmin = new HdfsAdmin(cluster.getURI(), config);

  // Use ephemeral ports in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  // Start NFS with allowed.hosts set to "* rw"
  config.set("dfs.nfs.exports.allowed.hosts", "* rw");
  nfs = new Nfs3(config);
  nfs.startServiceInternal(false);
  nfsd = (RpcProgramNfs3) nfs.getRpcProgram();

  hdfs.getClient().setKeyProvider(nn.getNamesystem().getProvider());
  DFSTestUtil.createKey(TEST_KEY, cluster, config);

  // Mock SecurityHandler which returns system user.name
  securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(currentUser);

  // Mock SecurityHandler which returns a dummy username "harry"
  securityHandlerUnpriviledged = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandlerUnpriviledged.getUser()).thenReturn("harry");
}
Example 16
Source File: MiniDFSCluster.java From hadoop with Apache License 2.0
private void initMiniDFSCluster(
    Configuration conf,
    int numDataNodes, StorageType[][] storageTypes, boolean format,
    boolean manageNameDfsDirs, boolean manageNameDfsSharedDirs,
    boolean enableManagedDfsDirsRedundancy, boolean manageDataDfsDirs,
    StartupOption startOpt, StartupOption dnStartOpt, String[] racks,
    String[] hosts,
    long[][] storageCapacities, long[] simulatedCapacities, String clusterId,
    boolean waitSafeMode, boolean setupHostsFile,
    MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown,
    boolean checkDataNodeAddrConfig,
    boolean checkDataNodeHostConfig,
    Configuration[] dnConfOverlays,
    boolean skipFsyncForTesting)
    throws IOException {
  boolean success = false;
  try {
    ExitUtil.disableSystemExit();

    // Re-enable symlinks for tests, see HADOOP-10020 and HADOOP-10052
    FileSystem.enableSymlinks();

    synchronized (MiniDFSCluster.class) {
      instanceId = instanceCount++;
    }

    this.conf = conf;
    base_dir = new File(determineDfsBaseDir());
    data_dir = new File(base_dir, "data");
    this.waitSafeMode = waitSafeMode;
    this.checkExitOnShutdown = checkExitOnShutdown;

    int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
    conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
    int safemodeExtension = conf.getInt(
        DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
    conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
    conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
        StaticMapping.class, DNSToSwitchMapping.class);

    // In an HA cluster, in order for the StandbyNode to perform checkpoints,
    // it needs to know the HTTP port of the Active. So, if ephemeral ports
    // are chosen, disable checkpoints for the test.
    if (!nnTopology.allHttpPortsSpecified() && nnTopology.isHA()) {
      LOG.info("MiniDFSCluster disabling checkpointing in the Standby node " +
          "since no HTTP ports have been specified.");
      conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
    }
    if (!nnTopology.allIpcPortsSpecified() && nnTopology.isHA()) {
      LOG.info("MiniDFSCluster disabling log-roll triggering in the " +
          "Standby node since no IPC ports have been specified.");
      conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
    }

    EditLogFileOutputStream.setShouldSkipFsyncForTesting(skipFsyncForTesting);

    federation = nnTopology.isFederated();
    try {
      createNameNodesAndSetConf(
          nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
          enableManagedDfsDirsRedundancy,
          format, startOpt, clusterId, conf);
    } catch (IOException ioe) {
      LOG.error("IOE creating namenodes. Permissions dump:\n" +
          createPermissionsDiagnosisString(data_dir), ioe);
      throw ioe;
    }
    if (format) {
      if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
        throw new IOException("Cannot remove data directory: " + data_dir +
            createPermissionsDiagnosisString(data_dir));
      }
    }

    if (startOpt == StartupOption.RECOVER) {
      return;
    }

    // Start the DataNodes
    startDataNodes(conf, numDataNodes, storageTypes, manageDataDfsDirs,
        dnStartOpt != null ? dnStartOpt : startOpt,
        racks, hosts, storageCapacities, simulatedCapacities, setupHostsFile,
        checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
    waitClusterUp();
    // make sure ProxyUsers uses the latest conf
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
    success = true;
  } finally {
    if (!success) {
      shutdown();
    }
  }
}
Example 17
Source File: NameNodeRpcServer.java From hadoop with Apache License 2.0
@Override // RefreshAuthorizationPolicyProtocol
public void refreshSuperUserGroupsConfiguration() {
  LOG.info("Refreshing SuperUser proxy group mapping list ");

  ProxyUsers.refreshSuperUserGroupsConfiguration();
}
Example 18
Source File: TestWrites.java From big-c with Apache License 2.0
@Test
public void testOOOWrites() throws IOException, InterruptedException {
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = null;
  RpcProgramNfs3 nfsd;
  final int bufSize = 32;
  final int numOOO = 3;
  SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(
      System.getProperty("user.name"));
  String currentUser = System.getProperty("user.name");
  config.set(
      DefaultImpersonationProvider.getTestProvider()
          .getProxySuperuserGroupConfKey(currentUser), "*");
  config.set(
      DefaultImpersonationProvider.getTestProvider()
          .getProxySuperuserIpConfKey(currentUser), "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  // Use ephemeral port in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);
    nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();

    DFSClient dfsClient = new DFSClient(NameNode.getAddress(config), config);
    HdfsFileStatus status = dfsClient.getFileInfo("/");
    FileHandle rootHandle = new FileHandle(status.getFileId());

    CREATE3Request createReq = new CREATE3Request(rootHandle,
        "out-of-order-write" + System.currentTimeMillis(),
        Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
    XDR createXdr = new XDR();
    createReq.serialize(createXdr);
    CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
    FileHandle handle = createRsp.getObjHandle();

    byte[][] oooBuf = new byte[numOOO][bufSize];
    for (int i = 0; i < numOOO; i++) {
      Arrays.fill(oooBuf[i], (byte) i);
    }

    for (int i = 0; i < numOOO; i++) {
      final long offset = (numOOO - 1 - i) * bufSize;
      WRITE3Request writeReq = new WRITE3Request(handle, offset, bufSize,
          WriteStableHow.UNSTABLE, ByteBuffer.wrap(oooBuf[i]));
      XDR writeXdr = new XDR();
      writeReq.serialize(writeXdr);
      nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler,
          new InetSocketAddress("localhost", 1234));
    }

    waitWrite(nfsd, handle, 60000);

    READ3Request readReq = new READ3Request(handle, bufSize, bufSize);
    XDR readXdr = new XDR();
    readReq.serialize(readXdr);
    READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", config.getInt(
            NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
            NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
    assertTrue(Arrays.equals(oooBuf[1], readRsp.getData().array()));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 19
Source File: TestDoAsEffectiveUser.java From hadoop with Apache License 2.0
private void refreshConf(Configuration conf) throws IOException {
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
Example 20
Source File: ResourceManager.java From big-c with Apache License 2.0
@Override
protected void serviceInit(Configuration conf) throws Exception {
  this.conf = conf;
  this.rmContext = new RMContextImpl();

  this.configurationProvider =
      ConfigurationProviderFactory.getConfigurationProvider(conf);
  this.configurationProvider.init(this.conf);
  rmContext.setConfigurationProvider(configurationProvider);

  // load core-site.xml
  InputStream coreSiteXMLInputStream =
      this.configurationProvider.getConfigurationInputStream(this.conf,
          YarnConfiguration.CORE_SITE_CONFIGURATION_FILE);
  if (coreSiteXMLInputStream != null) {
    this.conf.addResource(coreSiteXMLInputStream);
  }

  // Do refreshUserToGroupsMappings with loaded core-site.xml
  Groups.getUserToGroupsMappingServiceWithLoadedConfiguration(this.conf)
      .refresh();

  // Do refreshSuperUserGroupsConfiguration with loaded core-site.xml
  // Or use RM specific configurations to overwrite the common ones first
  // if they exist
  RMServerUtils.processRMProxyUsersConf(conf);
  ProxyUsers.refreshSuperUserGroupsConfiguration(this.conf);

  // load yarn-site.xml
  InputStream yarnSiteXMLInputStream =
      this.configurationProvider.getConfigurationInputStream(this.conf,
          YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
  if (yarnSiteXMLInputStream != null) {
    this.conf.addResource(yarnSiteXMLInputStream);
  }

  validateConfigs(this.conf);

  // Set HA configuration should be done before login
  this.rmContext.setHAEnabled(HAUtil.isHAEnabled(this.conf));
  if (this.rmContext.isHAEnabled()) {
    HAUtil.verifyAndSetConfiguration(this.conf);
  }

  // Set UGI and do login
  // If security is enabled, use login user
  // If security is not enabled, use current user
  this.rmLoginUGI = UserGroupInformation.getCurrentUser();
  try {
    doSecureLogin();
  } catch (IOException ie) {
    throw new YarnRuntimeException("Failed to login", ie);
  }

  // register the handlers for all AlwaysOn services using setupDispatcher().
  rmDispatcher = setupDispatcher();
  addIfService(rmDispatcher);
  rmContext.setDispatcher(rmDispatcher);

  adminService = createAdminService();
  addService(adminService);
  rmContext.setRMAdminService(adminService);

  rmContext.setYarnConfiguration(conf);

  createAndInitActiveServices();

  webAppAddress = WebAppUtils.getWebAppBindURL(this.conf,
      YarnConfiguration.RM_BIND_HOST,
      WebAppUtils.getRMWebAppURLWithoutScheme(this.conf));

  super.serviceInit(this.conf);
}