Java Code Examples for org.apache.hadoop.hbase.security.UserProvider#instantiate()
The following examples show how to use
org.apache.hadoop.hbase.security.UserProvider#instantiate().
You can go to the original project or source file by following the link above each example.
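Before the individual examples, here is a minimal, self-contained sketch of the pattern most of them share: create a UserProvider from a Configuration, then query it for the current User and for the security settings. The UserProvider, User, and HBaseConfiguration calls are the real HBase API as used in the examples below; the UserProviderExample class itself and its printouts are hypothetical scaffolding for illustration.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;

// Illustrative harness (not taken from any of the projects below): shows the
// instantiate-then-query pattern that the examples on this page build on.
public class UserProviderExample {
  public static void main(String[] args) throws IOException {
    // Loads hbase-default.xml / hbase-site.xml from the classpath.
    Configuration conf = HBaseConfiguration.create();

    // instantiate() picks the UserProvider implementation from the
    // configuration, falling back to the default provider.
    UserProvider provider = UserProvider.instantiate(conf);

    // The current user, resolved via Hadoop's UserGroupInformation.
    User user = provider.getCurrent();
    System.out.println("Running as: " + user.getName());

    // Most callers branch on these checks before acquiring tokens or logging in.
    System.out.println("Hadoop security enabled: " + provider.isHadoopSecurityEnabled());
    System.out.println("HBase security enabled: " + provider.isHBaseSecurityEnabled());
  }
}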
Example 1
Source File: HBaseTablespace.java From tajo with Apache License 2.0
HConnectionKey(Configuration conf) {
  Map<String, String> m = new HashMap<>();
  if (conf != null) {
    for (String property : CONNECTION_PROPERTIES) {
      String value = conf.get(property);
      if (value != null) {
        m.put(property, value);
      }
    }
  }
  this.properties = Collections.unmodifiableMap(m);

  try {
    UserProvider provider = UserProvider.instantiate(conf);
    User currentUser = provider.getCurrent();
    if (currentUser != null) {
      username = currentUser.getName();
    }
  } catch (IOException ioe) {
    LOG.warn("Error obtaining current user, skipping username in HConnectionKey", ioe);
  }
}
Example 2
Source File: TableMapReduceUtil.java From hbase with Apache License 2.0
public static void initCredentials(JobConf job) throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job);
  if (userProvider.isHadoopSecurityEnabled()) {
    // propagate delegation related props from launcher job to MR job
    if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
      job.set("mapreduce.job.credentials.binary", System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
    }
  }

  if (userProvider.isHBaseSecurityEnabled()) {
    Connection conn = ConnectionFactory.createConnection(job);
    try {
      // login the server principal (if using secure Hadoop)
      User user = userProvider.getCurrent();
      TokenUtil.addTokenForJob(conn, job, user);
    } catch (InterruptedException ie) {
      LOG.error("Interrupted obtaining user authentication token", ie);
      Thread.currentThread().interrupt();
    } finally {
      conn.close();
    }
  }
}
Example 3
Source File: SnapshotScannerHDFSAclController.java From hbase with Apache License 2.0
@Override
public void preMasterInitialization(ObserverContext<MasterCoprocessorEnvironment> c)
    throws IOException {
  if (c.getEnvironment().getConfiguration()
      .getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false)) {
    MasterCoprocessorEnvironment mEnv = c.getEnvironment();
    if (!(mEnv instanceof HasMasterServices)) {
      throw new IOException("Does not implement HMasterServices");
    }
    masterServices = ((HasMasterServices) mEnv).getMasterServices();
    hdfsAclHelper = new SnapshotScannerHDFSAclHelper(masterServices.getConfiguration(),
      masterServices.getConnection());
    pathHelper = hdfsAclHelper.getPathHelper();
    hdfsAclHelper.setCommonDirectoryPermission();
    initialized = true;
    userProvider = UserProvider.instantiate(c.getEnvironment().getConfiguration());
  } else {
    LOG.warn("Try to initialize the coprocessor SnapshotScannerHDFSAclController but failure "
        + "because the config " + SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE
        + " is false.");
  }
}
Example 4
Source File: TestGetAndPutResource.java From hbase with Apache License 2.0
@Test
public void testMetrics() throws IOException {
  final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1;
  Response response = client.put(path, Constants.MIMETYPE_BINARY,
    Bytes.toBytes(VALUE_4));
  assertEquals(200, response.getCode());
  Thread.yield();
  response = client.get(path, Constants.MIMETYPE_JSON);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
  response = deleteRow(TABLE, ROW_4);
  assertEquals(200, response.getCode());

  UserProvider userProvider = UserProvider.instantiate(conf);
  METRICS_ASSERT.assertCounterGt("requests", 2L,
    RESTServlet.getInstance(conf, userProvider).getMetrics().getSource());
  METRICS_ASSERT.assertCounterGt("successfulGet", 0L,
    RESTServlet.getInstance(conf, userProvider).getMetrics().getSource());
  METRICS_ASSERT.assertCounterGt("successfulPut", 0L,
    RESTServlet.getInstance(conf, userProvider).getMetrics().getSource());
  METRICS_ASSERT.assertCounterGt("successfulDelete", 0L,
    RESTServlet.getInstance(conf, userProvider).getMetrics().getSource());
}
Example 5
Source File: TableMapReduceUtil.java From hbase with Apache License 2.0
/**
 * Obtain an authentication token, for the specified cluster, on behalf of the current user
 * and add it to the credentials for the given map reduce job.
 *
 * @param job The job that requires the permission.
 * @param conf The configuration to use in connecting to the peer cluster
 * @throws IOException When the authentication token cannot be obtained.
 */
public static void initCredentialsForCluster(Job job, Configuration conf)
    throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job.getConfiguration());
  if (userProvider.isHBaseSecurityEnabled()) {
    try {
      Connection peerConn = ConnectionFactory.createConnection(conf);
      try {
        TokenUtil.addTokenForJob(peerConn, userProvider.getCurrent(), job);
      } finally {
        peerConn.close();
      }
    } catch (InterruptedException e) {
      LOG.info("Interrupted obtaining user authentication token");
      Thread.interrupted();
    }
  }
}
Example 6
Source File: RpcServer.java From hbase with Apache License 2.0
/**
 * Constructs a server listening on the named port and address.
 * @param server hosting instance of {@link Server}. We will do authentications if an
 *   instance else pass null for no authentication check.
 * @param name Used keying this rpc servers' metrics and for naming the Listener thread.
 * @param services A list of services.
 * @param bindAddress Where to listen
 * @param conf
 * @param scheduler
 * @param reservoirEnabled Enable ByteBufferPool or not.
 */
public RpcServer(final Server server, final String name,
    final List<BlockingServiceAndInterface> services,
    final InetSocketAddress bindAddress, Configuration conf,
    RpcScheduler scheduler, boolean reservoirEnabled) throws IOException {
  this.bbAllocator = ByteBuffAllocator.create(conf, reservoirEnabled);
  this.server = server;
  this.services = services;
  this.bindAddress = bindAddress;
  this.conf = conf;
  // See declaration above for documentation on what this size is.
  this.maxQueueSizeInBytes =
    this.conf.getLong("hbase.ipc.server.max.callqueue.size", DEFAULT_MAX_CALLQUEUE_SIZE);

  this.warnResponseTime = conf.getInt(WARN_RESPONSE_TIME, DEFAULT_WARN_RESPONSE_TIME);
  this.warnResponseSize = conf.getInt(WARN_RESPONSE_SIZE, DEFAULT_WARN_RESPONSE_SIZE);
  this.minClientRequestTimeout = conf.getInt(MIN_CLIENT_REQUEST_TIMEOUT,
    DEFAULT_MIN_CLIENT_REQUEST_TIMEOUT);
  this.maxRequestSize = conf.getInt(MAX_REQUEST_SIZE, DEFAULT_MAX_REQUEST_SIZE);

  this.metrics = new MetricsHBaseServer(name, new MetricsHBaseServerWrapperImpl(this));
  this.tcpNoDelay = conf.getBoolean("hbase.ipc.server.tcpnodelay", true);
  this.tcpKeepAlive = conf.getBoolean("hbase.ipc.server.tcpkeepalive", true);

  this.cellBlockBuilder = new CellBlockBuilder(conf);

  this.authorize = conf.getBoolean(HADOOP_SECURITY_AUTHORIZATION, false);
  this.userProvider = UserProvider.instantiate(conf);
  this.isSecurityEnabled = userProvider.isHBaseSecurityEnabled();
  if (isSecurityEnabled) {
    saslProps = SaslUtil.initSaslProperties(conf.get("hbase.rpc.protection",
      QualityOfProtection.AUTHENTICATION.name().toLowerCase(Locale.ROOT)));
  } else {
    saslProps = Collections.emptyMap();
  }

  this.scheduler = scheduler;
}
Example 7
Source File: HFileReplicator.java From hbase with Apache License 2.0
public HFileReplicator(Configuration sourceClusterConf,
    String sourceBaseNamespaceDirPath, String sourceHFileArchiveDirPath,
    Map<String, List<Pair<byte[], List<String>>>> tableQueueMap, Configuration conf,
    AsyncClusterConnection connection, List<String> sourceClusterIds) throws IOException {
  this.sourceClusterConf = sourceClusterConf;
  this.sourceBaseNamespaceDirPath = sourceBaseNamespaceDirPath;
  this.sourceHFileArchiveDirPath = sourceHFileArchiveDirPath;
  this.bulkLoadHFileMap = tableQueueMap;
  this.conf = conf;
  this.connection = connection;
  this.sourceClusterIds = sourceClusterIds;

  userProvider = UserProvider.instantiate(conf);
  fsDelegationToken = new FsDelegationToken(userProvider, "renewer");
  this.hbaseStagingDir =
    new Path(CommonFSUtils.getRootDir(conf), HConstants.BULKLOAD_STAGING_DIR_NAME);
  this.maxCopyThreads = this.conf.getInt(REPLICATION_BULKLOAD_COPY_MAXTHREADS_KEY,
    REPLICATION_BULKLOAD_COPY_MAXTHREADS_DEFAULT);
  this.exec = Threads.getBoundedCachedThreadPool(maxCopyThreads, 60, TimeUnit.SECONDS,
    new ThreadFactoryBuilder().setDaemon(true)
      .setNameFormat("HFileReplicationCopier-%1$d-" + this.sourceBaseNamespaceDirPath)
      .build());
  this.copiesPerThread = conf.getInt(REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_KEY,
    REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_DEFAULT);

  sinkFs = FileSystem.get(conf);
}
Example 8
Source File: TestThriftServer.java From hbase with Apache License 2.0
@Test
public void testGetThriftServerType() throws Exception {
  ThriftHBaseServiceHandler handler =
    new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
      UserProvider.instantiate(UTIL.getConfiguration()));
  assertEquals(TThriftServerType.ONE, handler.getThriftServerType());
}
Example 9
Source File: BulkLoadHFilesTool.java From hbase with Apache License 2.0
public BulkLoadHFilesTool(Configuration conf) {
  // make a copy, just to be sure we're not overriding someone else's config
  super(new Configuration(conf));
  // disable blockcache for tool invocation, see HBASE-10500
  conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0);
  userProvider = UserProvider.instantiate(conf);
  fsDelegationToken = new FsDelegationToken(userProvider, "renewer");
  assignSeqIds = conf.getBoolean(ASSIGN_SEQ_IDS, true);
  maxFilesPerRegionPerFamily = conf.getInt(MAX_FILES_PER_REGION_PER_FAMILY, 32);
  nrThreads = conf.getInt("hbase.loadincremental.threads.max",
    Runtime.getRuntime().availableProcessors());
  bulkLoadByFamily = conf.getBoolean(BULK_LOAD_HFILES_BY_FAMILY, false);
}
Example 10
Source File: PhoenixAccessController.java From phoenix with Apache License 2.0
@Override
public void start(CoprocessorEnvironment env) throws IOException {
  Configuration conf = env.getConfiguration();
  this.accessCheckEnabled = conf.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED,
    QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED);
  if (!this.accessCheckEnabled) {
    LOGGER.warn("PhoenixAccessController has been loaded with authorization checks disabled.");
  }
  this.execPermissionsCheckEnabled =
    conf.getBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY,
      AccessControlConstants.DEFAULT_EXEC_PERMISSION_CHECKS);
  if (env instanceof PhoenixMetaDataControllerEnvironment) {
    this.env = (PhoenixMetaDataControllerEnvironment) env;
  } else {
    throw new IllegalArgumentException(
      "Not a valid environment, should be loaded by PhoenixMetaDataControllerEnvironment");
  }

  ZKWatcher zk = null;
  RegionCoprocessorEnvironment regionEnv = this.env.getRegionCoprocessorEnvironment();
  if (regionEnv instanceof HasRegionServerServices) {
    zk = ((HasRegionServerServices) regionEnv).getRegionServerServices().getZooKeeper();
  }
  accessChecker = new AccessChecker(env.getConfiguration(), zk);
  // set the user-provider.
  this.userProvider = UserProvider.instantiate(env.getConfiguration());
  // init superusers and add the server principal (if using security)
  // or process owner as default super user.
  Superusers.initialize(env.getConfiguration());
}
Example 11
Source File: TestThriftServer.java From hbase with Apache License 2.0
/**
 * Tests adding a series of Mutations and BatchMutations, including a
 * delete mutation. Also tests data retrieval, and getting back multiple
 * versions.
 */
public void doTestTableMutations() throws Exception {
  ThriftHBaseServiceHandler handler =
    new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
      UserProvider.instantiate(UTIL.getConfiguration()));
  doTestTableMutations(handler);
}
Example 12
Source File: TestThriftServer.java From hbase with Apache License 2.0
/**
 * Tests for creating, enabling, disabling, and deleting tables. Also
 * tests that creating a table with an invalid column name yields an
 * IllegalArgument exception.
 */
public void doTestTableCreateDrop() throws Exception {
  ThriftHBaseServiceHandler handler =
    new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
      UserProvider.instantiate(UTIL.getConfiguration()));
  doTestTableCreateDrop(handler);
}
Example 13
Source File: AuthUtil.java From hbase with Apache License 2.0
/**
 * For kerberized cluster, return login user (from kinit or from keytab if specified).
 * For non-kerberized cluster, return system user.
 * @param conf configuration file
 * @return user
 * @throws IOException login exception
 */
@InterfaceAudience.Private
public static User loginClient(Configuration conf) throws IOException {
  UserProvider provider = UserProvider.instantiate(conf);
  User user = provider.getCurrent();
  boolean securityOn = provider.isHBaseSecurityEnabled() && provider.isHadoopSecurityEnabled();

  if (securityOn) {
    boolean fromKeytab = provider.shouldLoginFromKeytab();
    if (user.getUGI().hasKerberosCredentials()) {
      // There's already a login user.
      // But we should avoid misuse credentials which is a dangerous security issue,
      // so here check whether user specified a keytab and a principal:
      // 1. Yes, check if user principal match.
      //    a. match, just return.
      //    b. mismatch, login using keytab.
      // 2. No, user may login through kinit, this is the old way, also just return.
      if (fromKeytab) {
        return checkPrincipalMatch(conf, user.getUGI().getUserName()) ? user
            : loginFromKeytabAndReturnUser(provider);
      }
      return user;
    } else if (fromKeytab) {
      // Kerberos is on and client specify a keytab and principal, but client doesn't login yet.
      return loginFromKeytabAndReturnUser(provider);
    }
  }
  return user;
}
Example 14
Source File: SecureBulkLoadManager.java From hbase with Apache License 2.0
public void start() throws IOException {
  random = new SecureRandom();
  userProvider = UserProvider.instantiate(conf);
  ugiReferenceCounter = new ConcurrentHashMap<>();
  fs = FileSystem.get(conf);
  baseStagingDir = new Path(CommonFSUtils.getRootDir(conf), HConstants.BULKLOAD_STAGING_DIR_NAME);

  if (conf.get("hbase.bulkload.staging.dir") != null) {
    LOG.warn("hbase.bulkload.staging.dir " + " is deprecated. Bulkload staging directory is "
        + baseStagingDir);
  }
  if (!fs.exists(baseStagingDir)) {
    fs.mkdirs(baseStagingDir, PERM_HIDDEN);
  }
}
Example 15
Source File: TestRSGroupsWithACL.java From hbase with Apache License 2.0
@BeforeClass
public static void setupBeforeClass() throws Exception {
  // setup configuration
  conf = TEST_UTIL.getConfiguration();
  // Enable security
  enableSecurity(conf);
  // Verify enableSecurity sets up what we require
  verifyConfiguration(conf);
  // Enable rsgroup
  RSGroupUtil.enableRSGroup(conf);

  TEST_UTIL.startMiniCluster();
  // Wait for the ACL table to become available
  TEST_UTIL.waitUntilAllRegionsAssigned(PermissionStorage.ACL_TABLE_NAME);

  // create a set of test users
  SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" });
  USER_ADMIN = User.createUserForTesting(conf, "admin2", new String[0]);
  USER_RW = User.createUserForTesting(conf, "rwuser", new String[0]);
  USER_RO = User.createUserForTesting(conf, "rouser", new String[0]);
  USER_OWNER = User.createUserForTesting(conf, "owner", new String[0]);
  USER_CREATE = User.createUserForTesting(conf, "tbl_create", new String[0]);
  USER_NONE = User.createUserForTesting(conf, "nouser", new String[0]);
  USER_GROUP_ADMIN =
    User.createUserForTesting(conf, "user_group_admin", new String[] { GROUP_ADMIN });
  USER_GROUP_CREATE =
    User.createUserForTesting(conf, "user_group_create", new String[] { GROUP_CREATE });
  USER_GROUP_READ =
    User.createUserForTesting(conf, "user_group_read", new String[] { GROUP_READ });
  USER_GROUP_WRITE =
    User.createUserForTesting(conf, "user_group_write", new String[] { GROUP_WRITE });

  systemUserConnection = TEST_UTIL.getConnection();
  setUpTableAndUserPermissions();
  master = TEST_UTIL.getHBaseCluster().getMaster();
  accessChecker = master.getAccessChecker();
  userProvider = UserProvider.instantiate(TEST_UTIL.getConfiguration());
}
Example 16
Source File: HbaseRestLocalCluster.java From hadoop-mini-clusters with Apache License 2.0
@Override
public void start() throws Exception {
  VersionInfo.logVersion();
  Configuration conf = builder.getHbaseConfiguration();
  conf.set("hbase.rest.port", hbaseRestPort.toString());
  conf.set("hbase.rest.readonly", (hbaseRestReadOnly == null) ? "true" : hbaseRestReadOnly.toString());
  conf.set("hbase.rest.info.port", (hbaseRestInfoPort == null) ? "8085" : hbaseRestInfoPort.toString());
  String hbaseRestHost = (this.hbaseRestHost == null) ? "0.0.0.0" : this.hbaseRestHost;

  Integer hbaseRestThreadMax = (this.hbaseRestThreadMax == null) ? 100 : this.hbaseRestThreadMax;
  Integer hbaseRestThreadMin = (this.hbaseRestThreadMin == null) ? 2 : this.hbaseRestThreadMin;

  UserProvider userProvider = UserProvider.instantiate(conf);
  Pair<FilterHolder, Class<? extends ServletContainer>> pair =
    loginServerPrincipal(userProvider, conf);
  FilterHolder authFilter = pair.getFirst();
  Class<? extends ServletContainer> containerClass = pair.getSecond();
  RESTServlet.getInstance(conf, userProvider);

  // set up the Jersey servlet container for Jetty
  ServletHolder sh = new ServletHolder(containerClass);
  sh.setInitParameter("com.sun.jersey.config.property.resourceConfigClass",
    ResourceConfig.class.getCanonicalName());
  sh.setInitParameter("com.sun.jersey.config.property.packages", "jetty");
  ServletHolder shPojoMap = new ServletHolder(containerClass);
  Map<String, String> shInitMap = sh.getInitParameters();
  for (Map.Entry<String, String> e : shInitMap.entrySet()) {
    shPojoMap.setInitParameter(e.getKey(), e.getValue());
  }
  shPojoMap.setInitParameter(JSONConfiguration.FEATURE_POJO_MAPPING, "true");

  // set up Jetty and run the embedded server
  server = new Server();
  Connector connector = new SelectChannelConnector();
  if (conf.getBoolean(RESTServer.REST_SSL_ENABLED, false)) {
    SslSelectChannelConnector sslConnector = new SslSelectChannelConnector();
    String keystore = conf.get(RESTServer.REST_SSL_KEYSTORE_STORE);
    String password = HBaseConfiguration.getPassword(conf,
      RESTServer.REST_SSL_KEYSTORE_PASSWORD, null);
    String keyPassword = HBaseConfiguration.getPassword(conf,
      RESTServer.REST_SSL_KEYSTORE_KEYPASSWORD, password);
    sslConnector.setKeystore(keystore);
    sslConnector.setPassword(password);
    sslConnector.setKeyPassword(keyPassword);
    connector = sslConnector;
  }
  connector.setPort(hbaseRestPort);
  connector.setHost(hbaseRestHost);
  connector.setHeaderBufferSize(8192);

  server.addConnector(connector);

  QueuedThreadPool threadPool = new QueuedThreadPool(hbaseRestThreadMax);
  threadPool.setMinThreads(hbaseRestThreadMin);
  server.setThreadPool(threadPool);

  server.setSendServerVersion(false);
  server.setSendDateHeader(false);
  server.setStopAtShutdown(true);

  // set up context
  Context context = new Context(server, "/", Context.SESSIONS);
  context.addServlet(shPojoMap, "/status/cluster");
  context.addServlet(sh, "/*");
  if (authFilter != null) {
    context.addFilter(authFilter, "/*", 1);
  }
  HttpServerUtil.constrainHttpMethods(context);

  // Put up info server.
  int port = (hbaseRestInfoPort == null) ? 8085 : hbaseRestInfoPort;
  if (port >= 0) {
    conf.setLong("startcode", System.currentTimeMillis());
    String a = hbaseRestHost;
    infoServer = new InfoServer("rest", a, port, false, conf);
    infoServer.setAttribute("hbase.conf", conf);
    infoServer.start();
  }

  // start server
  server.start();
}
Example 17
Source File: TestThriftHBaseServiceHandler.java From hbase with Apache License 2.0
/**
 * Tests keeping a HBase scanner alive for long periods of time. Each call to getScannerRow()
 * should reset the ConnectionCache timeout for the scanner's connection.
 */
@org.junit.Ignore @Test // Flaky. Disabled by HBASE-24079. Re-enable with HBASE-24083.
// Caused by: java.util.concurrent.RejectedExecutionException:
//   Task org.apache.hadoop.hbase.client.ResultBoundedCompletionService$QueueingFuture@e385431
//   rejected from java.util.concurrent.ThreadPoolExecutor@52b027d[Terminated, pool size = 0,
//   active threads = 0, queued tasks = 0, completed tasks = 1]
//   at org.apache.hadoop.hbase.thrift2.TestThriftHBaseServiceHandler.
//   testLongLivedScan(TestThriftHBaseServiceHandler.java:804)
public void testLongLivedScan() throws Exception {
  int numTrials = 6;
  int trialPause = 1000;
  int cleanUpInterval = 100;
  Configuration conf = new Configuration(UTIL.getConfiguration());
  // Set the ConnectionCache timeout to trigger halfway through the trials
  conf.setInt(MAX_IDLETIME, (numTrials / 2) * trialPause);
  conf.setInt(CLEANUP_INTERVAL, cleanUpInterval);
  ThriftHBaseServiceHandler handler =
    new ThriftHBaseServiceHandler(conf, UserProvider.instantiate(conf));

  ByteBuffer table = wrap(tableAname);
  // insert data
  TColumnValue columnValue = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
    wrap(valueAname));
  List<TColumnValue> columnValues = new ArrayList<>(1);
  columnValues.add(columnValue);
  for (int i = 0; i < numTrials; i++) {
    TPut put = new TPut(wrap(Bytes.toBytes("testScan" + i)), columnValues);
    handler.put(table, put);
  }

  // create scan instance
  TScan scan = new TScan();
  List<TColumn> columns = new ArrayList<>(1);
  TColumn column = new TColumn();
  column.setFamily(familyAname);
  column.setQualifier(qualifierAname);
  columns.add(column);
  scan.setColumns(columns);
  scan.setStartRow(Bytes.toBytes("testScan"));
  scan.setStopRow(Bytes.toBytes("testScan\uffff"));
  // Prevent the scanner from caching results
  scan.setCaching(1);

  // get scanner and rows
  int scanId = handler.openScanner(table, scan);
  for (int i = 0; i < numTrials; i++) {
    // Make sure that the Scanner doesn't throw an exception after the ConnectionCache timeout
    List<TResult> results = handler.getScannerRows(scanId, 1);
    assertArrayEquals(Bytes.toBytes("testScan" + i), results.get(0).getRow());
    Thread.sleep(trialPause);
  }
}
Example 18
Source File: MetricsUserAggregateImpl.java From hbase with Apache License 2.0
public MetricsUserAggregateImpl(Configuration conf) {
  source = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)
    .getUserAggregate();
  userMetricLossyCounting = new LossyCounting<>("userMetrics", conf, source::deregister);
  this.userProvider = UserProvider.instantiate(conf);
}
Example 19
Source File: ThriftServer.java From hbase with Apache License 2.0
protected void setupParamters() throws IOException {
  // login the server principal (if using secure Hadoop)
  UserProvider userProvider = UserProvider.instantiate(conf);
  securityEnabled =
    userProvider.isHadoopSecurityEnabled() && userProvider.isHBaseSecurityEnabled();
  if (securityEnabled) {
    host = Strings.domainNamePointerToHostName(
      DNS.getDefaultHost(conf.get(THRIFT_DNS_INTERFACE_KEY, "default"),
        conf.get(THRIFT_DNS_NAMESERVER_KEY, "default")));
    userProvider.login(THRIFT_KEYTAB_FILE_KEY, THRIFT_KERBEROS_PRINCIPAL_KEY, host);

    // Setup the SPNEGO user for HTTP if configured
    String spnegoPrincipal = getSpengoPrincipal(conf, host);
    String spnegoKeytab = getSpnegoKeytab(conf);
    UserGroupInformation.setConfiguration(conf);
    // login the SPNEGO principal using UGI to avoid polluting the login user
    this.httpUGI =
      UserGroupInformation.loginUserFromKeytabAndReturnUGI(spnegoPrincipal, spnegoKeytab);
  }
  this.serviceUGI = userProvider.getCurrent().getUGI();
  if (httpUGI == null) {
    this.httpUGI = serviceUGI;
  }

  this.listenPort = conf.getInt(PORT_CONF_KEY, DEFAULT_LISTEN_PORT);
  this.metrics = createThriftMetrics(conf);
  this.pauseMonitor = new JvmPauseMonitor(conf, this.metrics.getSource());
  this.hbaseServiceHandler = createHandler(conf, userProvider);
  this.hbaseServiceHandler.initMetrics(metrics);
  this.processor = createProcessor();

  httpEnabled = conf.getBoolean(USE_HTTP_CONF_KEY, false);
  doAsEnabled = conf.getBoolean(THRIFT_SUPPORT_PROXYUSER_KEY, false);
  if (doAsEnabled && !httpEnabled) {
    LOG.warn("Fail to enable the doAs feature. " + USE_HTTP_CONF_KEY + " is not configured");
  }

  String strQop = conf.get(THRIFT_QOP_KEY);
  if (strQop != null) {
    this.qop = SaslUtil.getQop(strQop);
  }
  if (qop != null) {
    if (qop != SaslUtil.QualityOfProtection.AUTHENTICATION &&
        qop != SaslUtil.QualityOfProtection.INTEGRITY &&
        qop != SaslUtil.QualityOfProtection.PRIVACY) {
      throw new IOException(String.format("Invalid %s: It must be one of %s, %s, or %s.",
        THRIFT_QOP_KEY,
        SaslUtil.QualityOfProtection.AUTHENTICATION.name(),
        SaslUtil.QualityOfProtection.INTEGRITY.name(),
        SaslUtil.QualityOfProtection.PRIVACY.name()));
    }
    checkHttpSecurity(qop, conf);
    if (!securityEnabled) {
      throw new IOException("Thrift server must run in secure mode to support authentication");
    }
  }
  registerFilters(conf);
  pauseMonitor.start();
}
Example 20
Source File: RangerAuthorizationCoprocessor.java From ranger with Apache License 2.0
@Override
public void start(CoprocessorEnvironment env) throws IOException {
  String appType = "unknown";

  shouldCheckExecPermission = env.getConfiguration().getBoolean(
    AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY,
    AccessControlConstants.DEFAULT_EXEC_PERMISSION_CHECKS);
  if (env instanceof MasterCoprocessorEnvironment) {
    coprocessorType = MASTER_COPROCESSOR_TYPE;
    appType = "hbaseMaster";
  } else if (env instanceof RegionServerCoprocessorEnvironment) {
    coprocessorType = REGIONAL_SERVER_COPROCESSOR_TYPE;
    appType = "hbaseRegional";
  } else if (env instanceof RegionCoprocessorEnvironment) {
    regionEnv = (RegionCoprocessorEnvironment) env;
    coprocessorType = REGIONAL_COPROCESSOR_TYPE;
    appType = "hbaseRegional";
  }
  this.userProvider = UserProvider.instantiate(env.getConfiguration());

  Configuration conf = env.getConfiguration();
  HbaseFactory.initialize(conf);

  // create and initialize the plugin class
  RangerHBasePlugin plugin = hbasePlugin;
  if (plugin == null) {
    synchronized (RangerAuthorizationCoprocessor.class) {
      plugin = hbasePlugin;
      if (plugin == null) {
        plugin = new RangerHBasePlugin(appType);
        plugin.init();
        UpdateRangerPoliciesOnGrantRevoke = plugin.getConfig().getBoolean(
          RangerHadoopConstants.HBASE_UPDATE_RANGER_POLICIES_ON_GRANT_REVOKE_PROP,
          RangerHadoopConstants.HBASE_UPDATE_RANGER_POLICIES_ON_GRANT_REVOKE_DEFAULT_VALUE);
        hbasePlugin = plugin;
      }
    }
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Start of Coprocessor: [" + coprocessorType + "]");
  }
}