org.apache.hadoop.hbase.security.UserProvider Java Examples

The following examples show how to use org.apache.hadoop.hbase.security.UserProvider. Each example notes the source file and project it was taken from.
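As a quick orientation before the examples, here is a minimal sketch of the UserProvider pattern that recurs throughout them. This is a hedged sketch, not taken from any single example below; the HBaseConfiguration.create() bootstrap is an assumption.

// Minimal sketch of the common UserProvider lifecycle (assumed configuration bootstrap).
Configuration conf = HBaseConfiguration.create();
UserProvider provider = UserProvider.instantiate(conf);
User current = provider.getCurrent();            // the user this process runs as
if (provider.isHBaseSecurityEnabled()) {
  // Kerberos/token-based paths, as in the MapReduce and bulk-load examples below
}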
Example #1
Source File: TestSecureBulkLoadHFiles.java    From hbase with Apache License 2.0
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // set the always on security provider
  UserProvider.setUserProviderForTesting(util.getConfiguration(),
    HadoopSecurityEnabledUserProviderForTesting.class);
  // setup configuration
  SecureTestUtil.enableSecurity(util.getConfiguration());
  util.getConfiguration().setInt(BulkLoadHFiles.MAX_FILES_PER_REGION_PER_FAMILY,
    MAX_FILES_PER_REGION_PER_FAMILY);
  // change default behavior so that tag values are returned with normal rpcs
  util.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
    KeyValueCodecWithTags.class.getCanonicalName());

  util.startMiniCluster();

  // Wait for the ACL table to become available
  util.waitTableEnabled(PermissionStorage.ACL_TABLE_NAME);

  setupNamespace();
}
 
Example #2
Source File: TestMobSecureExportSnapshot.java    From hbase with Apache License 2.0
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  setUpBaseConf(TEST_UTIL.getConfiguration());
  // Setup separate test-data directory for MR cluster and set corresponding configurations.
  // Otherwise, different test classes running MR cluster can step on each other.
  TEST_UTIL.getDataTestDir();

  // set the always on security provider
  UserProvider.setUserProviderForTesting(TEST_UTIL.getConfiguration(),
    HadoopSecurityEnabledUserProviderForTesting.class);

  // setup configuration
  SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration());

  TEST_UTIL.startMiniCluster(3);
  TEST_UTIL.startMiniMapReduceCluster();

  // Wait for the ACL table to become available
  TEST_UTIL.waitTableEnabled(PermissionStorage.ACL_TABLE_NAME);
}
 
Example #3
Source File: TestSecureExportSnapshot.java    From hbase with Apache License 2.0
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  setUpBaseConf(TEST_UTIL.getConfiguration());
  // Setup separate test-data directory for MR cluster and set corresponding configurations.
  // Otherwise, different test classes running MR cluster can step on each other.
  TEST_UTIL.getDataTestDir();

  // set the always on security provider
  UserProvider.setUserProviderForTesting(TEST_UTIL.getConfiguration(),
    HadoopSecurityEnabledUserProviderForTesting.class);

  // setup configuration
  SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration());

  TEST_UTIL.startMiniCluster(3);
  TEST_UTIL.startMiniMapReduceCluster();

  // Wait for the ACL table to become available
  TEST_UTIL.waitTableEnabled(PermissionStorage.ACL_TABLE_NAME);
}
 
Example #4
Source File: TestThriftServer.java    From hbase with Apache License 2.0
/**
 * Check that checkAndPut fails if the cell does not exist, then put in the cell, then check that
 * the checkAndPut succeeds.
 */
public static void doTestCheckAndPut() throws Exception {
  ThriftHBaseServiceHandler handler =
    new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
      UserProvider.instantiate(UTIL.getConfiguration()));
  handler.createTable(tableAname, getColumnDescriptors());
  try {
    List<Mutation> mutations = new ArrayList<>(1);
    mutations.add(new Mutation(false, columnAname, valueAname, true));
    Mutation putB = new Mutation(false, columnBname, valueBname, true);

    assertFalse(handler.checkAndPut(tableAname, rowAname, columnAname, valueAname, putB, null));

    handler.mutateRow(tableAname, rowAname, mutations, null);

    assertTrue(handler.checkAndPut(tableAname, rowAname, columnAname, valueAname, putB, null));

    TRowResult rowResult = handler.getRow(tableAname, rowAname, null).get(0);
    assertEquals(rowAname, rowResult.row);
    assertEquals(valueBname, rowResult.columns.get(columnBname).value);
  } finally {
    handler.disableTable(tableAname);
    handler.deleteTable(tableAname);
  }
}
 
Example #5
Source File: TestSecureExport.java    From hbase with Apache License 2.0
/**
 * Sets up security first so that the correct default realm is used.
 */
@BeforeClass
public static void beforeClass() throws Exception {
  UserProvider.setUserProviderForTesting(UTIL.getConfiguration(),
      HadoopSecurityEnabledUserProviderForTesting.class);
  setUpKdcServer();
  SecureTestUtil.enableSecurity(UTIL.getConfiguration());
  UTIL.getConfiguration().setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true);
  VisibilityTestUtil.enableVisiblityLabels(UTIL.getConfiguration());
  SecureTestUtil.verifyConfiguration(UTIL.getConfiguration());
  setUpClusterKdc();
  UTIL.startMiniCluster();
  UTIL.waitUntilAllRegionsAssigned(PermissionStorage.ACL_TABLE_NAME);
  UTIL.waitUntilAllRegionsAssigned(VisibilityConstants.LABELS_TABLE_NAME);
  UTIL.waitTableEnabled(PermissionStorage.ACL_TABLE_NAME, 50000);
  UTIL.waitTableEnabled(VisibilityConstants.LABELS_TABLE_NAME, 50000);
  SecureTestUtil.grantGlobal(UTIL, USER_ADMIN,
          Permission.Action.ADMIN,
          Permission.Action.CREATE,
          Permission.Action.EXEC,
          Permission.Action.READ,
          Permission.Action.WRITE);
  addLabels(UTIL.getConfiguration(), Arrays.asList(USER_OWNER),
          Arrays.asList(PRIVATE, CONFIDENTIAL, SECRET, TOPSECRET));
}
 
Example #6
Source File: TableMapReduceUtil.java    From hbase with Apache License 2.0
/**
 * Obtain an authentication token, for the specified cluster, on behalf of the current user
 * and add it to the credentials for the given map reduce job.
 *
 * @param job The job that requires the permission.
 * @param conf The configuration to use in connecting to the peer cluster
 * @throws IOException When the authentication token cannot be obtained.
 */
public static void initCredentialsForCluster(Job job, Configuration conf)
    throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job.getConfiguration());
  if (userProvider.isHBaseSecurityEnabled()) {
    try {
      Connection peerConn = ConnectionFactory.createConnection(conf);
      try {
        TokenUtil.addTokenForJob(peerConn, userProvider.getCurrent(), job);
      } finally {
        peerConn.close();
      }
    } catch (InterruptedException e) {
      LOG.info("Interrupted obtaining user authentication token");
      // Restore the interrupt status; Thread.interrupted() would silently clear it
      Thread.currentThread().interrupt();
    }
  }
}
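A caller would typically build the peer cluster configuration first and then pass it in. A hedged sketch follows; the quorum string is a placeholder assumption, not taken from the example above.

// Hypothetical invocation: attach a delegation token for a remote peer cluster to the job.
Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(),
    "peer-zk1,peer-zk2,peer-zk3:2181:/hbase"); // assumed peer quorum address
TableMapReduceUtil.initCredentialsForCluster(job, peerConf);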
 
Example #7
Source File: SnapshotScannerHDFSAclController.java    From hbase with Apache License 2.0
@Override
public void preMasterInitialization(ObserverContext<MasterCoprocessorEnvironment> c)
    throws IOException {
  if (c.getEnvironment().getConfiguration()
      .getBoolean(SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE, false)) {
    MasterCoprocessorEnvironment mEnv = c.getEnvironment();
    if (!(mEnv instanceof HasMasterServices)) {
      throw new IOException("Does not implement HMasterServices");
    }
    masterServices = ((HasMasterServices) mEnv).getMasterServices();
    hdfsAclHelper = new SnapshotScannerHDFSAclHelper(masterServices.getConfiguration(),
        masterServices.getConnection());
    pathHelper = hdfsAclHelper.getPathHelper();
    hdfsAclHelper.setCommonDirectoryPermission();
    initialized = true;
    userProvider = UserProvider.instantiate(c.getEnvironment().getConfiguration());
  } else {
    LOG.warn("Try to initialize the coprocessor SnapshotScannerHDFSAclController but failure "
        + "because the config " + SnapshotScannerHDFSAclHelper.ACL_SYNC_TO_HDFS_ENABLE
        + " is false.");
  }
}
 
Example #8
Source File: HBaseFsck.java    From hbase with Apache License 2.0
private void preCheckPermission() throws IOException {
  if (shouldIgnorePreCheckPermission()) {
    return;
  }

  Path hbaseDir = CommonFSUtils.getRootDir(getConf());
  FileSystem fs = hbaseDir.getFileSystem(getConf());
  UserProvider userProvider = UserProvider.instantiate(getConf());
  UserGroupInformation ugi = userProvider.getCurrent().getUGI();
  FileStatus[] files = fs.listStatus(hbaseDir);
  for (FileStatus file : files) {
    try {
      fs.access(file.getPath(), FsAction.WRITE);
    } catch (AccessControlException ace) {
      LOG.warn("Got AccessDeniedException when preCheckPermission ", ace);
      errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName()
        + " does not have write perms to " + file.getPath()
        + ". Please rerun hbck as hdfs user " + file.getOwner());
      throw ace;
    }
  }
}
 
Example #9
Source File: TableMapReduceUtil.java    From hbase with Apache License 2.0
public static void initCredentials(JobConf job) throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job);
  if (userProvider.isHadoopSecurityEnabled()) {
    // propagate delegation related props from launcher job to MR job
    if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
      job.set("mapreduce.job.credentials.binary", System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
    }
  }

  if (userProvider.isHBaseSecurityEnabled()) {
    Connection conn = ConnectionFactory.createConnection(job);
    try {
      // login the server principal (if using secure Hadoop)
      User user = userProvider.getCurrent();
      TokenUtil.addTokenForJob(conn, job, user);
    } catch (InterruptedException ie) {
      LOG.error("Interrupted obtaining user authentication token", ie);
      Thread.currentThread().interrupt();
    } finally {
      conn.close();
    }
  }
}
 
Example #10
Source File: RESTServlet.java    From hbase with Apache License 2.0
/**
 * Constructor with existing configuration
 * @param conf existing configuration
 * @param userProvider the login user provider
 * @throws IOException if the servlet cannot be initialized
 */
RESTServlet(final Configuration conf,
    final UserProvider userProvider) throws IOException {
  this.realUser = userProvider.getCurrent().getUGI();
  this.conf = conf;
  registerCustomFilter(conf);

  int cleanInterval = conf.getInt(CLEANUP_INTERVAL, 10 * 1000);
  int maxIdleTime = conf.getInt(MAX_IDLETIME, 10 * 60 * 1000);
  connectionCache = new ConnectionCache(
    conf, userProvider, cleanInterval, maxIdleTime);
  if (supportsProxyuser()) {
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  }

  metrics = new MetricsREST();

  pauseMonitor = new JvmPauseMonitor(conf, metrics.getSource());
  pauseMonitor.start();
}
 
Example #11
Source File: TestConnectionCache.java    From hbase with Apache License 2.0
/**
 * Test that ConnectionCache cleans up expired connections.
 */
@Test
public void testConnectionChore() throws Exception {
  UTIL.startMiniCluster();

  // 1s for clean interval & 5s for maxIdleTime
  ConnectionCache cache = new ConnectionCache(UTIL.getConfiguration(),
      UserProvider.instantiate(UTIL.getConfiguration()), 1000, 5000);
  ConnectionCache.ConnectionInfo info = cache.getCurrentConnection();

  assertEquals(false, info.connection.isClosed());

  Thread.sleep(7000);

  assertEquals(true, info.connection.isClosed());
  UTIL.shutdownMiniCluster();
}
 
Example #12
Source File: TestGetAndPutResource.java    From hbase with Apache License 2.0
@Test
public void testMetrics() throws IOException {
  final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1;
  Response response = client.put(path, Constants.MIMETYPE_BINARY,
      Bytes.toBytes(VALUE_4));
  assertEquals(200, response.getCode());
  Thread.yield();
  response = client.get(path, Constants.MIMETYPE_JSON);
  assertEquals(200, response.getCode());
  assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
  response = deleteRow(TABLE, ROW_4);
  assertEquals(200, response.getCode());

  UserProvider userProvider = UserProvider.instantiate(conf);
  METRICS_ASSERT.assertCounterGt("requests", 2L,
    RESTServlet.getInstance(conf, userProvider).getMetrics().getSource());

  METRICS_ASSERT.assertCounterGt("successfulGet", 0L,
    RESTServlet.getInstance(conf, userProvider).getMetrics().getSource());

  METRICS_ASSERT.assertCounterGt("successfulPut", 0L,
    RESTServlet.getInstance(conf, userProvider).getMetrics().getSource());

  METRICS_ASSERT.assertCounterGt("successfulDelete", 0L,
    RESTServlet.getInstance(conf, userProvider).getMetrics().getSource());
}
 
Example #13
Source File: HBaseTablespace.java    From tajo with Apache License 2.0
HConnectionKey(Configuration conf) {
  Map<String, String> m = new HashMap<>();
  if (conf != null) {
    for (String property : CONNECTION_PROPERTIES) {
      String value = conf.get(property);
      if (value != null) {
        m.put(property, value);
      }
    }
  }
  this.properties = Collections.unmodifiableMap(m);

  try {
    UserProvider provider = UserProvider.instantiate(conf);
    User currentUser = provider.getCurrent();
    if (currentUser != null) {
      username = currentUser.getName();
    }
  } catch (IOException ioe) {
    LOG.warn("Error obtaining current user, skipping username in HConnectionKey", ioe);
  }
}
 
Example #14
Source File: HBaseSecurityUtil.java    From storm-hbase with Apache License 2.0
public static UserProvider login(Map conf, Configuration hbaseConfig) throws IOException {
    UserProvider provider = UserProvider.instantiate(hbaseConfig);
    if (UserGroupInformation.isSecurityEnabled()) {
        String keytab = (String) conf.get(STORM_KEYTAB_FILE_KEY);
        if (keytab != null) {
            hbaseConfig.set(STORM_KEYTAB_FILE_KEY, keytab);
        }
        String userName = (String) conf.get(STORM_USER_NAME_KEY);
        if (userName != null) {
            hbaseConfig.set(STORM_USER_NAME_KEY, userName);
        }
        provider.login(STORM_KEYTAB_FILE_KEY, STORM_USER_NAME_KEY, 
            InetAddress.getLocalHost().getCanonicalHostName());
    }
    return provider;
}
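For context, a Storm component preparing HBase access might call this roughly as follows. This is a sketch: the keytab path and principal are placeholder assumptions, and the conf map is assumed to be keyed by the class's public STORM_KEYTAB_FILE_KEY and STORM_USER_NAME_KEY constants.

// Hypothetical usage: perform the Kerberos login before opening HBase connections.
Map<String, Object> stormConf = new HashMap<>();
stormConf.put(HBaseSecurityUtil.STORM_KEYTAB_FILE_KEY, "/etc/security/keytabs/storm.keytab"); // assumed path
stormConf.put(HBaseSecurityUtil.STORM_USER_NAME_KEY, "storm/_HOST@EXAMPLE.COM");              // assumed principal
UserProvider provider = HBaseSecurityUtil.login(stormConf, HBaseConfiguration.create());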
 
Example #15
Source File: RESTServlet.java    From hbase with Apache License 2.0
/**
 * @param conf Existing configuration to use in rest servlet
 * @param userProvider the login user provider
 * @return the RESTServlet singleton instance
 * @throws IOException if the singleton instance cannot be created
 */
public synchronized static RESTServlet getInstance(Configuration conf,
    UserProvider userProvider) throws IOException {
  if (INSTANCE == null) {
    INSTANCE = new RESTServlet(conf, userProvider);
  }
  return INSTANCE;
}
 
Example #16
Source File: TestThriftHBaseServiceHandler.java    From hbase with Apache License 2.0
private ThriftHBaseServiceHandler createHandler() throws TException {
  try {
    Configuration conf = UTIL.getConfiguration();
    return new ThriftHBaseServiceHandler(conf, UserProvider.instantiate(conf));
  } catch (IOException ie) {
    throw new TException(ie);
  }
}
 
Example #17
Source File: PhoenixAccessController.java    From phoenix with Apache License 2.0
@Override
public void start(CoprocessorEnvironment env) throws IOException {
    Configuration conf = env.getConfiguration();
    this.accessCheckEnabled = conf.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED,
            QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED);
    if (!this.accessCheckEnabled) {
        LOGGER.warn(
                "PhoenixAccessController has been loaded with authorization checks disabled.");
    }
    this.execPermissionsCheckEnabled = conf.getBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY,
            AccessControlConstants.DEFAULT_EXEC_PERMISSION_CHECKS);
    if (env instanceof PhoenixMetaDataControllerEnvironment) {
        this.env = (PhoenixMetaDataControllerEnvironment)env;
    } else {
        throw new IllegalArgumentException(
                "Not a valid environment, should be loaded by PhoenixMetaDataControllerEnvironment");
    }

    ZKWatcher zk = null;
    RegionCoprocessorEnvironment regionEnv = this.env.getRegionCoprocessorEnvironment();
    if (regionEnv instanceof HasRegionServerServices) {
        zk = ((HasRegionServerServices) regionEnv).getRegionServerServices().getZooKeeper();
    }
    accessChecker = new AccessChecker(env.getConfiguration(), zk);
    // set the user-provider.
    this.userProvider = UserProvider.instantiate(env.getConfiguration());
    // init superusers and add the server principal (if using security)
    // or process owner as default super user.
    Superusers.initialize(env.getConfiguration());
}
 
Example #18
Source File: Export.java    From hbase with Apache License 2.0
@Override
public void start(CoprocessorEnvironment environment) throws IOException {
  if (environment instanceof RegionCoprocessorEnvironment) {
    env = (RegionCoprocessorEnvironment) environment;
    userProvider = UserProvider.instantiate(env.getConfiguration());
  } else {
    throw new CoprocessorException("Must be loaded on a table region!");
  }
}
 
Example #19
Source File: Export.java    From hbase with Apache License 2.0
SecureWriter(final Configuration conf, final UserProvider userProvider,
    final Token userToken, final List<SequenceFile.Writer.Option> opts)
    throws IOException {
  User user = getActiveUser(userProvider, userToken);
  try {
    SequenceFile.Writer sequenceFileWriter =
        user.runAs((PrivilegedExceptionAction<SequenceFile.Writer>) () ->
            SequenceFile.createWriter(conf,
                opts.toArray(new SequenceFile.Writer.Option[opts.size()])));
    privilegedWriter = new PrivilegedWriter(user, sequenceFileWriter);
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
}
 
Example #20
Source File: TestSecureBulkLoadHFilesSplitRecovery.java    From hbase with Apache License 2.0
@BeforeClass
public static void setupCluster() throws Exception {
  util = new HBaseTestingUtility();
  // set the always on security provider
  UserProvider.setUserProviderForTesting(util.getConfiguration(),
    HadoopSecurityEnabledUserProviderForTesting.class);
  // setup configuration
  SecureTestUtil.enableSecurity(util.getConfiguration());

  util.startMiniCluster();

  // Wait for the ACL table to become available
  util.waitTableEnabled(PermissionStorage.ACL_TABLE_NAME);
}
 
Example #21
Source File: Export.java    From hbase with Apache License 2.0
private static User getActiveUser(final UserProvider userProvider, final Token userToken)
    throws IOException {
  User user = RpcServer.getRequestUser().orElse(userProvider.getCurrent());
  if (user == null && userToken != null) {
    LOG.warn("No found of user credentials, but a token was got from user request");
  } else if (user != null && userToken != null) {
    user.addToken(userToken);
  }
  return user;
}
 
Example #22
Source File: HFileReplicator.java    From hbase with Apache License 2.0
public HFileReplicator(Configuration sourceClusterConf,
    String sourceBaseNamespaceDirPath, String sourceHFileArchiveDirPath,
    Map<String, List<Pair<byte[], List<String>>>> tableQueueMap, Configuration conf,
    AsyncClusterConnection connection, List<String> sourceClusterIds) throws IOException {
  this.sourceClusterConf = sourceClusterConf;
  this.sourceBaseNamespaceDirPath = sourceBaseNamespaceDirPath;
  this.sourceHFileArchiveDirPath = sourceHFileArchiveDirPath;
  this.bulkLoadHFileMap = tableQueueMap;
  this.conf = conf;
  this.connection = connection;
  this.sourceClusterIds = sourceClusterIds;

  userProvider = UserProvider.instantiate(conf);
  fsDelegationToken = new FsDelegationToken(userProvider, "renewer");
  this.hbaseStagingDir =
    new Path(CommonFSUtils.getRootDir(conf), HConstants.BULKLOAD_STAGING_DIR_NAME);
  this.maxCopyThreads =
      this.conf.getInt(REPLICATION_BULKLOAD_COPY_MAXTHREADS_KEY,
        REPLICATION_BULKLOAD_COPY_MAXTHREADS_DEFAULT);
  this.exec = Threads.getBoundedCachedThreadPool(maxCopyThreads, 60, TimeUnit.SECONDS,
      new ThreadFactoryBuilder().setDaemon(true)
          .setNameFormat("HFileReplicationCopier-%1$d-" + this.sourceBaseNamespaceDirPath)
          .build());
  this.copiesPerThread =
      conf.getInt(REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_KEY,
        REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_DEFAULT);

  sinkFs = FileSystem.get(conf);
}
 
Example #23
Source File: ReplicationSink.java    From hbase with Apache License 2.0
private AsyncClusterConnection getConnection() throws IOException {
  // See https://en.wikipedia.org/wiki/Double-checked_locking
  AsyncClusterConnection connection = sharedConn;
  if (connection == null) {
    synchronized (sharedConnLock) {
      connection = sharedConn;
      if (connection == null) {
        connection = ClusterConnectionFactory.createAsyncClusterConnection(conf, null,
          UserProvider.instantiate(conf).getCurrent());
        sharedConn = connection;
      }
    }
  }
  return connection;
}
 
Example #24
Source File: RpcServer.java    From hbase with Apache License 2.0
/**
 * Constructs a server listening on the named port and address.
 * @param server hosting instance of {@link Server}. Authentication checks are performed if an
 * instance is supplied; pass null for no authentication check.
 * @param name Used for keying this rpc server's metrics and for naming the Listener thread.
 * @param services A list of services.
 * @param bindAddress Where to listen.
 * @param conf The server configuration.
 * @param scheduler The scheduler used to dispatch incoming calls.
 * @param reservoirEnabled Enable ByteBufferPool or not.
 */
public RpcServer(final Server server, final String name,
    final List<BlockingServiceAndInterface> services,
    final InetSocketAddress bindAddress, Configuration conf,
    RpcScheduler scheduler, boolean reservoirEnabled) throws IOException {
  this.bbAllocator = ByteBuffAllocator.create(conf, reservoirEnabled);
  this.server = server;
  this.services = services;
  this.bindAddress = bindAddress;
  this.conf = conf;
  // See declaration above for documentation on what this size is.
  this.maxQueueSizeInBytes =
    this.conf.getLong("hbase.ipc.server.max.callqueue.size", DEFAULT_MAX_CALLQUEUE_SIZE);

  this.warnResponseTime = conf.getInt(WARN_RESPONSE_TIME, DEFAULT_WARN_RESPONSE_TIME);
  this.warnResponseSize = conf.getInt(WARN_RESPONSE_SIZE, DEFAULT_WARN_RESPONSE_SIZE);
  this.minClientRequestTimeout = conf.getInt(MIN_CLIENT_REQUEST_TIMEOUT,
      DEFAULT_MIN_CLIENT_REQUEST_TIMEOUT);
  this.maxRequestSize = conf.getInt(MAX_REQUEST_SIZE, DEFAULT_MAX_REQUEST_SIZE);

  this.metrics = new MetricsHBaseServer(name, new MetricsHBaseServerWrapperImpl(this));
  this.tcpNoDelay = conf.getBoolean("hbase.ipc.server.tcpnodelay", true);
  this.tcpKeepAlive = conf.getBoolean("hbase.ipc.server.tcpkeepalive", true);

  this.cellBlockBuilder = new CellBlockBuilder(conf);

  this.authorize = conf.getBoolean(HADOOP_SECURITY_AUTHORIZATION, false);
  this.userProvider = UserProvider.instantiate(conf);
  this.isSecurityEnabled = userProvider.isHBaseSecurityEnabled();
  if (isSecurityEnabled) {
    saslProps = SaslUtil.initSaslProperties(conf.get("hbase.rpc.protection",
      QualityOfProtection.AUTHENTICATION.name().toLowerCase(Locale.ROOT)));
  } else {
    saslProps = Collections.emptyMap();
  }

  this.scheduler = scheduler;
}
 
Example #25
Source File: TableMapReduceUtil.java    From hbase with Apache License 2.0
public static void initCredentials(Job job) throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job.getConfiguration());
  if (userProvider.isHadoopSecurityEnabled()) {
    // propagate delegation related props from launcher job to MR job
    if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
      job.getConfiguration().set("mapreduce.job.credentials.binary",
                                 System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
    }
  }

  if (userProvider.isHBaseSecurityEnabled()) {
    try {
      // init credentials for remote cluster
      String quorumAddress = job.getConfiguration().get(TableOutputFormat.QUORUM_ADDRESS);
      User user = userProvider.getCurrent();
      if (quorumAddress != null) {
        Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(),
            quorumAddress, TableOutputFormat.OUTPUT_CONF_PREFIX);
        Connection peerConn = ConnectionFactory.createConnection(peerConf);
        try {
          TokenUtil.addTokenForJob(peerConn, user, job);
        } finally {
          peerConn.close();
        }
      }

      Connection conn = ConnectionFactory.createConnection(job.getConfiguration());
      try {
        TokenUtil.addTokenForJob(conn, user, job);
      } finally {
        conn.close();
      }
    } catch (InterruptedException ie) {
      LOG.info("Interrupted obtaining user authentication token");
      Thread.currentThread().interrupt();
    }
  }
}
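In a typical single-cluster job, this is called once the mapper side of the job is configured. A minimal sketch follows; the table name, job name, and mapper wiring are assumptions for illustration.

// Hypothetical job setup: configure the scan mapper, then obtain the HBase token.
Configuration conf = HBaseConfiguration.create();
Job job = Job.getInstance(conf, "scan-mytable"); // assumed job name
TableMapReduceUtil.initTableMapperJob("mytable", new Scan(),
    IdentityTableMapper.class, ImmutableBytesWritable.class, Result.class, job);
TableMapReduceUtil.initCredentials(job);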
 
Example #26
Source File: AccessChecker.java    From hbase with Apache License 2.0
private void initGroupService(Configuration conf) {
  if (groupService == null) {
    if (conf.getBoolean(User.TestingGroups.TEST_CONF, false)) {
      UserProvider.setGroups(new User.TestingGroups(UserProvider.getGroups()));
      groupService = UserProvider.getGroups();
    } else {
      groupService = Groups.getUserToGroupsMappingService(conf);
    }
  }
}
 
Example #27
Source File: SecureBulkLoadManager.java    From hbase with Apache License 2.0
public void start() throws IOException {
  random = new SecureRandom();
  userProvider = UserProvider.instantiate(conf);
  ugiReferenceCounter = new ConcurrentHashMap<>();
  fs = FileSystem.get(conf);
  baseStagingDir = new Path(CommonFSUtils.getRootDir(conf), HConstants.BULKLOAD_STAGING_DIR_NAME);

  if (conf.get("hbase.bulkload.staging.dir") != null) {
    LOG.warn("hbase.bulkload.staging.dir " + " is deprecated. Bulkload staging directory is "
        + baseStagingDir);
  }
  if (!fs.exists(baseStagingDir)) {
    fs.mkdirs(baseStagingDir, PERM_HIDDEN);
  }
}
 
Example #28
Source File: BulkLoadHFilesTool.java    From hbase with Apache License 2.0
public BulkLoadHFilesTool(Configuration conf) {
  // make a copy, just to be sure we're not overriding someone else's config
  super(new Configuration(conf));
  // disable blockcache for tool invocation, see HBASE-10500
  conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0);
  userProvider = UserProvider.instantiate(conf);
  fsDelegationToken = new FsDelegationToken(userProvider, "renewer");
  assignSeqIds = conf.getBoolean(ASSIGN_SEQ_IDS, true);
  maxFilesPerRegionPerFamily = conf.getInt(MAX_FILES_PER_REGION_PER_FAMILY, 32);
  nrThreads = conf.getInt("hbase.loadincremental.threads.max",
    Runtime.getRuntime().availableProcessors());
  bulkLoadByFamily = conf.getBoolean(BULK_LOAD_HFILES_BY_FAMILY, false);
}
 
Example #29
Source File: HMaster.java    From hbase with Apache License 2.0
/**
 * For compatibility: if login with the regionserver credentials fails, try the master ones.
 */
@Override
protected void login(UserProvider user, String host) throws IOException {
  try {
    super.login(user, host);
  } catch (IOException ie) {
    user.login(SecurityConstants.MASTER_KRB_KEYTAB_FILE,
            SecurityConstants.MASTER_KRB_PRINCIPAL, host);
  }
}
 
Example #30
Source File: TestRSGroupsWithACL.java    From hbase with Apache License 2.0
@BeforeClass
public static void setupBeforeClass() throws Exception {
  // setup configuration
  conf = TEST_UTIL.getConfiguration();
  // Enable security
  enableSecurity(conf);
  // Verify enableSecurity sets up what we require
  verifyConfiguration(conf);
  // Enable rsgroup
  RSGroupUtil.enableRSGroup(conf);

  TEST_UTIL.startMiniCluster();
  // Wait for the ACL table to become available
  TEST_UTIL.waitUntilAllRegionsAssigned(PermissionStorage.ACL_TABLE_NAME);

  // create a set of test users
  SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" });
  USER_ADMIN = User.createUserForTesting(conf, "admin2", new String[0]);
  USER_RW = User.createUserForTesting(conf, "rwuser", new String[0]);
  USER_RO = User.createUserForTesting(conf, "rouser", new String[0]);
  USER_OWNER = User.createUserForTesting(conf, "owner", new String[0]);
  USER_CREATE = User.createUserForTesting(conf, "tbl_create", new String[0]);
  USER_NONE = User.createUserForTesting(conf, "nouser", new String[0]);

  USER_GROUP_ADMIN =
      User.createUserForTesting(conf, "user_group_admin", new String[] { GROUP_ADMIN });
  USER_GROUP_CREATE =
      User.createUserForTesting(conf, "user_group_create", new String[] { GROUP_CREATE });
  USER_GROUP_READ =
      User.createUserForTesting(conf, "user_group_read", new String[] { GROUP_READ });
  USER_GROUP_WRITE =
      User.createUserForTesting(conf, "user_group_write", new String[] { GROUP_WRITE });

  systemUserConnection = TEST_UTIL.getConnection();
  setUpTableAndUserPermissions();
  master = TEST_UTIL.getHBaseCluster().getMaster();
  accessChecker = master.getAccessChecker();
  userProvider = UserProvider.instantiate(TEST_UTIL.getConfiguration());
}