Java Code Examples for org.apache.hadoop.security.UserGroupInformation#setConfiguration()
The following examples show how to use
org.apache.hadoop.security.UserGroupInformation#setConfiguration().
These examples are extracted from open source projects.
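Most of the examples below share the same core pattern: build a Configuration that declares Kerberos authentication, pass it to UserGroupInformation.setConfiguration() to (re)initialize the static UGI security state, then log in from a keytab. The sketch below is a minimal, self-contained illustration of that pattern, not code from any of the listed projects; the principal and keytab path are invented placeholders.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class KerberosLoginSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Tell Hadoop's security layer to expect Kerberos rather than simple auth.
        conf.set("hadoop.security.authentication", "kerberos");
        // setConfiguration() must run before any login call, since it
        // (re)initializes the static UGI security state from the configuration.
        UserGroupInformation.setConfiguration(conf);
        // Placeholder principal and keytab path -- substitute your own.
        UserGroupInformation.loginUserFromKeytab(
                "client@EXAMPLE.COM", "/etc/security/keytabs/client.keytab");
        System.out.println("Logged in as: " + UserGroupInformation.getLoginUser());
    }
}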
Example 1
Source Project: Transwarp-Sample-Code File: Delete.java License: MIT License
public static void main(String[] args) {
    String rootPath = "hdfs://nameservice1";
    Path p = new Path(rootPath + "/tmp/file.txt");
    Configuration conf = new Configuration();
    conf.addResource("core-site.xml");
    conf.addResource("hdfs-site.xml");
    conf.addResource("yarn-site.xml");
    try {
        // If Kerberos is not enabled, comment out the following two lines
        UserGroupInformation.setConfiguration(conf);
        UserGroupInformation.loginUserFromKeytab("[email protected]", "E:\\星环\\hdfs.keytab");
        FileSystem fs = p.getFileSystem(conf);
        boolean b = fs.delete(p, true);
        System.out.println(b);
        fs.close();
    } catch (IOException e) {
        e.printStackTrace();
    }
}
Example 2
Source Project: hbase File: TestUsersOperationsWithSecureHadoop.java License: Apache License 2.0
@Test
public void testLoginWithUserKeytabAndPrincipal() throws Exception {
    String clientKeytab = getClientKeytabForTesting();
    String clientPrincipal = getClientPrincipalForTesting();
    assertNotNull("Path for client keytab is not specified.", clientKeytab);
    assertNotNull("Client principal is not specified.", clientPrincipal);

    Configuration conf = getSecuredConfiguration();
    conf.set(AuthUtil.HBASE_CLIENT_KEYTAB_FILE, clientKeytab);
    conf.set(AuthUtil.HBASE_CLIENT_KERBEROS_PRINCIPAL, clientPrincipal);
    UserGroupInformation.setConfiguration(conf);

    UserProvider provider = UserProvider.instantiate(conf);
    assertTrue("Client principal or keytab is empty", provider.shouldLoginFromKeytab());

    provider.login(AuthUtil.HBASE_CLIENT_KEYTAB_FILE, AuthUtil.HBASE_CLIENT_KERBEROS_PRINCIPAL);

    User loginUser = provider.getCurrent();
    assertEquals(CLIENT_NAME, loginUser.getShortName());
    assertEquals(getClientPrincipalForTesting(), loginUser.getName());
}
Example 3
Source Project: dk-fitting File: HdfsUtils.java License: Apache License 2.0
public static FileSystem getFs(String krb5_conf, String principal, String keytab) throws Exception {
    if (fs != null) {
        return fs;
    } else {
        System.out.println("hdfs_site:" + Prop.getProperty("datasource.hdfs_xml_path"));
        System.out.println("core_site:" + Prop.getProperty("datasource.core_xml_path"));
        conf.addResource(new Path(Prop.getProperty("datasource.hdfs_xml_path")));
        conf.addResource(new Path(Prop.getProperty("datasource.core_xml_path")));
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
        conf.set("dfs.client.block.write.replace-datanode-on-failure.enable", "true");
        //conf.setBoolean("fs.hdfs.impl.disable.cache", true);
        if (StringUtils.isNotBlank(krb5_conf)
                && StringUtils.isNotBlank(principal)
                && StringUtils.isNotBlank(keytab)) {
            System.setProperty("java.security.krb5.conf", krb5_conf);
            UserGroupInformation.setConfiguration(conf);
            UserGroupInformation.loginUserFromKeytab(principal, keytab);
            UserGroupInformation.getLoginUser();
        }
        fs = FileSystem.get(conf);
        return fs;
    }
}
Example 4
Source Project: datacollector File: DefaultLoginUgiProvider.java License: Apache License 2.0
@Override
public UserGroupInformation getLoginUgi(Configuration hdfsConfiguration) throws IOException {
    AccessControlContext accessContext = AccessController.getContext();
    Subject subject = Subject.getSubject(accessContext);
    UserGroupInformation loginUgi;
    //HADOOP-13805
    HadoopConfigurationUtils.configureHadoopTreatSubjectExternal(hdfsConfiguration);
    UserGroupInformation.setConfiguration(hdfsConfiguration);
    if (UserGroupInformation.isSecurityEnabled()) {
        loginUgi = UserGroupInformation.getUGIFromSubject(subject);
    } else {
        UserGroupInformation.loginUserFromSubject(subject);
        loginUgi = UserGroupInformation.getLoginUser();
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(
            "Subject = {}, Principals = {}, Login UGI = {}",
            subject,
            subject == null ? "null" : subject.getPrincipals(),
            loginUgi
        );
    }
    return loginUgi;
}
Example 5
Source Project: hadoop File: TestRMHA.java License: Apache License 2.0
@Before
public void setUp() throws Exception {
    configuration = new Configuration();
    UserGroupInformation.setConfiguration(configuration);
    configuration.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
    configuration.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
    for (String confKey :
            YarnConfiguration.getServiceAddressConfKeys(configuration)) {
        configuration.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS);
        configuration.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
        configuration.set(HAUtil.addSuffix(confKey, RM3_NODE_ID), RM3_ADDRESS);
    }

    // Enable webapp to test web-services also
    configuration.setBoolean(MockRM.ENABLE_WEBAPP, true);
    configuration.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
    ClusterMetrics.destroy();
    QueueMetrics.clearQueueMetrics();
    DefaultMetricsSystem.shutdown();
}
Example 6
Source Project: nifi File: Kerberos.java License: Apache License 2.0
@Override
public AtlasClientV2 createClient(String[] baseUrls) {
    final Configuration hadoopConf = new Configuration();
    hadoopConf.set("hadoop.security.authentication", "kerberos");
    UserGroupInformation.setConfiguration(hadoopConf);
    final UserGroupInformation ugi;
    try {
        UserGroupInformation.loginUserFromKeytab(principal, keytab);
        ugi = UserGroupInformation.getCurrentUser();
    } catch (IOException e) {
        throw new RuntimeException("Failed to login with Kerberos due to: " + e, e);
    }
    return new AtlasClientV2(ugi, null, baseUrls);
}
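A common next step after obtaining a UserGroupInformation this way is to run filesystem or RPC calls under that identity via its doAs() method, so the Kerberos credentials held by the UGI are used rather than the JVM's process identity. The helper below is a hypothetical sketch, not part of the nifi example above:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;

// Hypothetical helper: opens an HDFS FileSystem as the given UGI.
public static FileSystem openFileSystemAs(UserGroupInformation ugi, final Configuration conf)
        throws Exception {
    return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
        @Override
        public FileSystem run() throws Exception {
            // Executes with the Kerberos credentials held by the UGI.
            return FileSystem.get(conf);
        }
    });
}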
Example 7
Source Project: celos File: CelosCiContext.java License: Apache License 2.0
private Configuration setupConfiguration(String username, CelosCiTarget target) throws Exception {
    JScpWorker jscpWorker = new JScpWorker(username);
    Configuration conf = new Configuration();
    conf.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
    conf.set("fs.file.impl", org.apache.hadoop.fs.LocalFileSystem.class.getName());
    conf.addResource(jscpWorker.getFileObjectByUri(target.getPathToHdfsSite()).getContent().getInputStream());
    conf.addResource(jscpWorker.getFileObjectByUri(target.getPathToCoreSite()).getContent().getInputStream());
    UserGroupInformation.setConfiguration(conf);
    return conf;
}
Example 8
Source Project: sqoop-on-spark File: KerberosAuthenticationHandler.java License: Apache License 2.0
public void secureLogin() {
    MapContext mapContext = SqoopConfiguration.getInstance().getContext();
    String keytab = mapContext.getString(
            SecurityConstants.AUTHENTICATION_KERBEROS_KEYTAB).trim();
    if (keytab.length() == 0) {
        throw new SqoopException(SecurityError.AUTH_0001,
                SecurityConstants.AUTHENTICATION_KERBEROS_KEYTAB);
    }
    keytabFile = keytab;

    String principal = mapContext.getString(
            SecurityConstants.AUTHENTICATION_KERBEROS_PRINCIPAL).trim();
    if (principal.length() == 0) {
        throw new SqoopException(SecurityError.AUTH_0002,
                SecurityConstants.AUTHENTICATION_KERBEROS_PRINCIPAL);
    }
    keytabPrincipal = principal;

    Configuration conf = new Configuration();
    conf.set(get_hadoop_security_authentication(),
            SecurityConstants.TYPE.KERBEROS.name());
    UserGroupInformation.setConfiguration(conf);
    try {
        String hostPrincipal = SecurityUtil.getServerPrincipal(principal, "0.0.0.0");
        UserGroupInformation.loginUserFromKeytab(hostPrincipal, keytab);
    } catch (IOException ex) {
        throw new SqoopException(SecurityError.AUTH_0003, ex);
    }
    LOG.info("Using Kerberos authentication, principal ["
            + principal + "] keytab [" + keytab + "]");
}
Example 9
Source Project: sqoop-on-spark File: SimpleAuthenticationHandler.java License: Apache License 2.0
public void secureLogin() {
    // no secure login, just set configurations
    Configuration conf = new Configuration();
    conf.set(get_hadoop_security_authentication(),
            SecurityConstants.TYPE.SIMPLE.name());
    UserGroupInformation.setConfiguration(conf);
    LOG.info("Using simple/pseudo authentication, principal ["
            + System.getProperty("user.name") + "]");
}
Example 10
Source Project: griffin File: HiveMetaStoreServiceJdbcImpl.java License: Apache License 2.0
@PostConstruct
public void init() {
    if (needKerberos != null && needKerberos.equalsIgnoreCase("true")) {
        LOGGER.info("Hive need Kerberos Auth.");
        Configuration conf = new Configuration();
        conf.set("hadoop.security.authentication", "Kerberos");
        UserGroupInformation.setConfiguration(conf);
        try {
            UserGroupInformation.loginUserFromKeytab(keytabUser, keytabPath);
        } catch (IOException e) {
            LOGGER.error("Register Kerberos has error. {}", e.getMessage());
        }
    }
}
Example 11
Source Project: hadoop File: TestApplicationCleanup.java License: Apache License 2.0
@Before
public void setup() throws UnknownHostException {
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.DEBUG);
    conf = new YarnConfiguration();
    UserGroupInformation.setConfiguration(conf);
    conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
    Assert.assertTrue(YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS > 1);
}
Example 12
Source Project: zeppelin File: JDBCSecurityImpl.java License: Apache License 2.0
/***
 * @param properties
 */
public static void createSecureConfiguration(Properties properties, AuthenticationMethod authType) {
    switch (authType) {
        case KERBEROS:
            Configuration conf = new org.apache.hadoop.conf.Configuration();
            conf.set("hadoop.security.authentication", KERBEROS.toString());
            UserGroupInformation.setConfiguration(conf);
            try {
                // Check TGT before calling login
                // Ref: https://github.com/apache/hadoop/blob/release-3.0.1-RC1/hadoop-common-project/
                // hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java#L1232
                if (!UserGroupInformation.isSecurityEnabled()
                        || UserGroupInformation.getCurrentUser().getAuthenticationMethod() != KERBEROS
                        || !UserGroupInformation.isLoginKeytabBased()) {
                    UserGroupInformation.loginUserFromKeytab(
                            properties.getProperty("zeppelin.jdbc.principal"),
                            properties.getProperty("zeppelin.jdbc.keytab.location"));
                } else {
                    LOGGER.info("The user has already logged in using Keytab and principal, "
                            + "no action required");
                }
            } catch (IOException e) {
                LOGGER.error("Failed to get either keytab location or principal name in the "
                        + "interpreter", e);
            }
    }
}
Example 13
Source Project: hadoop File: TestRMDelegationTokens.java License: Apache License 2.0
@Before
public void setup() {
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.DEBUG);
    ExitUtil.disableSystemExit();
    conf = new YarnConfiguration();
    UserGroupInformation.setConfiguration(conf);
    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
    conf.set(YarnConfiguration.RM_SCHEDULER, FairScheduler.class.getName());
}
Example 14
Source Project: hadoop File: TestRMRestart.java License: Apache License 2.0
@Before
public void setup() throws IOException {
    conf = getConf();
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.DEBUG);
    UserGroupInformation.setConfiguration(conf);
    conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
    conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, false);
    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
    rmAddr = new InetSocketAddress("localhost", 8032);
    Assert.assertTrue(YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS > 1);
}
Example 15
Source Project: big-c File: TestRMRestart.java License: Apache License 2.0
@Before
public void setup() throws IOException {
    conf = getConf();
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.DEBUG);
    UserGroupInformation.setConfiguration(conf);
    conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
    conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, false);
    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
    rmAddr = new InetSocketAddress("localhost", 8032);
    Assert.assertTrue(YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS > 1);
}
Example 16
Source Project: hadoop File: TestWebDelegationToken.java License: Apache License 2.0
@After
public void cleanUp() throws Exception {
    jetty.stop();
    // resetting hadoop security to simple
    org.apache.hadoop.conf.Configuration conf =
            new org.apache.hadoop.conf.Configuration();
    UserGroupInformation.setConfiguration(conf);
}
Example 17
Source Project: hadoop File: TestTaskAttemptContainerRequest.java License: Apache License 2.0
@Test
public void testAttemptContainerRequest() throws Exception {
    final Text SECRET_KEY_ALIAS = new Text("secretkeyalias");
    final byte[] SECRET_KEY = ("secretkey").getBytes();
    Map<ApplicationAccessType, String> acls =
            new HashMap<ApplicationAccessType, String>(1);
    acls.put(ApplicationAccessType.VIEW_APP, "otheruser");
    ApplicationId appId = ApplicationId.newInstance(1, 1);
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
    Path jobFile = mock(Path.class);

    EventHandler eventHandler = mock(EventHandler.class);
    TaskAttemptListener taListener = mock(TaskAttemptListener.class);
    when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));

    JobConf jobConf = new JobConf();
    jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
    jobConf.setBoolean("fs.file.impl.disable.cache", true);
    jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");

    // setup UGI for security so tokens and keys are preserved
    jobConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(jobConf);

    Credentials credentials = new Credentials();
    credentials.addSecretKey(SECRET_KEY_ALIAS, SECRET_KEY);
    Token<JobTokenIdentifier> jobToken = new Token<JobTokenIdentifier>(
            ("tokenid").getBytes(), ("tokenpw").getBytes(),
            new Text("tokenkind"), new Text("tokenservice"));

    TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
            mock(TaskSplitMetaInfo.class), jobConf, taListener,
            jobToken, credentials, new SystemClock(), null);

    jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, taImpl.getID().toString());

    ContainerLaunchContext launchCtx = TaskAttemptImpl.createContainerLaunchContext(acls,
            jobConf, jobToken, taImpl.createRemoteTask(),
            TypeConverter.fromYarn(jobId), mock(WrappedJvmID.class),
            taListener, credentials);

    Assert.assertEquals("ACLs mismatch", acls, launchCtx.getApplicationACLs());
    Credentials launchCredentials = new Credentials();

    DataInputByteBuffer dibb = new DataInputByteBuffer();
    dibb.reset(launchCtx.getTokens());
    launchCredentials.readTokenStorageStream(dibb);

    // verify all tokens specified for the task attempt are in the launch context
    for (Token<? extends TokenIdentifier> token : credentials.getAllTokens()) {
        Token<? extends TokenIdentifier> launchToken =
                launchCredentials.getToken(token.getService());
        Assert.assertNotNull("Token " + token.getService() + " is missing", launchToken);
        Assert.assertEquals("Token " + token.getService() + " mismatch", token, launchToken);
    }

    // verify the secret key is in the launch context
    Assert.assertNotNull("Secret key missing",
            launchCredentials.getSecretKey(SECRET_KEY_ALIAS));
    Assert.assertTrue("Secret key mismatch",
            Arrays.equals(SECRET_KEY, launchCredentials.getSecretKey(SECRET_KEY_ALIAS)));
}
Example 18
Source Project: big-c File: TestDFSUtil.java License: Apache License 2.0
/**
 * Reset to default UGI settings since some tests change them.
 */
@Before
public void resetUGI() {
    UserGroupInformation.setConfiguration(new Configuration());
}
Example 19
Source Project: big-c File: SecondaryNameNode.java License: Apache License 2.0
/**
 * Initialize SecondaryNameNode.
 */
private void initialize(final Configuration conf,
        CommandLineOpts commandLineOpts) throws IOException {
    final InetSocketAddress infoSocAddr = getHttpAddress(conf);
    final String infoBindAddress = infoSocAddr.getHostName();
    UserGroupInformation.setConfiguration(conf);
    if (UserGroupInformation.isSecurityEnabled()) {
        SecurityUtil.login(conf,
                DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
                DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY,
                infoBindAddress);
    }
    // initiate Java VM metrics
    DefaultMetricsSystem.initialize("SecondaryNameNode");
    JvmMetrics.create("SecondaryNameNode",
            conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
            DefaultMetricsSystem.instance());

    // Create connection to the namenode.
    shouldRun = true;
    nameNodeAddr = NameNode.getServiceAddress(conf, true);

    this.conf = conf;
    this.namenode = NameNodeProxies.createNonHAProxy(conf, nameNodeAddr,
            NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
            true).getProxy();

    // initialize checkpoint directories
    fsName = getInfoServer();
    checkpointDirs = FSImage.getCheckpointDirs(conf,
            "/tmp/hadoop/dfs/namesecondary");
    checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf,
            "/tmp/hadoop/dfs/namesecondary");
    checkpointImage = new CheckpointStorage(conf, checkpointDirs,
            checkpointEditsDirs);
    checkpointImage.recoverCreate(commandLineOpts.shouldFormat());
    checkpointImage.deleteTempEdits();

    namesystem = new FSNamesystem(conf, checkpointImage, true);

    // Disable quota checks
    namesystem.dir.disableQuotaChecks();

    // Initialize other scheduling parameters from the configuration
    checkpointConf = new CheckpointConf(conf);

    final InetSocketAddress httpAddr = infoSocAddr;

    final String httpsAddrString = conf.getTrimmed(
            DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
            DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT);
    InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);

    HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
            httpAddr, httpsAddr, "secondary",
            DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
            DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);

    nameNodeStatusBeanName = MBeans.register("SecondaryNameNode",
            "SecondaryNameNodeInfo", this);

    infoServer = builder.build();

    infoServer.setAttribute("secondary.name.node", this);
    infoServer.setAttribute("name.system.image", checkpointImage);
    infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
    infoServer.addInternalServlet("imagetransfer", ImageServlet.PATH_SPEC,
            ImageServlet.class, true);
    infoServer.start();

    LOG.info("Web server init done");

    HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
    int connIdx = 0;
    if (policy.isHttpEnabled()) {
        InetSocketAddress httpAddress = infoServer.getConnectorAddress(connIdx++);
        conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
                NetUtils.getHostPortString(httpAddress));
    }

    if (policy.isHttpsEnabled()) {
        InetSocketAddress httpsAddress = infoServer.getConnectorAddress(connIdx);
        conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
                NetUtils.getHostPortString(httpsAddress));
    }

    legacyOivImageDir = conf.get(
            DFSConfigKeys.DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY);

    LOG.info("Checkpoint Period :" + checkpointConf.getPeriod() + " secs "
            + "(" + checkpointConf.getPeriod() / 60 + " min)");
    LOG.info("Log Size Trigger :" + checkpointConf.getTxnCount() + " txns");
}
Example 20
Source Project: hbase File: TestAccessController.java License: Apache License 2.0
@BeforeClass
public static void setupBeforeClass() throws Exception {
    // setup configuration
    conf = TEST_UTIL.getConfiguration();
    // Up the handlers; this test needs more than usual.
    conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
            MyShellBasedUnixGroupsMapping.class.getName());
    UserGroupInformation.setConfiguration(conf);
    // Enable security
    enableSecurity(conf);
    // In this particular test case, we can't use SecureBulkLoadEndpoint because its doAs will fail
    // to move a file for a random user
    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName());
    // Verify enableSecurity sets up what we require
    verifyConfiguration(conf);

    // Enable EXEC permission checking
    conf.setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true);

    TEST_UTIL.startMiniCluster();
    MasterCoprocessorHost masterCpHost =
            TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost();
    masterCpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf);
    ACCESS_CONTROLLER = masterCpHost.findCoprocessor(AccessController.class);
    CP_ENV = masterCpHost.createEnvironment(
            ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf);
    RegionServerCoprocessorHost rsCpHost = TEST_UTIL.getMiniHBaseCluster()
            .getRegionServer(0).getRegionServerCoprocessorHost();
    RSCP_ENV = rsCpHost.createEnvironment(ACCESS_CONTROLLER,
            Coprocessor.PRIORITY_HIGHEST, 1, conf);

    // Wait for the ACL table to become available
    TEST_UTIL.waitUntilAllRegionsAssigned(PermissionStorage.ACL_TABLE_NAME);

    // create a set of test users
    SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" });
    USER_ADMIN = User.createUserForTesting(conf, "admin2", new String[0]);
    USER_RW = User.createUserForTesting(conf, "rwuser", new String[0]);
    USER_RO = User.createUserForTesting(conf, "rouser", new String[0]);
    USER_OWNER = User.createUserForTesting(conf, "owner", new String[0]);
    USER_CREATE = User.createUserForTesting(conf, "tbl_create", new String[0]);
    USER_NONE = User.createUserForTesting(conf, "nouser", new String[0]);
    USER_ADMIN_CF = User.createUserForTesting(conf, "col_family_admin", new String[0]);

    USER_GROUP_ADMIN =
            User.createUserForTesting(conf, "user_group_admin", new String[] { GROUP_ADMIN });
    USER_GROUP_CREATE =
            User.createUserForTesting(conf, "user_group_create", new String[] { GROUP_CREATE });
    USER_GROUP_READ =
            User.createUserForTesting(conf, "user_group_read", new String[] { GROUP_READ });
    USER_GROUP_WRITE =
            User.createUserForTesting(conf, "user_group_write", new String[] { GROUP_WRITE });

    systemUserConnection = TEST_UTIL.getConnection();
    setUpTableAndUserPermissions();
}