Java Code Examples for org.apache.hadoop.hive.metastore.HiveMetaStoreClient#close()

The following examples show how to use org.apache.hadoop.hive.metastore.HiveMetaStoreClient#close(). Each example is taken from an open-source project; the source file and originating project are noted above each snippet.
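Before the project examples, here is a minimal sketch of the pattern they all follow: open a client, perform metastore calls, and release the underlying connection with close() in a finally block even if a call throws. This is an illustrative sketch, not code from any of the projects below; it assumes a HiveConf picked up from the classpath, and the class, database, and table names are hypothetical.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Table;

public class CloseClientSketch {
  public static void main(String[] args) throws Exception {
    // Assumed: hive-site.xml on the classpath points at a reachable metastore.
    HiveConf conf = new HiveConf();
    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
      // Any metastore call; the database and table names are hypothetical.
      Table table = client.getTable("my_database", "my_table");
      System.out.println(table.getTableName());
    } finally {
      // Always close the client so the connection is released.
      client.close();
    }
  }
}

As the examples below show, close() is usually called in a finally block after the assertions or metastore operations, or delegated to a pool wrapper as in the iceberg HiveClientPool examples.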
Example 1
Source File: HiveServer2CoreTest.java    From beeju with Apache License 2.0
@Test
public void dropTable() throws Exception {
  HiveServer2Core server = setupServer();
  String tableName = "my_table";
  createUnpartitionedTable(DATABASE, tableName, server);

  try (Connection connection = DriverManager.getConnection(server.getJdbcConnectionUrl());
      Statement statement = connection.createStatement()) {
    String dropHql = String.format("DROP TABLE %s.%s", DATABASE, tableName);
    statement.execute(dropHql);
  }

  HiveMetaStoreClient client = server.getCore().newClient();
  try {
    client.getTable(DATABASE, tableName);
    fail(String.format("Table %s.%s was not deleted", DATABASE, tableName));
  } catch (NoSuchObjectException e) {
    // expected
  } finally {
    client.close();
  }
  server.shutdown();
}
 
Example 2
Source File: HiveServer2CoreTest.java    From beeju with Apache License 2.0
@Test
public void dropDatabase() throws Exception {
  HiveServer2Core server = setupServer();
  String databaseName = "Another_DB";

  server.getCore().createDatabase(databaseName);
  try (Connection connection = DriverManager.getConnection(server.getJdbcConnectionUrl());
      Statement statement = connection.createStatement()) {
    String dropHql = String.format("DROP DATABASE %s", databaseName);
    statement.execute(dropHql);
  }

  HiveMetaStoreClient client = server.getCore().newClient();
  try {
    client.getDatabase(databaseName);
    fail(String.format("Database %s was not deleted", databaseName));
  } catch (NoSuchObjectException e) {
    // expected
  } finally {
    client.close();
  }
  server.shutdown();
}
 
Example 3
Source File: HiveServer2CoreTest.java    From beeju with Apache License 2.0
@Test
public void addPartition() throws Exception {
  HiveServer2Core server = setupServer();
  String tableName = "my_table";
  createPartitionedTable(DATABASE, tableName, server);

  try (Connection connection = DriverManager.getConnection(server.getJdbcConnectionUrl());
      Statement statement = connection.createStatement()) {
    String addPartitionHql = String.format("ALTER TABLE %s.%s ADD PARTITION (partcol=1)", DATABASE, tableName);
    statement.execute(addPartitionHql);
  }

  HiveMetaStoreClient client = server.getCore().newClient();
  try {
    List<Partition> partitions = client.listPartitions(DATABASE, tableName, (short) -1);
    assertThat(partitions.size(), is(1));
    assertThat(partitions.get(0).getDbName(), is(DATABASE));
    assertThat(partitions.get(0).getTableName(), is(tableName));
    assertThat(partitions.get(0).getValues(), is(Arrays.asList("1")));
    assertThat(partitions.get(0).getSd().getLocation(),
        is(String.format("file:%s/%s/%s/partcol=1", server.getCore().tempDir(), DATABASE, tableName)));
  } finally {
    client.close();
  }
  server.shutdown();
}
 
Example 4
Source File: HiveServer2CoreTest.java    From beeju with Apache License 2.0
private Table createUnpartitionedTable(String databaseName, String tableName, HiveServer2Core server)
    throws Exception {
  Table table = new Table();
  table.setDbName(databaseName);
  table.setTableName(tableName);
  table.setSd(new StorageDescriptor());
  table.getSd().setCols(Arrays.asList(new FieldSchema("id", "int", null), new FieldSchema("name", "string", null)));
  table.getSd().setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  table.getSd().setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
  table.getSd().setSerdeInfo(new SerDeInfo());
  table.getSd().getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
  HiveMetaStoreClient client = server.getCore().newClient();
  client.createTable(table);
  client.close();
  return table;
}
 
Example 5
Source File: HiveServer2CoreTest.java    From beeju with Apache License 2.0
private Table createPartitionedTable(String databaseName, String tableName, HiveServer2Core server) throws Exception {
  Table table = new Table();
  table.setDbName(databaseName);
  table.setTableName(tableName);
  table.setPartitionKeys(Arrays.asList(new FieldSchema("partcol", "int", null)));
  table.setSd(new StorageDescriptor());
  table.getSd().setCols(Arrays.asList(new FieldSchema("id", "int", null), new FieldSchema("name", "string", null)));
  table.getSd().setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  table.getSd().setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
  table.getSd().setSerdeInfo(new SerDeInfo());
  table.getSd().getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
  HiveMetaStoreClient client = server.getCore().newClient();
  client.createTable(table);
  client.close();
  return table;
}
 
Example 6
Source File: TestMetaStoreWithPigHCat.java    From incubator-sentry with Apache License 2.0
@Before
public void setup() throws Exception {
  dataFile = new File(dataDir, SINGLE_TYPE_DATA_FILE_NAME);
  FileOutputStream to = new FileOutputStream(dataFile);
  Resources.copy(Resources.getResource(SINGLE_TYPE_DATA_FILE_NAME), to);
  to.close();

  policyFile = setAdminOnServer1(ADMINGROUP);
  policyFile
      .addRolesToGroup(USERGROUP1, db_all_role)
      .addRolesToGroup(USERGROUP2, "read_db_role")
      .addPermissionsToRole(db_all_role, "server=server1->db=" + dbName)
      .addPermissionsToRole("read_db_role",
          "server=server1->db=" + dbName + "->table=" + tabName2 + "->action=SELECT")
      .setUserGroupMapping(StaticUserGroup.getStaticMapping());
  writePolicyFile(policyFile);

  HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
  client.dropDatabase(dbName, true, true, true);
  createMetastoreDB(client, dbName);
  client.close();
}
 
Example 7
Source File: HiveClientPool.java    From iceberg with Apache License 2.0
@Override
protected HiveMetaStoreClient reconnect(HiveMetaStoreClient client) {
  try {
    client.close();
    client.reconnect();
  } catch (MetaException e) {
    throw new RuntimeMetaException(e, "Failed to reconnect to Hive Metastore");
  }
  return client;
}
 
Example 8
Source File: BeejuCore.java    From beeju with Apache License 2.0
/**
 * Create a new database with the specified name.
 *
 * @param databaseName Database name.
 * @throws TException If an error occurs creating the database.
 */
public void createDatabase(String databaseName) throws TException {
  File tempFile = tempDir.toFile();
  HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf(conf));
  String databaseFolder = new File(tempFile, databaseName).toURI().toString();
  try {
    client.createDatabase(new Database(databaseName, null, databaseFolder, null));
  } finally {
    client.close();
  }
}
 
Example 9
Source File: BeejuCoreTest.java    From beeju with Apache License 2.0
@Test
public void createDatabase() throws Exception {
  String databaseName = "Another_DB";

  defaultCore.createDatabase(databaseName);
  HiveMetaStoreClient client = defaultCore.newClient();
  Database db = client.getDatabase(databaseName);
  client.close();

  assertThat(db, is(notNullValue()));
  assertThat(db.getName(), is(databaseName.toLowerCase()));
  assertThat(db.getLocationUri(), is(String.format("file:%s/%s", defaultCore.tempDir(), databaseName)));
}
 
Example 10
Source File: HiveServer2CoreTest.java    From beeju with Apache License 2.0
@Test
public void createTable() throws Exception {
  HiveServer2Core server = setupServer();
  String tableName = "my_test_table";

  try (Connection connection = DriverManager.getConnection(server.getJdbcConnectionUrl());
      Statement statement = connection.createStatement()) {
    String createHql = new StringBuilder()
        .append("CREATE TABLE `" + DATABASE + "." + tableName + "`(`id` int, `name` string) ")
        .append("ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ")
        .append("STORED AS INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat' ")
        .append("OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'")
        .toString();
    statement.execute(createHql);
  }

  HiveMetaStoreClient client = server.getCore().newClient();
  Table table = client.getTable(DATABASE, tableName);
  client.close();
  assertThat(table.getDbName(), is(DATABASE));
  assertThat(table.getTableName(), is(tableName));
  assertThat(table.getSd().getCols(),
      is(Arrays.asList(new FieldSchema("id", "int", null), new FieldSchema("name", "string", null))));
  assertThat(table.getSd().getInputFormat(), is("org.apache.hadoop.mapred.TextInputFormat"));
  assertThat(table.getSd().getOutputFormat(), is("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"));
  assertThat(table.getSd().getSerdeInfo().getSerializationLib(),
      is("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"));
  server.shutdown();
}
 
Example 11
Source File: HiveServer2CoreTest.java    From beeju with Apache License 2.0
@Test
public void dropPartition() throws Exception {
  HiveServer2Core server = setupServer();
  String tableName = "my_table";
  HiveMetaStoreClient client = server.getCore().newClient();

  try {
    Table table = createPartitionedTable(DATABASE, tableName, server);

    Partition partition = new Partition();
    partition.setDbName(DATABASE);
    partition.setTableName(tableName);
    partition.setValues(Arrays.asList("1"));
    partition.setSd(new StorageDescriptor(table.getSd()));
    partition.getSd().setLocation(
        String.format("file:%s/%s/%s/partcol=1", server.getCore().tempDir(), DATABASE, tableName));
    client.add_partition(partition);

    try (Connection connection = DriverManager.getConnection(server.getJdbcConnectionUrl());
        Statement statement = connection.createStatement()) {
      String dropPartitionHql = String.format("ALTER TABLE %s.%s DROP PARTITION (partcol=1)", DATABASE, tableName);
      statement.execute(dropPartitionHql);
    }

    List<Partition> partitions = client.listPartitions(DATABASE, tableName, (short) -1);
    assertThat(partitions.size(), is(0));
  } finally {
    client.close();
  }
  server.shutdown();
}
 
Example 12
Source File: TestMetastoreEndToEnd.java    From incubator-sentry with Apache License 2.0
@Override
@Before
public void setup() throws Exception {
  policyFile = setAdminOnServer1(ADMINGROUP);
  policyFile.setUserGroupMapping(StaticUserGroup.getStaticMapping());
  writePolicyFile(policyFile);
  super.setup();

  dataFile = new File(dataDir, SINGLE_TYPE_DATA_FILE_NAME);
  FileOutputStream to = new FileOutputStream(dataFile);
  Resources.copy(Resources.getResource(SINGLE_TYPE_DATA_FILE_NAME), to);
  to.close();

  HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
  client.dropDatabase(dbName, true, true, true);
  createMetastoreDB(client, dbName);
  client.close();

  policyFile
          .addRolesToGroup(USERGROUP1, db_all_role)
          .addRolesToGroup(USERGROUP2, "read_db_role")
          .addRolesToGroup(USERGROUP2, tab1_all_role)
          .addRolesToGroup(USERGROUP2, tab2_all_role)
          .addRolesToGroup(USERGROUP3, tab1_read_role)
          .addRolesToGroup(USERGROUP3, tab2_read_role)
          .addPermissionsToRole(db_all_role, "server=server1->db=" + dbName)
          .addPermissionsToRole("read_db_role",
                  "server=server1->db=" + dbName + "->action=SELECT")
          .addPermissionsToRole(tab1_all_role,
                  "server=server1->db=" + dbName + "->table=" + tabName1)
          .addPermissionsToRole(tab2_all_role,
                  "server=server1->db=" + dbName + "->table=" + tabName2)
          .addPermissionsToRole(tab1_read_role,
                  "server=server1->db=" + dbName + "->table=" + tabName1 + "->action=SELECT")
          .addPermissionsToRole(tab2_read_role,
                  "server=server1->db=" + dbName + "->table=" + tabName2 + "->action=SELECT")
          .setUserGroupMapping(StaticUserGroup.getStaticMapping());
  writePolicyFile(policyFile);
}
 
Example 13
Source File: TestMetastoreEndToEnd.java    From incubator-sentry with Apache License 2.0
/**
 * Verify alter table privileges
 * @throws Exception
 */
@Test
public void testAlterTablePrivileges() throws Exception {

  HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
  createMetastoreTable(client, dbName, tabName1,
      Lists.newArrayList(new FieldSchema("col1", "int", "")));
  client.close();

  // verify group1 users with DDL privileges can alter tables in db_1
  client = context.getMetaStoreClient(USER1_1);
  Table metaTable2 = client.getTable(dbName, tabName1);
  metaTable2.getSd().setCols(
      Lists.newArrayList(new FieldSchema("col2", "double", "")));
  client.alter_table(dbName, tabName1, metaTable2);
  Table metaTable3 = client.getTable(dbName, tabName1);
  assertEquals(metaTable2.getSd().getCols(), metaTable3.getSd().getCols());

  // verify group2 users with DDL privileges can alter tables in db_1
  client = context.getMetaStoreClient(USER2_1);
  metaTable2 = client.getTable(dbName, tabName1);
  metaTable2.getSd().setCols(
      Lists.newArrayList(new FieldSchema("col3", "string", "")));
  client.alter_table(dbName, tabName1, metaTable2);
  metaTable3 = client.getTable(dbName, tabName1);
  assertEquals(metaTable2.getSd().getCols(), metaTable3.getSd().getCols());

  // verify group3 users can't alter tables in db_1
  client = context.getMetaStoreClient(USER3_1);
  metaTable2 = client.getTable(dbName, tabName1);
  metaTable2.getSd().setCols(
      Lists.newArrayList(new FieldSchema("col3", "string", "")));
  try {
    client.alter_table(dbName, tabName1, metaTable2);
    fail("alter table should have failed for non-privilege user");
  } catch (MetaException e) {
    Context.verifyMetastoreAuthException(e);
  }
  client.close();
}
 
Example 14
Source File: TestMetaStoreWithPigHCat.java    From incubator-sentry with Apache License 2.0
/**
 * Verify add partition via Pig+HCatStore
 *
 * *** Disabled due to HCat inputformat compatibility issue in Hive 1.1.0
 */
@Ignore
@Test
public void testPartionLoad() throws Exception {
  execHiveSQL("CREATE TABLE " + dbName + "." + tabName1
      + " (id int) PARTITIONED BY (part_col STRING)", ADMIN1);
  execHiveSQL("CREATE TABLE " + dbName + "." + tabName2
      + " (id int) PARTITIONED BY (part_col STRING)", ADMIN1);

  // user with ALL on DB should be able to add partition using Pig/HCatStore
  PigServer pigServer = context.getPigServer(USER1_1, ExecType.LOCAL);
  execPigLatin(USER1_1, pigServer, "A = load '" + dataFile.getPath()
      + "' as (id:int);");
  execPigLatin(USER1_1, pigServer, "store A into '" + dbName + "." + tabName1
      + "' using " + HCatStorer.class.getName() + " ('part_col=part1');");
  HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
  assertEquals(1, client.listPartitionNames(dbName, tabName1, (short) 10)
      .size());

  // user without select on DB should NOT be able to add partition with Pig/HCatStore
  pigServer = context.getPigServer(USER2_1, ExecType.LOCAL);
  execPigLatin(USER2_1, pigServer, "A = load '" + dataFile.getPath()
      + "' as (id:int);");
  // This action won't succeed because the user lacks permission, but no exception will be
  // thrown in this thread. The detailed exception can be found in
  // sentry-tests/sentry-tests-hive/target/surefire-reports/org.apache.sentry.tests.e2e.metastore.TestMetaStoreWithPigHCat-output.txt.
  execPigLatin(USER2_1, pigServer, "store A into '" + dbName + "." + tabName2 + "' using "
      + HCatStorer.class.getName() + " ('part_col=part2');");
  // The previous action failed, so there is no data.
  assertEquals(0, client.listPartitionNames(dbName, tabName2, (short) 10).size());
  client.close();
}
 
Example 15
Source File: TestMetastoreEndToEnd.java    From incubator-sentry with Apache License 2.0
private void verifyPartitionExists(String dbName, String tabName,
    String partVal) throws Exception {
  HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
  Partition newPartition = client.getPartition(dbName, tabName,
      Lists.newArrayList(partVal));
  Assert.assertNotNull(newPartition);
  client.close();
}
 
Example 16
Source File: TestAuthorizingObjectStore.java    From incubator-sentry with Apache License 2.0
@Override
@Before
public void setup() throws Exception {
  policyFile = setAdminOnServer1(ADMINGROUP);
  // add user ACCESSAllMETAUSER for the test case testPrivilegesForUserNameCaseSensitive
  policyFile.addGroupsToUser(userWithoutAccess.toUpperCase(), "tempGroup").setUserGroupMapping(
      StaticUserGroup.getStaticMapping());
  writePolicyFile(policyFile);
  super.setup();

  HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
  client.dropDatabase(dbName1, true, true, true);
  client.dropDatabase(dbName2, true, true, true);
  createMetastoreDB(client, dbName1);
  createMetastoreDB(client, dbName2);

  Table tbl1 = createMetastoreTableWithPartition(client, dbName1, tabName1,
      Lists.newArrayList(new FieldSchema("col1", "int", "")),
      Lists.newArrayList(new FieldSchema("part_col1", "string", "")));
  addPartition(client, dbName1, tabName1, Lists.newArrayList(partitionVal), tbl1);

  Table tbl2 = createMetastoreTableWithPartition(client, dbName1, tabName2,
      Lists.newArrayList(new FieldSchema("col1", "int", "")),
      Lists.newArrayList(new FieldSchema("part_col1", "string", "")));
  addPartition(client, dbName1, tabName2, Lists.newArrayList(partitionVal), tbl2);

  Table tbl3 = createMetastoreTableWithPartition(client, dbName2, tabName3,
      Lists.newArrayList(new FieldSchema("col1", "int", "")),
      Lists.newArrayList(new FieldSchema("part_col1", "string", "")));
  addPartition(client, dbName2, tabName3, Lists.newArrayList(partitionVal), tbl3);

  Table tbl4 = createMetastoreTableWithPartition(client, dbName2, tabName4,
      Lists.newArrayList(new FieldSchema("col1", "int", "")),
      Lists.newArrayList(new FieldSchema("part_col1", "string", "")));
  addPartition(client, dbName2, tabName4, Lists.newArrayList(partitionVal), tbl4);

  client.close();

  policyFile
      .addRolesToGroup(USERGROUP1, all_role)
      .addRolesToGroup(USERGROUP2, db1_t1_role)
      .addPermissionsToRole(all_role, "server=server1->db=" + dbName1)
      .addPermissionsToRole(all_role, "server=server1->db=" + dbName2)
      .addPermissionsToRole(all_role,
          "server=server1->db=" + dbName1 + "->table=" + tabName1 + "->action=SELECT")
      .addPermissionsToRole(all_role,
          "server=server1->db=" + dbName1 + "->table=" + tabName2 + "->action=SELECT")
      .addPermissionsToRole(all_role,
          "server=server1->db=" + dbName2 + "->table=" + tabName3 + "->action=SELECT")
      .addPermissionsToRole(all_role,
          "server=server1->db=" + dbName2 + "->table=" + tabName4 + "->action=SELECT")
      .addPermissionsToRole(db1_t1_role,
          "server=server1->db=" + dbName1 + "->table=" + tabName1 + "->action=SELECT")
      .setUserGroupMapping(StaticUserGroup.getStaticMapping());
  writePolicyFile(policyFile);
}
 
Example 17
Source File: TestMetastoreEndToEnd.java    From incubator-sentry with Apache License 2.0
/**
 * Verify alter partition privileges
 * TODO: There seems to be some inconsistency with ALTER PARTITION: it is only
 * allowed with SERVER privilege. If we allow add/drop partition with DB-level
 * privilege, then this should also be at the same level.
 * @throws Exception
 */
@Test
public void testAlterSetLocationPrivileges() throws Exception {
  String newPath1 = "fooTab1";
  ArrayList<String> partVals1 = Lists.newArrayList("part1");
  ArrayList<String> partVals2 = Lists.newArrayList("part2");
  String tabDir1 = hiveServer.getProperty(HiveServerFactory.WAREHOUSE_DIR)
      + File.separator + newPath1;

  policyFile.addRolesToGroup(USERGROUP1, uri_role)
      .addRolesToGroup(USERGROUP2, uri_role)
      .addPermissionsToRole(uri_role, "server=server1->URI=" + tabDir1);
  writePolicyFile(policyFile);

  HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
  Table tbl1 = createMetastoreTableWithPartition(client, dbName,
      tabName1, Lists.newArrayList(new FieldSchema("col1", "int", "")),
      Lists.newArrayList(new FieldSchema("part_col1", "string", "")));
  addPartition(client, dbName, tabName1, partVals1, tbl1);
  tbl1 = client.getTable(dbName, tabName1);
  addPartition(client, dbName, tabName1, partVals2, tbl1);
  client.close();

  // user with DB and URI privileges should be able to alter partition set location
  client = context.getMetaStoreClient(USER1_1);
  Partition newPartition = client.getPartition(dbName, tabName1, partVals1);
  newPartition.getSd().setLocation(tabDir1);
  client.alter_partition(dbName, tabName1, newPartition);
  client.close();

  // user with Table and URI privileges should be able to alter partition set location
  client = context.getMetaStoreClient(USER2_1);
  newPartition = client.getPartition(dbName, tabName1, partVals2);
  newPartition.getSd().setLocation(tabDir1);
  client.alter_partition(dbName, tabName1, newPartition);
  client.close();

  policyFile.addRolesToGroup(USERGROUP3, db_all_role);
  writePolicyFile(policyFile);
  // user without URI privileges should not be able to alter partition set location
  client = context.getMetaStoreClient(USER3_1);
  newPartition = client.getPartition(dbName, tabName1, partVals2);
  newPartition.getSd().setLocation(tabDir1);
  try {
    client.alter_partition(dbName, tabName1, newPartition);
    fail("alter partition with location should have failed");
  } catch (MetaException e) {
    Context.verifyMetastoreAuthException(e);
  }
  client.close();
}
 
Example 18
Source File: TestMetastoreEndToEnd.java    From incubator-sentry with Apache License 2.0
/**
 * Verify URI privileges for alter table
 * @throws Exception
 */
@Test
public void testUriPartitionPrivileges() throws Exception {
  String tabName1 = "tab1";
  String newPath1 = "fooTab1";
  String newPath2 = "fooTab2";
  ArrayList<String> partVals1 = Lists.newArrayList("part1");
  ArrayList<String> partVals2 = Lists.newArrayList("part2");
  ArrayList<String> partVals3 = Lists.newArrayList("part2");

  String tabDir1 = hiveServer.getProperty(HiveServerFactory.WAREHOUSE_DIR)
      + File.separator + newPath1;
  String tabDir2 = hiveServer.getProperty(HiveServerFactory.WAREHOUSE_DIR)
      + File.separator + newPath2;
  policyFile.addRolesToGroup(USERGROUP1, uri_role)
      .addRolesToGroup(USERGROUP2, db_all_role)
      .addPermissionsToRole(uri_role, "server=server1->URI=" + tabDir1)
      .addPermissionsToRole(uri_role, "server=server1->URI=" + tabDir2);
  writePolicyFile(policyFile);

  // user with URI privileges should be able to alter partition to set that specific location
  HiveMetaStoreClient client = context.getMetaStoreClient(USER1_1);
  Table tbl1 = createMetastoreTableWithPartition(client, dbName,
      tabName1, Lists.newArrayList(new FieldSchema("col1", "int", "")),
      Lists.newArrayList(new FieldSchema("part_col1", "string", "")));
  addPartition(client, dbName, tabName1, partVals1, tbl1);
  addPartitionWithLocation(client, dbName, tabName1, partVals2, tbl1,
      tabDir1);
  client.close();

  // user without URI privileges should be NOT able to alter partition to set
  // that specific location
  client = context.getMetaStoreClient(USER2_1);
  try {
    tbl1 = client.getTable(dbName, tabName1);
    addPartitionWithLocation(client, dbName, tabName1, partVals3,
        tbl1, tabDir2);
    fail("Add partition with location should have failed");
  } catch (MetaException e) {
    Context.verifyMetastoreAuthException(e);
  }
  client.close();
}
 
Example 19
Source File: HiveClientPool.java    From iceberg with Apache License 2.0
@Override
protected void close(HiveMetaStoreClient client) {
  client.close();
}