Java Code Examples for org.apache.hadoop.hive.metastore.api.Table#setSd()

The following examples show how to use org.apache.hadoop.hive.metastore.api.Table#setSd(). They are taken from open-source projects; the project and source file are noted above each example.
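Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: build a StorageDescriptor, populate its columns and SerDe, and attach it to the Table with setSd(). The class name SetSdSketch and the single "id" column are illustrative assumptions, not taken from any project below.

import java.util.Arrays;
import java.util.HashMap;

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;

public class SetSdSketch {

  // Hypothetical helper: builds a Table whose StorageDescriptor carries the
  // column schema, SerDe info, and parameters, then attaches it via setSd().
  public static Table newTableWithSd(String dbName, String tableName) {
    Table table = new Table();
    table.setDbName(dbName);
    table.setTableName(tableName);

    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(Arrays.asList(new FieldSchema("id", "int", null)));
    sd.setSerdeInfo(new SerDeInfo());
    sd.setParameters(new HashMap<String, String>());

    // Attach the storage descriptor to the table.
    table.setSd(sd);
    return table;
  }
}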
Example 1
Source File: DynamoDBStorageHandlerTest.java    From emr-dynamodb-connector with Apache License 2.0
@Test
public void testCheckTableSchemaTypeInvalidHashKeyType() throws MetaException {
  TableDescription description = getHashRangeTable();

  Table table = new Table();
  Map<String, String> parameters = Maps.newHashMap();
  parameters.put(DynamoDBConstants.DYNAMODB_COLUMN_MAPPING, "col1:dynamo_col1$," +
      "col2:dynamo_col2#,hashKey:hashKey");
  table.setParameters(parameters);
  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> cols = Lists.newArrayList();
  cols.add(new FieldSchema("col1", "string", ""));
  cols.add(new FieldSchema("col2", "bigint", ""));
  cols.add(new FieldSchema("hashKey", "map<string,string>", ""));
  sd.setCols(cols);
  table.setSd(sd);

  exceptionRule.expect(MetaException.class);
  exceptionRule.expectMessage("The key element hashKey does not match type. DynamoDB Type: S " +
      "Hive type: " + "map<string,string>");
  storageHandler.checkTableSchemaType(description, table);
}
 
Example 2
Source File: CatalogToHiveConverter.java    From aws-glue-data-catalog-client-for-apache-hive-metastore with Apache License 2.0
public static Table convertTable(com.amazonaws.services.glue.model.Table catalogTable, String dbname) {
  Table hiveTable = new Table();
  hiveTable.setDbName(dbname);
  hiveTable.setTableName(catalogTable.getName());
  Date createTime = catalogTable.getCreateTime();
  hiveTable.setCreateTime(createTime == null ? 0 : (int) (createTime.getTime() / 1000));
  hiveTable.setOwner(catalogTable.getOwner());
  Date lastAccessedTime = catalogTable.getLastAccessTime();
  hiveTable.setLastAccessTime(lastAccessedTime == null ? 0 : (int) (lastAccessedTime.getTime() / 1000));
  hiveTable.setRetention(catalogTable.getRetention());
  hiveTable.setSd(convertStorageDescriptor(catalogTable.getStorageDescriptor()));
  hiveTable.setPartitionKeys(convertFieldSchemaList(catalogTable.getPartitionKeys()));
  // Hive may throw an NPE during dropTable if the parameter map is null.
  Map<String, String> parameterMap = catalogTable.getParameters();
  if (parameterMap == null) {
    parameterMap = Maps.newHashMap();
  }
  hiveTable.setParameters(parameterMap);
  hiveTable.setViewOriginalText(catalogTable.getViewOriginalText());
  hiveTable.setViewExpandedText(catalogTable.getViewExpandedText());
  hiveTable.setTableType(catalogTable.getTableType());

  return hiveTable;
}
 
Example 3
Source File: DynamoDBStorageHandlerTest.java    From emr-dynamodb-connector with Apache License 2.0
@Test
public void testCheckTableSchemaMappingMissingColumn() throws MetaException {
  TableDescription description = getHashRangeTable();

  Table table = new Table();
  Map<String, String> parameters = Maps.newHashMap();
  parameters.put(DynamoDBConstants.DYNAMODB_COLUMN_MAPPING, "col1:dynamo_col1$,hashMap:hashMap");
  table.setParameters(parameters);
  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> cols = Lists.newArrayList();
  cols.add(new FieldSchema("col1", "string", ""));
  cols.add(new FieldSchema("col2", "tinyint", ""));
  cols.add(new FieldSchema("col3", "string", ""));
  cols.add(new FieldSchema("hashMap", "map<string,string>", ""));
  sd.setCols(cols);
  table.setSd(sd);

  exceptionRule.expect(MetaException.class);
  exceptionRule.expectMessage("Could not find column mapping for column: col2");
  storageHandler.checkTableSchemaMapping(description, table);
}
 
Example 4
Source File: DestructiveReplicaTest.java    From circus-train with Apache License 2.0
@Before
public void setUp() {
  SourceTable sourceTable = new SourceTable();
  sourceTable.setDatabaseName(DATABASE);
  sourceTable.setTableName(TABLE);
  tableReplication.setSourceTable(sourceTable);
  ReplicaTable replicaTable = new ReplicaTable();
  replicaTable.setDatabaseName(DATABASE);
  replicaTable.setTableName(REPLICA_TABLE);
  tableReplication.setReplicaTable(replicaTable);
  when(replicaMetaStoreClientSupplier.get()).thenReturn(client);
  replica = new DestructiveReplica(replicaMetaStoreClientSupplier, cleanupLocationManager, tableReplication);

  table = new Table();
  table.setDbName(DATABASE);
  table.setTableName(REPLICA_TABLE);
  table.setPartitionKeys(Lists.newArrayList(new FieldSchema("part1", "string", "")));
  Map<String, String> parameters = new HashMap<>();
  parameters.put(CircusTrainTableParameter.SOURCE_TABLE.parameterName(), DATABASE + "." + TABLE);
  parameters.put(REPLICATION_EVENT.parameterName(), EVENT_ID);
  table.setParameters(parameters);
  StorageDescriptor sd1 = new StorageDescriptor();
  sd1.setLocation(tableLocation.toString());
  table.setSd(sd1);
}
 
Example 5
Source File: DynamoDBStorageHandlerTest.java    From emr-dynamodb-connector with Apache License 2.0
@Test
public void testCheckTableSchemaTypeMappingValid() throws MetaException {
  TableDescription description = getHashRangeTable();

  Table table = new Table();
  Map<String, String> parameters = Maps.newHashMap();
  parameters.put(DynamoDBConstants.DYNAMODB_COLUMN_MAPPING, "col1:dynamo_col1$," +
          "col2:dynamo_col2#,hashKey:hashKey");
  parameters.put(DynamoDBConstants.DYNAMODB_TYPE_MAPPING, "col2:NS");
  table.setParameters(parameters);
  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> cols = Lists.newArrayList();
  cols.add(new FieldSchema("col1", "string", ""));
  cols.add(new FieldSchema("col2", "array<bigint>", ""));
  cols.add(new FieldSchema("hashKey", "string", ""));
  sd.setCols(cols);
  table.setSd(sd);

  // This check is expected to pass for the given input
  storageHandler.checkTableSchemaType(description, table);
}
 
Example 6
Source File: HiveServer2CoreTest.java    From beeju with Apache License 2.0
private Table createUnpartitionedTable(String databaseName, String tableName, HiveServer2Core server)
    throws Exception {
  Table table = new Table();
  table.setDbName(databaseName);
  table.setTableName(tableName);
  table.setSd(new StorageDescriptor());
  table.getSd().setCols(Arrays.asList(new FieldSchema("id", "int", null), new FieldSchema("name", "string", null)));
  table.getSd().setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  table.getSd().setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
  table.getSd().setSerdeInfo(new SerDeInfo());
  table.getSd().getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
  HiveMetaStoreClient client = server.getCore().newClient();
  client.createTable(table);
  client.close();
  return table;
}
 
Example 7
Source File: TestUtils.java    From circus-train with Apache License 2.0
private static Table createView(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String view,
    String table,
    List<FieldSchema> partitionCols)
  throws TException {
  Table hiveView = new Table();
  hiveView.setDbName(database);
  hiveView.setTableName(view);
  hiveView.setTableType(TableType.VIRTUAL_VIEW.name());
  hiveView.setViewOriginalText(hql(database, table));
  hiveView.setViewExpandedText(expandHql(database, table, DATA_COLUMNS, partitionCols));
  hiveView.setPartitionKeys(partitionCols);

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  hiveView.setSd(sd);

  metaStoreClient.createTable(hiveView);

  return hiveView;
}
 
Example 8
Source File: DynamoDBStorageHandlerTest.java    From emr-dynamodb-connector with Apache License 2.0
@Test
public void testCheckTableSchemaTypeValid() throws MetaException {
  TableDescription description = getHashRangeTable();

  Table table = new Table();
  Map<String, String> parameters = Maps.newHashMap();
  parameters.put(DynamoDBConstants.DYNAMODB_COLUMN_MAPPING, "col1:dynamo_col1$," +
      "col2:dynamo_col2#,hashKey:hashKey");
  table.setParameters(parameters);
  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> cols = Lists.newArrayList();
  cols.add(new FieldSchema("col1", "string", ""));
  cols.add(new FieldSchema("col2", "bigint", ""));
  cols.add(new FieldSchema("hashKey", "string", ""));
  sd.setCols(cols);
  table.setSd(sd);
  // This check is expected to pass for the given input
  storageHandler.checkTableSchemaType(description, table);
}
 
Example 9
Source File: TableTransformationTest.java    From circus-train with Apache License 2.0
@Before
public void init() {
  table = new Table();
  table.setDbName("database");
  table.setTableName("table");
  table.setTableType("type");

  Map<String, List<PrivilegeGrantInfo>> userPrivileges = new HashMap<>();
  userPrivileges.put("read", ImmutableList.of(new PrivilegeGrantInfo()));
  PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet();
  privileges.setUserPrivileges(userPrivileges);
  table.setPrivileges(privileges);

  StorageDescriptor storageDescriptor = new StorageDescriptor();
  storageDescriptor.setCols(Arrays.asList(new FieldSchema("a", "int", null)));
  storageDescriptor.setInputFormat("input_format");
  storageDescriptor.setOutputFormat("output_format");
  storageDescriptor.setSerdeInfo(new SerDeInfo("serde", "lib", new HashMap<String, String>()));
  storageDescriptor.setSkewedInfo(new SkewedInfo());
  storageDescriptor.setParameters(new HashMap<String, String>());
  storageDescriptor.setLocation("database/table/");
  table.setSd(storageDescriptor);

  Map<String, String> parameters = new HashMap<>();
  parameters.put("com.company.parameter", "abc");
  table.setParameters(parameters);
}
 
Example 10
Source File: TestUtils.java    From circus-train with Apache License 2.0
public static Table createUnpartitionedTable(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String table,
    URI location)
  throws TException {
  Table hiveTable = new Table();
  hiveTable.setDbName(database);
  hiveTable.setTableName(table);
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.putToParameters("EXTERNAL", "TRUE");

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setLocation(location.toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setInputFormat(TextInputFormat.class.getName());
  sd.setOutputFormat(TextOutputFormat.class.getName());
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.OpenCSVSerde");

  hiveTable.setSd(sd);

  metaStoreClient.createTable(hiveTable);

  ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, database, table);
  ColumnStatisticsData statsData = new ColumnStatisticsData(_Fields.LONG_STATS, new LongColumnStatsData(1L, 2L));
  ColumnStatisticsObj cso1 = new ColumnStatisticsObj("id", "bigint", statsData);
  List<ColumnStatisticsObj> statsObj = Collections.singletonList(cso1);
  metaStoreClient.updateTableColumnStatistics(new ColumnStatistics(statsDesc, statsObj));

  return hiveTable;
}
 
Example 11
Source File: HiveEntityFactory.java    From circus-train with Apache License 2.0
public static Table newTable(String name, String dbName, List<FieldSchema> partitionKeys, StorageDescriptor sd) {
  Table table = new Table();
  table.setTableName(name);
  table.setDbName(dbName);
  table.setSd(sd);
  table.setPartitionKeys(partitionKeys);
  table.setTableType(TableType.EXTERNAL_TABLE.name());
  table.setParameters(new HashMap<String, String>());
  return table;
}
 
Example 12
Source File: CircusTrainTest.java    From circus-train with Apache License 2.0
@Before
public void before() throws TException, IOException {
  Table table = new Table();
  table.setDbName(DATABASE);
  table.setTableName("source_" + TABLE);
  table.setTableType(TableType.EXTERNAL_TABLE.name());
  table.putToParameters("EXTERNAL", "TRUE");

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(Arrays.asList(new FieldSchema("col1", "string", null)));
  sd.setSerdeInfo(new SerDeInfo());
  table.setSd(sd);

  hive.client().createTable(table);
}
 
Example 13
Source File: HiveMetaStoreServiceJdbcImpl.java    From griffin with Apache License 2.0
@Override
@Cacheable(unless = "#result==null")
public Table getTable(String dbName, String tableName) {
    Table result = new Table();
    result.setDbName(dbName);
    result.setTableName(tableName);

    String sql = SHOW_CREATE_TABLE + dbName + "." + tableName;
    Statement stmt = null;
    ResultSet rs = null;
    StringBuilder sb = new StringBuilder();

    try {
        Class.forName(hiveClassName);
        if (conn == null) {
            conn = DriverManager.getConnection(hiveUrl);
        }
        LOGGER.info("got connection");

        stmt = conn.createStatement();
        rs = stmt.executeQuery(sql);
        while (rs.next()) {
            String s = rs.getString(1);
            sb.append(s);
        }
        String location = getLocation(sb.toString());
        List<FieldSchema> cols = getColums(sb.toString());
        StorageDescriptor sd = new StorageDescriptor();
        sd.setLocation(location);
        sd.setCols(cols);
        result.setSd(sd);
    } catch (Exception e) {
        LOGGER.error("Query Hive Table metadata has error. {}", e.getMessage());
    } finally {
        closeConnection(stmt, rs);
    }
    return result;
}
 
Example 14
Source File: TestUtils.java    From circus-train with Apache License 2.0
public static Table createPartitionedTable(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String table,
    URI location,
    List<FieldSchema> columns,
    List<FieldSchema> partitionKeys,
    String serializationLib,
    String inputFormatClassName,
    String outputFormatClassName)
    throws Exception {

  Table hiveTable = new Table();
  hiveTable.setDbName(database);
  hiveTable.setTableName(table);
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.putToParameters("EXTERNAL", "TRUE");

  hiveTable.setPartitionKeys(partitionKeys);

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(columns);
  sd.setLocation(location.toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setInputFormat(inputFormatClassName);
  sd.setOutputFormat(outputFormatClassName);
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setSerializationLib(serializationLib);

  hiveTable.setSd(sd);

  metaStoreClient.createTable(hiveTable);

  ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, database, table);
  ColumnStatisticsData statsData = new ColumnStatisticsData(_Fields.LONG_STATS, new LongColumnStatsData(1L, 2L));
  ColumnStatisticsObj cso1 = new ColumnStatisticsObj("id", "bigint", statsData);
  List<ColumnStatisticsObj> statsObj = Collections.singletonList(cso1);
  metaStoreClient.updateTableColumnStatistics(new ColumnStatistics(statsDesc, statsObj));

  return hiveTable;
}
 
Example 15
Source File: ComparisonToolIntegrationTest.java    From circus-train with Apache License 2.0
private void createSourceTable() throws Exception {
  File partitionEurope = new File(sourceTableUri, "local_date=2000-01-01");
  File partitionUk = new File(partitionEurope, "local_hour=0");
  File dataFileUk = new File(partitionUk, PART_00000);
  FileUtils.writeStringToFile(dataFileUk, "1\tadam\tlondon\n2\tsusan\tglasgow\n");

  File partitionAsia = new File(sourceTableUri, "local_date=2000-01-02");
  File partitionChina = new File(partitionAsia, "local_hour=0");
  File dataFileChina = new File(partitionChina, PART_00000);
  String data = "1\tchun\tbeijing\n2\tshanghai\tmilan\n";
  FileUtils.writeStringToFile(dataFileChina, data);

  HiveMetaStoreClient sourceClient = catalog.client();

  Table source = new Table();
  source.setDbName(DATABASE);
  source.setTableName(SOURCE_TABLE);
  source.setTableType(TableType.EXTERNAL_TABLE.name());
  Map<String, String> parameters = new HashMap<>();
  parameters.put("comment", "comment source");
  source.setParameters(parameters);

  List<FieldSchema> partitionColumns = Arrays.asList(new FieldSchema("local_date", "string", ""),
      new FieldSchema("local_hour", "string", ""));
  source.setPartitionKeys(partitionColumns);

  List<FieldSchema> dataColumns = Arrays.asList(new FieldSchema("id", "bigint", ""),
      new FieldSchema("name", "string", ""), new FieldSchema("city", "string", ""));

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(dataColumns);
  sd.setLocation(sourceTableUri.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());

  source.setSd(sd);

  sourceClient.createTable(source);
  LOG.info(">>>> Partitions added: {}",
      +sourceClient
          .add_partitions(Arrays.asList(newPartition(SOURCE_TABLE, sd, Arrays.asList("2000-01-01", "0"), partitionUk),
              newPartition(SOURCE_TABLE, sd, Arrays.asList("2000-01-02", "0"), partitionChina))));
}
 
Example 16
Source File: HiveMetadataFetcherTest.java    From pxf with Apache License 2.0
@Test
public void getTableMetadataWithIncompatibleTables() throws Exception {

    fetcher = new HiveMetadataFetcher(context, mockConfigurationFactory, fakeHiveClientWrapper);

    String tablePattern = "*";
    String dbPattern = "*";
    String dbName = "default";
    String pattern = dbPattern + "." + tablePattern;

    String tableName1 = "viewtable";
    // mock hive table returned from hive client
    Table hiveTable1 = new Table();
    hiveTable1.setTableType("VIRTUAL_VIEW");
    when(mockHiveClient.getTable(dbName, tableName1)).thenReturn(hiveTable1);

    String tableName2 = "regulartable";
    // mock hive table returned from hive client
    List<FieldSchema> fields = new ArrayList<>();
    fields.add(new FieldSchema("field1", "string", null));
    fields.add(new FieldSchema("field2", "int", null));
    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(fields);
    sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
    Table hiveTable2 = new Table();
    hiveTable2.setTableType("MANAGED_TABLE");
    hiveTable2.setSd(sd);
    hiveTable2.setPartitionKeys(new ArrayList<>());
    when(mockHiveClient.getTable(dbName, tableName2)).thenReturn(hiveTable2);

    // Mock get databases and tables return from hive client
    List<String> tableNames = new ArrayList<>(Arrays.asList(tableName1, tableName2));
    List<String> dbNames = new ArrayList<>(Collections.singletonList(dbName));
    when(mockHiveClient.getDatabases(dbPattern)).thenReturn(dbNames);
    when(mockHiveClient.getTables(dbName, tablePattern)).thenReturn(tableNames);

    // Get metadata
    metadataList = fetcher.getMetadata(pattern);
    assertEquals(1, metadataList.size());
    Metadata metadata = metadataList.get(0);
    assertEquals(dbName + "." + tableName2, metadata.getItem().toString());

    List<Metadata.Field> resultFields = metadata.getFields();
    assertNotNull(resultFields);
    assertEquals(2, resultFields.size());
    Metadata.Field field = resultFields.get(0);
    assertEquals("field1", field.getName());
    assertEquals("text", field.getType().getTypeName()); // converted type
    field = resultFields.get(1);
    assertEquals("field2", field.getName());
    assertEquals("int4", field.getType().getTypeName());
}
 
Example 17
Source File: HiveConvertersImpl.java    From metacat with Apache License 2.0
/**
 * {@inheritDoc}
 */
@Override
public Table metacatToHiveTable(final TableDto dto) {
    final Table table = new Table();
    final QualifiedName name = dto.getName();
    if (name != null) {
        table.setTableName(name.getTableName());
        table.setDbName(name.getDatabaseName());
    }

    final StorageDto storageDto = dto.getSerde();
    if (storageDto != null) {
        table.setOwner(storageDto.getOwner());
    }

    final AuditDto auditDto = dto.getAudit();
    if (auditDto != null && auditDto.getCreatedDate() != null) {
        table.setCreateTime(dateToEpochSeconds(auditDto.getCreatedDate()));
    }

    Map<String, String> params = new HashMap<>();
    if (dto.getMetadata() != null) {
        params = dto.getMetadata();
    }
    table.setParameters(params);
    updateTableTypeAndViewInfo(dto, table);

    table.setSd(fromStorageDto(storageDto, table.getTableName()));

    final List<FieldDto> fields = dto.getFields();
    if (fields == null) {
        table.setPartitionKeys(Collections.emptyList());
        table.getSd().setCols(Collections.emptyList());
    } else {
        final List<FieldSchema> nonPartitionFields = Lists.newArrayListWithCapacity(fields.size());
        final List<FieldSchema> partitionFields = Lists.newArrayListWithCapacity(fields.size());
        for (FieldDto fieldDto : fields) {
            final FieldSchema f = metacatToHiveField(fieldDto);

            if (fieldDto.isPartition_key()) {
                partitionFields.add(f);
            } else {
                nonPartitionFields.add(f);
            }
        }
        table.setPartitionKeys(partitionFields);
        table.getSd().setCols(nonPartitionFields);
    }
    return table;
}
 
Example 18
Source File: TestHDFSIntegration.java    From incubator-sentry with Apache License 2.0
/**
* SENTRY-1002:
* Ensure the paths with no scheme will not cause NPE during paths update.
*/
@Test
public void testMissingScheme() throws Throwable {

  // In the local test environment, EXTERNAL_SENTRY_SERVICE is false,
  // set the default URI scheme to be hdfs.
  boolean testConfOff = new Boolean(System.getProperty(EXTERNAL_SENTRY_SERVICE, "false"));
  if (!testConfOff) {
    PathsUpdate.getConfiguration().set("fs.defaultFS", "hdfs:///");
  }

  tmpHDFSDir = new Path("/tmp/external");
  if (!miniDFS.getFileSystem().exists(tmpHDFSDir)) {
    miniDFS.getFileSystem().mkdirs(tmpHDFSDir);
  }

  Path partitionDir = new Path("/tmp/external/p1");
  if (!miniDFS.getFileSystem().exists(partitionDir)) {
    miniDFS.getFileSystem().mkdirs(partitionDir);
  }

  String dbName = "db1";
  String tblName = "tab1";
  dbNames = new String[]{dbName};
  roles = new String[]{"admin_role"};
  admin = StaticUserGroup.ADMIN1;

  Connection conn;
  Statement stmt;

  conn = hiveServer2.createConnection("hive", "hive");
  stmt = conn.createStatement();
  stmt.execute("create role admin_role");
  stmt.execute("grant all on server server1 to role admin_role");
  stmt.execute("grant role admin_role to group " + StaticUserGroup.ADMINGROUP);
  stmt.close();
  conn.close();

  conn = hiveServer2.createConnection(StaticUserGroup.ADMIN1, StaticUserGroup.ADMIN1);
  stmt = conn.createStatement();
  stmt.execute("create database " + dbName);
  stmt.execute("create external table " + dbName + "." + tblName + "(s string) location '/tmp/external/p1'");

  // Deep copy of table tab1
  Table tbCopy = hmsClient.getTable(dbName, tblName);

  // Change the location of the table to strip the scheme.
  StorageDescriptor sd = hmsClient.getTable(dbName, tblName).getSd();
  sd.setLocation("/tmp/external");
  tbCopy.setSd(sd);

  // Alter table tab1 to be tbCopy which is at scheme-less location.
  // And the corresponding path will be updated to sentry server.
  hmsClient.alter_table(dbName, "tab1", tbCopy);
  Assert.assertEquals(hmsClient.getTable(dbName, tblName).getSd().getLocation(), "/tmp/external");
  verifyOnPath("/tmp/external", FsAction.ALL, StaticUserGroup.HIVE, true);

  stmt.close();
  conn.close();
}
 
Example 19
Source File: HiveDifferencesIntegrationTest.java    From circus-train with Apache License 2.0
private void createTable(
    String databaseName,
    String tableName,
    File tableLocation,
    String sourceTable,
    String sourceLocation,
    boolean addChecksum)
  throws Exception {
  File partition0 = createPartitionData("part=0", tableLocation, Arrays.asList("1\tadam", "2\tsusan"));
  File partition1 = createPartitionData("part=1", tableLocation, Arrays.asList("3\tchun", "4\tkim"));

  Table table = new Table();
  table.setDbName(databaseName);
  table.setTableName(tableName);
  table.setTableType(TableType.EXTERNAL_TABLE.name());
  table.setParameters(new HashMap<String, String>());
  if (sourceTable != null) {
    table.getParameters().put(CircusTrainTableParameter.SOURCE_TABLE.parameterName(), sourceTable);
  }
  if (sourceLocation != null) {
    table.getParameters().put(CircusTrainTableParameter.SOURCE_LOCATION.parameterName(), sourceLocation);
  }

  List<FieldSchema> partitionColumns = Arrays.asList(PARTITION_COL);
  table.setPartitionKeys(partitionColumns);

  List<FieldSchema> dataColumns = Arrays.asList(FOO_COL, BAR_COL);

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(dataColumns);
  sd.setLocation(tableLocation.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());

  table.setSd(sd);

  HiveMetaStoreClient client = catalog.client();
  client.createTable(table);
  LOG
      .info(">>>> Partitions added: {}",
          +client
              .add_partitions(Arrays
                  .asList(
                      newPartition(databaseName, tableName, sd, Arrays.asList("0"), partition0, sourceTable,
                          sourceLocation + "part=0", addChecksum),
                      newPartition(databaseName, tableName, sd, Arrays.asList("1"), partition1, sourceTable,
                          sourceLocation + "part=1", addChecksum))));
}
 
Example 20
Source File: SubmarineMetaStoreTest.java    From submarine with Apache License 2.0
@Before
public void createDatabase() throws InvalidObjectException, MetaException {
  listTables();

  Database database = new Database();
  database.setName("testdb");
  database.setDescription("testdb");
  database.setLocationUri("hdfs://mycluster/user/hive/warehouse/testdb.db");
  Map<String, String> map = new HashMap<>();
  map.put("key", "value");
  database.setParameters(map);
  database.setOwnerName("root");
  database.setOwnerType(PrincipalType.USER);
  submarineMetaStore.createDatabase(database);
  assertEquals(1, submarineMetaStore.getDatabaseCount());

  Table table = new Table();
  table.setTableName("testtable");
  table.setDbName("testdb");
  table.setOwner("root");
  table.setCreateTime((int) (new Date().getTime() / 1000));
  table.setLastAccessTime((int) (new Date().getTime() / 1000));
  table.setRetention(0);
  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> fieldSchemas = new ArrayList<>();
  FieldSchema fieldSchema = new FieldSchema();
  fieldSchema.setName("a");
  fieldSchema.setType("int");
  fieldSchema.setComment("a");
  fieldSchemas.add(fieldSchema);
  sd.setCols(fieldSchemas);
  sd.setLocation("hdfs://mycluster/user/hive/warehouse/testdb.db/testtable");
  sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  sd.setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
  sd.setCompressed(false);
  sd.setNumBuckets(-1);
  SerDeInfo serdeInfo = new SerDeInfo();
  serdeInfo.setName("test");
  serdeInfo.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
  Map<String, String> parametersMap = new HashMap<>();
  parametersMap.put("serialization.format", "|");
  parametersMap.put("field.delim", "|");
  serdeInfo.setParameters(parametersMap);
  sd.setSerdeInfo(serdeInfo);
  table.setSd(sd);
  List<FieldSchema> partitionKeys = new ArrayList<>();
  table.setPartitionKeys(partitionKeys);
  Map<String, String> parameters = new HashMap<>();
  table.setParameters(parameters);
  String viewOriginalText = "";
  table.setViewOriginalText(viewOriginalText);
  String viewExpandedText = "";
  table.setViewExpandedText(viewExpandedText);
  String tableType = "MANAGED_TABLE";
  table.setTableType(tableType);
  submarineMetaStore.createTable(table);

  Table tableTest = submarineMetaStore.getTable("testdb", "testtable");
  assertEquals("testtable", tableTest.getTableName());
  int tableCount = submarineMetaStore.getTableCount();
  assertEquals(1, tableCount);
}