Java Code Examples for org.apache.hadoop.hive.metastore.api.Table#setViewOriginalText()
The following examples show how to use org.apache.hadoop.hive.metastore.api.Table#setViewOriginalText().
The examples are taken from open source projects; the originating project, source file, and license are noted above each example.
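Before diving into the project code, here is a minimal, self-contained sketch of the pattern the examples share: a Hive view is an ordinary metastore Table whose table type is VIRTUAL_VIEW, with the defining query stored via setViewOriginalText() (the query as the user wrote it) and setViewExpandedText() (the fully qualified form). The client handle, database name, view name, and query strings below are illustrative assumptions, not taken from any of the projects that follow.

import java.util.ArrayList;
import java.util.HashMap;

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;

public class ViewOriginalTextSketch {

  // Registers a hypothetical view "example_db.example_view" in the metastore.
  static Table createExampleView(IMetaStoreClient client) throws TException {
    Table view = new Table();
    view.setDbName("example_db");
    view.setTableName("example_view");
    view.setTableType(TableType.VIRTUAL_VIEW.name());
    // The query exactly as the user wrote it ...
    view.setViewOriginalText("SELECT id FROM example_table");
    // ... and the expanded, fully qualified form Hive resolves it to.
    view.setViewExpandedText("SELECT `example_table`.`id` FROM `example_db`.`example_table`");
    // Views hold no data, but the metastore still requires a StorageDescriptor.
    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(new ArrayList<>());
    sd.setParameters(new HashMap<>());
    sd.setSerdeInfo(new SerDeInfo());
    view.setSd(sd);
    client.createTable(view);
    return view;
  }
}

The same shape recurs throughout the examples: original text for display, expanded text for query answering, and a VIRTUAL_VIEW table type to mark the entry as a view.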
Example 1
Source File: CatalogToHiveConverter.java From aws-glue-data-catalog-client-for-apache-hive-metastore with Apache License 2.0

public static Table convertTable(com.amazonaws.services.glue.model.Table catalogTable, String dbname) {
  Table hiveTable = new Table();
  hiveTable.setDbName(dbname);
  hiveTable.setTableName(catalogTable.getName());
  Date createTime = catalogTable.getCreateTime();
  hiveTable.setCreateTime(createTime == null ? 0 : (int) (createTime.getTime() / 1000));
  hiveTable.setOwner(catalogTable.getOwner());
  Date lastAccessedTime = catalogTable.getLastAccessTime();
  hiveTable.setLastAccessTime(lastAccessedTime == null ? 0 : (int) (lastAccessedTime.getTime() / 1000));
  hiveTable.setRetention(catalogTable.getRetention());
  hiveTable.setSd(convertStorageDescriptor(catalogTable.getStorageDescriptor()));
  hiveTable.setPartitionKeys(convertFieldSchemaList(catalogTable.getPartitionKeys()));
  // Hive may throw a NPE during dropTable if the parameter map is null.
  Map<String, String> parameterMap = catalogTable.getParameters();
  if (parameterMap == null) {
    parameterMap = Maps.newHashMap();
  }
  hiveTable.setParameters(parameterMap);
  hiveTable.setViewOriginalText(catalogTable.getViewOriginalText());
  hiveTable.setViewExpandedText(catalogTable.getViewExpandedText());
  hiveTable.setTableType(catalogTable.getTableType());
  return hiveTable;
}
Example 2
Source File: ViewTransformation.java From circus-train with Apache License 2.0

@Override
public Table transform(Table table) {
  if (!MetaStoreUtils.isView(table)) {
    return table;
  }
  LOG.info("Translating HQL of view {}.{}", table.getDbName(), table.getTableName());
  String tableQualifiedName = Warehouse.getQualifiedName(table);
  String hql = hqlTranslator.translate(tableQualifiedName, table.getViewOriginalText());
  String expandedHql = hqlTranslator.translate(tableQualifiedName, table.getViewExpandedText());
  Table transformedView = new Table(table);
  transformedView.setViewOriginalText(hql);
  transformedView.setViewExpandedText(expandedHql);
  if (!replicaHiveConf.getBoolean(SKIP_TABLE_EXIST_CHECKS, false)) {
    LOG.info("Validating that tables used by the view {}.{} exist in the replica catalog",
        table.getDbName(), table.getTableName());
    validateReferencedTables(transformedView);
  }
  return transformedView;
}
Example 3
Source File: TestUtils.java From circus-train with Apache License 2.0

private static Table createView(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String view,
    String table,
    List<FieldSchema> partitionCols)
    throws TException {
  Table hiveView = new Table();
  hiveView.setDbName(database);
  hiveView.setTableName(view);
  hiveView.setTableType(TableType.VIRTUAL_VIEW.name());
  hiveView.setViewOriginalText(hql(database, table));
  hiveView.setViewExpandedText(expandHql(database, table, DATA_COLUMNS, partitionCols));
  hiveView.setPartitionKeys(partitionCols);
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  hiveView.setSd(sd);
  metaStoreClient.createTable(hiveView);
  return hiveView;
}
Example 4
Source File: DatabaseMappingImplTest.java From waggle-dance with Apache License 2.0

@Test
public void transformOutboundGetTablesResult() throws Exception {
  Table table = new Table();
  table.setDbName(DB_NAME);
  table.setTableName(TABLE_NAME);
  Table table2 = new Table();
  table2.setDbName(DB_NAME);
  table2.setTableName(TABLE_NAME);
  table2.setViewExpandedText(VIEW_EXPANDED_TEXT);
  table2.setViewOriginalText(VIEW_ORIGINAL_TEXT);
  GetTablesResult result = new GetTablesResult();
  result.setTables(Arrays.asList(table, table2));
  GetTablesResult transformedResult = databaseMapping.transformOutboundGetTablesResult(result);
  assertThat(transformedResult, is(sameInstance(result)));
  assertThat(transformedResult.getTables().size(), is(2));
  assertThat(transformedResult.getTables().get(0), is(sameInstance(result.getTables().get(0))));
  assertThat(transformedResult.getTables().get(0).getDbName(), is(OUT_DB_NAME));
  assertThat(transformedResult.getTables().get(0).getTableName(), is(TABLE_NAME));
  assertThat(transformedResult.getTables().get(0).getViewExpandedText(), nullValue());
  assertThat(transformedResult.getTables().get(0).getViewOriginalText(), nullValue());
  assertThat(transformedResult.getTables().get(1), is(sameInstance(result.getTables().get(1))));
  assertThat(transformedResult.getTables().get(1).getDbName(), is(OUT_DB_NAME));
  assertThat(transformedResult.getTables().get(1).getTableName(), is(TABLE_NAME));
  assertThat(transformedResult.getTables().get(1).getViewExpandedText(), is(VIEW_EXPANDED_TEXT_TRANSFORMED));
  assertThat(transformedResult.getTables().get(1).getViewOriginalText(), is(VIEW_ORIGINAL_TEXT_TRANSFORMED));
}
Example 5
Source File: DatabaseMappingImplTest.java From waggle-dance with Apache License 2.0

@Test
public void transformOutboundGetTableResultWithView() throws Exception {
  Table table = new Table();
  table.setDbName(DB_NAME);
  table.setTableName(TABLE_NAME);
  table.setViewExpandedText(VIEW_EXPANDED_TEXT);
  table.setViewOriginalText(VIEW_ORIGINAL_TEXT);
  GetTableResult result = new GetTableResult();
  result.setTable(table);
  GetTableResult transformedResult = databaseMapping.transformOutboundGetTableResult(result);
  assertThat(transformedResult, is(sameInstance(result)));
  assertThat(transformedResult.getTable(), is(sameInstance(result.getTable())));
  assertThat(transformedResult.getTable().getDbName(), is(OUT_DB_NAME));
  assertThat(transformedResult.getTable().getTableName(), is(TABLE_NAME));
  assertThat(transformedResult.getTable().getViewExpandedText(), is(VIEW_EXPANDED_TEXT_TRANSFORMED));
  assertThat(transformedResult.getTable().getViewOriginalText(), is(VIEW_ORIGINAL_TEXT_TRANSFORMED));
}
Example 6
Source File: DatabaseMappingImplTest.java From waggle-dance with Apache License 2.0

@Test
public void transformOutboundTableView() throws Exception {
  Table table = new Table();
  table.setDbName(DB_NAME);
  table.setViewExpandedText(VIEW_EXPANDED_TEXT);
  table.setViewOriginalText(VIEW_ORIGINAL_TEXT);
  Table result = databaseMapping.transformOutboundTable(table);
  assertThat(result, is(sameInstance(table)));
  assertThat(result.getDbName(), is(OUT_DB_NAME));
  assertThat(result.getViewExpandedText(), is(VIEW_EXPANDED_TEXT_TRANSFORMED));
  assertThat(result.getViewOriginalText(), is(VIEW_ORIGINAL_TEXT_TRANSFORMED));
}
Example 7
Source File: DatabaseMappingImplTest.java From waggle-dance with Apache License 2.0

@Test
public void transformOutboundTableViewExpandedTextErrorKeepOriginal() throws Exception {
  String viewExpandedText = "error";
  when(queryMapping.transformOutboundDatabaseName(metastoreMapping, viewExpandedText))
      .thenThrow(new WaggleDanceException("cannot transform"));
  Table table = new Table();
  table.setDbName(DB_NAME);
  table.setViewExpandedText(viewExpandedText);
  table.setViewOriginalText(VIEW_ORIGINAL_TEXT);
  Table result = databaseMapping.transformOutboundTable(table);
  assertThat(result.getViewExpandedText(), is(viewExpandedText));
}
Example 8
Source File: DatabaseMappingImplTest.java From waggle-dance with Apache License 2.0

@Test
public void transformOutboundTableViewOriginalTextErrorTakeExpandedText() throws Exception {
  String viewOriginalText = "error";
  when(queryMapping.transformOutboundDatabaseName(metastoreMapping, viewOriginalText))
      .thenThrow(new WaggleDanceException("cannot transform"));
  Table table = new Table();
  table.setDbName(DB_NAME);
  table.setViewExpandedText(VIEW_EXPANDED_TEXT);
  table.setViewOriginalText(viewOriginalText);
  Table result = databaseMapping.transformOutboundTable(table);
  assertThat(result.getViewOriginalText(), is(VIEW_EXPANDED_TEXT_TRANSFORMED));
}
Example 9
Source File: DatabaseMappingImplTest.java From waggle-dance with Apache License 2.0

@Test
public void transformOutboundTableViewOriginalTextErrorKeepOriginal() throws Exception {
  String viewOriginalText = "error";
  when(queryMapping.transformOutboundDatabaseName(metastoreMapping, viewOriginalText))
      .thenThrow(new WaggleDanceException("cannot transform"));
  Table table = new Table();
  table.setDbName(DB_NAME);
  table.setViewExpandedText(null);
  table.setViewOriginalText(viewOriginalText);
  Table result = databaseMapping.transformOutboundTable(table);
  assertThat(result.getViewOriginalText(), is(viewOriginalText));
}
Example 10
Source File: HiveConvertersImpl.java From metacat with Apache License 2.0

private void updateTableTypeAndViewInfo(final TableDto dto, final Table table) {
  final ViewDto viewDto = dto.getView();
  if (null == dto.getView() || Strings.isNullOrEmpty(viewDto.getViewOriginalText())) {
    table.setTableType(TableType.EXTERNAL_TABLE.name());
    return;
  }
  table.setTableType(TableType.VIRTUAL_VIEW.name());
  table.setViewOriginalText(viewDto.getViewOriginalText());
  table.setViewExpandedText(viewDto.getViewExpandedText());
}
Example 11
Source File: MetaStoreRestApiTest.java From submarine with Apache License 2.0

@Before
public void createDatabase() {
  Database database = new Database();
  database.setName("testdb");
  database.setDescription("testdb");
  database.setLocationUri("hdfs://mycluster/user/hive/warehouse/testdb.db");
  Map<String, String> map = new HashMap<>();
  map.put("key", "value");
  database.setParameters(map);
  database.setOwnerName("root");
  database.setOwnerType(PrincipalType.USER);

  Gson gson = new Gson();
  String databaseJson = gson.toJson(database);
  metaStoreApi.createDatabase(databaseJson);
  Response databaseCountResponse = metaStoreApi.getDatabaseCount();
  assertEquals(databaseCountResponse.getStatus(), Response.Status.OK.getStatusCode());
  assertTrue(((String) databaseCountResponse.getEntity()).contains("\"result\":1"));

  Table table = new Table();
  table.setTableName("testtable");
  table.setDbName("testdb");
  table.setOwner("root");
  // Cast after dividing to seconds; casting the raw millisecond value to int would overflow.
  table.setCreateTime((int) (new Date().getTime() / 1000));
  table.setLastAccessTime((int) (new Date().getTime() / 1000));
  table.setRetention(0);

  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> fieldSchemas = new ArrayList<>();
  FieldSchema fieldSchema = new FieldSchema();
  fieldSchema.setName("a");
  fieldSchema.setType("int");
  fieldSchema.setComment("a");
  fieldSchemas.add(fieldSchema);
  sd.setCols(fieldSchemas);
  sd.setLocation("hdfs://mycluster/user/hive/warehouse/testdb.db/testtable");
  sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  sd.setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
  sd.setCompressed(false);
  sd.setNumBuckets(-1);

  SerDeInfo serdeInfo = new SerDeInfo();
  serdeInfo.setName("test");
  serdeInfo.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
  Map<String, String> parametersMap = new HashMap<>();
  parametersMap.put("serialization.format", "|");
  parametersMap.put("field.delim", "|");
  serdeInfo.setParameters(parametersMap);
  sd.setSerdeInfo(serdeInfo);
  table.setSd(sd);

  List<FieldSchema> partitionKeys = new ArrayList<>();
  table.setPartitionKeys(partitionKeys);
  Map<String, String> parameters = new HashMap<>();
  table.setParameters(parameters);
  String viewOriginalText = "";
  table.setViewOriginalText(viewOriginalText);
  String viewExpandedText = "";
  table.setViewExpandedText(viewExpandedText);
  String tableType = "MANAGED_TABLE";
  table.setTableType(tableType);

  String tableJson = gson.toJson(table);
  metaStoreApi.createTable(tableJson);

  Response tableResponse = metaStoreApi.getTable("testdb", "testtable");
  assertEquals(tableResponse.getStatus(), Response.Status.OK.getStatusCode());
  assertTrue(((String) tableResponse.getEntity()).contains("\"tableName\":\"testtable\""));
  Response tableCountResponse = metaStoreApi.getTableCount();
  assertEquals(tableCountResponse.getStatus(), Response.Status.OK.getStatusCode());
  assertTrue(((String) tableCountResponse.getEntity()).contains("\"result\":1"));
}
Example 12
Source File: SubmarineMetaStoreTest.java From submarine with Apache License 2.0

@Before
public void createDatabase() throws InvalidObjectException, MetaException {
  listTables();
  Database database = new Database();
  database.setName("testdb");
  database.setDescription("testdb");
  database.setLocationUri("hdfs://mycluster/user/hive/warehouse/testdb.db");
  Map<String, String> map = new HashMap<>();
  map.put("key", "value");
  database.setParameters(map);
  database.setOwnerName("root");
  database.setOwnerType(PrincipalType.USER);
  submarineMetaStore.createDatabase(database);
  assertEquals(1, submarineMetaStore.getDatabaseCount());

  Table table = new Table();
  table.setTableName("testtable");
  table.setDbName("testdb");
  table.setOwner("root");
  // Cast after dividing to seconds; casting the raw millisecond value to int would overflow.
  table.setCreateTime((int) (new Date().getTime() / 1000));
  table.setLastAccessTime((int) (new Date().getTime() / 1000));
  table.setRetention(0);

  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> fieldSchemas = new ArrayList<>();
  FieldSchema fieldSchema = new FieldSchema();
  fieldSchema.setName("a");
  fieldSchema.setType("int");
  fieldSchema.setComment("a");
  fieldSchemas.add(fieldSchema);
  sd.setCols(fieldSchemas);
  sd.setLocation("hdfs://mycluster/user/hive/warehouse/testdb.db/testtable");
  sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  sd.setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
  sd.setCompressed(false);
  sd.setNumBuckets(-1);

  SerDeInfo serdeInfo = new SerDeInfo();
  serdeInfo.setName("test");
  serdeInfo.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
  Map<String, String> parametersMap = new HashMap<>();
  parametersMap.put("serialization.format", "|");
  parametersMap.put("field.delim", "|");
  serdeInfo.setParameters(parametersMap);
  sd.setSerdeInfo(serdeInfo);
  table.setSd(sd);

  List<FieldSchema> partitionKeys = new ArrayList<>();
  table.setPartitionKeys(partitionKeys);
  Map<String, String> parameters = new HashMap<>();
  table.setParameters(parameters);
  String viewOriginalText = "";
  table.setViewOriginalText(viewOriginalText);
  String viewExpandedText = "";
  table.setViewExpandedText(viewExpandedText);
  String tableType = "MANAGED_TABLE";
  table.setTableType(tableType);
  submarineMetaStore.createTable(table);

  Table tableTest = submarineMetaStore.getTable("testdb", "testtable");
  assertEquals("testtable", tableTest.getTableName());
  int tableCount = submarineMetaStore.getTableCount();
  assertEquals(1, tableCount);
}
Example 13
Source File: HiveCatalog.java From flink with Apache License 2.0

private static Table instantiateHiveTable(ObjectPath tablePath, CatalogBaseTable table) {
  // let Hive set default parameters for us, e.g. serialization.format
  Table hiveTable = org.apache.hadoop.hive.ql.metadata.Table.getEmptyTable(
      tablePath.getDatabaseName(), tablePath.getObjectName());
  hiveTable.setCreateTime((int) (System.currentTimeMillis() / 1000));

  Map<String, String> properties = new HashMap<>(table.getProperties());
  // Table comment
  properties.put(HiveCatalogConfig.COMMENT, table.getComment());

  boolean isGeneric = Boolean.valueOf(properties.get(CatalogConfig.IS_GENERIC));
  if (isGeneric) {
    properties = maskFlinkProperties(properties);
  }
  // Table properties
  hiveTable.setParameters(properties);

  // Hive table's StorageDescriptor
  StorageDescriptor sd = hiveTable.getSd();
  setStorageFormat(sd, properties);

  List<FieldSchema> allColumns = HiveTableUtil.createHiveColumns(table.getSchema());

  // Table columns and partition keys
  if (table instanceof CatalogTableImpl) {
    CatalogTable catalogTable = (CatalogTableImpl) table;
    if (catalogTable.isPartitioned()) {
      int partitionKeySize = catalogTable.getPartitionKeys().size();
      List<FieldSchema> regularColumns = allColumns.subList(0, allColumns.size() - partitionKeySize);
      List<FieldSchema> partitionColumns = allColumns.subList(allColumns.size() - partitionKeySize, allColumns.size());
      sd.setCols(regularColumns);
      hiveTable.setPartitionKeys(partitionColumns);
    } else {
      sd.setCols(allColumns);
      hiveTable.setPartitionKeys(new ArrayList<>());
    }
  } else if (table instanceof CatalogViewImpl) {
    CatalogView view = (CatalogViewImpl) table;
    // TODO: [FLINK-12398] Support partitioned view in catalog API
    sd.setCols(allColumns);
    hiveTable.setPartitionKeys(new ArrayList<>());
    hiveTable.setViewOriginalText(view.getOriginalQuery());
    hiveTable.setViewExpandedText(view.getExpandedQuery());
    hiveTable.setTableType(TableType.VIRTUAL_VIEW.name());
  } else {
    throw new CatalogException("HiveCatalog only supports CatalogTableImpl and CatalogViewImpl");
  }

  return hiveTable;
}
Example 14
Source File: HiveTableUtil.java From flink with Apache License 2.0

public static Table instantiateHiveTable(ObjectPath tablePath, CatalogBaseTable table, HiveConf hiveConf) {
  if (!(table instanceof CatalogTableImpl) && !(table instanceof CatalogViewImpl)) {
    throw new CatalogException("HiveCatalog only supports CatalogTableImpl and CatalogViewImpl");
  }
  // let Hive set default parameters for us, e.g. serialization.format
  Table hiveTable = org.apache.hadoop.hive.ql.metadata.Table.getEmptyTable(
      tablePath.getDatabaseName(), tablePath.getObjectName());
  hiveTable.setCreateTime((int) (System.currentTimeMillis() / 1000));

  Map<String, String> properties = new HashMap<>(table.getProperties());
  // Table comment
  if (table.getComment() != null) {
    properties.put(HiveCatalogConfig.COMMENT, table.getComment());
  }

  boolean isGeneric = HiveCatalog.isGenericForCreate(properties);

  // Hive table's StorageDescriptor
  StorageDescriptor sd = hiveTable.getSd();
  HiveTableUtil.setDefaultStorageFormat(sd, hiveConf);

  if (isGeneric) {
    DescriptorProperties tableSchemaProps = new DescriptorProperties(true);
    tableSchemaProps.putTableSchema(Schema.SCHEMA, table.getSchema());
    if (table instanceof CatalogTable) {
      tableSchemaProps.putPartitionKeys(((CatalogTable) table).getPartitionKeys());
    }
    properties.putAll(tableSchemaProps.asMap());
    properties = maskFlinkProperties(properties);
    hiveTable.setParameters(properties);
  } else {
    HiveTableUtil.initiateTableFromProperties(hiveTable, properties, hiveConf);
    List<FieldSchema> allColumns = HiveTableUtil.createHiveColumns(table.getSchema());
    // Table columns and partition keys
    if (table instanceof CatalogTableImpl) {
      CatalogTable catalogTable = (CatalogTableImpl) table;
      if (catalogTable.isPartitioned()) {
        int partitionKeySize = catalogTable.getPartitionKeys().size();
        List<FieldSchema> regularColumns = allColumns.subList(0, allColumns.size() - partitionKeySize);
        List<FieldSchema> partitionColumns = allColumns.subList(allColumns.size() - partitionKeySize, allColumns.size());
        sd.setCols(regularColumns);
        hiveTable.setPartitionKeys(partitionColumns);
      } else {
        sd.setCols(allColumns);
        hiveTable.setPartitionKeys(new ArrayList<>());
      }
    } else {
      sd.setCols(allColumns);
    }
    // Table properties
    hiveTable.getParameters().putAll(properties);
  }

  if (table instanceof CatalogViewImpl) {
    // TODO: [FLINK-12398] Support partitioned view in catalog API
    hiveTable.setPartitionKeys(new ArrayList<>());
    CatalogView view = (CatalogView) table;
    hiveTable.setViewOriginalText(view.getOriginalQuery());
    hiveTable.setViewExpandedText(view.getExpandedQuery());
    hiveTable.setTableType(TableType.VIRTUAL_VIEW.name());
  }

  return hiveTable;
}