Java Code Examples for org.apache.hadoop.hive.metastore.TableType#VIRTUAL_VIEW

The following examples show how to use org.apache.hadoop.hive.metastore.TableType#VIRTUAL_VIEW. Each example is taken from an open-source project; the source file, project, and license are noted above it.
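
Across all of the projects below the pattern is the same: read the table type from the metastore Table object and compare it against TableType.VIRTUAL_VIEW. A minimal sketch of that check, assuming a Thrift org.apache.hadoop.hive.metastore.api.Table (the isView helper name is ours, not from any project shown here):

import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Table;

// The Thrift Table stores its type as a plain String, so map it back to
// the TableType enum before comparing.
public static boolean isView(Table table) {
    return TableType.valueOf(table.getTableType()) == TableType.VIRTUAL_VIEW;
}
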
Example 1
Source File: HiveClientWrapper.java    From pxf with Apache License 2.0
public Table getHiveTable(IMetaStoreClient client, Metadata.Item itemName) throws Exception {
    Table tbl = client.getTable(itemName.getPath(), itemName.getName());
    String tblType = tbl.getTableType();

    LOG.debug("Item: {}.{}, type: {}", itemName.getPath(), itemName.getName(), tblType);

    // Reject views up front: PXF does not support reading Hive views.
    if (TableType.valueOf(tblType) == TableType.VIRTUAL_VIEW) {
        throw new UnsupportedOperationException("Hive views are not supported by PXF");
    }

    return tbl;
}
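
A possible call site for the method above; the wrapper, client, and itemName variables are assumptions for illustration, not part of the pxf source:

// Views surface as UnsupportedOperationException, so a caller iterating
// over metastore items can catch it and skip them.
try {
    Table table = wrapper.getHiveTable(client, itemName);
    // ... proceed with the materialized table ...
} catch (UnsupportedOperationException e) {
    LOG.warn("Skipping Hive view {}.{}", itemName.getPath(), itemName.getName());
}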
 
Example 2
Source File: HiveWorkUnit.java    From incubator-gobblin with Apache License 2.0
/**
 * Automatically serializes the {@link HiveDataset} by calling {@link #setHiveDataset(HiveDataset)}
 * @param hiveDataset for which the workunit is being created
 */
@SuppressWarnings("deprecation")
public HiveWorkUnit(HiveDataset hiveDataset) {
  super();
  setHiveDataset(hiveDataset);
  // Virtual views have no physical storage location, so only record a
  // location for materialized table types.
  if (hiveDataset.getTable().getTableType() != TableType.VIRTUAL_VIEW) {
    setTableLocation(hiveDataset.getTable().getSd().getLocation());
  }
}
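
The guard exists because a virtual view has no physical data: its StorageDescriptor location is typically null, so recording it would be meaningless. A null-safe variant of the same idea, as a hypothetical helper (locationOf is not part of the Gobblin source):

import java.util.Optional;

// Return a storage location only for materialized table types; views
// yield an empty Optional instead of a null location.
private static Optional<String> locationOf(org.apache.hadoop.hive.ql.metadata.Table table) {
  if (table.getTableType() == TableType.VIRTUAL_VIEW) {
    return Optional.empty();
  }
  return Optional.ofNullable(table.getSd().getLocation());
}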
 
Example 3
Source File: HiveCatalog.java    From flink with Apache License 2.0
private static CatalogBaseTable instantiateCatalogTable(Table hiveTable, HiveConf hiveConf) {
	boolean isView = TableType.valueOf(hiveTable.getTableType()) == TableType.VIRTUAL_VIEW;

	// Table properties
	Map<String, String> properties = hiveTable.getParameters();

	boolean isGeneric = Boolean.valueOf(properties.get(CatalogConfig.IS_GENERIC));
	if (isGeneric) {
		properties = retrieveFlinkProperties(properties);
	}
	String comment = properties.remove(HiveCatalogConfig.COMMENT);

	// Table schema
	List<FieldSchema> fields;
	if (org.apache.hadoop.hive.ql.metadata.Table.hasMetastoreBasedSchema(hiveConf,
			hiveTable.getSd().getSerdeInfo().getSerializationLib())) {
		// get schema from metastore
		fields = hiveTable.getSd().getCols();
	} else {
		// get schema from deserializer
		try {
			fields = MetaStoreUtils.getFieldsFromDeserializer(hiveTable.getTableName(),
					MetaStoreUtils.getDeserializer(hiveConf, hiveTable, true));
		} catch (SerDeException | MetaException e) {
			throw new CatalogException("Failed to get Hive table schema from deserializer", e);
		}
	}
	TableSchema tableSchema =
		HiveTableUtil.createTableSchema(fields, hiveTable.getPartitionKeys());

	// Partition keys
	List<String> partitionKeys = new ArrayList<>();
	if (!hiveTable.getPartitionKeys().isEmpty()) {
		partitionKeys = getFieldNames(hiveTable.getPartitionKeys());
	}

	if (isView) {
		return new CatalogViewImpl(
				hiveTable.getViewOriginalText(),
				hiveTable.getViewExpandedText(),
				tableSchema,
				properties,
				comment);
	} else {
		return new CatalogTableImpl(tableSchema, partitionKeys, properties, comment);
	}
}
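
The final branch is what downstream Flink code keys on: a Hive VIRTUAL_VIEW becomes a CatalogView carrying the original and expanded view SQL, while everything else becomes a CatalogTable. A rough usage sketch; the hiveTable and hiveConf variables are assumptions for illustration:

CatalogBaseTable base = instantiateCatalogTable(hiveTable, hiveConf);
if (base instanceof CatalogView) {
	// For views, the defining SQL survives the round trip through the
	// metastore in both original and fully-expanded form.
	String viewSql = ((CatalogView) base).getOriginalQuery();
}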
 
Example 4
Source File: HiveCatalog.java    From flink with Apache License 2.0
private CatalogBaseTable instantiateCatalogTable(Table hiveTable, HiveConf hiveConf) {
	boolean isView = TableType.valueOf(hiveTable.getTableType()) == TableType.VIRTUAL_VIEW;

	// Table properties
	Map<String, String> properties = hiveTable.getParameters();

	boolean isGeneric = isGenericForGet(hiveTable.getParameters());

	TableSchema tableSchema;
	// Partition keys
	List<String> partitionKeys = new ArrayList<>();

	if (isGeneric) {
		properties = retrieveFlinkProperties(properties);
		DescriptorProperties tableSchemaProps = new DescriptorProperties(true);
		tableSchemaProps.putProperties(properties);
		ObjectPath tablePath = new ObjectPath(hiveTable.getDbName(), hiveTable.getTableName());
		tableSchema = tableSchemaProps.getOptionalTableSchema(Schema.SCHEMA)
				.orElseThrow(() -> new CatalogException("Failed to get table schema from properties for generic table " + tablePath));
		partitionKeys = tableSchemaProps.getPartitionKeys();
		// remove the schema from properties
		properties = CatalogTableImpl.removeRedundant(properties, tableSchema, partitionKeys);
	} else {
		properties.put(CatalogConfig.IS_GENERIC, String.valueOf(false));
		// Table schema
		List<FieldSchema> fields = getNonPartitionFields(hiveConf, hiveTable);
		Set<String> notNullColumns = client.getNotNullColumns(hiveConf, hiveTable.getDbName(), hiveTable.getTableName());
		Optional<UniqueConstraint> primaryKey = isView ? Optional.empty() :
				client.getPrimaryKey(hiveTable.getDbName(), hiveTable.getTableName(), HiveTableUtil.relyConstraint((byte) 0));
		// PK columns cannot be null
		primaryKey.ifPresent(pk -> notNullColumns.addAll(pk.getColumns()));
		tableSchema = HiveTableUtil.createTableSchema(fields, hiveTable.getPartitionKeys(), notNullColumns, primaryKey.orElse(null));

		if (!hiveTable.getPartitionKeys().isEmpty()) {
			partitionKeys = getFieldNames(hiveTable.getPartitionKeys());
		}
	}

	String comment = properties.remove(HiveCatalogConfig.COMMENT);

	if (isView) {
		return new CatalogViewImpl(
				hiveTable.getViewOriginalText(),
				hiveTable.getViewExpandedText(),
				tableSchema,
				properties,
				comment);
	} else {
		return new CatalogTableImpl(tableSchema, partitionKeys, properties, comment);
	}
}
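
Compared with Example 3, this newer variant reads the schema and partition keys of generic (Flink-managed) tables from the serialized Flink properties instead of the metastore columns, and for native Hive tables it additionally fetches NOT NULL and primary-key constraints so the resulting TableSchema carries nullability information. The VIRTUAL_VIEW handling itself is unchanged: views still map to CatalogViewImpl with their original and expanded SQL, and the primary-key lookup is explicitly skipped for them.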