Java Code Examples for org.apache.hadoop.hive.metastore.api.Table#setCreateTime()

The following examples show how to use org.apache.hadoop.hive.metastore.api.Table#setCreateTime(). Each example is drawn from an open-source project; the source file, project, and license are noted above the code.
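In the Hive metastore Thrift API, a Table's create time is an int holding seconds since the Unix epoch, so callers typically divide a millisecond timestamp by 1000 and then narrow it. A minimal sketch of that convention (the database and table names here are placeholders, not from any of the projects below):

Table table = new Table();
table.setDbName("example_db");
table.setTableName("example_table");
// Divide first, then narrow: casting the raw millisecond value to int would overflow.
table.setCreateTime((int) (System.currentTimeMillis() / 1000));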
Example 1
Source File: CatalogToHiveConverter.java    From aws-glue-data-catalog-client-for-apache-hive-metastore with Apache License 2.0
public static Table convertTable(com.amazonaws.services.glue.model.Table catalogTable, String dbname) {
  Table hiveTable = new Table();
  hiveTable.setDbName(dbname);
  hiveTable.setTableName(catalogTable.getName());
  Date createTime = catalogTable.getCreateTime();
  hiveTable.setCreateTime(createTime == null ? 0 : (int) (createTime.getTime() / 1000));
  hiveTable.setOwner(catalogTable.getOwner());
  Date lastAccessedTime = catalogTable.getLastAccessTime();
  hiveTable.setLastAccessTime(lastAccessedTime == null ? 0 : (int) (lastAccessedTime.getTime() / 1000));
  hiveTable.setRetention(catalogTable.getRetention());
  hiveTable.setSd(convertStorageDescriptor(catalogTable.getStorageDescriptor()));
  hiveTable.setPartitionKeys(convertFieldSchemaList(catalogTable.getPartitionKeys()));
  // Hive may throw an NPE during dropTable if the parameter map is null.
  Map<String, String> parameterMap = catalogTable.getParameters();
  if (parameterMap == null) {
    parameterMap = Maps.newHashMap();
  }
  hiveTable.setParameters(parameterMap);
  hiveTable.setViewOriginalText(catalogTable.getViewOriginalText());
  hiveTable.setViewExpandedText(catalogTable.getViewExpandedText());
  hiveTable.setTableType(catalogTable.getTableType());

  return hiveTable;
}
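Glue returns create and last-access times as java.util.Date (or null when unset), so the converter guards against null and falls back to 0, the metastore's conventional "unknown" value. The reverse conversion is sketched below as an assumption, not code from this project; note the long multiplication, which keeps the millisecond value from overflowing int:

// Hypothetical reverse conversion: Hive epoch seconds back to a Date.
Date created = hiveTable.isSetCreateTime() && hiveTable.getCreateTime() > 0
    ? new Date(hiveTable.getCreateTime() * 1000L)
    : null;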
 
Example 2
Source File: TestUtils.java    From circus-train with Apache License 2.0
public static Table newTable(String database, String tableName) {
  Table table = new Table();
  table.setDbName(database);
  table.setTableName(tableName);
  table.setTableType(TABLE_TYPE);
  table.setOwner(OWNER);
  table.setCreateTime(CREATE_TIME);
  table.setRetention(RETENTION);

  Map<String, List<PrivilegeGrantInfo>> userPrivileges = new HashMap<>();
  userPrivileges.put("read", ImmutableList.of(new PrivilegeGrantInfo()));
  PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet();
  privileges.setUserPrivileges(userPrivileges);
  table.setPrivileges(privileges);

  StorageDescriptor storageDescriptor = new StorageDescriptor();
  storageDescriptor.setCols(COLS);
  storageDescriptor.setInputFormat(INPUT_FORMAT);
  storageDescriptor.setOutputFormat(OUTPUT_FORMAT);
  storageDescriptor.setSerdeInfo(new SerDeInfo(SERDE_INFO_NAME, SERIALIZATION_LIB, new HashMap<String, String>()));
  storageDescriptor.setSkewedInfo(new SkewedInfo());
  storageDescriptor.setParameters(new HashMap<String, String>());
  storageDescriptor.setLocation(DATABASE + "/" + tableName + "/");
  table.setSd(storageDescriptor);

  Map<String, String> parameters = new HashMap<>();
  parameters.put("com.company.parameter", "abc");
  table.setParameters(parameters);

  return table;
}
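The uppercase identifiers (TABLE_TYPE, OWNER, CREATE_TIME, RETENTION, COLS, and so on) are constants defined elsewhere in TestUtils and are not shown in this excerpt. A test would use the fixture roughly like this (hypothetical call site):

Table table = TestUtils.newTable("test_db", "test_table");
assertEquals("test_table", table.getTableName());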
 
Example 3
Source File: HiveMetaStoreUtils.java    From incubator-gobblin with Apache License 2.0
/**
 * Convert a {@link HiveTable} into a {@link Table}.
 */
public static Table getTable(HiveTable hiveTable) {
  State props = hiveTable.getProps();
  Table table = new Table();
  table.setDbName(hiveTable.getDbName());
  table.setTableName(hiveTable.getTableName());
  table.setParameters(getParameters(props));
  if (hiveTable.getCreateTime().isPresent()) {
    table.setCreateTime(Ints.checkedCast(hiveTable.getCreateTime().get()));
  }
  if (hiveTable.getLastAccessTime().isPresent()) {
    table.setLastAccessTime(Ints.checkedCast(hiveTable.getLastAccessTime().get()));
  }
  if (hiveTable.getOwner().isPresent()) {
    table.setOwner(hiveTable.getOwner().get());
  }
  if (hiveTable.getRetention().isPresent()) {
    table.setRetention(Ints.checkedCast(hiveTable.getRetention().get()));
  }
  if (hiveTable.getTableType().isPresent()) {
    table.setTableType(hiveTable.getTableType().get());
  } else {
    table.setTableType(DEFAULT_TABLE_TYPE.toString());
  }
  if (table.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
    table.getParameters().put(EXTERNAL, Boolean.TRUE.toString().toUpperCase());
  }
  table.setPartitionKeys(getFieldSchemas(hiveTable.getPartitionKeys()));
  table.setSd(getStorageDescriptor(hiveTable));
  return table;
}
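Guava's Ints.checkedCast does real work here: Gobblin's HiveTable presumably stores these timestamps as long, and checkedCast throws an IllegalArgumentException instead of silently truncating a value that does not fit in an int. A standalone illustration of that behavior (not from Gobblin):

// Epoch seconds fit comfortably in an int until 2038...
int ok = Ints.checkedCast(1_700_000_000L);
// ...but epoch milliseconds do not; this throws IllegalArgumentException.
int boom = Ints.checkedCast(System.currentTimeMillis());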
 
Example 4
Source File: HiveMetaStoreBasedRegister.java    From incubator-gobblin with Apache License 2.0
/**
 * Sets create time if not already set.
 */
private Table getTableWithCreateTime(Table table, int createTime) {
  if (table.isSetCreateTime() && table.getCreateTime() > 0) {
    return table;
  }
  Table actualTable = table.deepCopy();
  actualTable.setCreateTime(createTime);
  return actualTable;
}
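A caller would typically pass the current time, so that tables registered without an explicit create time still get a sensible one. A sketch of such a call site (hypothetical, not taken from HiveMetaStoreBasedRegister):

// Fill in the create time only when the incoming table lacks one.
Table toRegister = getTableWithCreateTime(table, (int) (System.currentTimeMillis() / 1000));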
 
Example 5
Source File: MetaStoreRestApiTest.java    From submarine with Apache License 2.0
@Before
public void createDatabase() {
  Database database = new Database();
  database.setName("testdb");
  database.setDescription("testdb");
  database.setLocationUri("hdfs://mycluster/user/hive/warehouse/testdb.db");
  Map<String, String> map = new HashMap<>();
  map.put("key", "value");
  database.setParameters(map);
  database.setOwnerName("root");
  database.setOwnerType(PrincipalType.USER);

  Gson gson = new Gson();
  String databaseJson = gson.toJson(database);

  metaStoreApi.createDatabase(databaseJson);
  Response databaseCountResponse = metaStoreApi.getDatabaseCount();
  assertEquals(Response.Status.OK.getStatusCode(), databaseCountResponse.getStatus());
  assertTrue(((String) databaseCountResponse.getEntity()).contains("\"result\":1"));

  Table table = new Table();
  table.setTableName("testtable");
  table.setDbName("testdb");
  table.setOwner("root");
  // Divide before narrowing to int; casting the millisecond value first would overflow.
  table.setCreateTime((int) (new Date().getTime() / 1000));
  table.setLastAccessTime((int) (new Date().getTime() / 1000));
  table.setRetention(0);
  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> fieldSchemas = new ArrayList<>();
  FieldSchema fieldSchema = new FieldSchema();
  fieldSchema.setName("a");
  fieldSchema.setType("int");
  fieldSchema.setComment("a");
  fieldSchemas.add(fieldSchema);
  sd.setCols(fieldSchemas);
  sd.setLocation("hdfs://mycluster/user/hive/warehouse/testdb.db/testtable");
  sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  sd.setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
  sd.setCompressed(false);
  sd.setNumBuckets(-1);
  SerDeInfo serdeInfo = new SerDeInfo();
  serdeInfo.setName("test");
  serdeInfo.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
  Map<String, String> parametersMap = new HashMap<>();
  parametersMap.put("serialization.format", "|");
  parametersMap.put("field.delim", "|");
  serdeInfo.setParameters(parametersMap);
  sd.setSerdeInfo(serdeInfo);
  table.setSd(sd);
  List<FieldSchema> partitionKeys = new ArrayList<>();
  table.setPartitionKeys(partitionKeys);
  Map<String, String> parameters = new HashMap<>();
  table.setParameters(parameters);
  String viewOriginalText = "";
  table.setViewOriginalText(viewOriginalText);
  String viewExpandedText = "";
  table.setViewExpandedText(viewExpandedText);
  String tableType = "MANAGED_TABLE";
  table.setTableType(tableType);

  String tableJson = gson.toJson(table);
  metaStoreApi.createTable(tableJson);

  Response tableResponse = metaStoreApi.getTable("testdb", "testtable");
  assertEquals(Response.Status.OK.getStatusCode(), tableResponse.getStatus());
  assertTrue(((String) tableResponse.getEntity()).contains("\"tableName\":\"testtable\""));
  Response tableCountResponse = metaStoreApi.getTableCount();
  assertEquals(Response.Status.OK.getStatusCode(), tableCountResponse.getStatus());
  assertTrue(((String) tableCountResponse.getEntity()).contains("\"result\":1"));
}
 
Example 6
Source File: SubmarineMetaStoreTest.java    From submarine with Apache License 2.0
@Before
public void createDatabase() throws InvalidObjectException, MetaException {
  listTables();

  Database database = new Database();
  database.setName("testdb");
  database.setDescription("testdb");
  database.setLocationUri("hdfs://mycluster/user/hive/warehouse/testdb.db");
  Map<String, String> map = new HashMap<>();
  map.put("key", "value");
  database.setParameters(map);
  database.setOwnerName("root");
  database.setOwnerType(PrincipalType.USER);
  submarineMetaStore.createDatabase(database);
  assertEquals(1, submarineMetaStore.getDatabaseCount());

  Table table = new Table();
  table.setTableName("testtable");
  table.setDbName("testdb");
  table.setOwner("root");
  // Divide before narrowing to int; casting the millisecond value first would overflow.
  table.setCreateTime((int) (new Date().getTime() / 1000));
  table.setLastAccessTime((int) (new Date().getTime() / 1000));
  table.setRetention(0);
  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> fieldSchemas = new ArrayList<>();
  FieldSchema fieldSchema = new FieldSchema();
  fieldSchema.setName("a");
  fieldSchema.setType("int");
  fieldSchema.setComment("a");
  fieldSchemas.add(fieldSchema);
  sd.setCols(fieldSchemas);
  sd.setLocation("hdfs://mycluster/user/hive/warehouse/testdb.db/testtable");
  sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  sd.setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
  sd.setCompressed(false);
  sd.setNumBuckets(-1);
  SerDeInfo serdeInfo = new SerDeInfo();
  serdeInfo.setName("test");
  serdeInfo.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
  Map<String, String> parametersMap = new HashMap<>();
  parametersMap.put("serialization.format", "|");
  parametersMap.put("field.delim", "|");
  serdeInfo.setParameters(parametersMap);
  sd.setSerdeInfo(serdeInfo);
  table.setSd(sd);
  List<FieldSchema> partitionKeys = new ArrayList<>();
  table.setPartitionKeys(partitionKeys);
  Map<String, String> parameters = new HashMap<>();
  table.setParameters(parameters);
  String viewOriginalText = "";
  table.setViewOriginalText(viewOriginalText);
  String viewExpandedText = "";
  table.setViewExpandedText(viewExpandedText);
  String tableType = "MANAGED_TABLE";
  table.setTableType(tableType);
  submarineMetaStore.createTable(table);

  Table tableTest = submarineMetaStore.getTable("testdb", "testtable");
  assertEquals("testtable", tableTest.getTableName());
  int tableCount = submarineMetaStore.getTableCount();
  assertEquals(1, tableCount);
}
 
Example 7
Source File: HiveCatalog.java    From flink with Apache License 2.0
private static Table instantiateHiveTable(ObjectPath tablePath, CatalogBaseTable table) {
	// let Hive set default parameters for us, e.g. serialization.format
	Table hiveTable = org.apache.hadoop.hive.ql.metadata.Table.getEmptyTable(tablePath.getDatabaseName(),
		tablePath.getObjectName());
	hiveTable.setCreateTime((int) (System.currentTimeMillis() / 1000));

	Map<String, String> properties = new HashMap<>(table.getProperties());
	// Table comment
	properties.put(HiveCatalogConfig.COMMENT, table.getComment());

	boolean isGeneric = Boolean.valueOf(properties.get(CatalogConfig.IS_GENERIC));

	if (isGeneric) {
		properties = maskFlinkProperties(properties);
	}
	// Table properties
	hiveTable.setParameters(properties);

	// Hive table's StorageDescriptor
	StorageDescriptor sd = hiveTable.getSd();
	setStorageFormat(sd, properties);

	List<FieldSchema> allColumns = HiveTableUtil.createHiveColumns(table.getSchema());

	// Table columns and partition keys
	if (table instanceof CatalogTableImpl) {
		CatalogTable catalogTable = (CatalogTableImpl) table;

		if (catalogTable.isPartitioned()) {
			int partitionKeySize = catalogTable.getPartitionKeys().size();
			List<FieldSchema> regularColumns = allColumns.subList(0, allColumns.size() - partitionKeySize);
			List<FieldSchema> partitionColumns = allColumns.subList(allColumns.size() - partitionKeySize, allColumns.size());

			sd.setCols(regularColumns);
			hiveTable.setPartitionKeys(partitionColumns);
		} else {
			sd.setCols(allColumns);
			hiveTable.setPartitionKeys(new ArrayList<>());
		}
	} else if (table instanceof CatalogViewImpl) {
		CatalogView view = (CatalogViewImpl) table;

		// TODO: [FLINK-12398] Support partitioned view in catalog API
		sd.setCols(allColumns);
		hiveTable.setPartitionKeys(new ArrayList<>());

		hiveTable.setViewOriginalText(view.getOriginalQuery());
		hiveTable.setViewExpandedText(view.getExpandedQuery());
		hiveTable.setTableType(TableType.VIRTUAL_VIEW.name());
	} else {
		throw new CatalogException(
			"HiveCatalog only supports CatalogTableImpl and CatalogViewImpl");
	}

	return hiveTable;
}
 
Example 8
Source File: HiveConvertersImpl.java    From metacat with Apache License 2.0
/**
 * {@inheritDoc}
 */
@Override
public Table metacatToHiveTable(final TableDto dto) {
    final Table table = new Table();
    final QualifiedName name = dto.getName();
    if (name != null) {
        table.setTableName(name.getTableName());
        table.setDbName(name.getDatabaseName());
    }

    final StorageDto storageDto = dto.getSerde();
    if (storageDto != null) {
        table.setOwner(storageDto.getOwner());
    }

    final AuditDto auditDto = dto.getAudit();
    if (auditDto != null && auditDto.getCreatedDate() != null) {
        table.setCreateTime(dateToEpochSeconds(auditDto.getCreatedDate()));
    }

    Map<String, String> params = new HashMap<>();
    if (dto.getMetadata() != null) {
        params = dto.getMetadata();
    }
    table.setParameters(params);
    updateTableTypeAndViewInfo(dto, table);

    table.setSd(fromStorageDto(storageDto, table.getTableName()));

    final List<FieldDto> fields = dto.getFields();
    if (fields == null) {
        table.setPartitionKeys(Collections.emptyList());
        table.getSd().setCols(Collections.emptyList());
    } else {
        final List<FieldSchema> nonPartitionFields = Lists.newArrayListWithCapacity(fields.size());
        final List<FieldSchema> partitionFields = Lists.newArrayListWithCapacity(fields.size());
        for (FieldDto fieldDto : fields) {
            final FieldSchema f = metacatToHiveField(fieldDto);

            if (fieldDto.isPartition_key()) {
                partitionFields.add(f);
            } else {
                nonPartitionFields.add(f);
            }
        }
        table.setPartitionKeys(partitionFields);
        table.getSd().setCols(nonPartitionFields);
    }
    return table;
}
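The dateToEpochSeconds helper is not shown in this excerpt. A plausible implementation, stated as an assumption rather than Metacat's actual code, is the same millisecond-to-second narrowing seen in the other examples:

// Hypothetical; Metacat's real helper may differ.
private int dateToEpochSeconds(final Date date) {
    return Math.toIntExact(date.getTime() / 1000); // throws ArithmeticException on overflow
}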
 
Example 9
Source File: HiveTableUtil.java    From flink with Apache License 2.0
public static Table instantiateHiveTable(ObjectPath tablePath, CatalogBaseTable table, HiveConf hiveConf) {
	if (!(table instanceof CatalogTableImpl) && !(table instanceof CatalogViewImpl)) {
		throw new CatalogException(
				"HiveCatalog only supports CatalogTableImpl and CatalogViewImpl");
	}
	// let Hive set default parameters for us, e.g. serialization.format
	Table hiveTable = org.apache.hadoop.hive.ql.metadata.Table.getEmptyTable(tablePath.getDatabaseName(),
			tablePath.getObjectName());
	hiveTable.setCreateTime((int) (System.currentTimeMillis() / 1000));

	Map<String, String> properties = new HashMap<>(table.getProperties());
	// Table comment
	if (table.getComment() != null) {
		properties.put(HiveCatalogConfig.COMMENT, table.getComment());
	}

	boolean isGeneric = HiveCatalog.isGenericForCreate(properties);

	// Hive table's StorageDescriptor
	StorageDescriptor sd = hiveTable.getSd();
	HiveTableUtil.setDefaultStorageFormat(sd, hiveConf);

	if (isGeneric) {
		DescriptorProperties tableSchemaProps = new DescriptorProperties(true);
		tableSchemaProps.putTableSchema(Schema.SCHEMA, table.getSchema());

		if (table instanceof CatalogTable) {
			tableSchemaProps.putPartitionKeys(((CatalogTable) table).getPartitionKeys());
		}

		properties.putAll(tableSchemaProps.asMap());
		properties = maskFlinkProperties(properties);
		hiveTable.setParameters(properties);
	} else {
		HiveTableUtil.initiateTableFromProperties(hiveTable, properties, hiveConf);
		List<FieldSchema> allColumns = HiveTableUtil.createHiveColumns(table.getSchema());
		// Table columns and partition keys
		if (table instanceof CatalogTableImpl) {
			CatalogTable catalogTable = (CatalogTableImpl) table;

			if (catalogTable.isPartitioned()) {
				int partitionKeySize = catalogTable.getPartitionKeys().size();
				List<FieldSchema> regularColumns = allColumns.subList(0, allColumns.size() - partitionKeySize);
				List<FieldSchema> partitionColumns = allColumns.subList(allColumns.size() - partitionKeySize, allColumns.size());

				sd.setCols(regularColumns);
				hiveTable.setPartitionKeys(partitionColumns);
			} else {
				sd.setCols(allColumns);
				hiveTable.setPartitionKeys(new ArrayList<>());
			}
		} else {
			sd.setCols(allColumns);
		}
		// Table properties
		hiveTable.getParameters().putAll(properties);
	}

	if (table instanceof CatalogViewImpl) {
		// TODO: [FLINK-12398] Support partitioned view in catalog API
		hiveTable.setPartitionKeys(new ArrayList<>());

		CatalogView view = (CatalogView) table;
		hiveTable.setViewOriginalText(view.getOriginalQuery());
		hiveTable.setViewExpandedText(view.getExpandedQuery());
		hiveTable.setTableType(TableType.VIRTUAL_VIEW.name());
	}

	return hiveTable;
}