Java Code Examples for org.apache.hadoop.hive.metastore.api.Database#setLocationUri()

The following examples show how to use org.apache.hadoop.hive.metastore.api.Database#setLocationUri(). You can vote up the examples you like or vote down the ones you don't, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: TestHCatalogStore.java    From incubator-tajo with Apache License 2.0 6 votes vote down vote up
@BeforeClass
public static void setUp() throws Exception {
  Path testPath = CommonTestingUtil.getTestDir();
  warehousePath = new Path(testPath, DB_NAME);

  // Configure an embedded, Derby-backed Hive metastore rooted under the test dir.
  HiveConf conf = new HiveConf();
  String jdbcUri = "jdbc:derby:;databaseName="+testPath.toUri().getPath()+"/metastore_db;create=true";
  conf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, warehousePath.toUri().toString());
  conf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, jdbcUri);

  // Create the test database through a pooled metastore client.
  TajoConf tajoConf = new TajoConf(conf);
  Database db = new Database();
  db.setLocationUri(warehousePath.toUri().toString());
  db.setName(DB_NAME);
  pool = new HCatalogStoreClientPool(1, tajoConf);
  HCatalogStoreClientPool.HCatalogStoreClient client = pool.getClient();
  try {
    client.getHiveClient().createDatabase(db);
  } finally {
    // Always return the client to the pool, even if createDatabase throws;
    // otherwise the single-client pool is exhausted for subsequent tests.
    client.release();
  }

  store = new HCatalogStore(tajoConf, pool);
}
 
Example 2
Source File: CatalogToHiveConverter.java    From aws-glue-data-catalog-client-for-apache-hive-metastore with Apache License 2.0 5 votes vote down vote up
/**
 * Converts an AWS Glue catalog database into its Hive metastore equivalent.
 * A missing Glue location URI is mapped to the empty string, and missing
 * parameters are mapped to an empty mutable map.
 */
public static Database convertDatabase(com.amazonaws.services.glue.model.Database catalogDatabase) {
  Database result = new Database();
  result.setName(catalogDatabase.getName());
  result.setDescription(catalogDatabase.getDescription());

  String locationUri = catalogDatabase.getLocationUri();
  if (locationUri == null) {
    locationUri = "";
  }
  result.setLocationUri(locationUri);

  result.setParameters(firstNonNull(catalogDatabase.getParameters(), Maps.<String, String>newHashMap()));
  return result;
}
 
Example 3
Source File: HiveCatalog.java    From iceberg with Apache License 2.0 5 votes vote down vote up
/**
 * Builds a Hive {@code Database} for the given namespace. The location defaults to
 * {@code <warehouse-dir>/<namespace>.db} unless the metadata supplies a "location"
 * entry; "comment" becomes the description and all other non-null entries become
 * database parameters.
 */
Database convertToDatabase(Namespace namespace, Map<String, String> meta) {
  String warehouseLocation = conf.get("hive.metastore.warehouse.dir");

  if (!isValidateNamespace(namespace)) {
    throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace);
  }

  Database database = new Database();
  Map<String, String> dbParameters = Maps.newHashMap();

  database.setName(namespace.level(0));
  // Default location; overridden below if meta contains an explicit "location".
  database.setLocationUri(new Path(warehouseLocation, namespace.level(0)).toString() + ".db");

  meta.forEach((key, value) -> {
    switch (key) {
      case "comment":
        database.setDescription(value);
        break;
      case "location":
        database.setLocationUri(value);
        break;
      default:
        if (value != null) {
          dbParameters.put(key, value);
        }
        break;
    }
  });
  database.setParameters(dbParameters);

  return database;
}
 
Example 4
Source File: HiveConvertersImpl.java    From metacat with Apache License 2.0 5 votes vote down vote up
/**
 * {@inheritDoc}
 */
/**
 * {@inheritDoc}
 */
@Override
public Database metacatToHiveDatabase(final DatabaseDto dto) {
    final Database database = new Database();

    String name = "";
    String description = "";
    final QualifiedName databaseName = dto.getName();
    if (databaseName != null) {
        name = databaseName.getDatabaseName();
        // Since this is required setting it to the same as the DB name for now
        description = databaseName.getDatabaseName();
    }
    database.setName(name);
    database.setDescription(description);

    String dbUri = dto.getUri();
    if (Strings.isNullOrEmpty(dbUri)) {
        dbUri = "";
    }
    database.setLocationUri(dbUri);

    Map<String, String> metadata = dto.getMetadata();
    if (metadata == null) {
        // Typed empty map: avoids the raw Collections.EMPTY_MAP and the
        // method-wide @SuppressWarnings("unchecked") it used to require.
        metadata = Collections.emptyMap();
    }
    database.setParameters(metadata);

    return database;
}
 
Example 5
Source File: MetaStoreRestApiTest.java    From submarine with Apache License 2.0 4 votes vote down vote up
@Before
public void createDatabase() {
  Database database = new Database();
  database.setName("testdb");
  database.setDescription("testdb");
  database.setLocationUri("hdfs://mycluster/user/hive/warehouse/testdb.db");
  Map<String, String> map = new HashMap<>();
  map.put("key", "value");
  database.setParameters(map);
  database.setOwnerName("root");
  database.setOwnerType(PrincipalType.USER);

  Gson gson = new Gson();
  String databaseJson = gson.toJson(database);

  metaStoreApi.createDatabase(databaseJson);
  Response databaseCountResponse = metaStoreApi.getDatabaseCount();
  assertEquals(Response.Status.OK.getStatusCode(), databaseCountResponse.getStatus());
  assertTrue(((String) databaseCountResponse.getEntity()).contains("\"result\":1"));

  // Seconds since epoch. NOTE: divide BEFORE casting — casting the long millis
  // to int first ((int) getTime() / 1000) overflows int and yields garbage.
  int nowSeconds = (int) (System.currentTimeMillis() / 1000);

  Table table = new Table();
  table.setTableName("testtable");
  table.setDbName("testdb");
  table.setOwner("root");
  table.setCreateTime(nowSeconds);
  table.setLastAccessTime(nowSeconds);
  table.setRetention(0);
  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> fieldSchemas = new ArrayList<>();
  FieldSchema fieldSchema = new FieldSchema();
  fieldSchema.setName("a");
  fieldSchema.setType("int");
  fieldSchema.setComment("a");
  fieldSchemas.add(fieldSchema);
  sd.setCols(fieldSchemas);
  sd.setLocation("hdfs://mycluster/user/hive/warehouse/testdb.db/testtable");
  sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  sd.setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
  sd.setCompressed(false);
  sd.setNumBuckets(-1);
  SerDeInfo serdeInfo = new SerDeInfo();
  serdeInfo.setName("test");
  serdeInfo.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
  Map<String, String> parametersMap = new HashMap<>();
  parametersMap.put("serialization.format", "|");
  parametersMap.put("field.delim", "|");
  serdeInfo.setParameters(parametersMap);
  sd.setSerdeInfo(serdeInfo);
  table.setSd(sd);
  List<FieldSchema> partitionKeys = new ArrayList<>();
  table.setPartitionKeys(partitionKeys);
  Map<String, String> parameters = new HashMap<>();
  table.setParameters(parameters);
  String viewOriginalText = "";
  table.setViewOriginalText(viewOriginalText);
  String viewExpandedText = "";
  table.setViewExpandedText(viewExpandedText);
  String tableType = "MANAGED_TABLE";
  table.setTableType(tableType);

  String tableJson = gson.toJson(table);
  metaStoreApi.createTable(tableJson);

  Response tableResponse = metaStoreApi.getTable("testdb", "testtable");
  assertEquals(Response.Status.OK.getStatusCode(), tableResponse.getStatus());
  assertTrue(((String) tableResponse.getEntity()).contains("\"tableName\":\"testtable\""));
  Response tableCountResponse = metaStoreApi.getTableCount();
  assertEquals(Response.Status.OK.getStatusCode(), tableCountResponse.getStatus());
  assertTrue(((String) tableCountResponse.getEntity()).contains("\"result\":1"));
}
 
Example 6
Source File: SubmarineMetaStoreTest.java    From submarine with Apache License 2.0 4 votes vote down vote up
@Before
public void createDatabase() throws InvalidObjectException, MetaException {
  listTables();

  Database database = new Database();
  database.setName("testdb");
  database.setDescription("testdb");
  database.setLocationUri("hdfs://mycluster/user/hive/warehouse/testdb.db");
  Map<String, String> map = new HashMap<>();
  map.put("key", "value");
  database.setParameters(map);
  database.setOwnerName("root");
  database.setOwnerType(PrincipalType.USER);
  submarineMetaStore.createDatabase(database);
  assertEquals(1, submarineMetaStore.getDatabaseCount());

  // Seconds since epoch. NOTE: divide BEFORE casting — casting the long millis
  // to int first ((int) getTime() / 1000) overflows int and yields garbage.
  int nowSeconds = (int) (System.currentTimeMillis() / 1000);

  Table table = new Table();
  table.setTableName("testtable");
  table.setDbName("testdb");
  table.setOwner("root");
  table.setCreateTime(nowSeconds);
  table.setLastAccessTime(nowSeconds);
  table.setRetention(0);
  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> fieldSchemas = new ArrayList<>();
  FieldSchema fieldSchema = new FieldSchema();
  fieldSchema.setName("a");
  fieldSchema.setType("int");
  fieldSchema.setComment("a");
  fieldSchemas.add(fieldSchema);
  sd.setCols(fieldSchemas);
  sd.setLocation("hdfs://mycluster/user/hive/warehouse/testdb.db/testtable");
  sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  sd.setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
  sd.setCompressed(false);
  sd.setNumBuckets(-1);
  SerDeInfo serdeInfo = new SerDeInfo();
  serdeInfo.setName("test");
  serdeInfo.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
  Map<String, String> parametersMap = new HashMap<>();
  parametersMap.put("serialization.format", "|");
  parametersMap.put("field.delim", "|");
  serdeInfo.setParameters(parametersMap);
  sd.setSerdeInfo(serdeInfo);
  table.setSd(sd);
  List<FieldSchema> partitionKeys = new ArrayList<>();
  table.setPartitionKeys(partitionKeys);
  Map<String, String> parameters = new HashMap<>();
  table.setParameters(parameters);
  String viewOriginalText = "";
  table.setViewOriginalText(viewOriginalText);
  String viewExpandedText = "";
  table.setViewExpandedText(viewExpandedText);
  String tableType = "MANAGED_TABLE";
  table.setTableType(tableType);
  submarineMetaStore.createTable(table);

  Table tableTest = submarineMetaStore.getTable("testdb", "testtable");
  assertEquals("testtable", tableTest.getTableName());
  int tableCount = submarineMetaStore.getTableCount();
  assertEquals(1, tableCount);
}
 
Example 7
Source File: HiveEntityFactory.java    From circus-train with Apache License 2.0 4 votes vote down vote up
/**
 * Creates a Hive {@code Database} with the given name and location URI set;
 * all other fields are left at their defaults.
 */
public static Database newDatabase(String name, String locationUri) {
  Database result = new Database();
  result.setName(name);
  result.setLocationUri(locationUri);
  return result;
}
 
Example 8
Source File: HiveDatabaseUtil.java    From flink with Apache License 2.0 4 votes vote down vote up
/**
 * Applies the changes described by {@code newDatabase} to the existing Hive database.
 * For generic databases the parameters are replaced wholesale; for Hive databases the
 * {@code ALTER_DATABASE_OP} property selects whether to merge properties, change the
 * location, or change the owner. Note: the op/location/owner entries are consumed
 * (removed) from {@code newDatabase}'s property map as a side effect.
 *
 * @throws CatalogException if the op property is missing or unsupported, or the owner
 *         type is missing or unsupported
 */
static Database alterDatabase(Database hiveDB, CatalogDatabase newDatabase) {
	Map<String, String> params = hiveDB.getParameters();
	boolean isGeneric = isGenericForGet(params);
	if (isGeneric) {
		// altering generic DB doesn't merge properties, see CatalogTest::testAlterDb
		hiveDB.setParameters(newDatabase.getProperties());
	} else {
		String opStr = newDatabase.getProperties().remove(ALTER_DATABASE_OP);
		if (opStr == null) {
			throw new CatalogException(ALTER_DATABASE_OP + " property is missing for alter database statement");
		}
		String newLocation = newDatabase.getProperties().remove(SqlCreateHiveDatabase.DATABASE_LOCATION_URI);
		Map<String, String> newParams = newDatabase.getProperties();
		SqlAlterHiveDatabase.AlterHiveDatabaseOp op = SqlAlterHiveDatabase.AlterHiveDatabaseOp.valueOf(opStr);
		switch (op) {
			case CHANGE_PROPS:
				if (params == null) {
					hiveDB.setParameters(newParams);
				} else {
					params.putAll(newParams);
				}
				break;
			case CHANGE_LOCATION:
				hiveDB.setLocationUri(newLocation);
				break;
			case CHANGE_OWNER:
				String ownerName = newParams.remove(DATABASE_OWNER_NAME);
				String ownerType = newParams.remove(DATABASE_OWNER_TYPE);
				hiveDB.setOwnerName(ownerName);
				if (ownerType == null) {
					// Guard: switching on a null String would throw a bare NPE;
					// fail with the same exception type as the other error paths.
					throw new CatalogException("Unsupported database owner type: null");
				}
				switch (ownerType) {
					case SqlAlterHiveDatabaseOwner.ROLE_OWNER:
						hiveDB.setOwnerType(PrincipalType.ROLE);
						break;
					case SqlAlterHiveDatabaseOwner.USER_OWNER:
						hiveDB.setOwnerType(PrincipalType.USER);
						break;
					default:
						throw new CatalogException("Unsupported database owner type: " + ownerType);
				}
				break;
			default:
				throw new CatalogException("Unsupported alter database op:" + opStr);
		}
	}
	return hiveDB;
}