Java Code Examples for org.apache.hadoop.hive.metastore.api.Database#setName()
The following examples show how to use
org.apache.hadoop.hive.metastore.api.Database#setName() .
You can vote up the examples you find useful or vote down the ones you don't,
and go to the original project or source file by following the links above each example. You may check out the related API usage in the sidebar.
Example 1
Source File: MetastoreClientDatabaseIntegrationTest.java From aws-glue-data-catalog-client-for-apache-hive-metastore with Apache License 2.0 | 6 votes |
/**
 * Verifies that altering a database replaces its parameters: an updated
 * database is written under the original name, and the new parameters are
 * visible on a subsequent read of that database.
 */
@Test
public void alterDatabase() throws TException {
  Map<String, String> replacementParams = Maps.newHashMap();
  replacementParams.put("param3", "value3");
  replacementParams.put("param4", "value4");

  metastoreClient.createDatabase(hiveDB);

  // Build a replacement database carrying the same name but new parameters.
  Database updatedDB = CatalogToHiveConverter.convertDatabase(getTestDatabase());
  updatedDB.setName(hiveDB.getName());
  updatedDB.setParameters(replacementParams);
  metastoreClient.alterDatabase(hiveDB.getName(), updatedDB);

  Database afterUpdate = metastoreClient.getDatabase(hiveDB.getName());
  assertTrue(afterUpdate.getParameters().containsKey("param3"));
  assertTrue(afterUpdate.getParameters().containsKey("param4"));
}
Example 2
Source File: TestHCatalogStore.java From incubator-tajo with Apache License 2.0 | 6 votes |
@BeforeClass public static void setUp() throws Exception { Path testPath = CommonTestingUtil.getTestDir(); warehousePath = new Path(testPath, DB_NAME); //create local hiveMeta HiveConf conf = new HiveConf(); String jdbcUri = "jdbc:derby:;databaseName="+testPath.toUri().getPath()+"/metastore_db;create=true"; conf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, warehousePath.toUri().toString()); conf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, jdbcUri); // create local HCatalogStore. TajoConf tajoConf = new TajoConf(conf); Database db = new Database(); db.setLocationUri(warehousePath.toUri().toString()); db.setName(DB_NAME); pool = new HCatalogStoreClientPool(1, tajoConf); HCatalogStoreClientPool.HCatalogStoreClient client = pool.getClient(); client.getHiveClient().createDatabase(db); client.release(); store = new HCatalogStore(tajoConf, pool); }
Example 3
Source File: CatalogToHiveConverter.java From aws-glue-data-catalog-client-for-apache-hive-metastore with Apache License 2.0 | 5 votes |
/**
 * Converts a Glue catalog {@code Database} into a Hive metastore
 * {@link Database}. A null location URI is normalized to the empty string
 * and null parameters to an empty map.
 *
 * @param catalogDatabase the Glue catalog database to convert
 * @return the equivalent Hive database
 */
public static Database convertDatabase(com.amazonaws.services.glue.model.Database catalogDatabase) {
  Database hiveDatabase = new Database();
  hiveDatabase.setName(catalogDatabase.getName());
  hiveDatabase.setDescription(catalogDatabase.getDescription());

  String locationUri = catalogDatabase.getLocationUri();
  hiveDatabase.setLocationUri(locationUri == null ? "" : locationUri);

  Map<String, String> parameters =
      firstNonNull(catalogDatabase.getParameters(), Maps.<String, String>newHashMap());
  hiveDatabase.setParameters(parameters);
  return hiveDatabase;
}
Example 4
Source File: HiveCatalog.java From iceberg with Apache License 2.0 | 5 votes |
/**
 * Builds a Hive {@link Database} for the given single-level namespace.
 * The {@code comment} and {@code location} metadata keys map to the database
 * description and location URI respectively; all other non-null entries
 * become database parameters. When no {@code location} entry is supplied,
 * the location defaults to {@code <hive.metastore.warehouse.dir>/<name>.db}.
 *
 * @throws NoSuchNamespaceException if the namespace is not valid
 */
Database convertToDatabase(Namespace namespace, Map<String, String> meta) {
  String warehouseLocation = conf.get("hive.metastore.warehouse.dir");
  if (!isValidateNamespace(namespace)) {
    throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace);
  }

  Database database = new Database();
  Map<String, String> parameters = Maps.newHashMap();
  database.setName(namespace.level(0));
  // Default location; may be overridden by an explicit "location" entry below.
  database.setLocationUri(new Path(warehouseLocation, namespace.level(0)).toString() + ".db");

  meta.forEach((key, value) -> {
    switch (key) {
      case "comment":
        database.setDescription(value);
        break;
      case "location":
        database.setLocationUri(value);
        break;
      default:
        if (value != null) {
          parameters.put(key, value);
        }
        break;
    }
  });
  database.setParameters(parameters);

  return database;
}
Example 5
Source File: DatabaseMappingImplTest.java From waggle-dance with Apache License 2.0 | 5 votes |
// Wires up a DatabaseMappingImpl over mocked metastore/query mappings and
// builds the shared fixtures (database, partition, index, object ref,
// privileges, partition spec) — all keyed on DB_NAME — plus the stubbed
// inbound/outbound name and view-text transformations used by the tests.
@Before public void setUp() {
  databaseMapping = new DatabaseMappingImpl(metastoreMapping, queryMapping);
  database = new Database();
  database.setName(DB_NAME);
  partition = new Partition();
  partition.setDbName(DB_NAME);
  partitions = Lists.newArrayList(partition);
  index = new Index();
  index.setDbName(DB_NAME);
  // A DATABASE-typed object reference pointing at the same DB_NAME.
  hiveObjectRef = new HiveObjectRef();
  hiveObjectRef.setDbName(DB_NAME);
  hiveObjectRef.setObjectType(HiveObjectType.DATABASE);
  hiveObjectRef.setObjectName(DB_NAME);
  hiveObjectPrivileges = new ArrayList<>();
  HiveObjectPrivilege hiveObjectPrivilege = new HiveObjectPrivilege();
  hiveObjectPrivilege.setHiveObject(hiveObjectRef);
  hiveObjectPrivileges.add(hiveObjectPrivilege);
  partitionSpec = new PartitionSpec();
  partitionSpec.setDbName(DB_NAME);
  // Stub name transformations: any inbound name maps to IN_DB_NAME, any
  // outbound name to OUT_DB_NAME.
  when(metastoreMapping.transformInboundDatabaseName(anyString())).thenReturn(IN_DB_NAME);
  when(metastoreMapping.transformOutboundDatabaseName(anyString())).thenReturn(OUT_DB_NAME);
  // Stub view-text rewriting for expanded and original view SQL.
  when(queryMapping.transformOutboundDatabaseName(metastoreMapping, VIEW_EXPANDED_TEXT))
      .thenReturn(VIEW_EXPANDED_TEXT_TRANSFORMED);
  when(queryMapping.transformOutboundDatabaseName(metastoreMapping, VIEW_ORIGINAL_TEXT))
      .thenReturn(VIEW_ORIGINAL_TEXT_TRANSFORMED);
}
Example 6
Source File: FederatedHMSHandlerTest.java From waggle-dance with Apache License 2.0 | 5 votes |
/**
 * create_database must route through the primary mapping: the incoming
 * database is transformed to its inbound form, and it is that inbound
 * database which actually gets created.
 */
@Test
public void create_database() throws Exception {
  Database incoming = new Database();
  incoming.setName(DB_P);
  Database inbound = new Database();
  inbound.setName("inbound");
  when(primaryMapping.transformInboundDatabase(incoming)).thenReturn(inbound);

  handler.create_database(incoming);

  verify(primaryMapping).createDatabase(inbound);
}
Example 7
Source File: FederatedHMSHandlerTest.java From waggle-dance with Apache License 2.0 | 5 votes |
/**
 * get_database must translate the requested name to its inbound form,
 * fetch from the primary client, and return the outbound-transformed
 * database to the caller.
 */
@Test
public void get_database() throws Exception {
  Database stored = new Database();
  stored.setName(DB_P);
  Database outbound = new Database();
  outbound.setName("outbound");
  when(primaryMapping.transformInboundDatabaseName(DB_P)).thenReturn("inbound");
  when(primaryClient.get_database("inbound")).thenReturn(stored);
  when(primaryMapping.transformOutboundDatabase(stored)).thenReturn(outbound);

  Database result = handler.get_database(DB_P);

  assertThat(result, is(outbound));
}
Example 8
Source File: FederatedHMSHandlerTest.java From waggle-dance with Apache License 2.0 | 5 votes |
/**
 * alter_database must check write permissions (once for the name, once for
 * the database transform) and forward the inbound-transformed name and
 * database to the primary client.
 */
@Test
public void alter_database() throws TException {
  Database incoming = new Database();
  incoming.setName(DB_P);
  Database inbound = new Database();
  inbound.setName("inbound");
  when(primaryMapping.transformInboundDatabase(incoming)).thenReturn(inbound);
  when(primaryMapping.transformInboundDatabaseName(DB_P)).thenReturn("inbound");

  handler.alter_database(DB_P, incoming);

  verify(primaryMapping, times(2)).checkWritePermissions(DB_P);
  verify(primaryClient).alter_database("inbound", inbound);
}
Example 9
Source File: HiveConvertersImpl.java From metacat with Apache License 2.0 | 5 votes |
/** * {@inheritDoc} */ @Override @SuppressWarnings("unchecked") public Database metacatToHiveDatabase(final DatabaseDto dto) { final Database database = new Database(); String name = ""; String description = ""; final QualifiedName databaseName = dto.getName(); if (databaseName != null) { name = databaseName.getDatabaseName(); // Since this is required setting it to the same as the DB name for now description = databaseName.getDatabaseName(); } database.setName(name); database.setDescription(description); String dbUri = dto.getUri(); if (Strings.isNullOrEmpty(dbUri)) { dbUri = ""; } database.setLocationUri(dbUri); Map<String, String> metadata = dto.getMetadata(); if (metadata == null) { metadata = Collections.EMPTY_MAP; } database.setParameters(metadata); return database; }
Example 10
Source File: MetaStoreRestApiTest.java From submarine with Apache License 2.0 | 4 votes |
/**
 * Creates a test database and a test table through the metastore REST API
 * and verifies both are visible via the count and get endpoints.
 */
@Before
public void createDatabase() {
  Database database = new Database();
  database.setName("testdb");
  database.setDescription("testdb");
  database.setLocationUri("hdfs://mycluster/user/hive/warehouse/testdb.db");
  Map<String, String> map = new HashMap<>();
  map.put("key", "value");
  database.setParameters(map);
  database.setOwnerName("root");
  database.setOwnerType(PrincipalType.USER);

  Gson gson = new Gson();
  String databaseJson = gson.toJson(database);
  metaStoreApi.createDatabase(databaseJson);
  Response databaseCountResponse = metaStoreApi.getDatabaseCount();
  assertEquals(databaseCountResponse.getStatus(), Response.Status.OK.getStatusCode());
  assertTrue(((String) databaseCountResponse.getEntity()).contains("\"result\":1"));

  Table table = new Table();
  table.setTableName("testtable");
  table.setDbName("testdb");
  table.setOwner("root");
  // BUG FIX: the original wrote "(int) new Date().getTime() / 1000", which
  // narrows the millisecond epoch to int (overflowing) BEFORE dividing.
  // Divide to seconds first, then narrow.
  int nowSeconds = (int) (System.currentTimeMillis() / 1000);
  table.setCreateTime(nowSeconds);
  table.setLastAccessTime(nowSeconds);
  table.setRetention(0);

  // Storage descriptor with a single int column "a" and text I/O formats.
  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> fieldSchemas = new ArrayList<>();
  FieldSchema fieldSchema = new FieldSchema();
  fieldSchema.setName("a");
  fieldSchema.setType("int");
  fieldSchema.setComment("a");
  fieldSchemas.add(fieldSchema);
  sd.setCols(fieldSchemas);
  sd.setLocation("hdfs://mycluster/user/hive/warehouse/testdb.db/testtable");
  sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  sd.setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
  sd.setCompressed(false);
  sd.setNumBuckets(-1);

  SerDeInfo serdeInfo = new SerDeInfo();
  serdeInfo.setName("test");
  serdeInfo.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
  Map<String, String> parametersMap = new HashMap<>();
  parametersMap.put("serialization.format", "|");
  parametersMap.put("field.delim", "|");
  serdeInfo.setParameters(parametersMap);
  sd.setSerdeInfo(serdeInfo);
  table.setSd(sd);

  List<FieldSchema> partitionKeys = new ArrayList<>();
  table.setPartitionKeys(partitionKeys);
  Map<String, String> parameters = new HashMap<>();
  table.setParameters(parameters);
  String viewOriginalText = "";
  table.setViewOriginalText(viewOriginalText);
  String viewExpandedText = "";
  table.setViewExpandedText(viewExpandedText);
  String tableType = "MANAGED_TABLE";
  table.setTableType(tableType);

  String tableJson = gson.toJson(table);
  metaStoreApi.createTable(tableJson);
  Response tableResponse = metaStoreApi.getTable("testdb", "testtable");
  assertEquals(tableResponse.getStatus(), Response.Status.OK.getStatusCode());
  assertTrue(((String) tableResponse.getEntity()).contains("\"tableName\":\"testtable\""));
  Response tableCountResponse = metaStoreApi.getTableCount();
  assertEquals(tableCountResponse.getStatus(), Response.Status.OK.getStatusCode());
  assertTrue(((String) tableCountResponse.getEntity()).contains("\"result\":1"));
}
Example 11
Source File: SubmarineMetaStoreTest.java From submarine with Apache License 2.0 | 4 votes |
/**
 * Creates a test database and a test table directly through the
 * SubmarineMetaStore and verifies both via count/get calls.
 */
@Before
public void createDatabase() throws InvalidObjectException, MetaException {
  listTables();

  Database database = new Database();
  database.setName("testdb");
  database.setDescription("testdb");
  database.setLocationUri("hdfs://mycluster/user/hive/warehouse/testdb.db");
  // Use a parameterized map rather than the original's raw Map/HashMap.
  Map<String, String> map = new HashMap<>();
  map.put("key", "value");
  database.setParameters(map);
  database.setOwnerName("root");
  database.setOwnerType(PrincipalType.USER);
  submarineMetaStore.createDatabase(database);
  assertEquals(1, submarineMetaStore.getDatabaseCount());

  Table table = new Table();
  table.setTableName("testtable");
  table.setDbName("testdb");
  table.setOwner("root");
  // BUG FIX: the original wrote "(int) new Date().getTime() / 1000", which
  // narrows the millisecond epoch to int (overflowing) BEFORE dividing.
  // Divide to seconds first, then narrow.
  int nowSeconds = (int) (System.currentTimeMillis() / 1000);
  table.setCreateTime(nowSeconds);
  table.setLastAccessTime(nowSeconds);
  table.setRetention(0);

  // Storage descriptor with a single int column "a" and text I/O formats.
  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> fieldSchemas = new ArrayList<>();
  FieldSchema fieldSchema = new FieldSchema();
  fieldSchema.setName("a");
  fieldSchema.setType("int");
  fieldSchema.setComment("a");
  fieldSchemas.add(fieldSchema);
  sd.setCols(fieldSchemas);
  sd.setLocation("hdfs://mycluster/user/hive/warehouse/testdb.db/testtable");
  sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  sd.setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
  sd.setCompressed(false);
  sd.setNumBuckets(-1);

  SerDeInfo serdeInfo = new SerDeInfo();
  serdeInfo.setName("test");
  serdeInfo.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
  Map<String, String> parametersMap = new HashMap<>();
  parametersMap.put("serialization.format", "|");
  parametersMap.put("field.delim", "|");
  serdeInfo.setParameters(parametersMap);
  sd.setSerdeInfo(serdeInfo);
  table.setSd(sd);

  List<FieldSchema> partitionKeys = new ArrayList<>();
  table.setPartitionKeys(partitionKeys);
  Map<String, String> parameters = new HashMap<>();
  table.setParameters(parameters);
  String viewOriginalText = "";
  table.setViewOriginalText(viewOriginalText);
  String viewExpandedText = "";
  table.setViewExpandedText(viewExpandedText);
  String tableType = "MANAGED_TABLE";
  table.setTableType(tableType);
  submarineMetaStore.createTable(table);

  Table tableTest = submarineMetaStore.getTable("testdb", "testtable");
  assertEquals("testtable", tableTest.getTableName());
  int tableCount = submarineMetaStore.getTableCount();
  assertEquals(1, tableCount);
}
Example 12
Source File: HiveEntityFactory.java From circus-train with Apache License 2.0 | 4 votes |
/**
 * Builds a Hive {@link Database} with the given name and location URI.
 *
 * @param name the database name
 * @param locationUri the database location URI
 * @return a freshly constructed {@code Database}
 */
public static Database newDatabase(String name, String locationUri) {
  Database result = new Database();
  result.setName(name);
  result.setLocationUri(locationUri);
  return result;
}
Example 13
Source File: DatabaseMappingImpl.java From waggle-dance with Apache License 2.0 | 4 votes |
/**
 * Rewrites the database's name to its inbound form via the metastore
 * mapping. Mutates and returns the same {@link Database} instance.
 */
@Override
public Database transformInboundDatabase(Database database) {
  String inboundName = metaStoreMapping.transformInboundDatabaseName(database.getName());
  database.setName(inboundName);
  return database;
}
Example 14
Source File: MetaStoreMappingImpl.java From waggle-dance with Apache License 2.0 | 4 votes |
/**
 * Rewrites the database's name to its outbound form. Mutates and returns
 * the same {@link Database} instance.
 */
@Override
public Database transformOutboundDatabase(Database database) {
  String outboundName = transformOutboundDatabaseName(database.getName());
  database.setName(outboundName);
  return database;
}
Example 15
Source File: IdentityMappingTest.java From waggle-dance with Apache License 2.0 | 4 votes |
/**
 * Prepares an identity mapping over the mocked metastore mapping and a
 * fixture database named {@code DB_NAME}.
 */
@Before
public void setUp() {
  databaseMapping = new IdentityMapping(metastoreMapping);
  Database fixtureDb = new Database();
  fixtureDb.setName(DB_NAME);
  database = fixtureDb;
}
Example 16
Source File: AbstractMetastoreTestWithStaticConfiguration.java From incubator-sentry with Apache License 2.0 | 4 votes |
/**
 * Creates a metastore database with the given name (all other fields left
 * at their defaults).
 *
 * @param client the metastore client used to create the database
 * @param dbName the name of the database to create
 * @throws Exception if the metastore call fails
 */
public void createMetastoreDB(HiveMetaStoreClient client, String dbName) throws Exception {
  Database newDb = new Database();
  newDb.setName(dbName);
  client.createDatabase(newDb);
}