Java Code Examples for org.apache.hadoop.hive.metastore.api.Table#setTableType()

The following examples show how to use org.apache.hadoop.hive.metastore.api.Table#setTableType(). Each example is taken from an open-source project, credited above the code along with its license.
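Before the project examples, here is a minimal, self-contained sketch of the basic pattern (not taken from any of the projects below; the database and table names are illustrative). The Thrift-generated Table stores its type as a plain String, so callers conventionally pass a value from the org.apache.hadoop.hive.metastore.TableType enum:

import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Table;

public class SetTableTypeSketch {
  public static void main(String[] args) {
    Table table = new Table();
    table.setDbName("example_db");        // illustrative name
    table.setTableName("example_table");  // illustrative name
    // setTableType(String) accepts any string; TableType.name() yields
    // "MANAGED_TABLE", "EXTERNAL_TABLE", "VIRTUAL_VIEW", etc.
    table.setTableType(TableType.EXTERNAL_TABLE.name());
    System.out.println(table.getTableType()); // prints EXTERNAL_TABLE
  }
}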
Example 1
Source File: TestUtils.java    From waggle-dance with Apache License 2.0
static Table createPartitionedTable(HiveMetaStoreClient metaStoreClient, String database, String table, File location)
  throws Exception {

  Table hiveTable = new Table();
  hiveTable.setDbName(database);
  hiveTable.setTableName(table);
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.putToParameters("EXTERNAL", "TRUE");

  hiveTable.setPartitionKeys(PARTITION_COLUMNS);

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setLocation(location.toURI().toString());
  sd.setParameters(new HashMap<>());
  sd.setSerdeInfo(new SerDeInfo());

  hiveTable.setSd(sd);

  metaStoreClient.createTable(hiveTable);

  return hiveTable;
}
 
Example 2
Source File: ReplicaTest.java    From circus-train with Apache License 2.0
private Table newTable() {
  Table table = new Table();
  table.setDbName(DB_NAME);
  table.setTableName(TABLE_NAME);
  table.setTableType(TableType.EXTERNAL_TABLE.name());

  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation(tableLocation);
  table.setSd(sd);

  HashMap<String, String> parameters = new HashMap<>();
  parameters.put(StatsSetupConst.ROW_COUNT, "1");
  table.setParameters(parameters);

  table.setPartitionKeys(PARTITIONS);
  return table;
}
 
Example 3
Source File: TestUtils.java    From waggle-dance with Apache License 2.0
static Table createUnpartitionedTable(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String table,
    File location)
  throws TException {
  Table hiveTable = new Table();
  hiveTable.setDbName(database);
  hiveTable.setTableName(table);
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.putToParameters("EXTERNAL", "TRUE");

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setLocation(location.toURI().toString());
  sd.setParameters(new HashMap<>());
  sd.setSerdeInfo(new SerDeInfo());

  hiveTable.setSd(sd);

  metaStoreClient.createTable(hiveTable);

  return hiveTable;
}
 
Example 4
Source File: TestUtils.java    From circus-train with Apache License 2.0
private static Table createView(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String view,
    String table,
    List<FieldSchema> partitionCols)
  throws TException {
  Table hiveView = new Table();
  hiveView.setDbName(database);
  hiveView.setTableName(view);
  hiveView.setTableType(TableType.VIRTUAL_VIEW.name());
  hiveView.setViewOriginalText(hql(database, table));
  hiveView.setViewExpandedText(expandHql(database, table, DATA_COLUMNS, partitionCols));
  hiveView.setPartitionKeys(partitionCols);

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  hiveView.setSd(sd);

  metaStoreClient.createTable(hiveView);

  return hiveView;
}
 
Example 5
Source File: MetastoreClientTableIntegrationTest.java    From aws-glue-data-catalog-client-for-apache-hive-metastore with Apache License 2.0
@Test
public void alterTableValid() throws Exception {
  // TODO: add a test for ALTER TABLE ... CASCADE.
  // If the change affects a column and cascade is enabled, the table's partitions are also updated.
  String newType = "boolean";
  Table newHiveTable = CatalogToHiveConverter.convertTable(getTestTable(), hiveTable.getDbName());

  // changing table name is not supported
  newHiveTable.setTableName(hiveTable.getTableName());

  Path oldDBPath = new Path(hiveDB.getLocationUri());
  Path oldTablePath = new Path(hiveTable.getSd().getLocation());
  Path newTablePath = new Path(oldDBPath, newHiveTable.getTableName());

  when(wh.getDatabasePath(hiveDB)).thenReturn(oldDBPath);
  when(wh.getFs(oldTablePath)).thenReturn(new RawLocalFileSystem());
  when(wh.getFs(newTablePath)).thenReturn(new RawLocalFileSystem());

  newHiveTable.setTableType(newType);
  metastoreClient.createTable(hiveTable);

  metastoreClient.alter_table(newHiveTable.getDbName(), hiveTable.getTableName(), newHiveTable);
  Table result = metastoreClient.getTable(hiveDB.getName(), newHiveTable.getTableName());

  assertEquals(newType, result.getTableType());
}
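Note: the tableType field on the Thrift-generated Table is an untyped String, so setTableType accepts arbitrary values; this test appears to use "boolean" purely as a sentinel to verify that alter_table propagates the field.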
 
Example 6
Source File: CatalogToHiveConverter.java    From aws-glue-data-catalog-client-for-apache-hive-metastore with Apache License 2.0
public static Table convertTable(com.amazonaws.services.glue.model.Table catalogTable, String dbname) {
  Table hiveTable = new Table();
  hiveTable.setDbName(dbname);
  hiveTable.setTableName(catalogTable.getName());
  Date createTime = catalogTable.getCreateTime();
  hiveTable.setCreateTime(createTime == null ? 0 : (int) (createTime.getTime() / 1000));
  hiveTable.setOwner(catalogTable.getOwner());
  Date lastAccessedTime = catalogTable.getLastAccessTime();
  hiveTable.setLastAccessTime(lastAccessedTime == null ? 0 : (int) (lastAccessedTime.getTime() / 1000));
  hiveTable.setRetention(catalogTable.getRetention());
  hiveTable.setSd(convertStorageDescriptor(catalogTable.getStorageDescriptor()));
  hiveTable.setPartitionKeys(convertFieldSchemaList(catalogTable.getPartitionKeys()));
  // Hive may throw an NPE during dropTable if the parameter map is null.
  Map<String, String> parameterMap = catalogTable.getParameters();
  if (parameterMap == null) {
    parameterMap = Maps.newHashMap();
  }
  hiveTable.setParameters(parameterMap);
  hiveTable.setViewOriginalText(catalogTable.getViewOriginalText());
  hiveTable.setViewExpandedText(catalogTable.getViewExpandedText());
  hiveTable.setTableType(catalogTable.getTableType());

  return hiveTable;
}
 
Example 7
Source File: AvroHiveTableStrategy.java    From data-highway with Apache License 2.0
@Override
public Table newHiveTable(
    String databaseName,
    String tableName,
    String partitionColumnName,
    String location,
    Schema schema,
    int version) {

  Table table = new Table();
  table.setDbName(databaseName);
  table.setTableName(tableName);

  table.setTableType(TableType.EXTERNAL_TABLE.toString());
  table.putToParameters("EXTERNAL", "TRUE");
  addRoadAnnotations(table);

  URI schemaUri = uriResolver.resolve(schema, table.getTableName(), version);
  table.putToParameters(AVRO_SCHEMA_URL, schemaUri.toString());
  table.putToParameters(AVRO_SCHEMA_VERSION, Integer.toString(version));
  table.setPartitionKeys(Arrays.asList(new FieldSchema(partitionColumnName, "string", null)));

  table.setSd(AvroStorageDescriptorFactory.create(location));

  return table;
}
 
Example 8
Source File: HiveMetadataFetcherTest.java    From pxf with Apache License 2.0
@Test
public void getTableMetadataView() throws Exception {
    expectedException.expect(UnsupportedOperationException.class);
    expectedException.expectMessage("Hive views are not supported by PXF");

    String tableName = "cause";
    fetcher = new HiveMetadataFetcher(context, mockConfigurationFactory, fakeHiveClientWrapper);

    // mock hive table returned from hive client
    Table hiveTable = new Table();
    hiveTable.setTableType("VIRTUAL_VIEW");
    when(mockHiveClient.getTable("default", tableName)).thenReturn(hiveTable);

    metadataList = fetcher.getMetadata(tableName);
}
 
Example 9
Source File: HiveTableUtil.java    From flink with Apache License 2.0
private static void extractExternal(Table hiveTable, Map<String, String> properties) {
	boolean external = Boolean.parseBoolean(properties.remove(TABLE_IS_EXTERNAL));
	if (external) {
		hiveTable.setTableType(TableType.EXTERNAL_TABLE.toString());
		// follow Hive to set this property
		hiveTable.getParameters().put("EXTERNAL", "TRUE");
	}
}
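As a complement, here is an illustrative sketch of the inverse check (the isExternal helper is not part of HiveTableUtil and assumes the same imports as the snippet above). Hive marks external tables both via the table type and the "EXTERNAL" parameter, so a defensive check inspects both:

private static boolean isExternal(Table hiveTable) {
	Map<String, String> parameters = hiveTable.getParameters();
	boolean externalParameter = parameters != null && "TRUE".equalsIgnoreCase(parameters.get("EXTERNAL"));
	return TableType.EXTERNAL_TABLE.name().equals(hiveTable.getTableType()) || externalParameter;
}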
 
Example 10
Source File: TestUtils.java    From circus-train with Apache License 2.0
public static Table newTable(String database, String tableName) {
  Table table = new Table();
  table.setDbName(database);
  table.setTableName(tableName);
  table.setTableType(TABLE_TYPE);
  table.setOwner(OWNER);
  table.setCreateTime(CREATE_TIME);
  table.setRetention(RETENTION);

  Map<String, List<PrivilegeGrantInfo>> userPrivileges = new HashMap<>();
  userPrivileges.put("read", ImmutableList.of(new PrivilegeGrantInfo()));
  PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet();
  privileges.setUserPrivileges(userPrivileges);
  table.setPrivileges(privileges);

  StorageDescriptor storageDescriptor = new StorageDescriptor();
  storageDescriptor.setCols(COLS);
  storageDescriptor.setInputFormat(INPUT_FORMAT);
  storageDescriptor.setOutputFormat(OUTPUT_FORMAT);
  storageDescriptor.setSerdeInfo(new SerDeInfo(SERDE_INFO_NAME, SERIALIZATION_LIB, new HashMap<String, String>()));
  storageDescriptor.setSkewedInfo(new SkewedInfo());
  storageDescriptor.setParameters(new HashMap<String, String>());
  storageDescriptor.setLocation(DATABASE + "/" + tableName + "/");
  table.setSd(storageDescriptor);

  Map<String, String> parameters = new HashMap<>();
  parameters.put("com.company.parameter", "abc");
  table.setParameters(parameters);

  return table;
}
 
Example 11
Source File: ConvertibleHiveDatasetTest.java    From incubator-gobblin with Apache License 2.0
public static Table getTestTable(String dbName, String tableName) {
  Table table = new Table();
  table.setDbName(dbName);
  table.setTableName(tableName);
  table.setTableType(TableType.EXTERNAL_TABLE.name());
  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation("/tmp/test");
  table.setSd(sd);
  return table;
}
 
Example 12
Source File: HiveEntityFactory.java    From circus-train with Apache License 2.0
public static Table newTable(String name, String dbName, List<FieldSchema> partitionKeys, StorageDescriptor sd) {
  Table table = new Table();
  table.setTableName(name);
  table.setDbName(dbName);
  table.setSd(sd);
  table.setPartitionKeys(partitionKeys);
  table.setTableType(TableType.EXTERNAL_TABLE.name());
  table.setParameters(new HashMap<String, String>());
  return table;
}
 
Example 13
Source File: FilterToolIntegrationTest.java    From circus-train with Apache License 2.0
private void createTable(File sourceTableUri) throws Exception {
  File partitionEurope = new File(sourceTableUri, "local_date=2000-01-01");
  File partitionUk = new File(partitionEurope, "local_hour=0");
  File dataFileUk = new File(partitionUk, PART_00000);
  FileUtils.writeStringToFile(dataFileUk, "1\tadam\tlondon\n2\tsusan\tglasgow\n");

  File partitionAsia = new File(sourceTableUri, "local_date=2000-01-02");
  File partitionChina = new File(partitionAsia, "local_hour=0");
  File dataFileChina = new File(partitionChina, PART_00000);
  String data = "1\tchun\tbeijing\n2\tshanghai\tmilan\n";
  FileUtils.writeStringToFile(dataFileChina, data);

  HiveMetaStoreClient sourceClient = sourceCatalog.client();

  Table source = new Table();
  source.setDbName(DATABASE);
  source.setTableName(TABLE);
  source.setTableType(TableType.EXTERNAL_TABLE.name());
  source.setParameters(new HashMap<String, String>());

  List<FieldSchema> partitionColumns = Arrays.asList(new FieldSchema("local_date", "string", ""),
      new FieldSchema("local_hour", "string", ""));
  source.setPartitionKeys(partitionColumns);

  List<FieldSchema> dataColumns = Arrays.asList(new FieldSchema("id", "bigint", ""),
      new FieldSchema("name", "string", ""), new FieldSchema("city", "tinyint", ""));

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(dataColumns);
  sd.setLocation(sourceTableUri.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());

  source.setSd(sd);

  sourceClient.createTable(source);
  LOG.info(">>>> Partitions added: {}",
      +sourceClient.add_partitions(Arrays.asList(newPartition(sd, Arrays.asList("2000-01-01", "0"), partitionUk),
          newPartition(sd, Arrays.asList("2000-01-02", "0"), partitionChina))));
}
 
Example 14
Source File: ReplicaTableFactory.java    From circus-train with Apache License 2.0
private void setReplicaTableType(Table source, Table replica) {
  if (TableType.VIRTUAL_VIEW.name().equals(source.getTableType())) {
    replica.setTableType(TableType.VIRTUAL_VIEW.name());
    return;
  }
  // We set the table to external no matter what. We don't want to delete data accidentally when dropping a mirrored
  // table.
  replica.setTableType(TableType.EXTERNAL_TABLE.name());
  replica.putToParameters(EXTERNAL, "TRUE");
}
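As this example and Examples 7, 9, and 20 suggest, setting TableType.EXTERNAL_TABLE alone is not enough for Hive to leave the underlying data in place when the table is dropped; the "EXTERNAL"="TRUE" table parameter must be set as well.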
 
Example 15
Source File: CircusTrainTest.java    From circus-train with Apache License 2.0
@Before
public void before() throws TException, IOException {
  Table table = new Table();
  table.setDbName(DATABASE);
  table.setTableName("source_" + TABLE);
  table.setTableType(TableType.EXTERNAL_TABLE.name());
  table.putToParameters("EXTERNAL", "TRUE");

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(Arrays.asList(new FieldSchema("col1", "string", null)));
  sd.setSerdeInfo(new SerDeInfo());
  table.setSd(sd);

  hive.client().createTable(table);
}
 
Example 16
Source File: TestManagedExternalHandling.java    From kite with Apache License 2.0
@Test
public void testRepositoryList() throws Exception {
  // create unreadable hive tables
  MetaStoreUtil metastore = MetaStoreUtil.get(new Configuration());
  metastore.dropTable("default", "bad_type");
  metastore.dropTable("bad", "bad_serde");
  metastore.dropTable("bad", "bad_schema");

  Table badType = HiveUtils.createEmptyTable("default", "bad_type");
  badType.setTableType(TableType.VIRTUAL_VIEW.toString());
  metastore.createTable(badType);

  Table badSerDe = HiveUtils.createEmptyTable("bad", "bad_serde");
  badSerDe.setTableType(TableType.MANAGED_TABLE.toString()); // readable type
  badSerDe.getSd().getSerdeInfo().setSerializationLib("com.example.ExampleHiveSerDe");
  metastore.createTable(badSerDe);

  // add a bad schema if decimal is supported (not supported by Kite)
  if (HiveSchemaConverter.decimalClass != null) {
    Table badSchema = HiveUtils.createEmptyTable("bad", "bad_schema");
    badSchema.setTableType(TableType.MANAGED_TABLE.toString()); // readable type
    badSchema.getSd().getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.avro.AvroSerDe");
    badSchema.getSd().setInputFormat("org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat");
    badSchema.getSd().setOutputFormat("org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat");
    badSchema.getSd().getCols().add(new FieldSchema("invalid", "decimal(1,2)", null));
    metastore.createTable(badSchema);
  }

  // note that unreadable tables are not in the lists
  Set<String> expectedNamespaces = Sets.newHashSet("default", "ns");
  Assert.assertEquals("Managed should list namespaces with external and managed tables",
      expectedNamespaces, Sets.newHashSet(managed.namespaces()));
  Assert.assertEquals("External should list namespaces with external and managed tables",
      expectedNamespaces, Sets.newHashSet(external.namespaces()));

  Set<String> expectedInDefault = Sets.newHashSet("managed");
  Assert.assertEquals("Managed should list external and managed tables",
      expectedInDefault, Sets.newHashSet(managed.datasets("default")));
  Assert.assertEquals("External should list external and managed tables",
      expectedInDefault, Sets.newHashSet(external.datasets("default")));

  Set<String> expectedInNS = Sets.newHashSet("external");
  Assert.assertEquals("Managed should list external and managed tables",
      expectedInNS, Sets.newHashSet(managed.datasets("ns")));
  Assert.assertEquals("External should list external and managed tables",
      expectedInNS, Sets.newHashSet(external.datasets("ns")));
}
 
Example 17
Source File: HiveDifferencesIntegrationTest.java    From circus-train with Apache License 2.0
private void createTable(
    String databaseName,
    String tableName,
    File tableLocation,
    String sourceTable,
    String sourceLocation,
    boolean addChecksum)
  throws Exception {
  File partition0 = createPartitionData("part=0", tableLocation, Arrays.asList("1\tadam", "2\tsusan"));
  File partition1 = createPartitionData("part=1", tableLocation, Arrays.asList("3\tchun", "4\tkim"));

  Table table = new Table();
  table.setDbName(databaseName);
  table.setTableName(tableName);
  table.setTableType(TableType.EXTERNAL_TABLE.name());
  table.setParameters(new HashMap<String, String>());
  if (sourceTable != null) {
    table.getParameters().put(CircusTrainTableParameter.SOURCE_TABLE.parameterName(), sourceTable);
  }
  if (sourceLocation != null) {
    table.getParameters().put(CircusTrainTableParameter.SOURCE_LOCATION.parameterName(), sourceLocation);
  }

  List<FieldSchema> partitionColumns = Arrays.asList(PARTITION_COL);
  table.setPartitionKeys(partitionColumns);

  List<FieldSchema> dataColumns = Arrays.asList(FOO_COL, BAR_COL);

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(dataColumns);
  sd.setLocation(tableLocation.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());

  table.setSd(sd);

  HiveMetaStoreClient client = catalog.client();
  client.createTable(table);
  LOG.info(">>>> Partitions added: {}",
      client.add_partitions(Arrays.asList(
          newPartition(databaseName, tableName, sd, Arrays.asList("0"), partition0, sourceTable,
              sourceLocation + "part=0", addChecksum),
          newPartition(databaseName, tableName, sd, Arrays.asList("1"), partition1, sourceTable,
              sourceLocation + "part=1", addChecksum))));
}
 
Example 18
Source File: ComparisonToolIntegrationTest.java    From circus-train with Apache License 2.0
private void createReplicaTable() throws Exception {
  File partitionEurope = new File(replicaTableUri, "local_date=2000-01-01");
  File partitionUk = new File(partitionEurope, "local_hour=0");
  File dataFileUk = new File(partitionUk, PART_00000);
  FileUtils.writeStringToFile(dataFileUk, "1\tadam\tlondon\tuk\n2\tsusan\tglasgow\tuk\n");

  File partitionAsia = new File(replicaTableUri, "local_date=2000-01-02");
  File partitionChina = new File(partitionAsia, "local_hour=0");
  File dataFileChina = new File(partitionChina, PART_00000);
  String data = "1\tchun\tbeijing\tchina\n2\tshanghai\tmilan\titaly\n";
  FileUtils.writeStringToFile(dataFileChina, data);

  HiveMetaStoreClient replicaClient = catalog.client();

  Table replica = new Table();
  replica.setDbName(DATABASE);
  replica.setTableName(REPLICA_TABLE);
  replica.setTableType(TableType.EXTERNAL_TABLE.name());
  Map<String, String> parameters = new HashMap<>();
  parameters.put("comment", "comment replica");
  replica.setParameters(parameters);
  List<FieldSchema> partitionColumns = Arrays.asList(new FieldSchema("local_date", "string", ""),
      new FieldSchema("local_hour", "string", ""));
  replica.setPartitionKeys(partitionColumns);

  List<FieldSchema> dataColumns = Arrays.asList(new FieldSchema("id", "bigint", ""),
      new FieldSchema("name", "string", ""), new FieldSchema("city", "string", ""),
      new FieldSchema("country", "string", ""));

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(dataColumns);
  sd.setLocation(replicaTableUri.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());

  replica.setSd(sd);

  replicaClient.createTable(replica);
  LOG.info(">>>> Partitions added: {}",
      replicaClient.add_partitions(
          Arrays.asList(newPartition(REPLICA_TABLE, sd, Arrays.asList("2000-01-01", "0"), partitionUk),
              newPartition(REPLICA_TABLE, sd, Arrays.asList("2000-01-02", "0"), partitionChina))));
}
 
Example 19
Source File: ComparisonToolIntegrationTest.java    From circus-train with Apache License 2.0
private void createSourceTable() throws Exception {
  File partitionEurope = new File(sourceTableUri, "local_date=2000-01-01");
  File partitionUk = new File(partitionEurope, "local_hour=0");
  File dataFileUk = new File(partitionUk, PART_00000);
  FileUtils.writeStringToFile(dataFileUk, "1\tadam\tlondon\n2\tsusan\tglasgow\n");

  File partitionAsia = new File(sourceTableUri, "local_date=2000-01-02");
  File partitionChina = new File(partitionAsia, "local_hour=0");
  File dataFileChina = new File(partitionChina, PART_00000);
  String data = "1\tchun\tbeijing\n2\tshanghai\tmilan\n";
  FileUtils.writeStringToFile(dataFileChina, data);

  HiveMetaStoreClient sourceClient = catalog.client();

  Table source = new Table();
  source.setDbName(DATABASE);
  source.setTableName(SOURCE_TABLE);
  source.setTableType(TableType.EXTERNAL_TABLE.name());
  Map<String, String> parameters = new HashMap<>();
  parameters.put("comment", "comment source");
  source.setParameters(parameters);

  List<FieldSchema> partitionColumns = Arrays.asList(new FieldSchema("local_date", "string", ""),
      new FieldSchema("local_hour", "string", ""));
  source.setPartitionKeys(partitionColumns);

  List<FieldSchema> dataColumns = Arrays.asList(new FieldSchema("id", "bigint", ""),
      new FieldSchema("name", "string", ""), new FieldSchema("city", "string", ""));

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(dataColumns);
  sd.setLocation(sourceTableUri.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());

  source.setSd(sd);

  sourceClient.createTable(source);
  LOG.info(">>>> Partitions added: {}",
      sourceClient
          .add_partitions(Arrays.asList(newPartition(SOURCE_TABLE, sd, Arrays.asList("2000-01-01", "0"), partitionUk),
              newPartition(SOURCE_TABLE, sd, Arrays.asList("2000-01-02", "0"), partitionChina))));
}
 
Example 20
Source File: HiveUtils.java    From kite with Apache License 2.0
static Table tableForDescriptor(String namespace, String name,
                                DatasetDescriptor descriptor,
                                boolean external,
                                boolean includeSchema) {
  final Table table = createEmptyTable(namespace, name);

  if (external) {
    // you'd think this would do it...
    table.setTableType(TableType.EXTERNAL_TABLE.toString());
    // but it doesn't work without some additional magic:
    table.getParameters().put("EXTERNAL", "TRUE");
    table.getSd().setLocation(descriptor.getLocation().toString());
  } else {
    table.setTableType(TableType.MANAGED_TABLE.toString());
  }

  addPropertiesForDescriptor(table, descriptor);

  // translate from Format to SerDe
  final Format format = descriptor.getFormat();
  if (FORMAT_TO_SERDE.containsKey(format)) {
    table.getSd().getSerdeInfo().setSerializationLib(FORMAT_TO_SERDE.get(format));
    table.getSd().setInputFormat(FORMAT_TO_INPUT_FORMAT.get(format));
    table.getSd().setOutputFormat(FORMAT_TO_OUTPUT_FORMAT.get(format));
  } else {
    throw new UnknownFormatException(
        "No known serde for format:" + format.getName());
  }

  if (includeSchema) {
    URL schemaURL = descriptor.getSchemaUrl();
    if (useSchemaURL(schemaURL)) {
      table.getParameters().put(
          AVRO_SCHEMA_URL_PROPERTY_NAME,
          descriptor.getSchemaUrl().toExternalForm());
    } else {
      table.getParameters().put(
          AVRO_SCHEMA_LITERAL_PROPERTY_NAME,
          descriptor.getSchema().toString());
    }
  }

  table.getParameters().put(COMPRESSION_TYPE_PROPERTY_NAME,
      descriptor.getCompressionType().getName());

  // convert the schema to Hive columns
  table.getSd().setCols(HiveSchemaConverter.convertSchema(descriptor.getSchema()));

  // copy partitioning info
  if (descriptor.isPartitioned()) {
    PartitionStrategy ps = descriptor.getPartitionStrategy();
    table.getParameters().put(PARTITION_EXPRESSION_PROPERTY_NAME,
        Accessor.getDefault().toExpression(ps));
    table.setPartitionKeys(partitionColumns(ps, descriptor.getSchema()));
  }

  return table;
}