Java Code Examples for org.apache.hadoop.hive.metastore.api.StorageDescriptor#setCols()

The following examples show how to use org.apache.hadoop.hive.metastore.api.StorageDescriptor#setCols(). The examples are drawn from open source projects; the source file and project license are noted above each example.
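Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: build a list of FieldSchema objects (column name, Hive type, comment), pass it to StorageDescriptor#setCols(), and attach the descriptor to a Table. The class and column names in this sketch are illustrative only and do not come from the projects below.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;

public class SetColsSketch {
  public static void main(String[] args) {
    // Each FieldSchema is (column name, Hive type, comment).
    List<FieldSchema> cols = new ArrayList<>();
    cols.add(new FieldSchema("id", "bigint", "illustrative id column"));
    cols.add(new FieldSchema("name", "string", ""));

    // setCols() attaches the column list to the storage descriptor ...
    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(cols);

    // ... which is then set on the table (or partition) metadata object.
    Table table = new Table();
    table.setSd(sd);
  }
}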
Example 1
Source File: DynamoDBStorageHandlerTest.java    From emr-dynamodb-connector with Apache License 2.0
@Test
public void testCheckStructTableSchemaTypeInvalid() throws MetaException {
  TableDescription description = getHashRangeTable();

  Table table = new Table();
  Map<String, String> parameters = Maps.newHashMap();
  parameters.put(DynamoDBConstants.DYNAMODB_COLUMN_MAPPING, "col1:dynamo_col1$," +
      "col2:dynamo_col2#,hashKey:hashKey");
  table.setParameters(parameters);
  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> cols = Lists.newArrayList();
  cols.add(new FieldSchema("col1", "struct<bignum:bigint,smallnum:tinyint>", ""));
  cols.add(new FieldSchema("col2", "array<map<string,bigint>>", ""));
  cols.add(new FieldSchema("hashKey", "string", ""));
  sd.setCols(cols);
  table.setSd(sd);

  exceptionRule.expect(MetaException.class);
  exceptionRule.expectMessage("The hive type struct<bignum:bigint,smallnum:tinyint> is not " +
      "supported in DynamoDB");
  storageHandler.checkTableSchemaType(description, table);
}
 
Example 2
Source File: CatalogToHiveConverter.java    From aws-glue-data-catalog-client-for-apache-hive-metastore with Apache License 2.0
public static StorageDescriptor convertStorageDescriptor(com.amazonaws.services.glue.model.StorageDescriptor catalogSd) {
  StorageDescriptor hiveSd = new StorageDescriptor();
  hiveSd.setCols(convertFieldSchemaList(catalogSd.getColumns()));
  hiveSd.setLocation(catalogSd.getLocation());
  hiveSd.setInputFormat(catalogSd.getInputFormat());
  hiveSd.setOutputFormat(catalogSd.getOutputFormat());
  hiveSd.setCompressed(catalogSd.getCompressed());
  hiveSd.setNumBuckets(catalogSd.getNumberOfBuckets());
  hiveSd.setSerdeInfo(convertSerDeInfo(catalogSd.getSerdeInfo()));
  hiveSd.setBucketCols(firstNonNull(catalogSd.getBucketColumns(), Lists.<String>newArrayList()));
  hiveSd.setSortCols(convertOrderList(catalogSd.getSortColumns()));
  hiveSd.setParameters(firstNonNull(catalogSd.getParameters(), Maps.<String, String>newHashMap()));
  hiveSd.setSkewedInfo(convertSkewedInfo(catalogSd.getSkewedInfo()));
  hiveSd.setStoredAsSubDirectories(catalogSd.getStoredAsSubDirectories());

  return hiveSd;
}
 
Example 3
Source File: DynamoDBStorageHandlerTest.java    From emr-dynamodb-connector with Apache License 2.0
@Test
public void testCheckTableSchemaMappingMissingColumnMapping() throws MetaException {
  TableDescription description = getHashRangeTable();

  Table table = new Table();
  Map<String, String> parameters = Maps.newHashMap();
  parameters.put(DynamoDBConstants.DYNAMODB_COLUMN_MAPPING, "col1:dynamo_col1$," +
      "col2:dynamo_col2#,hashKey:hashKey,hashMap:hashMap");
  table.setParameters(parameters);
  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> cols = Lists.newArrayList();
  cols.add(new FieldSchema("col1", "string", ""));
  cols.add(new FieldSchema("hashMap", "map<string,string>", ""));
  sd.setCols(cols);
  table.setSd(sd);

  exceptionRule.expect(MetaException.class);
  exceptionRule.expectMessage("Could not find column(s) for column mapping(s): ");
  exceptionRule.expectMessage("col2:dynamo_col2#");
  exceptionRule.expectMessage("hashkey:hashKey");
  storageHandler.checkTableSchemaMapping(description, table);
}
 
Example 4
Source File: AbstractMetastoreTestWithStaticConfiguration.java    From incubator-sentry with Apache License 2.0
public Table makeMetastoreTableObject(HiveMetaStoreClient client,
    String dbName, String tabName, List<FieldSchema> cols) throws Exception {
  Table tbl = new Table();
  tbl.setDbName(dbName);
  tbl.setTableName(tabName);
  StorageDescriptor sd = new StorageDescriptor();
  tbl.setSd(sd);
  tbl.setParameters(new HashMap<String, String>());
  sd.setCols(cols);
  sd.setCompressed(false);
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setName(tbl.getTableName());
  sd.getSerdeInfo().setParameters(new HashMap<String, String>());
  sd.getSerdeInfo().getParameters()
      .put(serdeConstants.SERIALIZATION_FORMAT, "1");
  sd.setSortCols(new ArrayList<Order>());
  return tbl;
}
 
Example 5
Source File: TestUtils.java    From waggle-dance with Apache License 2.0
static Table createPartitionedTable(HiveMetaStoreClient metaStoreClient, String database, String table, File location)
  throws Exception {

  Table hiveTable = new Table();
  hiveTable.setDbName(database);
  hiveTable.setTableName(table);
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.putToParameters("EXTERNAL", "TRUE");

  hiveTable.setPartitionKeys(PARTITION_COLUMNS);

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setLocation(location.toURI().toString());
  sd.setParameters(new HashMap<>());
  sd.setSerdeInfo(new SerDeInfo());

  hiveTable.setSd(sd);

  metaStoreClient.createTable(hiveTable);

  return hiveTable;
}
 
Example 6
Source File: DynamoDBStorageHandlerTest.java    From emr-dynamodb-connector with Apache License 2.0
@Test
public void testCheckTableSchemaTypeValid() throws MetaException {
  TableDescription description = getHashRangeTable();

  Table table = new Table();
  Map<String, String> parameters = Maps.newHashMap();
  parameters.put(DynamoDBConstants.DYNAMODB_COLUMN_MAPPING, "col1:dynamo_col1$," +
      "col2:dynamo_col2#,hashKey:hashKey");
  table.setParameters(parameters);
  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> cols = Lists.newArrayList();
  cols.add(new FieldSchema("col1", "string", ""));
  cols.add(new FieldSchema("col2", "bigint", ""));
  cols.add(new FieldSchema("hashKey", "string", ""));
  sd.setCols(cols);
  table.setSd(sd);
  // This check is expected to pass for the given input
  storageHandler.checkTableSchemaType(description, table);
}
 
Example 7
Source File: LocalHiveMetastoreTestUtils.java    From incubator-gobblin with Apache License 2.0
public Partition addTestPartition(Table tbl, List<String> values, int createTime) throws Exception {
  StorageDescriptor partitionSd = new StorageDescriptor();
  if (StringUtils.isNotBlank(tbl.getSd().getLocation())) {
    partitionSd.setLocation(tbl.getSd().getLocation() + values);
  } else {
    partitionSd.setLocation("/tmp/" + tbl.getTableName() + "/part1");
  }

  partitionSd.setSerdeInfo(
      new SerDeInfo("name", "serializationLib", ImmutableMap.of(HiveAvroSerDeManager.SCHEMA_URL, "/tmp/dummy")));
  partitionSd.setCols(tbl.getPartitionKeys());
  Partition partition =
      new Partition(values, tbl.getDbName(), tbl.getTableName(), 1, 1, partitionSd, new HashMap<String, String>());
  partition.setCreateTime(createTime);
  return this.getLocalMetastoreClient().add_partition(partition);
}
 
Example 8
Source File: DynamoDBStorageHandlerTest.java    From emr-dynamodb-connector with Apache License 2.0
@Test
public void testCheckTableSchemaMappingMissingColumn() throws MetaException {
  TableDescription description = getHashRangeTable();

  Table table = new Table();
  Map<String, String> parameters = Maps.newHashMap();
  parameters.put(DynamoDBConstants.DYNAMODB_COLUMN_MAPPING, "col1:dynamo_col1$,hashMap:hashMap");
  table.setParameters(parameters);
  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> cols = Lists.newArrayList();
  cols.add(new FieldSchema("col1", "string", ""));
  cols.add(new FieldSchema("col2", "tinyint", ""));
  cols.add(new FieldSchema("col3", "string", ""));
  cols.add(new FieldSchema("hashMap", "map<string,string>", ""));
  sd.setCols(cols);
  table.setSd(sd);

  exceptionRule.expect(MetaException.class);
  exceptionRule.expectMessage("Could not find column mapping for column: col2");
  storageHandler.checkTableSchemaMapping(description, table);
}
 
Example 9
Source File: AvroStorageDescriptorFactory.java    From data-highway with Apache License 2.0
public static StorageDescriptor create(String location) {
  StorageDescriptor storageDescriptor = new StorageDescriptor();
  storageDescriptor.setInputFormat(AVRO_INPUT_FORMAT);
  storageDescriptor.setOutputFormat(AVRO_OUTPUT_FORMAT);
  storageDescriptor.setLocation(location);
  storageDescriptor.setCols(emptyList());

  SerDeInfo serdeInfo = new SerDeInfo();
  serdeInfo.setSerializationLib(AVRO_SERDE);
  storageDescriptor.setSerdeInfo(serdeInfo);

  return storageDescriptor;
}
 
Example 10
Source File: AvroSchemaManagerTest.java    From incubator-gobblin with Apache License 2.0
private Partition getTestPartition(Table table) throws HiveException {
  Partition partition = new Partition(table, ImmutableMap.of("partition_key", "1"), null);
  StorageDescriptor sd = new StorageDescriptor();
  sd.setSerdeInfo(new SerDeInfo("avro", AvroSerDe.class.getName(), null));
  sd.setCols(Lists.newArrayList(new FieldSchema("foo", "int", null)));
  partition.getTPartition().setSd(sd);
  return partition;
}
 
Example 11
Source File: PartitionTransformationTest.java    From circus-train with Apache License 2.0
@Before
public void init() {
  partition = new Partition();
  partition.setDbName("database");
  partition.setTableName("table");
  partition.setValues(ImmutableList.of("part"));

  Map<String, List<PrivilegeGrantInfo>> userPrivileges = new HashMap<>();
  userPrivileges.put("read", ImmutableList.of(new PrivilegeGrantInfo()));
  PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet();
  privileges.setUserPrivileges(userPrivileges);
  partition.setPrivileges(privileges);

  StorageDescriptor storageDescriptor = new StorageDescriptor();
  storageDescriptor.setCols(Arrays.asList(new FieldSchema("a", "int", null)));
  storageDescriptor.setInputFormat("input_format");
  storageDescriptor.setOutputFormat("output_format");
  storageDescriptor.setSerdeInfo(new SerDeInfo("serde", "lib", new HashMap<String, String>()));
  storageDescriptor.setSkewedInfo(new SkewedInfo());
  storageDescriptor.setParameters(new HashMap<String, String>());
  storageDescriptor.setLocation("database/table/part/");
  partition.setSd(storageDescriptor);

  Map<String, String> parameters = new HashMap<>();
  parameters.put("com.company.parameter", "abc");
  partition.setParameters(parameters);
}
 
Example 12
Source File: HiveEntityFactory.java    From circus-train with Apache License 2.0
public static StorageDescriptor newStorageDescriptor(File location, String... columns) {
  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> cols = new ArrayList<>(columns.length);
  for (String name : columns) {
    cols.add(newFieldSchema(name));
  }
  sd.setCols(cols);
  sd.setSerdeInfo(new SerDeInfo());
  sd.setLocation(location.toURI().toString());
  return sd;
}
 
Example 13
Source File: TestUtils.java    From circus-train with Apache License 2.0
public static Table newTable(String database, String tableName) {
  Table table = new Table();
  table.setDbName(database);
  table.setTableName(tableName);
  table.setTableType(TABLE_TYPE);
  table.setOwner(OWNER);
  table.setCreateTime(CREATE_TIME);
  table.setRetention(RETENTION);

  Map<String, List<PrivilegeGrantInfo>> userPrivileges = new HashMap<>();
  userPrivileges.put("read", ImmutableList.of(new PrivilegeGrantInfo()));
  PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet();
  privileges.setUserPrivileges(userPrivileges);
  table.setPrivileges(privileges);

  StorageDescriptor storageDescriptor = new StorageDescriptor();
  storageDescriptor.setCols(COLS);
  storageDescriptor.setInputFormat(INPUT_FORMAT);
  storageDescriptor.setOutputFormat(OUTPUT_FORMAT);
  storageDescriptor.setSerdeInfo(new SerDeInfo(SERDE_INFO_NAME, SERIALIZATION_LIB, new HashMap<String, String>()));
  storageDescriptor.setSkewedInfo(new SkewedInfo());
  storageDescriptor.setParameters(new HashMap<String, String>());
  storageDescriptor.setLocation(DATABASE + "/" + tableName + "/");
  table.setSd(storageDescriptor);

  Map<String, String> parameters = new HashMap<>();
  parameters.put("com.company.parameter", "abc");
  table.setParameters(parameters);

  return table;
}
 
Example 14
Source File: CircusTrainTest.java    From circus-train with Apache License 2.0
@Before
public void before() throws TException, IOException {
  Table table = new Table();
  table.setDbName(DATABASE);
  table.setTableName("source_" + TABLE);
  table.setTableType(TableType.EXTERNAL_TABLE.name());
  table.putToParameters("EXTERNAL", "TRUE");

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(Arrays.asList(new FieldSchema("col1", "string", null)));
  sd.setSerdeInfo(new SerDeInfo());
  table.setSd(sd);

  hive.client().createTable(table);
}
 
Example 15
Source File: ComparisonToolIntegrationTest.java    From circus-train with Apache License 2.0
private void createSourceTable() throws Exception {
  File partitionEurope = new File(sourceTableUri, "local_date=2000-01-01");
  File partitionUk = new File(partitionEurope, "local_hour=0");
  File dataFileUk = new File(partitionUk, PART_00000);
  FileUtils.writeStringToFile(dataFileUk, "1\tadam\tlondon\n2\tsusan\tglasgow\n");

  File partitionAsia = new File(sourceTableUri, "local_date=2000-01-02");
  File partitionChina = new File(partitionAsia, "local_hour=0");
  File dataFileChina = new File(partitionChina, PART_00000);
  String data = "1\tchun\tbeijing\n2\tshanghai\tmilan\n";
  FileUtils.writeStringToFile(dataFileChina, data);

  HiveMetaStoreClient sourceClient = catalog.client();

  Table source = new Table();
  source.setDbName(DATABASE);
  source.setTableName(SOURCE_TABLE);
  source.setTableType(TableType.EXTERNAL_TABLE.name());
  Map<String, String> parameters = new HashMap<>();
  parameters.put("comment", "comment source");
  source.setParameters(parameters);

  List<FieldSchema> partitionColumns = Arrays.asList(new FieldSchema("local_date", "string", ""),
      new FieldSchema("local_hour", "string", ""));
  source.setPartitionKeys(partitionColumns);

  List<FieldSchema> dataColumns = Arrays.asList(new FieldSchema("id", "bigint", ""),
      new FieldSchema("name", "string", ""), new FieldSchema("city", "string", ""));

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(dataColumns);
  sd.setLocation(sourceTableUri.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());

  source.setSd(sd);

  sourceClient.createTable(source);
  LOG.info(">>>> Partitions added: {}",
      +sourceClient
          .add_partitions(Arrays.asList(newPartition(SOURCE_TABLE, sd, Arrays.asList("2000-01-01", "0"), partitionUk),
              newPartition(SOURCE_TABLE, sd, Arrays.asList("2000-01-02", "0"), partitionChina))));
}
 
Example 16
Source File: HiveTableUtil.java    From flink with Apache License 2.0
public static void alterColumns(StorageDescriptor sd, CatalogTable catalogTable) {
	List<FieldSchema> allCols = HiveTableUtil.createHiveColumns(catalogTable.getSchema());
	List<FieldSchema> nonPartCols = allCols.subList(0, allCols.size() - catalogTable.getPartitionKeys().size());
	sd.setCols(nonPartCols);
}
 
Example 17
Source File: ComparisonToolIntegrationTest.java    From circus-train with Apache License 2.0
private void createReplicaTable() throws Exception {
  File partitionEurope = new File(replicaTableUri, "local_date=2000-01-01");
  File partitionUk = new File(partitionEurope, "local_hour=0");
  File dataFileUk = new File(partitionUk, PART_00000);
  FileUtils.writeStringToFile(dataFileUk, "1\tadam\tlondon\tuk\n2\tsusan\tglasgow\tuk\n");

  File partitionAsia = new File(replicaTableUri, "local_date=2000-01-02");
  File partitionChina = new File(partitionAsia, "local_hour=0");
  File dataFileChina = new File(partitionChina, PART_00000);
  String data = "1\tchun\tbeijing\tchina\n2\tshanghai\tmilan\titaly\n";
  FileUtils.writeStringToFile(dataFileChina, data);

  HiveMetaStoreClient replicaClient = catalog.client();

  Table replica = new Table();
  replica.setDbName(DATABASE);
  replica.setTableName(REPLICA_TABLE);
  replica.setTableType(TableType.EXTERNAL_TABLE.name());
  Map<String, String> parameters = new HashMap<>();
  parameters.put("comment", "comment replica");
  replica.setParameters(parameters);
  List<FieldSchema> partitionColumns = Arrays.asList(new FieldSchema("local_date", "string", ""),
      new FieldSchema("local_hour", "string", ""));
  replica.setPartitionKeys(partitionColumns);

  List<FieldSchema> dataColumns = Arrays.asList(new FieldSchema("id", "bigint", ""),
      new FieldSchema("name", "string", ""), new FieldSchema("city", "string", ""),
      new FieldSchema("country", "string", ""));

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(dataColumns);
  sd.setLocation(replicaTableUri.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());

  replica.setSd(sd);

  replicaClient.createTable(replica);
  LOG.info(">>>> Partitions added: {}",
      +replicaClient.add_partitions(
          Arrays.asList(newPartition(REPLICA_TABLE, sd, Arrays.asList("2000-01-01", "0"), partitionUk),
              newPartition(REPLICA_TABLE, sd, Arrays.asList("2000-01-02", "0"), partitionChina))));
}
 
Example 18
Source File: TestUtils.java    From circus-train with Apache License 2.0
public static Table createPartitionedTable(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String table,
    URI location,
    List<FieldSchema> columns,
    List<FieldSchema> partitionKeys,
    String serializationLib,
    String inputFormatClassName,
    String outputFormatClassName)
    throws Exception {

  Table hiveTable = new Table();
  hiveTable.setDbName(database);
  hiveTable.setTableName(table);
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.putToParameters("EXTERNAL", "TRUE");

  hiveTable.setPartitionKeys(partitionKeys);

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(columns);
  sd.setLocation(location.toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setInputFormat(inputFormatClassName);
  sd.setOutputFormat(outputFormatClassName);
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setSerializationLib(serializationLib);

  hiveTable.setSd(sd);

  metaStoreClient.createTable(hiveTable);

  ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, database, table);
  ColumnStatisticsData statsData = new ColumnStatisticsData(_Fields.LONG_STATS, new LongColumnStatsData(1L, 2L));
  ColumnStatisticsObj cso1 = new ColumnStatisticsObj("id", "bigint", statsData);
  List<ColumnStatisticsObj> statsObj = Collections.singletonList(cso1);
  metaStoreClient.updateTableColumnStatistics(new ColumnStatistics(statsDesc, statsObj));

  return hiveTable;
}
 
Example 19
Source File: HiveMetadataFetcherTest.java    From pxf with Apache License 2.0
@Test
public void getTableMetadataWithMultipleTables() throws Exception {

    fetcher = new HiveMetadataFetcher(context, mockConfigurationFactory, fakeHiveClientWrapper);

    String tablePattern = "*";
    String dbPattern = "*";
    String dbName = "default";
    String tableNameBase = "regulartable";
    String pattern = dbPattern + "." + tablePattern;

    List<String> dbNames = new ArrayList<>(Collections.singletonList(dbName));
    List<String> tableNames = new ArrayList<>();

    // Prepare for tables
    List<FieldSchema> fields = new ArrayList<>();
    fields.add(new FieldSchema("field1", "string", null));
    fields.add(new FieldSchema("field2", "int", null));
    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(fields);
    sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");

    // Mock hive tables returned from hive client
    for (int index = 1; index <= 2; index++) {
        String tableName = tableNameBase + index;
        tableNames.add(tableName);
        Table hiveTable = new Table();
        hiveTable.setTableType("MANAGED_TABLE");
        hiveTable.setSd(sd);
        hiveTable.setPartitionKeys(new ArrayList<>());
        when(mockHiveClient.getTable(dbName, tableName)).thenReturn(hiveTable);
    }

    // Mock database and table names return from hive client
    when(mockHiveClient.getDatabases(dbPattern)).thenReturn(dbNames);
    when(mockHiveClient.getTables(dbName, tablePattern)).thenReturn(tableNames);

    // Get metadata
    metadataList = fetcher.getMetadata(pattern);
    assertEquals(2, metadataList.size());

    for (int index = 1; index <= 2; index++) {
        Metadata metadata = metadataList.get(index - 1);
        assertEquals(dbName + "." + tableNameBase + index, metadata.getItem().toString());
        List<Metadata.Field> resultFields = metadata.getFields();
        assertNotNull(resultFields);
        assertEquals(2, resultFields.size());
        Metadata.Field field = resultFields.get(0);
        assertEquals("field1", field.getName());
        assertEquals("text", field.getType().getTypeName()); // converted type
        field = resultFields.get(1);
        assertEquals("field2", field.getName());
        assertEquals("int4", field.getType().getTypeName());
    }
}
 
Example 20
Source File: HiveConvertersImpl.java    From metacat with Apache License 2.0
/**
 * {@inheritDoc}
 */
@Override
public Partition metacatToHivePartition(final PartitionDto partitionDto, @Nullable final TableDto tableDto) {
    final Partition result = new Partition();

    final QualifiedName name = partitionDto.getName();
    List<String> values = Lists.newArrayListWithCapacity(16);
    String databaseName = null;
    String tableName = null;
    if (name != null) {
        if (name.getPartitionName() != null) {
            //
            // Unescape the partition name to get the right partition values.
            // Partition names are always escaped, whereas the partition values are not.
            //
            values = getPartValsFromName(tableDto, name.getPartitionName());
        }

        if (name.getDatabaseName() != null) {
            databaseName = name.getDatabaseName();
        }

        if (name.getTableName() != null) {
            tableName = name.getTableName();
        }
    }
    result.setValues(values);
    result.setDbName(databaseName);
    result.setTableName(tableName);

    Map<String, String> metadata = partitionDto.getMetadata();
    if (metadata == null) {
        metadata = Maps.newHashMap();
    }
    result.setParameters(metadata);

    result.setSd(fromStorageDto(partitionDto.getSerde(), tableName));
    final StorageDescriptor sd = result.getSd();
    if (tableDto != null) {
        if (sd.getSerdeInfo() != null && tableDto.getSerde() != null && Strings.isNullOrEmpty(
            sd.getSerdeInfo().getSerializationLib())) {
            sd.getSerdeInfo().setSerializationLib(tableDto.getSerde().getSerializationLib());
        }

        final List<FieldDto> fields = tableDto.getFields();
        if (fields == null) {
            sd.setCols(Collections.emptyList());
        } else {
            sd.setCols(fields.stream()
                .filter(field -> !field.isPartition_key())
                .map(this::metacatToHiveField)
                .collect(Collectors.toList()));
        }
    }

    final AuditDto auditDto = partitionDto.getAudit();
    if (auditDto != null) {
        if (auditDto.getCreatedDate() != null) {
            result.setCreateTime(dateToEpochSeconds(auditDto.getCreatedDate()));
        }
        if (auditDto.getLastModifiedDate() != null) {
            result.setLastAccessTime(dateToEpochSeconds(auditDto.getLastModifiedDate()));
        }
    }

    return result;
}