Java Code Examples for org.apache.hadoop.hive.ql.io.parquet.convert.HiveSchemaConverter#convert()

The following examples show how to use org.apache.hadoop.hive.ql.io.parquet.convert.HiveSchemaConverter#convert(). Each example links to the original project and source file it was taken from.
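As a quick orientation, here is a minimal sketch of the call pattern all the examples share: build parallel lists of Hive column names and TypeInfos, then let convert() produce the equivalent Parquet MessageType. The class name, the sample columns, and the use of TypeInfoUtils.getTypeInfosFromTypeString() to parse the type string are illustrative choices, and the imports assume a parquet-mr version that lives under org.apache.parquet (older releases used the parquet package prefix).

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.ql.io.parquet.convert.HiveSchemaConverter;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import org.apache.parquet.schema.MessageType;

public class HiveSchemaConverterSketch {
    public static void main(String[] args) {
        // Hive column names and types, as they would appear in table metadata.
        final List<String> columnNames = Arrays.asList("id", "name");
        final List<TypeInfo> columnTypes =
                TypeInfoUtils.getTypeInfosFromTypeString("int,string");

        // convert() maps the Hive columns onto an equivalent Parquet MessageType.
        final MessageType schema = HiveSchemaConverter.convert(columnNames, columnTypes);
        System.out.println(schema);
    }
}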
Example 1
Source File: ExaParquetWriterImpl.java    From hadoop-etl-udfs with MIT License
public ExaParquetWriterImpl(final List<String> colNames,
                            final List<TypeInfo> colTypes,
                            final Configuration conf,
                            final Path path,
                            final String compressionType,
                            final ExaIterator exa,
                            final int firstColumnIndex,
                            final List<Integer> dynamicPartitionExaColNums) throws Exception {
    // Derive the Parquet schema from the Hive column names and types, then
    // delegate to the schema-based constructor.
    this(HiveSchemaConverter.convert(colNames, colTypes),
            colNames.size(),
            conf,
            path,
            compressionType,
            exa,
            firstColumnIndex,
            dynamicPartitionExaColNums);
}
 
Example 2
Source File: TestHiveSchemaConverter.java    From parquet-mr with Apache License 2.0
@Test
public void testMapOriginalType() throws Exception {
  final String hiveColumnTypes = "map<string,string>";
  final String hiveColumnNames = "mapCol";
  final List<String> columnNames = createHiveColumnsFrom(hiveColumnNames);
  final List<TypeInfo> columnTypes = createHiveTypeInfoFrom(hiveColumnTypes);
  final MessageType messageTypeFound = HiveSchemaConverter.convert(columnNames, columnTypes);
  // The message type has exactly one optional field, named "mapCol", whose original type is MAP.
  assertEquals(1, messageTypeFound.getFieldCount());
  org.apache.parquet.schema.Type topLevel = messageTypeFound.getFields().get(0);
  assertEquals("mapCol",topLevel.getName());
  assertEquals(OriginalType.MAP, topLevel.getOriginalType());
  assertEquals(Repetition.OPTIONAL, topLevel.getRepetition());

  assertEquals(1, topLevel.asGroupType().getFieldCount());
  org.apache.parquet.schema.Type secondLevel = topLevel.asGroupType().getFields().get(0);
  // Under mapCol there is one repeated field named "map" whose original type is MAP_KEY_VALUE.
  assertEquals("map", secondLevel.getName());
  assertEquals(OriginalType.MAP_KEY_VALUE, secondLevel.getOriginalType());
  assertEquals(Repetition.REPEATED, secondLevel.getRepetition());
}
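For reference, the whole schema this test walks through can be written out with MessageTypeParser (from org.apache.parquet.schema, also used in Example 4 below). This is a sketch only, assuming the converter's legacy MAP_KEY_VALUE layout with a required UTF8 key and an optional UTF8 value; the test above does not assert the key/value fields.

// Illustrative only: the full Parquet schema convert() would produce for
// map<string,string>, assuming a required UTF8 key and an optional UTF8 value.
MessageType expected = MessageTypeParser.parseMessageType(
    "message hive_schema {\n"
    + "  optional group mapCol (MAP) {\n"
    + "    repeated group map (MAP_KEY_VALUE) {\n"
    + "      required binary key (UTF8);\n"
    + "      optional binary value (UTF8);\n"
    + "    }\n"
    + "  }\n"
    + "}");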
 
Example 3
Source File: ParquetRecordWriterUtil.java    From presto with Apache License 2.0
private static RecordWriter createParquetWriter(Path target, JobConf conf, Properties properties)
        throws IOException
{
    // Derive the Parquet schema from the table properties only if it has not
    // already been set on the job configuration.
    if (conf.get(DataWritableWriteSupport.PARQUET_HIVE_SCHEMA) == null) {
        List<String> columnNames = Splitter.on(',').splitToList(properties.getProperty(IOConstants.COLUMNS));
        List<TypeInfo> columnTypes = getTypeInfosFromTypeString(properties.getProperty(IOConstants.COLUMNS_TYPES));
        MessageType schema = HiveSchemaConverter.convert(columnNames, columnTypes);
        setParquetSchema(conf, schema);
    }

    ParquetOutputFormat<ParquetHiveRecord> outputFormat = new ParquetOutputFormat<>(new DataWritableWriteSupport());

    return new ParquetRecordWriterWrapper(outputFormat, conf, target.toString(), Reporter.NULL, properties);
}
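The schema branch above only runs when DataWritableWriteSupport.PARQUET_HIVE_SCHEMA has not already been set on the JobConf. A minimal sketch of the table properties the method then falls back on, with hypothetical column values (the property keys come from org.apache.hadoop.hive.ql.io.IOConstants):

Properties properties = new Properties();
// Comma-separated Hive column names and types, as stored in table metadata.
properties.setProperty(IOConstants.COLUMNS, "id,name,score");
properties.setProperty(IOConstants.COLUMNS_TYPES, "int,string,double");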
 
Example 4
Source File: TestHiveSchemaConverter.java    From parquet-mr with Apache License 2.0
private void testConversion(final String columnNamesStr,
                            final String columnsTypeStr,
                            final String expectedSchema) throws Exception {
  final List<String> columnNames = createHiveColumnsFrom(columnNamesStr);
  final List<TypeInfo> columnTypes = createHiveTypeInfoFrom(columnsTypeStr);
  final MessageType messageTypeFound = HiveSchemaConverter.convert(columnNames, columnTypes);
  final MessageType expectedMT = MessageTypeParser.parseMessageType(expectedSchema);
  assertEquals("converting " + columnNamesStr + ": " + columnsTypeStr + " to " + expectedSchema, expectedMT, messageTypeFound);
}
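A hypothetical call to this helper, checking that a single Hive int column converts to one optional int32 field (the expected-schema string assumes the hive_schema message name that convert() produces):

testConversion(
    "a",
    "int",
    "message hive_schema {\n"
    + "  optional int32 a;\n"
    + "}");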