org.apache.parquet.column.ParquetProperties.WriterVersion Java Examples

The following examples show how to use org.apache.parquet.column.ParquetProperties.WriterVersion. They are drawn from open-source projects; the source file, originating project, and license are noted above each example.
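Before the project examples, here is a minimal, self-contained sketch of the enum itself, assuming only that parquet-column is on the classpath; the class name and printouts are illustrative, not taken from any project below.

import org.apache.parquet.column.ParquetProperties;
import org.apache.parquet.column.ParquetProperties.WriterVersion;

public class WriterVersionBasics {
    public static void main(String[] args) {
        // The enum defines the two Parquet format versions a writer can target.
        WriterVersion v1 = WriterVersion.PARQUET_1_0; // short name "v1"
        WriterVersion v2 = WriterVersion.PARQUET_2_0; // short name "v2"

        // fromString resolves the short name (or the constant name) back to the enum.
        System.out.println(WriterVersion.fromString("v2")); // PARQUET_2_0

        // The version is usually carried by ParquetProperties and consumed by
        // the writer builders shown in the examples below.
        ParquetProperties props = ParquetProperties.builder()
                .withWriterVersion(v1)
                .build();
        System.out.println(props.getWriterVersion()); // PARQUET_1_0
    }
}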
Example #1
Source File: ApacheParquet.java    From sylph with Apache License 2.0
private ApacheParquet(String outputPath, MessageType schema, WriterVersion writerVersion)
        throws IOException
{
    this.schema = schema;
    this.outputPath = outputPath;

    Configuration configuration = new Configuration();
    GroupWriteSupport.setSchema(schema, configuration);

    this.writer = ExampleParquetWriter.builder(new Path(outputPath))
            .withType(schema)
            .withConf(configuration)
            .withPageSize(DEFAULT_PAGE_SIZE)
            .withDictionaryPageSize(DEFAULT_PAGE_SIZE)
            .withDictionaryEncoding(DEFAULT_IS_DICTIONARY_ENABLED)
            .withValidation(DEFAULT_IS_VALIDATING_ENABLED)
            .withWriterVersion(writerVersion)
            .withRowGroupSize(DEFAULT_BLOCK_SIZE) // set Parquet file block (row group) size
            .withCompressionCodec(CompressionCodecName.UNCOMPRESSED) // compression type
            .build();

    this.groupFactory = new SimpleGroupFactory(this.schema);
}
 
Example #2
Source File: ParquetPartition.java    From entrada with GNU General Public License v3.0
public ParquetPartition(String partition, Schema schema) {

    Configuration conf = new Configuration();
    Path file =
        new Path(partition + System.getProperty("file.separator") + UUID.randomUUID() + ".parquet");
    filename = file.toString();

    log.info("Create new parquet file: {}", filename);

    try {
      Files.createDirectories(Paths.get(partition));

      writer = AvroParquetWriter
          .<T>builder(file)
          .enableDictionaryEncoding()
          .withCompressionCodec(CompressionCodecName.SNAPPY)
          .withConf(conf)
          .withWriterVersion(WriterVersion.PARQUET_1_0)
          .withSchema(schema)
          .withRowGroupSize(ROWGROUP_SIZE)
          .build();
    } catch (IOException e) {
      throw new ApplicationException("Cannot create a Parquet partition", e);
    }
  }
 
Example #3
Source File: DefaultValuesWriterFactoryTest.java    From parquet-mr with Apache License 2.0
@Test
public void testColumnWiseDictionaryWithFalseDefault() {
  ValuesWriterFactory factory = getDefaultFactory(WriterVersion.PARQUET_2_0, false,
      "binary_dict",
      "boolean_dict",
      "float_dict",
      "int32_dict");
  validateFactory(factory, BINARY, "binary_dict",
      PlainBinaryDictionaryValuesWriter.class, DeltaByteArrayWriter.class);
  validateFactory(factory, BINARY, "binary_no_dict",
      DeltaByteArrayWriter.class);
  validateFactory(factory, BOOLEAN, "boolean_dict",
      RunLengthBitPackingHybridValuesWriter.class);
  validateFactory(factory, BOOLEAN, "boolean_no_dict",
      RunLengthBitPackingHybridValuesWriter.class);
  validateFactory(factory, FLOAT, "float_dict",
      PlainFloatDictionaryValuesWriter.class, PlainValuesWriter.class);
  validateFactory(factory, FLOAT, "float_no_dict",
      PlainValuesWriter.class);
  validateFactory(factory, INT32, "int32_dict",
      PlainIntegerDictionaryValuesWriter.class, DeltaBinaryPackingValuesWriter.class);
  validateFactory(factory, INT32, "int32_no_dict",
      DeltaBinaryPackingValuesWriter.class);
}
 
Example #4
Source File: DefaultValuesWriterFactoryTest.java    From parquet-mr with Apache License 2.0
@Test
public void testColumnWiseDictionaryWithTrueDefault() {
  ValuesWriterFactory factory = getDefaultFactory(WriterVersion.PARQUET_2_0, true,
      "binary_no_dict",
      "boolean_no_dict",
      "float_no_dict",
      "int32_no_dict");
  validateFactory(factory, BINARY, "binary_dict",
      PlainBinaryDictionaryValuesWriter.class, DeltaByteArrayWriter.class);
  validateFactory(factory, BINARY, "binary_no_dict",
      DeltaByteArrayWriter.class);
  validateFactory(factory, BOOLEAN, "boolean_dict",
      RunLengthBitPackingHybridValuesWriter.class);
  validateFactory(factory, BOOLEAN, "boolean_no_dict",
      RunLengthBitPackingHybridValuesWriter.class);
  validateFactory(factory, FLOAT, "float_dict",
      PlainFloatDictionaryValuesWriter.class, PlainValuesWriter.class);
  validateFactory(factory, FLOAT, "float_no_dict",
      PlainValuesWriter.class);
  validateFactory(factory, INT32, "int32_dict",
      PlainIntegerDictionaryValuesWriter.class, DeltaBinaryPackingValuesWriter.class);
  validateFactory(factory, INT32, "int32_no_dict",
      DeltaBinaryPackingValuesWriter.class);
}
 
Example #5
Source File: FileEncodingsIT.java    From parquet-mr with Apache License 2.0
@Test
public void testFileEncodingsWithDictionary() throws Exception {
  final boolean ENABLE_DICTIONARY = true;
  List<?> dictionaryValues = generateDictionaryValues(this.paramTypeName, RECORD_COUNT);

  /* Run an encoding test for each writer version.
   * This loop makes sure that future writer versions added to the WriterVersion enum are tested too.
   */
  for (WriterVersion writerVersion : WriterVersion.values()) {
    System.out.println(String.format("Testing %s/%s/%s + DICTIONARY encodings using ROW_GROUP_SIZE=%d PAGE_SIZE=%d",
        writerVersion, this.paramTypeName, this.compression, TEST_ROW_GROUP_SIZE, TEST_PAGE_SIZE));

    Path parquetFile = createTempFile();
    writeValuesToFile(parquetFile, this.paramTypeName, dictionaryValues, TEST_ROW_GROUP_SIZE, TEST_PAGE_SIZE, ENABLE_DICTIONARY, writerVersion);
    PageGroupValidator.validatePages(parquetFile, dictionaryValues);
  }
}
 
Example #6
Source File: FileEncodingsIT.java    From parquet-mr with Apache License 2.0
@Test
public void testFileEncodingsWithoutDictionary() throws Exception {
  final boolean DISABLE_DICTIONARY = false;
  List<?> randomValues = generateRandomValues(this.paramTypeName, RECORD_COUNT);

  /* Run an encoding test for each writer version.
   * This loop makes sure that future writer versions added to the WriterVersion enum are tested too.
   */
  for (WriterVersion writerVersion : WriterVersion.values()) {
    System.out.println(String.format("Testing %s/%s/%s encodings using ROW_GROUP_SIZE=%d PAGE_SIZE=%d",
        writerVersion, this.paramTypeName, this.compression, TEST_ROW_GROUP_SIZE, TEST_PAGE_SIZE));

    Path parquetFile = createTempFile();
    writeValuesToFile(parquetFile, this.paramTypeName, randomValues, TEST_ROW_GROUP_SIZE, TEST_PAGE_SIZE, DISABLE_DICTIONARY, writerVersion);
    PageGroupValidator.validatePages(parquetFile, randomValues);
  }
}
 
Example #7
Source File: TestColumnIndexFiltering.java    From parquet-mr with Apache License 2.0
@BeforeClass
public static void createFile() throws IOException {
  int pageSize = DATA.size() / 10;     // Ensure that several pages will be created
  int rowGroupSize = pageSize * 6 * 5; // Ensure that multiple row groups are created
  PhoneBookWriter.write(ExampleParquetWriter.builder(FILE_V1)
      .withWriteMode(OVERWRITE)
      .withRowGroupSize(rowGroupSize)
      .withPageSize(pageSize)
      .withWriterVersion(WriterVersion.PARQUET_1_0),
      DATA);
  PhoneBookWriter.write(ExampleParquetWriter.builder(FILE_V2)
      .withWriteMode(OVERWRITE)
      .withRowGroupSize(rowGroupSize)
      .withPageSize(pageSize)
      .withWriterVersion(WriterVersion.PARQUET_2_0),
      DATA);
}
 
Example #8
Source File: ParquetWriter.java    From parquet-mr with Apache License 2.0
/**
 * Create a new ParquetWriter.
 *
 * @param file the file to create
 * @param mode file creation mode
 * @param writeSupport the implementation to write a record to a RecordConsumer
 * @param compressionCodecName the compression codec to use
 * @param blockSize the block size threshold
 * @param pageSize the page size threshold
 * @param dictionaryPageSize the page size threshold for the dictionary pages
 * @param enableDictionary to turn dictionary encoding on
 * @param validating to turn on validation using the schema
 * @param writerVersion version of parquetWriter from {@link ParquetProperties.WriterVersion}
 * @param conf Hadoop configuration to use while accessing the filesystem
 * @throws IOException if there is an error while writing
 * @deprecated will be removed in 2.0.0
 */
@Deprecated
public ParquetWriter(
    Path file,
    ParquetFileWriter.Mode mode,
    WriteSupport<T> writeSupport,
    CompressionCodecName compressionCodecName,
    int blockSize,
    int pageSize,
    int dictionaryPageSize,
    boolean enableDictionary,
    boolean validating,
    WriterVersion writerVersion,
    Configuration conf) throws IOException {
  this(HadoopOutputFile.fromPath(file, conf),
      mode, writeSupport, compressionCodecName, blockSize,
      validating, conf, MAX_PADDING_SIZE_DEFAULT,
      ParquetProperties.builder()
          .withPageSize(pageSize)
          .withDictionaryPageSize(dictionaryPageSize)
          .withDictionaryEncoding(enableDictionary)
          .withWriterVersion(writerVersion)
          .build());
}
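Since this constructor is deprecated, the builder API is its replacement. As a rough equivalent, here is a hedged sketch that maps the constructor arguments onto the builder methods seen elsewhere on this page (Examples #1, #7, #13, #14); the variable names reuse the parameters above, and the Group/ExampleParquetWriter types come from parquet-mr's example module.

ParquetWriter<Group> writer = ExampleParquetWriter.builder(file)
    .withWriteMode(mode)
    .withConf(conf)
    .withCompressionCodec(compressionCodecName)
    .withRowGroupSize(blockSize)                // blockSize maps to the row group size
    .withPageSize(pageSize)
    .withDictionaryPageSize(dictionaryPageSize)
    .withDictionaryEncoding(enableDictionary)
    .withValidation(validating)
    .withWriterVersion(writerVersion)
    .build();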
 
Example #9
Source File: ParquetWriter.java    From parquet-mr with Apache License 2.0
/**
 * Create a new ParquetWriter.
 *
 * @param file the file to create
 * @param writeSupport the implementation to write a record to a RecordConsumer
 * @param compressionCodecName the compression codec to use
 * @param blockSize the block size threshold
 * @param pageSize the page size threshold
 * @param dictionaryPageSize the page size threshold for the dictionary pages
 * @param enableDictionary to turn dictionary encoding on
 * @param validating to turn on validation using the schema
 * @param writerVersion version of parquetWriter from {@link ParquetProperties.WriterVersion}
 * @param conf Hadoop configuration to use while accessing the filesystem
 * @throws IOException if there is an error while writing
 * @deprecated will be removed in 2.0.0
 */
@Deprecated
public ParquetWriter(
    Path file,
    WriteSupport<T> writeSupport,
    CompressionCodecName compressionCodecName,
    int blockSize,
    int pageSize,
    int dictionaryPageSize,
    boolean enableDictionary,
    boolean validating,
    WriterVersion writerVersion,
    Configuration conf) throws IOException {
  this(file, ParquetFileWriter.Mode.CREATE, writeSupport,
      compressionCodecName, blockSize, pageSize, dictionaryPageSize,
      enableDictionary, validating, writerVersion, conf);
}
 
Example #10
Source File: ParquetRecordWriter.java    From parquet-mr with Apache License 2.0
/**
 *
 * @param w the file to write to
 * @param writeSupport the class to convert incoming records
 * @param schema the schema of the records
 * @param extraMetaData extra meta data to write in the footer of the file
 * @param blockSize the size of a block in the file (this will be approximate)
 * @param pageSize the size of a page in the file (this will be approximate)
 * @param compressor the compressor used to compress the pages
 * @param dictionaryPageSize the threshold for dictionary size
 * @param enableDictionary to enable the dictionary
 * @param validating if schema validation should be turned on
 * @param writerVersion writer compatibility version
 * @param memoryManager memory manager for the write
 */
@Deprecated
public ParquetRecordWriter(
    ParquetFileWriter w,
    WriteSupport<T> writeSupport,
    MessageType schema,
    Map<String, String> extraMetaData,
    long blockSize, int pageSize,
    BytesCompressor compressor,
    int dictionaryPageSize,
    boolean enableDictionary,
    boolean validating,
    WriterVersion writerVersion,
    MemoryManager memoryManager) {
  ParquetProperties props = ParquetProperties.builder()
      .withPageSize(pageSize)
      .withDictionaryPageSize(dictionaryPageSize)
      .withDictionaryEncoding(enableDictionary)
      .withWriterVersion(writerVersion)
      .build();
  internalWriter = new InternalParquetRecordWriter<T>(w, writeSupport, schema,
      extraMetaData, blockSize, compressor, validating, props);
  this.memoryManager = Objects.requireNonNull(memoryManager, "memoryManager cannot be null");
  memoryManager.addWriter(internalWriter, blockSize);
  this.codecFactory = null;
}
 
Example #11
Source File: DictionaryFilterTest.java    From parquet-mr with Apache License 2.0
public DictionaryFilterTest(WriterVersion version) {
  this.version = version;
  switch (version) {
  case PARQUET_1_0:
    file = FILE_V1;
    break;
  case PARQUET_2_0:
    file = FILE_V2;
    break;
  }
}
 
Example #12
Source File: DefaultValuesWriterFactoryTest.java    From parquet-mr with Apache License 2.0
@Test
public void testDouble_V2_WithByteStreamSplit() {
  doTestValueWriter(
    PrimitiveTypeName.DOUBLE,
    WriterVersion.PARQUET_2_0,
    false,
    true,
    ByteStreamSplitValuesWriter.class);
}
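The BYTE_STREAM_SPLIT tests in this file all go through the getDefaultFactory/doTestValueWriter helpers (Examples #18, #20, #24). For orientation, the switch that drives them is the ParquetProperties builder; a minimal sketch using only methods visible in Example #20:

ParquetProperties props = ParquetProperties.builder()
    .withWriterVersion(WriterVersion.PARQUET_2_0)
    .withDictionaryEncoding(false)     // no dictionary, so the split writer is chosen directly
    .withByteStreamSplitEncoding(true) // exercised here for FLOAT and DOUBLE columns
    .build();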
 
Example #13
Source File: DictionaryFilterTest.java    From parquet-mr with Apache License 2.0
private static void prepareFile(WriterVersion version, Path file) throws IOException {
  GroupWriteSupport.setSchema(schema, conf);
  SimpleGroupFactory f = new SimpleGroupFactory(schema);
  ParquetWriter<Group> writer = ExampleParquetWriter.builder(file)
      .withWriterVersion(version)
      .withCompressionCodec(GZIP)
      .withRowGroupSize(1024*1024)
      .withPageSize(1024)
      .enableDictionaryEncoding()
      .withDictionaryPageSize(2*1024)
      .withConf(conf)
      .build();
  writeData(f, writer);
}
 
Example #14
Source File: FileEncodingsIT.java    From parquet-mr with Apache License 2.0
/**
 * Writes a set of values to a parquet file.
 * Dictionary encoding is enabled or disabled via the enableDictionary flag so that the
 * specific encodings of each writer version can be tested both with and without it.
 */
private void writeValuesToFile(Path file, PrimitiveTypeName type, List<?> values, int rowGroupSize, int pageSize, boolean enableDictionary, WriterVersion version) throws IOException {
  MessageType schema;
  if (type == PrimitiveTypeName.FIXED_LEN_BYTE_ARRAY) {
    schema = Types.buildMessage().required(type).length(FIXED_LENGTH).named("field").named("test");
  } else {
    schema = Types.buildMessage().required(type).named("field").named("test");
  }

  SimpleGroupFactory message = new SimpleGroupFactory(schema);
  GroupWriteSupport.setSchema(schema, configuration);

  ParquetWriter<Group> writer = ExampleParquetWriter.builder(file)
      .withCompressionCodec(compression)
      .withRowGroupSize(rowGroupSize)
      .withPageSize(pageSize)
      .withDictionaryPageSize(TEST_DICT_PAGE_SIZE)
      .withDictionaryEncoding(enableDictionary)
      .withWriterVersion(version)
      .withConf(configuration)
      .build();

  for (Object o: values) {
    switch (type) {
      case BOOLEAN:
        writer.write(message.newGroup().append("field", (Boolean)o));
      break;
      case INT32:
        writer.write(message.newGroup().append("field", (Integer)o));
      break;
      case INT64:
        writer.write(message.newGroup().append("field", (Long)o));
      break;
      case FLOAT:
        writer.write(message.newGroup().append("field", (Float)o));
      break;
      case DOUBLE:
        writer.write(message.newGroup().append("field", (Double)o));
      break;
      case INT96:
      case BINARY:
      case FIXED_LEN_BYTE_ARRAY:
        writer.write(message.newGroup().append("field", (Binary)o));
      break;
      default:
        throw new IllegalArgumentException("Unknown type name: " + type);
    }
  }

  writer.close();
}
 
Example #15
Source File: ParquetRecordWriter.java    From parquet-mr with Apache License 2.0
/**
 *
 * @param w the file to write to
 * @param writeSupport the class to convert incoming records
 * @param schema the schema of the records
 * @param extraMetaData extra meta data to write in the footer of the file
 * @param blockSize the size of a block in the file (this will be approximate)
 * @param pageSize the size of a page in the file (this will be approximate)
 * @param compressor the compressor used to compress the pages
 * @param dictionaryPageSize the threshold for dictionary size
 * @param enableDictionary to enable the dictionary
 * @param validating if schema validation should be turned on
 * @param writerVersion writer compatibility version
 */
@Deprecated
public ParquetRecordWriter(
    ParquetFileWriter w,
    WriteSupport<T> writeSupport,
    MessageType schema,
    Map<String, String> extraMetaData,
    int blockSize, int pageSize,
    BytesCompressor compressor,
    int dictionaryPageSize,
    boolean enableDictionary,
    boolean validating,
    WriterVersion writerVersion) {
  ParquetProperties props = ParquetProperties.builder()
      .withPageSize(pageSize)
      .withDictionaryPageSize(dictionaryPageSize)
      .withDictionaryEncoding(enableDictionary)
      .withWriterVersion(writerVersion)
      .build();
  internalWriter = new InternalParquetRecordWriter<T>(w, writeSupport, schema,
      extraMetaData, blockSize, compressor, validating, props);
  this.memoryManager = null;
  this.codecFactory = null;
}
 
Example #16
Source File: ParquetFileAccessor.java    From pxf with Apache License 2.0
/**
 * Opens the resource for write.
 * Uses the compression codec specified by the user, defaulting to Snappy.
 *
 * @return true if the resource is successfully opened
 * @throws IOException if opening the resource failed
 */
@Override
public boolean openForWrite() throws IOException {

    HcfsType hcfsType = HcfsType.getHcfsType(configuration, context);
    // skip codec extension in filePrefix, because we add it in this accessor
    filePrefix = hcfsType.getUriForWrite(configuration, context, true);
    String compressCodec = context.getOption("COMPRESSION_CODEC");
    codecName = codecFactory.getCodec(compressCodec, DEFAULT_COMPRESSION);

    // Options for parquet write
    pageSize = context.getOption("PAGE_SIZE", DEFAULT_PAGE_SIZE);
    rowGroupSize = context.getOption("ROWGROUP_SIZE", DEFAULT_ROWGROUP_SIZE);
    dictionarySize = context.getOption("DICTIONARY_PAGE_SIZE", DEFAULT_DICTIONARY_PAGE_SIZE);
    String parquetVerStr = context.getOption("PARQUET_VERSION");
    parquetVersion = parquetVerStr != null ? WriterVersion.fromString(parquetVerStr.toLowerCase()) : DEFAULT_PARQUET_VERSION;
    LOG.debug("{}-{}: Parquet options: PAGE_SIZE = {}, ROWGROUP_SIZE = {}, DICTIONARY_PAGE_SIZE = {}, PARQUET_VERSION = {}",
            context.getTransactionId(), context.getSegmentId(), pageSize, rowGroupSize, dictionarySize, parquetVersion);

    // Read schema file, if given
    String schemaFile = context.getOption("SCHEMA");
    MessageType schema = (schemaFile != null) ? readSchemaFile(schemaFile) :
            generateParquetSchema(context.getTupleDescription());
    LOG.debug("{}-{}: Schema fields = {}", context.getTransactionId(),
            context.getSegmentId(), schema.getFields());
    GroupWriteSupport.setSchema(schema, configuration);
    groupWriteSupport = new GroupWriteSupport();

    // We get the parquet schema and set it to the metadata in the request context
    // to avoid computing the schema again in the Resolver
    context.setMetadata(schema);
    createParquetWriter();
    return true;
}
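Note that the option value is lower-cased before parsing, so only the short names pass through this code path. A small behavior sketch of WriterVersion.fromString, worth verifying against your parquet-column version: it matches the short name first, falls back to the enum constant name, and otherwise throws.

WriterVersion.fromString("v1");          // PARQUET_1_0
WriterVersion.fromString("v2");          // PARQUET_2_0
WriterVersion.fromString("PARQUET_2_0"); // also resolves, but would fail here after toLowerCase()
WriterVersion.fromString("v3");          // throws IllegalArgumentException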
 
Example #17
Source File: DefaultValuesWriterFactoryTest.java    From parquet-mr with Apache License 2.0
@Test
public void testDouble_V2_WithByteStreamSplitAndDictionary() {
  doTestValueWriter(
    PrimitiveTypeName.DOUBLE,
    WriterVersion.PARQUET_2_0,
    true,
    true,
    PlainDoubleDictionaryValuesWriter.class, ByteStreamSplitValuesWriter.class);
}
 
Example #18
Source File: DefaultValuesWriterFactoryTest.java    From parquet-mr with Apache License 2.0
private void doTestValueWriter(PrimitiveTypeName typeName, WriterVersion version, boolean enableDictionary, boolean enableByteStreamSplit, Class<? extends ValuesWriter> expectedValueWriterClass) {
  ColumnDescriptor mockPath = createColumnDescriptor(typeName);
  ValuesWriterFactory factory = getDefaultFactory(version, enableDictionary, enableByteStreamSplit);
  ValuesWriter writer = factory.newValuesWriter(mockPath);

  validateWriterType(writer, expectedValueWriterClass);
}
 
Example #19
Source File: DefaultValuesWriterFactoryTest.java    From parquet-mr with Apache License 2.0
@Test
public void testFloat_V2_WithByteStreamSplitAndDictionary() {
  doTestValueWriter(
    PrimitiveTypeName.FLOAT,
    WriterVersion.PARQUET_2_0,
    true,
    true,
    PlainFloatDictionaryValuesWriter.class, ByteStreamSplitValuesWriter.class);
}
 
Example #20
Source File: DefaultValuesWriterFactoryTest.java    From parquet-mr with Apache License 2.0
private ValuesWriterFactory getDefaultFactory(WriterVersion writerVersion, boolean enableDictionary, boolean enableByteStreamSplit) {
  ValuesWriterFactory factory = new DefaultValuesWriterFactory();
  ParquetProperties.builder()
    .withDictionaryEncoding(enableDictionary)
    .withByteStreamSplitEncoding(enableByteStreamSplit)
    .withWriterVersion(writerVersion)
    .withValuesWriterFactory(factory)
    .build();

  return factory;
}
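The properties object built above is deliberately discarded: as this test's structure suggests (and the parquet-mr sources indicate), ParquetProperties.Builder.build() initializes the supplied ValuesWriterFactory with the finished properties, so the returned factory is ready for the newValuesWriter calls made in Examples #18 and #24.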
 
Example #21
Source File: DefaultValuesWriterFactoryTest.java    From parquet-mr with Apache License 2.0
private ValuesWriterFactory getDefaultFactory(WriterVersion writerVersion, boolean dictEnabledDefault, String... dictInverseColumns) {
  ValuesWriterFactory factory = new DefaultValuesWriterFactory();
  ParquetProperties.Builder builder = ParquetProperties.builder()
      .withDictionaryEncoding(dictEnabledDefault)
      .withWriterVersion(writerVersion)
      .withValuesWriterFactory(factory);
  for (String column : dictInverseColumns) {
    builder.withDictionaryEncoding(column, !dictEnabledDefault);
  }
  builder.build();

  return factory;
}
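The two-argument withDictionaryEncoding(column, enable) overload used above sets a per-column override that takes precedence over the one-argument default; a minimal sketch (the column path is hypothetical):

ParquetProperties props = ParquetProperties.builder()
    .withDictionaryEncoding(false)               // default for all columns
    .withDictionaryEncoding("binary_dict", true) // override for this column path only
    .build();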
 
Example #22
Source File: DefaultValuesWriterFactoryTest.java    From parquet-mr with Apache License 2.0
@Test
public void testDouble_V1_WithByteStreamSplitAndDictionary() {
  doTestValueWriter(
    PrimitiveTypeName.DOUBLE,
    WriterVersion.PARQUET_1_0,
    true,
    true,
    PlainDoubleDictionaryValuesWriter.class, ByteStreamSplitValuesWriter.class);
}
 
Example #23
Source File: DefaultValuesWriterFactoryTest.java    From parquet-mr with Apache License 2.0
@Test
public void testFloat_V1_WithByteStreamSplitAndDictionary() {
  doTestValueWriter(
    PrimitiveTypeName.FLOAT,
    WriterVersion.PARQUET_1_0,
    true,
    true,
    PlainFloatDictionaryValuesWriter.class, ByteStreamSplitValuesWriter.class);
}
 
Example #24
Source File: DefaultValuesWriterFactoryTest.java    From parquet-mr with Apache License 2.0
private void doTestValueWriter(PrimitiveTypeName typeName, WriterVersion version, boolean enableDictionary, boolean enableByteStreamSplit, Class<? extends ValuesWriter> initialValueWriterClass, Class<? extends ValuesWriter> fallbackValueWriterClass) {
  ColumnDescriptor mockPath = createColumnDescriptor(typeName);
  ValuesWriterFactory factory = getDefaultFactory(version, enableDictionary, enableByteStreamSplit);
  ValuesWriter writer = factory.newValuesWriter(mockPath);

  validateFallbackWriter(writer, initialValueWriterClass, fallbackValueWriterClass);
}
 
Example #25
Source File: DefaultValuesWriterFactoryTest.java    From parquet-mr with Apache License 2.0
@Test
public void testFloat_V2_WithByteStreamSplit() {
  doTestValueWriter(
    PrimitiveTypeName.FLOAT,
    WriterVersion.PARQUET_2_0,
    false,
    true,
    ByteStreamSplitValuesWriter.class);
}
 
Example #26
Source File: DefaultValuesWriterFactoryTest.java    From parquet-mr with Apache License 2.0
@Test
public void testDouble_V1_WithByteStreamSplit() {
  doTestValueWriter(
    PrimitiveTypeName.DOUBLE,
    WriterVersion.PARQUET_1_0,
    false,
    true,
    ByteStreamSplitValuesWriter.class);
}
 
Example #27
Source File: DefaultValuesWriterFactoryTest.java    From parquet-mr with Apache License 2.0
@Test
public void testFloat_V1_WithByteStreamSplit() {
  doTestValueWriter(
    PrimitiveTypeName.FLOAT,
    WriterVersion.PARQUET_1_0,
    false,
    true,
    ByteStreamSplitValuesWriter.class);
}
 
Example #28
Source File: DefaultValuesWriterFactoryTest.java    From parquet-mr with Apache License 2.0
@Test
public void testFloat_V2_NoDict() {
  doTestValueWriter(
    PrimitiveTypeName.FLOAT,
    WriterVersion.PARQUET_2_0,
    false,
    false,
    PlainValuesWriter.class);
}
 
Example #29
Source File: DefaultValuesWriterFactoryTest.java    From parquet-mr with Apache License 2.0
@Test
public void testFloat_V2() {
  doTestValueWriter(
    PrimitiveTypeName.FLOAT,
    WriterVersion.PARQUET_2_0,
    true,
    false,
    PlainFloatDictionaryValuesWriter.class, PlainValuesWriter.class);
}
 
Example #30
Source File: DefaultValuesWriterFactoryTest.java    From parquet-mr with Apache License 2.0
@Test
public void testFloat_NoDict() {
  doTestValueWriter(
    PrimitiveTypeName.FLOAT,
    WriterVersion.PARQUET_1_0,
    false,
    false,
    PlainValuesWriter.class);
}