org.apache.parquet.example.data.simple.convert.GroupRecordConverter Java Examples

The following examples show how to use org.apache.parquet.example.data.simple.convert.GroupRecordConverter. Each example is taken from an open-source project; the originating source file and its license are noted above each snippet.
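Before the project-specific examples, here is a minimal, self-contained sketch of the typical read loop: open a Parquet file, build a MessageColumnIO from the file schema, and let a GroupRecordConverter materialize each row of every row group as a Group. The class name and the "data.parquet" path are placeholders for illustration, not taken from any of the projects below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.column.page.PageReadStore;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.example.data.simple.convert.GroupRecordConverter;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.util.HadoopInputFile;
import org.apache.parquet.io.ColumnIOFactory;
import org.apache.parquet.io.MessageColumnIO;
import org.apache.parquet.io.RecordReader;
import org.apache.parquet.schema.MessageType;

public class GroupRecordConverterSketch {
    public static void main(String[] args) throws Exception {
        // "data.parquet" is a hypothetical path; replace it with a real file.
        Path path = new Path("data.parquet");
        try (ParquetFileReader reader =
                 ParquetFileReader.open(HadoopInputFile.fromPath(path, new Configuration()))) {
            // The file schema drives both the column I/O layout and the converter.
            MessageType schema = reader.getFooter().getFileMetaData().getSchema();
            MessageColumnIO columnIO = new ColumnIOFactory().getColumnIO(schema);

            PageReadStore rowGroup;
            while ((rowGroup = reader.readNextRowGroup()) != null) {
                // GroupRecordConverter materializes each row of the row group as a Group.
                RecordReader<Group> recordReader =
                    columnIO.getRecordReader(rowGroup, new GroupRecordConverter(schema));
                for (long i = 0; i < rowGroup.getRowCount(); i++) {
                    System.out.println(recordReader.read());
                }
            }
        }
    }
}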
Example #1
Source File: ParquetResolverTest.java    From pxf with Apache License 2.0
@SuppressWarnings("deprecation")
private List<Group> readParquetFile(String file, long expectedSize, MessageType schema) throws IOException {
    List<Group> result = new ArrayList<>();
    String parquetFile = Objects.requireNonNull(getClass().getClassLoader().getResource("parquet/" + file)).getPath();
    Path path = new Path(parquetFile);

    ParquetFileReader fileReader = new ParquetFileReader(new Configuration(), path, ParquetMetadataConverter.NO_FILTER);
    PageReadStore rowGroup;
    while ((rowGroup = fileReader.readNextRowGroup()) != null) {
        MessageColumnIO columnIO = new ColumnIOFactory().getColumnIO(schema);
        RecordReader<Group> recordReader = columnIO.getRecordReader(rowGroup, new GroupRecordConverter(schema));
        long rowCount = rowGroup.getRowCount();
        for (long i = 0; i < rowCount; i++) {
            result.add(recordReader.read());
        }
    }
    fileReader.close();
    assertEquals(expectedSize, result.size());
    return result;
}
 
Example #2
Source File: TestFiltered.java    From parquet-mr with Apache License 2.0
@Test
public void testFilteredNotPaged() {
  MessageColumnIO columnIO =  new ColumnIOFactory(true).getColumnIO(schema);
  MemPageStore memPageStore = writeTestRecords(columnIO, 8);

  RecordMaterializer<Group> recordConverter = new GroupRecordConverter(schema);
  RecordReaderImplementation<Group> recordReader = (RecordReaderImplementation<Group>)
      columnIO.getRecordReader(memPageStore, recordConverter,
          FilterCompat.get(not(column("DocId", equalTo(10l)))));

  List<Group> all = readAll(recordReader);
  assertEquals("expecting 8 records " + all, 8, all.size());
  for (int i = 0; i < all.size(); i++) {
    assertEquals("expecting record2", r2.toString(), all.get(i).toString());
  }
}
 
Example #3
Source File: TestFiltered.java    From parquet-mr with Apache License 2.0
@Test
public void testFilteredOrPaged() {
  MessageColumnIO columnIO =  new ColumnIOFactory(true).getColumnIO(schema);
  MemPageStore memPageStore = writeTestRecords(columnIO, 8);

  RecordMaterializer<Group> recordConverter = new GroupRecordConverter(schema);
  RecordReaderImplementation<Group> recordReader = (RecordReaderImplementation<Group>)
      columnIO.getRecordReader(memPageStore, recordConverter,
          FilterCompat.get(or(column("DocId", equalTo(10l)),
              column("DocId", equalTo(20l)))));

  List<Group> all = readAll(recordReader);
  assertEquals("expecting 8 records " + all, 16, all.size());
  for (int i = 0; i < all.size () / 2; i++) {
    assertEquals("expecting record1", r1.toString(), all.get(2 * i).toString());
    assertEquals("expecting record2", r2.toString(), all.get(2 * i + 1).toString());
  }
}
 
Example #4
Source File: TestFiltered.java    From parquet-mr with Apache License 2.0
@Test
public void testFilteredAndPaged() {
  MessageColumnIO columnIO =  new ColumnIOFactory(true).getColumnIO(schema);
  MemPageStore memPageStore = writeTestRecords(columnIO, 8);

  RecordMaterializer<Group> recordConverter = new GroupRecordConverter(schema);
  RecordReaderImplementation<Group> recordReader = (RecordReaderImplementation<Group>)
      columnIO.getRecordReader(memPageStore, recordConverter,
          FilterCompat.get(and(column("DocId", equalTo(10l)), page(2, 4))));

  List<Group> all = readAll(recordReader);
  assertEquals("expecting 4 records " + all, 4, all.size());
  for (int i = 0; i < all.size(); i++) {
    assertEquals("expecting record1", r1.toString(), all.get(i).toString());
  }

}
 
Example #5
Source File: TestFiltered.java    From parquet-mr with Apache License 2.0
@Test
public void testPaged() {
  MessageColumnIO columnIO =  new ColumnIOFactory(true).getColumnIO(schema);
  MemPageStore memPageStore = writeTestRecords(columnIO, 6);

  RecordMaterializer<Group> recordConverter = new GroupRecordConverter(schema);
  RecordReaderImplementation<Group> recordReader = (RecordReaderImplementation<Group>)
      columnIO.getRecordReader(memPageStore, recordConverter,
          FilterCompat.get(page(4, 4)));

  List<Group> all = readAll(recordReader);
  assertEquals("expecting records " + all, 4, all.size());
  for (int i = 0; i < all.size(); i++) {
    assertEquals("expecting record", (i%2 == 0 ? r2 : r1).toString(), all.get(i).toString());
  }
}
 
Example #6
Source File: TestFiltered.java    From parquet-mr with Apache License 2.0
@Test
public void testApplyFunctionFilterOnLong() {
  MessageColumnIO columnIO =  new ColumnIOFactory(true).getColumnIO(schema);
  MemPageStore memPageStore = writeTestRecords(columnIO, 1);

  // Get first record
  RecordMaterializer<Group> recordConverter = new GroupRecordConverter(schema);
  RecordReaderImplementation<Group> recordReader = (RecordReaderImplementation<Group>)
      columnIO.getRecordReader(memPageStore, recordConverter,
          FilterCompat.get(column("DocId", equalTo(10l))));

  readOne(recordReader, "r2 filtered out", r1);

  // Get second record
  recordReader = (RecordReaderImplementation<Group>)
      columnIO.getRecordReader(memPageStore, recordConverter,
          FilterCompat.get(column("DocId", applyFunctionToLong(new LongGreaterThan15Predicate()))));

  readOne(recordReader, "r1 filtered out", r2);
}
 
Example #7
Source File: TestFiltered.java    From parquet-mr with Apache License 2.0
@Test
public void testFilterOnInteger() {
  MessageColumnIO columnIO =  new ColumnIOFactory(true).getColumnIO(schema);
  MemPageStore memPageStore = writeTestRecords(columnIO, 1);

  // Get first record
  RecordMaterializer<Group> recordConverter = new GroupRecordConverter(schema);
  RecordReaderImplementation<Group> recordReader = (RecordReaderImplementation<Group>)
      columnIO.getRecordReader(memPageStore, recordConverter, FilterCompat.get(column("DocId", equalTo(10l))));

  readOne(recordReader, "r2 filtered out", r1);

  // Get second record
  recordReader = (RecordReaderImplementation<Group>)
      columnIO.getRecordReader(memPageStore, recordConverter,
          FilterCompat.get(column("DocId", equalTo(20l))));

  readOne(recordReader, "r1 filtered out", r2);

}
 
Example #8
Source File: ParquetFileLineFetcher.java    From hugegraph-loader with Apache License 2.0
private boolean fetchNextPage() {
    try {
        this.pages = this.reader.readNextRowGroup();
        if (this.pages == null) {
            return false;
        }
    } catch (IOException e) {
        throw new LoadException("Failed to read next page for '%s'", e);
    }
    GroupRecordConverter converter = new GroupRecordConverter(this.schema);
    this.recordReader = this.columnIO.getRecordReader(this.pages, converter);
    this.pagesRowCount = this.pages.getRowCount();
    this.currRowOffset = 0L;
    return this.currRowOffset < this.pagesRowCount;
}
 
Example #9
Source File: TestTupleRecordConsumer.java    From parquet-mr with Apache License 2.0
private void testFromGroups(String pigSchemaString, List<Group> input) throws ParserException {
  List<Tuple> tuples = new ArrayList<Tuple>();
  MessageType schema = getMessageType(pigSchemaString);
  RecordMaterializer<Tuple> pigRecordConsumer = newPigRecordConsumer(pigSchemaString);
  GroupWriter groupWriter = new GroupWriter(new RecordConsumerLoggingWrapper(new ConverterConsumer(pigRecordConsumer.getRootConverter(), schema)), schema);

  for (Group group : input) {
    groupWriter.write(group);
    final Tuple tuple = pigRecordConsumer.getCurrentRecord();
    tuples.add(tuple);
    LOG.debug("in: {}\nout:{}", group, tuple);
  }

  List<Group> groups = new ArrayList<Group>();
  GroupRecordConverter recordConsumer = new GroupRecordConverter(schema);
  TupleWriteSupport tupleWriter = newTupleWriter(pigSchemaString, recordConsumer);
  for (Tuple t : tuples) {
    LOG.debug("{}", t);
    tupleWriter.write(t);
    groups.add(recordConsumer.getCurrentRecord());
  }

  assertEquals(input.size(), groups.size());
  for (int i = 0; i < input.size(); i++) {
    Group in = input.get(i);
    LOG.debug("{}", in);
    Group out = groups.get(i);
    assertEquals(in.toString(), out.toString());
  }
}
 
Example #10
Source File: TestFiltered.java    From parquet-mr with Apache License 2.0
@Test
public void testApplyFunctionFilterOnString() {
  MessageColumnIO columnIO =  new ColumnIOFactory(true).getColumnIO(schema);
  MemPageStore memPageStore = writeTestRecords(columnIO, 1);

  // First try matching against the A url in record 1
  RecordMaterializer<Group> recordConverter = new GroupRecordConverter(schema);
  RecordReaderImplementation<Group> recordReader = (RecordReaderImplementation<Group>)
      columnIO.getRecordReader(memPageStore, recordConverter,
          FilterCompat.get(column("Name.Url", applyFunctionToString(new StringEndsWithAPredicate()))));

  readOne(recordReader, "r2 filtered out", r1);

  // Second try matching against the B url in record 1 - it should fail as we only match
  // against the first instance of a url
  recordReader = (RecordReaderImplementation<Group>)
      columnIO.getRecordReader(memPageStore, recordConverter,
          FilterCompat.get(column("Name.Url", equalTo("http://B"))));

  List<Group> all = readAll(recordReader);
  assertEquals("There should be no matching records: " + all , 0, all.size());

  // Finally try matching against the C url in record 2
  recordReader = (RecordReaderImplementation<Group>)
      columnIO.getRecordReader(memPageStore, recordConverter,
          FilterCompat.get(column("Name.Url", equalTo("http://C"))));

  readOne(recordReader, "r1 filtered out", r2);

}
 
Example #11
Source File: TestFiltered.java    From parquet-mr with Apache License 2.0
@Test
public void testFilterOnString() {
  MessageColumnIO columnIO =  new ColumnIOFactory(true).getColumnIO(schema);
  MemPageStore memPageStore = writeTestRecords(columnIO, 1);

  // First try matching against the A url in record 1
  RecordMaterializer<Group> recordConverter = new GroupRecordConverter(schema);
  RecordReaderImplementation<Group> recordReader = (RecordReaderImplementation<Group>)
      columnIO.getRecordReader(memPageStore, recordConverter,
          FilterCompat.get(column("Name.Url", equalTo("http://A"))));

  readOne(recordReader, "r2 filtered out", r1);

  // Second try matching against the B url in record 1 - it should fail as we only match
  // against the first instance of a url
  recordReader = (RecordReaderImplementation<Group>)
      columnIO.getRecordReader(memPageStore, recordConverter,
          FilterCompat.get(column("Name.Url", equalTo("http://B"))));

  List<Group> all = readAll(recordReader);
  assertEquals("There should be no matching records: " + all , 0, all.size());

  // Finally try matching against the C url in record 2
  recordReader = (RecordReaderImplementation<Group>)
      columnIO.getRecordReader(memPageStore, recordConverter,
          FilterCompat.get(column("Name.Url", equalTo("http://C"))));

  readOne(recordReader, "r1 filtered out", r2);

}
 
Example #12
Source File: TestColumnIO.java    From parquet-mr with Apache License 2.0
@Test
public void testGroupWriter() {
  List<Group> result = new ArrayList<>();
  final GroupRecordConverter groupRecordConverter = new GroupRecordConverter(schema);
  RecordConsumer groupConsumer = new ConverterConsumer(groupRecordConverter.getRootConverter(), schema);
  GroupWriter groupWriter = new GroupWriter(new RecordConsumerLoggingWrapper(groupConsumer), schema);
  groupWriter.write(r1);
  result.add(groupRecordConverter.getCurrentRecord());
  groupWriter.write(r2);
  result.add(groupRecordConverter.getCurrentRecord());
  assertEquals("deserialization does not display the expected result", result.get(0).toString(), r1.toString());
  assertEquals("deserialization does not display the expected result", result.get(1).toString(), r2.toString());
}
 
Example #13
Source File: SparkModelParser.java    From ignite with Apache License 2.0
/**
 * Load logistic regression model.
 *
 * @param pathToMdl Path to model.
 * @param learningEnvironment Learning environment.
 */
private static Model loadLogRegModel(String pathToMdl,
    LearningEnvironment learningEnvironment) {
    Vector coefficients = null;
    double interceptor = 0;

    try (ParquetFileReader r = ParquetFileReader.open(HadoopInputFile.fromPath(new Path(pathToMdl), new Configuration()))) {
        PageReadStore pages;

        final MessageType schema = r.getFooter().getFileMetaData().getSchema();
        final MessageColumnIO colIO = new ColumnIOFactory().getColumnIO(schema);

        while (null != (pages = r.readNextRowGroup())) {
            final long rows = pages.getRowCount();
            final RecordReader recordReader = colIO.getRecordReader(pages, new GroupRecordConverter(schema));
            for (int i = 0; i < rows; i++) {
                final SimpleGroup g = (SimpleGroup)recordReader.read();
                interceptor = readInterceptor(g);
                coefficients = readCoefficients(g);
            }
        }

    }
    catch (IOException e) {
        String msg = "Error reading parquet file: " + e.getMessage();
        learningEnvironment.logger().log(MLLogger.VerboseLevel.HIGH, msg);
        e.printStackTrace();
    }

    return new LogisticRegressionModel(coefficients, interceptor);
}
 
Example #14
Source File: SparkModelParser.java    From ignite with Apache License 2.0
/**
 * Load linear regression model.
 *
 * @param pathToMdl Path to model.
 * @param learningEnvironment Learning environment.
 */
private static Model loadLinRegModel(String pathToMdl,
    LearningEnvironment learningEnvironment) {
    Vector coefficients = null;
    double interceptor = 0;

    try (ParquetFileReader r = ParquetFileReader.open(HadoopInputFile.fromPath(new Path(pathToMdl), new Configuration()))) {
        PageReadStore pages;

        final MessageType schema = r.getFooter().getFileMetaData().getSchema();
        final MessageColumnIO colIO = new ColumnIOFactory().getColumnIO(schema);

        while (null != (pages = r.readNextRowGroup())) {
            final long rows = pages.getRowCount();
            final RecordReader recordReader = colIO.getRecordReader(pages, new GroupRecordConverter(schema));
            for (int i = 0; i < rows; i++) {
                final SimpleGroup g = (SimpleGroup)recordReader.read();
                interceptor = readLinRegInterceptor(g);
                coefficients = readLinRegCoefficients(g);
            }
        }

    }
    catch (IOException e) {
        String msg = "Error reading parquet file: " + e.getMessage();
        learningEnvironment.logger().log(MLLogger.VerboseLevel.HIGH, msg);
        e.printStackTrace();
    }

    return new LinearRegressionModel(coefficients, interceptor);
}
 
Example #15
Source File: SparkModelParser.java    From ignite with Apache License 2.0
/**
 * Load SVM model.
 *
 * @param pathToMdl Path to model.
 * @param learningEnvironment Learning environment.
 */
private static Model loadLinearSVMModel(String pathToMdl,
    LearningEnvironment learningEnvironment) {
    Vector coefficients = null;
    double interceptor = 0;

    try (ParquetFileReader r = ParquetFileReader.open(HadoopInputFile.fromPath(new Path(pathToMdl), new Configuration()))) {
        PageReadStore pages;

        final MessageType schema = r.getFooter().getFileMetaData().getSchema();
        final MessageColumnIO colIO = new ColumnIOFactory().getColumnIO(schema);

        while (null != (pages = r.readNextRowGroup())) {
            final long rows = pages.getRowCount();
            final RecordReader recordReader = colIO.getRecordReader(pages, new GroupRecordConverter(schema));
            for (int i = 0; i < rows; i++) {
                final SimpleGroup g = (SimpleGroup)recordReader.read();
                interceptor = readSVMInterceptor(g);
                coefficients = readSVMCoefficients(g);
            }
        }
    }
    catch (IOException e) {
        String msg = "Error reading parquet file: " + e.getMessage();
        learningEnvironment.logger().log(MLLogger.VerboseLevel.HIGH, msg);
        e.printStackTrace();
    }

    return new SVMLinearClassificationModel(coefficients, interceptor);
}
 
Example #16
Source File: SparkModelParser.java    From ignite with Apache License 2.0
/**
 * Load Decision Tree model.
 *
 * @param pathToMdl Path to model.
 * @param learningEnvironment Learning environment.
 */
private static Model loadDecisionTreeModel(String pathToMdl, LearningEnvironment learningEnvironment) {
    try (ParquetFileReader r = ParquetFileReader.open(HadoopInputFile.fromPath(new Path(pathToMdl), new Configuration()))) {
        PageReadStore pages;

        final MessageType schema = r.getFooter().getFileMetaData().getSchema();
        final MessageColumnIO colIO = new ColumnIOFactory().getColumnIO(schema);
        final Map<Integer, NodeData> nodes = new TreeMap<>();

        while (null != (pages = r.readNextRowGroup())) {
            final long rows = pages.getRowCount();
            final RecordReader recordReader = colIO.getRecordReader(pages, new GroupRecordConverter(schema));

            for (int i = 0; i < rows; i++) {
                final SimpleGroup g = (SimpleGroup)recordReader.read();
                NodeData nodeData = extractNodeDataFromParquetRow(g);
                nodes.put(nodeData.id, nodeData);
            }
        }
        return buildDecisionTreeModel(nodes);
    }
    catch (IOException e) {
        String msg = "Error reading parquet file: " + e.getMessage();
        learningEnvironment.logger().log(MLLogger.VerboseLevel.HIGH, msg);
        e.printStackTrace();
    }
    return null;
}
 
Example #17
Source File: TestColumnIO.java    From parquet-mr with Apache License 2.0
private RecordReaderImplementation<Group> getRecordReader(MessageColumnIO columnIO, MessageType schema, PageReadStore pageReadStore) {
  RecordMaterializer<Group> recordConverter = new GroupRecordConverter(schema);

  return (RecordReaderImplementation<Group>)columnIO.getRecordReader(pageReadStore, recordConverter);
}
 
Example #18
Source File: ParquetHdfsDataWriterTest.java    From incubator-gobblin with Apache License 2.0
@Override
public RecordMaterializer<Group> prepareForRead(Configuration conf, Map<String, String> metaData,
    MessageType schema, ReadContext context) {
  return new GroupRecordConverter(schema);
}
 
Example #19
Source File: GroupReadSupport.java    From parquet-mr with Apache License 2.0
@Override
public RecordMaterializer<Group> prepareForRead(Configuration configuration,
    Map<String, String> keyValueMetaData, MessageType fileSchema,
    org.apache.parquet.hadoop.api.ReadSupport.ReadContext readContext) {
  return new GroupRecordConverter(readContext.getRequestedSchema());
}
 
Example #20
Source File: SparkModelParser.java    From ignite with Apache License 2.0
/**
 * Load K-Means model.
 *
 * @param pathToMdl Path to model.
 * @param learningEnvironment learningEnvironment
 */
private static Model loadKMeansModel(String pathToMdl,
    LearningEnvironment learningEnvironment) {
    Vector[] centers = null;

    try (ParquetFileReader r = ParquetFileReader.open(HadoopInputFile.fromPath(new Path(pathToMdl), new Configuration()))) {
        PageReadStore pages;
        final MessageType schema = r.getFooter().getFileMetaData().getSchema();
        final MessageColumnIO colIO = new ColumnIOFactory().getColumnIO(schema);

        while (null != (pages = r.readNextRowGroup())) {
            final int rows = (int)pages.getRowCount();
            final RecordReader recordReader = colIO.getRecordReader(pages, new GroupRecordConverter(schema));
            centers = new DenseVector[rows];

            for (int i = 0; i < rows; i++) {
                final SimpleGroup g = (SimpleGroup)recordReader.read();
                // final int clusterIdx = g.getInteger(0, 0);

                Group clusterCenterCoeff = g.getGroup(1, 0).getGroup(3, 0);

                final int amountOfCoefficients = clusterCenterCoeff.getFieldRepetitionCount(0);

                centers[i] = new DenseVector(amountOfCoefficients);

                for (int j = 0; j < amountOfCoefficients; j++) {
                    double coefficient = clusterCenterCoeff.getGroup(0, j).getDouble(0, 0);
                    centers[i].set(j, coefficient);
                }
            }
        }

    }
    catch (IOException e) {
        String msg = "Error reading parquet file: " + e.getMessage();
        learningEnvironment.logger().log(MLLogger.VerboseLevel.HIGH, msg);
        e.printStackTrace();
    }

    return new KMeansModel(centers, new EuclideanDistance());
}
 
Example #21
Source File: GroupReadSupport.java    From iow-hadoop-streaming with Apache License 2.0
@Override
public RecordMaterializer<Group> prepareForRead(Configuration configuration,
                                                Map<String, String> keyValueMetaData, MessageType fileSchema, ReadContext readContext) {
    return new GroupRecordConverter(readContext.getRequestedSchema());
}