org.apache.arrow.vector.ValueVector Java Examples

The following examples show how to use org.apache.arrow.vector.ValueVector. Each example is taken from an open-source project; the project, source file, and license are noted in the header above the code.
Example #1
Source File: FileSplitParquetRecordReader.java    From dremio-oss with Apache License 2.0
private void checkFieldTypesCompatibleWithHiveTable(OutputMutator readerOutputMutator, BatchSchema tableSchema) {
  for (ValueVector fieldVector : readerOutputMutator.getVectors()) {
    Field fieldInFileSchema = fieldVector.getField();
    Optional<Field> fieldInTable = tableSchema.findFieldIgnoreCase(fieldInFileSchema.getName());

    if (!fieldInTable.isPresent()) {
      throw UserException.validationError()
        .message("Field [%s] not found in table schema %s", fieldInFileSchema.getName(),
          tableSchema.getFields())
        .buildSilently();
    }

    boolean compatible = areFieldsCompatible(fieldInTable.get(), fieldInFileSchema);
    if (!compatible) {
      BatchSchemaField batchSchemaFieldInTable = BatchSchemaField.fromField(fieldInTable.get());
      BatchSchemaField batchSchemaFieldInFile = BatchSchemaField.fromField(fieldInFileSchema);
      throw UserException.unsupportedError().message("Field [%s] has incompatible types in file and table." +
          " Type in fileschema: [%s], type in tableschema: [%s]", fieldInFileSchema.getName(), batchSchemaFieldInFile, batchSchemaFieldInTable).buildSilently();
    }
  }
}
 
Example #2
Source File: HiveTextReader.java    From dremio-oss with Apache License 2.0
@Override
public void internalInit(InputSplit inputSplit, JobConf jobConf, ValueVector[] vectors) throws IOException {
  try (OperatorStats.WaitRecorder recorder = OperatorStats.getWaitRecorder(this.context.getStats())) {
    reader = jobConf.getInputFormat().getRecordReader(inputSplit, jobConf, Reporter.NULL);
  } catch (FSError e) {
    throw HadoopFileSystemWrapper.propagateFSError(e);
  }

  if(logger.isTraceEnabled()) {
    logger.trace("hive reader created: {} for inputSplit {}", reader.getClass().getName(), inputSplit.toString());
  }

  key = reader.createKey();
  final FileSplit fileSplit = (FileSplit)inputSplit;
  skipRecordsInspector = new SkipRecordsInspector(fileSplit.getStart(), jobConf, reader);

  if (!partitionOI.equals(finalOI)) {
    // If the partition and table have different schemas, create a converter
    partTblObjectInspectorConverter = ObjectInspectorConverters.getConverter(partitionOI, finalOI);
  }
}
 
Example #3
Source File: HiveORCCopiers.java    From dremio-oss with Apache License 2.0
UnionCopier(HiveColumnVectorData columnVectorData,
            int ordinalId,
            UnionColumnVector inputVector,
            UnionVector outputVector,
            HiveOperatorContextOptions operatorContextOptions) {
  this.inputVector = inputVector;
  this.outputVector = outputVector;
  // The loop below assumes that the getChildrenFromFields() API returns
  // the list of children in the same order as was provided when building the UnionVector.
  List<FieldVector> childArrowFields = outputVector.getChildrenFromFields();
  int childPos = ordinalId + 1; // first field is immediately next to union vector itself
  for (int idx=0; idx<childArrowFields.size(); ++idx) {
    if (idx < inputVector.fields.length) {
      ColumnVector hiveFieldVector = inputVector.fields[idx];
      ValueVector arrowfieldVector = childArrowFields.get(idx);
      arrowFieldVectors.add(arrowfieldVector);
      ORCCopier childCopier = createCopier(columnVectorData, childPos, arrowfieldVector, hiveFieldVector, operatorContextOptions);
      fieldCopiers.add(childCopier);
      childPos += columnVectorData.getTotalVectorCount(childPos);
    } else {
      fieldCopiers.add(new NoOpCopier(null, null));
    }
  }
}
 
Example #4
Source File: HiveNonVarcharCoercionReader.java    From dremio-oss with Apache License 2.0
public void runProjector(int recordCount) {
  if (projector != null) {
    try {
      if (recordCount > 0) {
        splitter.projectRecords(recordCount, javaCodeGenWatch,
          gandivaCodeGenWatch);
      }
      javaCodeGenWatch.start();
      projector.projectRecords(recordCount);
      javaCodeGenWatch.stop();
    } catch (Exception e) {
      throw Throwables.propagate(e);
    }
    for (final ValueVector v : allocationVectors) {
      v.setValueCount(recordCount);
    }
  }
  OperatorStats stats = context.getStats();
  stats.addLongStat(ScanOperator.Metric.JAVA_EXECUTE_TIME, javaCodeGenWatch.elapsed(TimeUnit.NANOSECONDS));
  stats.addLongStat(ScanOperator.Metric.GANDIVA_EXECUTE_TIME, gandivaCodeGenWatch.elapsed(TimeUnit.NANOSECONDS));
  javaCodeGenWatch.reset();
  gandivaCodeGenWatch.reset();
}
 
Example #5
Source File: TypeHelper.java    From dremio-oss with Apache License 2.0
public static void loadFromValidityAndDataBuffers(ValueVector v, SerializedField metadata, ArrowBuf dataBuffer,
    ArrowBuf validityBuffer) {
  if (v instanceof ZeroVector) {
    throw new UnsupportedOperationException(String.format("this loader is not supported for vector %s", v));
  } else if (v instanceof UnionVector) {
    throw new UnsupportedOperationException(String.format("this loader is not supported for vector %s", v));
  } else if (v instanceof ListVector) {
    throw new UnsupportedOperationException(String.format("this loader is not supported for vector %s", v));
  } else if (v instanceof StructVector) {
    throw new UnsupportedOperationException(String.format("this loader is not supported for vector %s", v));
  } else if (v instanceof NonNullableStructVector) {
    throw new UnsupportedOperationException(String.format("this loader is not supported for vector %s", v));
  }

  Optional<ValueVectorHelper> helper = getHelper(v);

  if (!helper.isPresent()) {
    throw new UnsupportedOperationException(String.format("no loader for vector %s", v));
  }

  helper.get().loadFromValidityAndDataBuffers(metadata, dataBuffer, validityBuffer);
}
 
Example #6
Source File: TypeHelper.java    From dremio-oss with Apache License 2.0
private static ValueVectorHelper getHelperNull(ValueVector v) {
  if (v instanceof ZeroVector) {
    return new ZeroVectorHelper((ZeroVector) v);
  } else if (v instanceof NullVector) {
    return new NullVectorHelper((NullVector) v);
  } else if (v instanceof UnionVector) {
    return new UnionVectorHelper((UnionVector) v);
  } else if (v instanceof ListVector) {
    return new ListVectorHelper((ListVector) v);
  } else if (v instanceof StructVector) {
    return new StructVectorHelper((StructVector) v);
  } else if (v instanceof NonNullableStructVector) {
    return new NonNullableStructVectorHelper((NonNullableStructVector) v);
  } else if (v instanceof BaseFixedWidthVector) {
    return new FixedWidthVectorHelper<BaseFixedWidthVector>((BaseFixedWidthVector) v);
  } else if (v instanceof BaseVariableWidthVector) {
    return new VariableWidthVectorHelper<BaseVariableWidthVector>((BaseVariableWidthVector) v);
  }

  return null;
}
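A minimal sketch of how the two TypeHelper examples fit together: Example #5 consumes an Optional<ValueVectorHelper>, and the getHelper lookup it calls could plausibly be implemented by wrapping the null-returning variant shown in Example #6 (this wrapper is an assumption for illustration, not quoted from the source).

// Hypothetical wrapper around getHelperNull(ValueVector) from Example #6.
public static Optional<ValueVectorHelper> getHelper(ValueVector v) {
  // Wrap the nullable lookup so callers (as in Example #5) can fail with a descriptive error when no helper exists.
  return Optional.ofNullable(getHelperNull(v));
}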
 
Example #7
Source File: HiveParquetCopier.java    From dremio-oss with Apache License 2.0
/**
 * Top-level driver routine that sets up one copier for each (input, output) vector pair.
 * @param context operator context
 * @param input list of input vectors
 * @param output list of output vectors
 * @param hiveTypeCoercion type coercion to apply between Hive and Arrow types
 * @param javaCodeGenWatch stopwatch accumulating Java code-generation time
 * @param gandivaCodeGenWatch stopwatch accumulating Gandiva code-generation time
 * @return one ParquetCopier per column
 */
public static ParquetCopier[] createCopiers(OperatorContext context,
                                            final ArrayList<ValueVector> input, final ArrayList<ValueVector> output,
                                            TypeCoercion hiveTypeCoercion,
                                            Stopwatch javaCodeGenWatch, Stopwatch gandivaCodeGenWatch) {
  Preconditions.checkArgument(input.size() == output.size(), "Invalid column size ("
    + Integer.toString(input.size()) + ", " +  Integer.toString(output.size()) + ")");
  final int numColumns = output.size();

  // create one copier for each (input, output) pair
  final ParquetCopier[] copiers = new ParquetCopier[numColumns];
  for (int pos = 0; pos < numColumns; pos++) {
    copiers[pos] = createCopier(context, input.get(pos), output.get(pos),
      hiveTypeCoercion, javaCodeGenWatch, gandivaCodeGenWatch);
  }

  return copiers;
}
 
Example #8
Source File: HiveParquetPrimitiveTypeReader.java    From dremio-oss with Apache License 2.0
public void runProjector(int recordCount, VectorContainer incoming) {
  try {
    hiveNonVarcharCoercionReader.runProjector(recordCount);
    if (recordCount > 0) {
      if (!fixedLenVarCharMap.isEmpty()) {
        for (VectorWrapper<? extends ValueVector> wrapper : incoming) {
          if (fixedLenVarCharMap.containsKey(wrapper.getField().getName())) {
            HiveVarcharTruncationReader varcharTruncationReader = fixedLenVarCharMap.get(wrapper.getField().getName());
            varcharTruncationReader.runProjector((BaseVariableWidthVector) wrapper.getValueVector(), recordCount,
              this.context, javaCodeGenWatch,
              gandivaCodeGenWatch);
          }
        }
      }
    }
  } catch (Exception e) {
    throw Throwables.propagate(e);
  }

  OperatorStats stats = this.context.getStats();
  stats.addLongStat(ScanOperator.Metric.JAVA_EXECUTE_TIME, javaCodeGenWatch.elapsed(TimeUnit.NANOSECONDS));
  stats.addLongStat(ScanOperator.Metric.GANDIVA_EXECUTE_TIME, gandivaCodeGenWatch.elapsed(TimeUnit.NANOSECONDS));
  javaCodeGenWatch.reset();
  gandivaCodeGenWatch.reset();
}
 
Example #9
Source File: TestMultiInputAdd.java    From dremio-oss with Apache License 2.0
@Test
public void testMultiInputAdd() throws Exception {
  List<QueryDataBatch> results = client.runQuery(com.dremio.exec.proto.UserBitShared.QueryType.PHYSICAL,
          Files.toString(FileUtils.getResourceAsFile("/functions/multi_input_add_test.json"), Charsets.UTF_8));
  try(RecordBatchLoader batchLoader = new RecordBatchLoader(nodes[0].getContext().getAllocator())){
    QueryDataBatch batch = results.get(0);
    assertTrue(batchLoader.load(batch.getHeader().getDef(), batch.getData()));

    for (VectorWrapper<?> v : batchLoader) {
      ValueVector vv = v.getValueVector();
      assertTrue(vv.getObject(0).equals(10));
    }

    batchLoader.clear();
    for (QueryDataBatch b : results) {
      b.release();
    }
  }
}
 
Example #10
Source File: StreamingAggOperator.java    From dremio-oss with Apache License 2.0
/**
 * Invoked when we have a straight aggregate (no group-by expression) and our input is empty.
 * In this case we construct an outgoing batch with a record count of 1. For the nullable vectors we don't set
 * anything, as we want the output to be NULL. For the required vectors (only for count()) we explicitly set the
 * value to zero, since the buffers are not zeroed out when they are initially allocated.
 */
private void constructSpecialBatch() {
  outgoing.allocateNew();
  List<NamedExpression> exprs = config.getAggrExprs();
  if(outgoing.getNumberOfColumns() != exprs.size()){
    throw new IllegalStateException();
  }
  int exprIndex = 0;
  for (final VectorWrapper<?> vw: outgoing) {
    final ValueVector vv = vw.getValueVector();
    if (!exprs.isEmpty() && isCount(exprs.get(exprIndex))) {
      ((BigIntVector) vv).setSafe(0, 0);
    }
    vv.setValueCount(SPECIAL_BATCH_COUNT);
    exprIndex++;
  }
  outgoing.setRecordCount(SPECIAL_BATCH_COUNT);
}
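A minimal standalone sketch of the special-batch idea described above: for a count()-style required vector, the single output slot is explicitly written as zero and the value count set to 1. The vector name and the BufferAllocator named allocator are assumed for illustration.

// Assumes an existing org.apache.arrow.memory.BufferAllocator named allocator.
try (BigIntVector countVector = new BigIntVector("count", allocator)) {
  countVector.allocateNew(1);
  countVector.setSafe(0, 0L);   // required count() output must be zero; buffers are not zero-filled on allocation
  countVector.setValueCount(1); // the special batch carries exactly one record
}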
 
Example #11
Source File: CompliantTextRecordReader.java    From dremio-oss with Apache License 2.0
@Override
public void allocate(Map<String, ValueVector> vectorMap) throws OutOfMemoryException {
  int estimatedRecordCount;
  if ((reader != null) && (reader.getInput() != null) && (vectorMap.size() > 0)) {
    final OptionManager options = context.getOptions();
    final int listSizeEstimate = (int) options.getOption(ExecConstants.BATCH_LIST_SIZE_ESTIMATE);
    final int varFieldSizeEstimate = (int) options.getOption(ExecConstants.BATCH_VARIABLE_FIELD_SIZE_ESTIMATE);
    final int estimatedRecordSize = BatchSchema.estimateRecordSize(vectorMap, listSizeEstimate, varFieldSizeEstimate);
    if (estimatedRecordSize > 0) {
      estimatedRecordCount = (int) Math.min(reader.getInput().length / estimatedRecordSize, numRowsPerBatch);
    } else {
      estimatedRecordCount = (int) numRowsPerBatch;
    }
  } else {
    estimatedRecordCount = (int) numRowsPerBatch;
  }
  for (final ValueVector v : vectorMap.values()) {
    v.setInitialCapacity(estimatedRecordCount);
    v.allocateNew();
  }
}
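The setInitialCapacity/allocateNew pair used above is the generic allocation pattern for any ValueVector. A minimal sketch with a single VarCharVector; the vector name, capacity, and the allocator are assumed for illustration (java.nio.charset.StandardCharsets is used for the byte conversion).

// Assumes an existing org.apache.arrow.memory.BufferAllocator named allocator.
try (VarCharVector v = new VarCharVector("city", allocator)) {
  v.setInitialCapacity(4096);  // hint for how many records the next batch is expected to hold
  v.allocateNew();             // allocate validity, offset and data buffers
  v.setSafe(0, "Amsterdam".getBytes(StandardCharsets.UTF_8));
  v.setValueCount(1);          // record how many slots are actually populated
}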
 
Example #12
Source File: WindowFrameOperator.java    From dremio-oss with Apache License 2.0
private int doWork() throws Exception {
  final VectorContainer current = batches.get(0);
  final int recordCount = current.getRecordCount();

  logger.trace("WindowFramer.doWork() START, num batches {}, current batch has {} rows", batches.size(), recordCount);

  // allocate outgoing vectors
  outgoing.allocateNew();

  for (WindowFramer framer : framers) {
    framer.doWork();
  }

  // transfer "non aggregated" vectors
  for (VectorWrapper<?> vw : current) {
    ValueVector v = outgoing.addOrGet(vw.getField());
    TransferPair tp = vw.getValueVector().makeTransferPair(v);
    tp.transfer();
  }

  if (recordCount > 0) {
    try {
      outgoing.setAllCount(recordCount);
    } catch (RuntimeException ex) {
      throw ex;
    }
  }

  // we can safely free the current batch
  current.close();
  batches.remove(0);

  logger.trace("doWork() END");
  return recordCount;
}
 
Example #13
Source File: SmallIntToScaledFixedConverter.java    From snowflake-jdbc with Apache License 2.0
public SmallIntToScaledFixedConverter(ValueVector fieldVector, int columnIndex, DataConversionContext context,
                                      int sfScale)
{
  super(fieldVector,
        columnIndex,
        context);
  logicalTypeStr = String.format("%s(%s,%s)", SnowflakeType.FIXED,
                                 fieldVector.getField().getMetadata().get("precision"),
                                 fieldVector.getField().getMetadata().get("scale"));
  format = ArrowResultUtil.getStringFormat(sfScale);
  this.sfScale = sfScale;
}
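The converter above reads "precision" and "scale" from the Arrow field metadata. A minimal sketch of building a SmallIntVector whose Field carries that metadata, so that getField().getMetadata().get("precision") returns a value as the converter expects; the field name, values, and allocator are illustrative assumptions.

// Assumes an existing org.apache.arrow.memory.BufferAllocator named allocator.
Map<String, String> metadata = new HashMap<>();
metadata.put("precision", "5");
metadata.put("scale", "2");
FieldType type = new FieldType(true, new ArrowType.Int(16, true), null, metadata);
Field field = new Field("amount", type, null);
try (SmallIntVector vector = new SmallIntVector(field, allocator)) {
  vector.allocateNew(1);
  vector.setSafe(0, (short) 12345); // raw scaled value; 123.45 with scale 2
  vector.setValueCount(1);
}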
 
Example #14
Source File: NonNullableStructVectorHelper.java    From dremio-oss with Apache License 2.0
public SerializedField getMetadata() {
  SerializedField.Builder b = SerializedField.newBuilder()
      .setNamePart(NamePart.newBuilder().setName(structVector.getField().getName()))
      .setMajorType(Types.optional(MinorType.STRUCT))
      .setBufferLength(structVector.getBufferSize())
      .setValueCount(structVector.valueCount);


  for(ValueVector v : structVector.getChildren()) {
    b.addChild(TypeHelper.getMetadata(v));
  }
  return b.build();
}
 
Example #15
Source File: HashAggTemplate.java    From dremio-oss with Apache License 2.0
public BatchHolder() {
  this.aggrValuesContainer = new VectorContainer();
  boolean success = false;
  try {
    ValueVector vector;

    for (int i = 0; i < materializedValueFields.length; i++) {
      Field outputField = materializedValueFields[i];
      // Create a type-specific ValueVector for this value
      vector = TypeHelper.getNewVector(outputField, allocator);

      // Try to allocate space to store BATCH_SIZE records. A key stored at index i in the HashTable has its
      // workspace variables (such as count, sum etc.) stored at index i in the HashAgg. HashTable and HashAgg both
      // have BatchHolders. Whenever a BatchHolder in HashAgg reaches its capacity, a new BatchHolder is added to
      // the HashTable. If HashAgg can't store BATCH_SIZE records in a BatchHolder, it leaves empty slots in the
      // current BatchHolder in the HashTable, making the HashTable space-inefficient. So it is better to allocate
      // space to fit as close to BATCH_SIZE records as possible.
      if (vector instanceof FixedWidthVector) {
        ((FixedWidthVector) vector).allocateNew(HashTable.BATCH_SIZE);
      } else if (vector instanceof VariableWidthVector) {
        ((VariableWidthVector) vector).allocateNew(((long) HashTable.VARIABLE_WIDTH_VECTOR_SIZE) * HashTable.BATCH_SIZE,
            HashTable.BATCH_SIZE);
      } else if (vector instanceof ObjectVector) {
        ((ObjectVector) vector).allocateNew(HashTable.BATCH_SIZE);
      } else {
        vector.allocateNew();
      }

      capacity = Math.min(capacity, vector.getValueCapacity());

      aggrValuesContainer.add(vector);
    }
    success = true;
  } finally {
    if (!success) {
      aggrValuesContainer.close();
    }
  }
}
 
Example #16
Source File: Fixtures.java    From dremio-oss with Apache License 2.0
public DataHolder(RecordBatchData data){
  this.sv2 = data.getSchema().getSelectionVectorMode() == SelectionVectorMode.TWO_BYTE ? data.getSv2() : null;
  List<ValueVector> vectors = new ArrayList<>();
  for(VectorWrapper<?> w : data.getContainer()){
    vectors.add(w.getValueVector());
  }
  this.vectors = ImmutableList.copyOf(vectors);
}
 
Example #17
Source File: TestPivotRoundtrip.java    From dremio-oss with Apache License 2.0
@Test
public void varcharRoundtrip() throws Exception {
  final int count = 1024;
  try (
    VarCharVector in = new VarCharVector("in", allocator);
    VarCharVector out = new VarCharVector("out", allocator);
  ) {

    in.allocateNew(count * 8, count);

    for (int i = 0; i < count; i++) {
      if (i % 5 == 0) {
        byte[] data = ("hello-" + i).getBytes(Charsets.UTF_8);
        in.setSafe(i, data, 0, data.length);
      }
    }
    in.setValueCount(count);

    final PivotDef pivot = PivotBuilder.getBlockDefinition(new FieldVectorPair(in, out));
    try (
      final FixedBlockVector fbv = new FixedBlockVector(allocator, pivot.getBlockWidth());
      final VariableBlockVector vbv = new VariableBlockVector(allocator, pivot.getVariableCount());
    ) {
      fbv.ensureAvailableBlocks(count);
      Pivots.pivot(pivot, count, fbv, vbv);

      ValueVector[] ins = new ValueVector[]{in};
      ValueVector[] outs = new ValueVector[]{out};
      unpivotHelper(pivot, fbv, vbv, ins, outs, 0, count);
      unpivotHelper(pivot, fbv, vbv, ins, outs, 0, 100);
      unpivotHelper(pivot, fbv, vbv, ins, outs, 100, 924);
    }
  }
}
 
Example #18
Source File: ThreeFieldStructToTimestampTZConverter.java    From snowflake-jdbc with Apache License 2.0
public ThreeFieldStructToTimestampTZConverter(ValueVector fieldVector, int columnIndex, DataConversionContext context)
{
  super(SnowflakeType.TIMESTAMP_LTZ.name(), fieldVector, columnIndex, context);
  structVector = (StructVector) fieldVector;
  epochs = structVector.getChild(FIELD_NAME_EPOCH, BigIntVector.class);
  fractions = structVector.getChild(FIELD_NAME_FRACTION, IntVector.class);
  timeZoneIndices = structVector.getChild(FIELD_NAME_TIME_ZONE_INDEX, IntVector.class);
}
 
Example #19
Source File: HiveParquetCoercionReader.java    From dremio-oss with Apache License 2.0
@Override
public void allocate(Map<String, ValueVector> vectorMap) throws OutOfMemoryException {
  // do not allocate if called by FilteringReader
  if (nextMethodState == HiveParquetCoercionReader.NextMethodState.NOT_CALLED_BY_FILTERING_READER) {
    super.allocate(vectorMap);
    inner.allocate(mutator.getFieldVectorMap());
  }
}
 
Example #20
Source File: ConvertFromJsonOperator.java    From dremio-oss with Apache License 2.0
public JsonConverter(ConversionColumn column, int sizeLimit, int maxLeafLimit, T vector, ValueVector outgoingVector) {
  this.column = column;
  this.vector = vector;
  this.writer = VectorAccessibleComplexWriter.getWriter(column.getInputField(), outgoing);
  this.reader = new JsonReader(context.getManagedBuffer(), sizeLimit, maxLeafLimit, context.getOptions().getOption(ExecConstants.JSON_READER_ALL_TEXT_MODE_VALIDATOR), false, false);
  this.outgoingVector = outgoingVector;
  this.sizeLimit = sizeLimit;
}
 
Example #21
Source File: DecimalToScaledFixedConverter.java    From snowflake-jdbc with Apache License 2.0
public DecimalToScaledFixedConverter(ValueVector fieldVector, int vectorIndex, DataConversionContext context)
{
  super(String.format("%s(%s,%s)", SnowflakeType.FIXED,
                      fieldVector.getField().getMetadata().get("precision"),
                      fieldVector.getField().getMetadata().get("scale")),
        fieldVector,
        vectorIndex,
        context);
  decimalVector = (DecimalVector) fieldVector;
}
 
Example #22
Source File: TinyIntToScaledFixedConverter.java    From snowflake-jdbc with Apache License 2.0
public TinyIntToScaledFixedConverter(ValueVector fieldVector, int columnIndex, DataConversionContext context,
                                     int sfScale)
{
  super(fieldVector,
        columnIndex,
        context);
  logicalTypeStr = String.format("%s(%s,%s)", SnowflakeType.FIXED,
                                 fieldVector.getField().getMetadata().get("precision"),
                                 fieldVector.getField().getMetadata().get("scale"));
  format = ArrowResultUtil.getStringFormat(sfScale);
  this.sfScale = sfScale;
}
 
Example #23
Source File: TestHashJoinAdvanced.java    From dremio-oss with Apache License 2.0
@Test
public void simpleEqualityJoin() throws Throwable {
  // Function checks hash join with single equality condition

  final String plan = Files.toString(FileUtils.getResourceAsFile("/join/hash_join.json"), Charsets.UTF_8)
                  .replace("#{TEST_FILE_1}", FileUtils.getResourceAsFile("/build_side_input.json").toURI().toString())
                  .replace("#{TEST_FILE_2}", FileUtils.getResourceAsFile("/probe_side_input.json").toURI().toString());

  List<QueryDataBatch> results = testRunAndReturn(QueryType.PHYSICAL, plan);
  try(RecordBatchLoader batchLoader = new RecordBatchLoader(nodes[0].getContext().getAllocator())){

    QueryDataBatch batch = results.get(1);
    assertTrue(batchLoader.load(batch.getHeader().getDef(), batch.getData()));

    Iterator<VectorWrapper<?>> itr = batchLoader.iterator();

    // Just test the join key
    final long[] colA = {1, 1, 2, 2, 1, 1};

    // Check the output of the join key column
    ValueVector intValueVector = itr.next().getValueVector();

    for (int i = 0; i < intValueVector.getValueCount(); i++) {
      assertEquals(intValueVector.getObject(i), colA[i]);
    }
    assertEquals(6, intValueVector.getValueCount());
  }

  for (QueryDataBatch result : results) {
    result.release();
  }

}
 
Example #24
Source File: TimeWriter.java    From flink with Apache License 2.0
@Override
public void doWrite(T in, int ordinal) {
	ValueVector valueVector = getValueVector();
	if (isNullAt(in, ordinal)) {
		((BaseFixedWidthVector) valueVector).setNull(getCount());
	} else if (valueVector instanceof TimeSecVector) {
		((TimeSecVector) valueVector).setSafe(getCount(), readTime(in, ordinal) / 1000);
	} else if (valueVector instanceof TimeMilliVector) {
		((TimeMilliVector) valueVector).setSafe(getCount(), readTime(in, ordinal));
	} else if (valueVector instanceof TimeMicroVector) {
		((TimeMicroVector) valueVector).setSafe(getCount(), readTime(in, ordinal) * 1000L);
	} else {
		((TimeNanoVector) valueVector).setSafe(getCount(), readTime(in, ordinal) * 1000000L);
	}
}
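The writer above scales a time-of-day value to whichever Arrow time vector it targets. A minimal sketch of the underlying vector call for the millisecond case; the vector name and allocator are assumed for illustration.

// Assumes an existing org.apache.arrow.memory.BufferAllocator named allocator.
try (TimeMilliVector times = new TimeMilliVector("eventTime", allocator)) {
  times.allocateNew(2);
  times.setSafe(0, 34_200_000); // 09:30:00 expressed as milliseconds since midnight
  times.setNull(1);             // null slot, mirroring the isNullAt branch above
  times.setValueCount(2);
}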
 
Example #25
Source File: HyperVectorWrapper.java    From dremio-oss with Apache License 2.0
/**
 * Transfer vectors to destination HyperVectorWrapper.
 * Both this and the destination must be of the same type and have the same number of vectors.
 * @param destination destination HyperVectorWrapper.
 */
@Override
public void transfer(VectorWrapper<?> destination) {
  Preconditions.checkArgument(destination instanceof HyperVectorWrapper);
  Preconditions.checkArgument(getField().getType().equals(destination.getField().getType()));
  Preconditions.checkArgument(vectors.size() == ((HyperVectorWrapper<?>)destination).vectors.size());

  List<ValueVector> destinationVectors = (List<ValueVector>) (((HyperVectorWrapper<?>) destination).vectors);
  for (int i = 0; i < vectors.size(); ++i) {
    vectors.get(i).makeTransferPair(destinationVectors.get(i)).transfer();
  }
}
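Both this example and Example #12 move buffers between vectors with makeTransferPair. A minimal sketch with two IntVectors; the names and the allocator are assumed for illustration.

// Assumes an existing org.apache.arrow.memory.BufferAllocator named allocator.
try (IntVector source = new IntVector("src", allocator);
     IntVector target = new IntVector("dst", allocator)) {
  source.allocateNew(3);
  for (int i = 0; i < 3; i++) {
    source.setSafe(i, i * 10);
  }
  source.setValueCount(3);

  TransferPair pair = source.makeTransferPair(target);
  pair.transfer(); // buffer ownership moves to target; source is left empty
}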
 
Example #26
Source File: TypeHelper.java    From dremio-oss with Apache License 2.0
public static void loadData(ValueVector v, SerializedField metadata, ArrowBuf buffer) {
  Optional<ValueVectorHelper> helper = getHelper(v);

  if (!helper.isPresent()) {
    throw new UnsupportedOperationException(String.format("no loader for vector %s", v));
  }

  helper.get().loadData(metadata, buffer);
}
 
Example #27
Source File: TestQueryReAttempt.java    From dremio-oss with Apache License 2.0
private static void assertContainsFields(Set<String> fields, List<ValueVector> original) {
  assertEquals(original.size(), fields.size());

  for (ValueVector vv : original) {
    final String fieldName = vv.getField().getName();
    assertTrue("original field '" + fieldName + "' is not part of the new schema", fields.contains(fieldName));
  }
}
 
Example #28
Source File: SplitStageExecutor.java    From dremio-oss with Apache License 2.0
void evaluateProjector(int recordsToConsume, Stopwatch javaWatch, Stopwatch gandivaWatch) throws Exception {
  try {
    allocateNew(recordsToConsume);

    gandivaWatch.start();
    nativeProjectEvaluator.evaluate(recordsToConsume);
    gandivaWatch.stop();
    javaWatch.start();
    javaProjector.projectRecords(recordsToConsume);
    javaWatch.stop();

    setValueCount(recordsToConsume);
    transferOut();
  } catch (Exception e) {
    // release memory allocated in case of an exception
    for(ValueVector vv : allocationVectors) {
      vv.clear();
    }

    for(ComplexWriter writer : complexWriters) {
      writer.clear();
    }

    throw e;
  } finally {
    markSplitOutputAsRead();
  }
}
 
Example #29
Source File: TestQueryReAttempt.java    From dremio-oss with Apache License 2.0
private static Set<String> extractSchema(RecordBatchData batch) {
  Set<String> schema = Sets.newHashSet();
  for (ValueVector vv : batch.getVectors()) {
    schema.add(vv.getField().getName());
  }
  return schema;
}
 
Example #30
Source File: TestQueryReAttempt.java    From dremio-oss with Apache License 2.0
@Test
public void testSupportedSchemaChange() throws Exception {
  // original schema [field1=bit, field2=int]
  // new schema [field1=bit, field3=struct, field2=int]
  List<ValueVector> original = Arrays.<ValueVector>asList(bitVector("field1"), intVector("field2"));
  List<ValueVector> updated = Arrays.asList(bitVector("field1"), structVector("field3"), intVector("field2"));
  checkSchemaChange(original, updated);
}