org.apache.kudu.client.PartialRow Java Examples

The following examples show how to use org.apache.kudu.client.PartialRow. They are extracted from open source projects; the source file and originating project are noted above each example.
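Before the project examples, here is a minimal, self-contained sketch of the basic PartialRow workflow: build a Schema, obtain a PartialRow from it (or from an Insert/Upsert/Delete via getRow()), and set column values by name or by index. The schema, column names, and values below are purely illustrative, not taken from any of the projects cited here.

import java.util.Arrays;

import org.apache.kudu.ColumnSchema;
import org.apache.kudu.Schema;
import org.apache.kudu.Type;
import org.apache.kudu.client.PartialRow;

public class PartialRowBasics {
    public static void main(String[] args) {
        // Illustrative two-column schema; in real code the schema usually
        // comes from KuduTable.getSchema().
        Schema schema = new Schema(Arrays.asList(
                new ColumnSchema.ColumnSchemaBuilder("id", Type.INT64).key(true).build(),
                new ColumnSchema.ColumnSchemaBuilder("name", Type.STRING).nullable(true).build()));

        // A PartialRow is obtained from a Schema, or from an operation via getRow().
        PartialRow row = schema.newPartialRow();
        row.addLong("id", 42L);       // columns can be addressed by name ...
        row.addString(1, "example");  // ... or by column index
        System.out.println(row);
    }
}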
Example #1
Source File: Tables.java    From kudu-ts with Apache License 2.0
/**
 * The {@code tagsets} table is range partitioned on the {@code id} column.
 * Because the table is essentially a linear probe hash table, it must be able
 * to be scanned in PK order, so hash partitioning is not possible. Since the
 * tagset IDs are effectively random, setting split points at even intervals
 * over the ID range gives good protection against hotspotting.
 * @param options the create options
 * @param numTabletServers the number of tablet servers
 * @return the tagset table create options
 */
static CreateTableOptions tagsetsCreateTableOptions(CreateOptions options,
                                                    int numTabletServers) {
  CreateTableOptions create = new CreateTableOptions();
  create.setNumReplicas(options.getNumReplicas());

  create.setRangePartitionColumns(ImmutableList.of("id"));

  int numTablets = options.getNumTagsetsTablets(numTabletServers);
  long interval = (1L << 32) / numTablets;
  for (int i = 1; i < numTablets; i++) {
    PartialRow split = TAGSETS_SCHEMA.newPartialRow();
    split.addInt(TAGSETS_ID_INDEX, (int) (Integer.MIN_VALUE + i * interval));
    create.addSplitRow(split);
  }
  return create;
}
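To make the split-point arithmetic above concrete, the following standalone sketch (not part of kudu-ts) prints the values the loop would generate for a hypothetical numTablets of 4; they divide the signed 32-bit ID space into four equal ranges.

// Standalone sketch: the same interval math as the example above, with numTablets = 4.
public class SplitPointSketch {
    public static void main(String[] args) {
        int numTablets = 4;                       // hypothetical tablet count
        long interval = (1L << 32) / numTablets;  // 1073741824, i.e. 2^30
        for (int i = 1; i < numTablets; i++) {
            // Prints -1073741824, 0, 1073741824: three split points that divide
            // the full int32 ID range into four equal, contiguous ranges.
            System.out.println((int) (Integer.MIN_VALUE + i * interval));
        }
    }
}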
 
Example #2
Source File: KuduTableProperties.java    From presto-kudu with Apache License 2.0
private static RangeBoundValue buildRangePartitionBound(KuduTable table, byte[] rangeKey) throws Exception {
    if (rangeKey.length == 0) {
        return null;
    } else {
        Schema schema = table.getSchema();
        PartitionSchema partitionSchema = table.getPartitionSchema();
        PartitionSchema.RangeSchema rangeSchema = partitionSchema.getRangeSchema();
        List<Integer> rangeColumns = rangeSchema.getColumns();

        final int numColumns = rangeColumns.size();

        PartialRow bound = KeyEncoderAccessor.decodeRangePartitionKey(schema, partitionSchema, rangeKey);

        RangeBoundValue value = new RangeBoundValue();
        ArrayList<Object> list = new ArrayList<>();
        for (int i = 0; i < numColumns; i++) {
            Object obj = toValue(schema, bound, rangeColumns.get(i));
            list.add(obj);
        }
        value.setValues(list);
        return value;
    }
}
 
Example #3
Source File: KuduTableProperties.java    From presto-kudu with Apache License 2.0
private static Object toValue(Schema schema, PartialRow bound, Integer idx) {
    Type type = schema.getColumnByIndex(idx).getType();
    switch (type) {
        case UNIXTIME_MICROS:
            long millis = bound.getLong(idx) / 1000;
            return ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC).print(millis);
        case STRING:
            return bound.getString(idx);
        case INT64:
            return bound.getLong(idx);
        case INT32:
            return bound.getInt(idx);
        case INT16:
            return bound.getShort(idx);
        case INT8:
            short s = bound.getByte(idx);
            return s;
        case BOOL:
            return bound.getBoolean(idx);
        case BINARY:
            return bound.getBinaryCopy(idx);
        default:
            throw new IllegalStateException("Unhandled type " + type + " for range partition");
    }
}
 
Example #4
Source File: KuduTableProperties.java    From presto-kudu with Apache License 2.0
public static PartialRow toRangeBoundToPartialRow(Schema schema, RangePartitionDefinition definition,
                                                  RangeBoundValue boundValue) {
    PartialRow partialRow = new PartialRow(schema);
    if (boundValue != null) {
        List<Integer> rangeColumns = definition.getColumns().stream()
                .map(name -> schema.getColumnIndex(name)).collect(toImmutableList());

        if (rangeColumns.size() != boundValue.getValues().size()) {
            throw new IllegalStateException("Expected " + rangeColumns.size()
                    + " range columns, but got " + boundValue.getValues().size());
        }
        for (int i = 0; i < rangeColumns.size(); i++) {
            Object obj = boundValue.getValues().get(i);
            int idx = rangeColumns.get(i);
            ColumnSchema columnSchema = schema.getColumnByIndex(idx);
            setColumnValue(partialRow, idx, obj, columnSchema.getType(), columnSchema.getName());
        }
    }
    return partialRow;
}
 
Example #5
Source File: KuduPageSink.java    From presto-kudu with Apache License 2.0
@Override
public CompletableFuture<?> appendPage(Page page) {
    for (int position = 0; position < page.getPositionCount(); position++) {
        Upsert upsert = table.newUpsert();
        PartialRow row = upsert.getRow();
        int start = 0;
        if (generateUUID) {
            String id = String.format("%s-%08x", uuid, nextSubId++);
            row.addString(0, id);
            start = 1;
        }

        for (int channel = 0; channel < page.getChannelCount(); channel++) {
            appendColumn(row, page, position, channel, channel + start);
        }

        try {
            session.apply(upsert);
        } catch (KuduException e) {
            throw new RuntimeException(e);
        }
    }
    return NOT_BLOCKED;
}
 
Example #6
Source File: KuduPageSink.java    From presto with Apache License 2.0
@Override
public CompletableFuture<?> appendPage(Page page)
{
    for (int position = 0; position < page.getPositionCount(); position++) {
        Upsert upsert = table.newUpsert();
        PartialRow row = upsert.getRow();
        int start = 0;
        if (generateUUID) {
            String id = format("%s-%08x", uuid, nextSubId++);
            row.addString(0, id);
            start = 1;
        }

        for (int channel = 0; channel < page.getChannelCount(); channel++) {
            appendColumn(row, page, position, channel, channel + start);
        }

        try {
            session.apply(upsert);
        }
        catch (KuduException e) {
            throw new RuntimeException(e);
        }
    }
    return NOT_BLOCKED;
}
 
Example #7
Source File: KuduUpdatablePageSource.java    From presto-kudu with Apache License 2.0
@Override
public void deleteRows(Block rowIds) {
    Schema schema = table.getSchema();
    KuduSession session = clientSession.newSession();
    session.setFlushMode(FlushMode.AUTO_FLUSH_BACKGROUND);
    try {
        try {
            for (int i = 0; i < rowIds.getPositionCount(); i++) {
                int len = rowIds.getSliceLength(i);
                Slice slice = rowIds.getSlice(i, 0, len);
                PartialRow row = KeyEncoderAccessor.decodePrimaryKey(schema, slice.getBytes());
                Delete delete = table.newDelete();
                RowHelper.copyPrimaryKey(schema, row, delete.getRow());
                session.apply(delete);
            }
        } finally {
            session.close();
        }
    } catch (KuduException e) {
        throw new RuntimeException(e);
    }
}
 
Example #8
Source File: AbstractSingleOperationMapper.java    From bahir-flink with Apache License 2.0
@Override
public List<Operation> createOperations(T input, KuduTable table) {
    Optional<Operation> operationOpt = createBaseOperation(input, table);
    if (!operationOpt.isPresent()) {
        return Collections.emptyList();
    }

    Operation operation = operationOpt.get();
    PartialRow partialRow = operation.getRow();

    for (int i = 0; i < columnNames.length; i++) {
        partialRow.addObject(columnNames[i], getField(input, i));
    }

    return Collections.singletonList(operation);
}
 
Example #9
Source File: PojoOperationMapperTest.java    From bahir-flink with Apache License 2.0
@Test
void testPojoMapper() {

    PojoOperationMapper<BookInfo> mapper = new PojoOperationMapper<>(BookInfo.class, KuduTestBase.columns, AbstractSingleOperationMapper.KuduOperation.INSERT);

    BookInfo bookInfo = KuduTestBase.booksDataPojo().get(0);

    assertEquals(bookInfo.id, mapper.getField(bookInfo, 0));
    assertEquals(bookInfo.title, mapper.getField(bookInfo, 1));
    assertEquals(bookInfo.author, mapper.getField(bookInfo, 2));
    assertEquals(bookInfo.price, mapper.getField(bookInfo, 3));
    assertEquals(bookInfo.quantity, mapper.getField(bookInfo, 4));

    List<Operation> operations = mapper.createOperations(bookInfo, mockTable);
    assertEquals(1, operations.size());

    PartialRow row = operations.get(0).getRow();
    Mockito.verify(row, Mockito.times(1)).addObject("id", bookInfo.id);
    Mockito.verify(row, Mockito.times(1)).addObject("quantity", bookInfo.quantity);

    Mockito.verify(row, Mockito.times(1)).addObject("title", bookInfo.title);
    Mockito.verify(row, Mockito.times(1)).addObject("author", bookInfo.author);

    Mockito.verify(row, Mockito.times(1)).addObject("price", bookInfo.price);
}
 
Example #10
Source File: AbstractKuduTest.java    From syndesis with Apache License 2.0
protected void insertRowInTestTable(final String tableName, final String connection) throws KuduException {
    try (KuduClient client = new KuduClient.KuduClientBuilder(connection).build()) {

        final KuduTable table = client.openTable(tableName);

        final Insert insert = table.newInsert();
        final PartialRow row = insert.getRow();

        row.addInt("id", ThreadLocalRandom.current().nextInt(1, 99));
        row.addString("title", "Mr.");
        row.addString("name", "Samuel");
        row.addString("lastname", "Smith");
        row.addString("address", "4359  Plainfield Avenue");

        client.newSession().apply(insert);
    }
}
 
Example #11
Source File: KuduUpdatablePageSource.java    From presto with Apache License 2.0
@Override
public void deleteRows(Block rowIds)
{
    Schema schema = table.getSchema();
    KuduSession session = clientSession.newSession();
    session.setFlushMode(FlushMode.AUTO_FLUSH_BACKGROUND);
    try {
        try {
            for (int i = 0; i < rowIds.getPositionCount(); i++) {
                int len = rowIds.getSliceLength(i);
                Slice slice = rowIds.getSlice(i, 0, len);
                PartialRow row = KeyEncoderAccessor.decodePrimaryKey(schema, slice.getBytes());
                Delete delete = table.newDelete();
                RowHelper.copyPrimaryKey(schema, row, delete.getRow());
                session.apply(delete);
            }
        }
        finally {
            session.close();
        }
    }
    catch (KuduException e) {
        throw new RuntimeException(e);
    }
}
 
Example #12
Source File: KuduRecordConverter.java    From datacollector with Apache License 2.0
public void convert(Record record, PartialRow row, int operation) throws OnRecordErrorException {
  for (Map.Entry<String, String> entry : fieldsToColumns.entrySet()) {
    String fieldName =  entry.getKey();
    if (fieldConverter != null) {
      fieldName = fieldConverter.getFieldPath(fieldName, operation);
    }
    String column = entry.getValue();  // column name in Kudu table
    // For delete, we only need to fill primary key column name & value in PartialRow
    if (operation == KuduOperationType.DELETE.code) {
      for (ColumnSchema col : schema.getPrimaryKeyColumns()) {
        if (col.getName().equals(column)) {
          recordToRow(record, row, fieldName, column, operation);
        }
      }
    } else {
      // For other operations, we need to know the operation
      // to correctly fill the record.
      recordToRow(record, row, fieldName, column, operation);
    }
  }
}
 
Example #13
Source File: KuduTableProperties.java    From presto with Apache License 2.0
public static PartialRow toRangeBoundToPartialRow(Schema schema, RangePartitionDefinition definition,
        RangeBoundValue boundValue)
{
    PartialRow partialRow = new PartialRow(schema);
    if (boundValue != null) {
        List<Integer> rangeColumns = definition.getColumns().stream()
                .map(schema::getColumnIndex).collect(toImmutableList());

        if (rangeColumns.size() != boundValue.getValues().size()) {
            throw new IllegalStateException("Expected " + rangeColumns.size()
                    + " range columns, but got " + boundValue.getValues().size());
        }
        for (int i = 0; i < rangeColumns.size(); i++) {
            Object obj = boundValue.getValues().get(i);
            int idx = rangeColumns.get(i);
            ColumnSchema columnSchema = schema.getColumnByIndex(idx);
            setColumnValue(partialRow, idx, obj, columnSchema.getType(), columnSchema.getName());
        }
    }
    return partialRow;
}
 
Example #14
Source File: KuduTableProperties.java    From presto with Apache License 2.0
private static Object toValue(Schema schema, PartialRow bound, Integer idx)
{
    Type type = schema.getColumnByIndex(idx).getType();
    switch (type) {
        case UNIXTIME_MICROS:
            long millis = bound.getLong(idx) / 1000;
            return ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC).print(millis);
        case STRING:
            return bound.getString(idx);
        case INT64:
            return bound.getLong(idx);
        case INT32:
            return bound.getInt(idx);
        case INT16:
            return bound.getShort(idx);
        case INT8:
            return (short) bound.getByte(idx);
        case BOOL:
            return bound.getBoolean(idx);
        case BINARY:
            return bound.getBinaryCopy(idx);
        default:
            throw new IllegalStateException("Unhandled type " + type + " for range partition");
    }
}
 
Example #15
Source File: KuduTableProperties.java    From presto with Apache License 2.0
private static RangeBoundValue buildRangePartitionBound(KuduTable table, byte[] rangeKey)
{
    if (rangeKey.length == 0) {
        return null;
    }
    else {
        Schema schema = table.getSchema();
        PartitionSchema partitionSchema = table.getPartitionSchema();
        PartitionSchema.RangeSchema rangeSchema = partitionSchema.getRangeSchema();
        List<Integer> rangeColumns = rangeSchema.getColumns();

        final int numColumns = rangeColumns.size();

        PartialRow bound = KeyEncoderAccessor.decodeRangePartitionKey(schema, partitionSchema, rangeKey);

        ArrayList<Object> list = new ArrayList<>();
        for (int i = 0; i < numColumns; i++) {
            Object obj = toValue(schema, bound, rangeColumns.get(i));
            list.add(obj);
        }
        return new RangeBoundValue(list);
    }
}
 
Example #16
Source File: KuduClientTestCommons.java    From attic-apex-malhar with Apache License 2.0
public static void createTestTable(String tableName, KuduClient client) throws Exception
{
  List<String> rangeKeys = new ArrayList<>();
  rangeKeys.add("introwkey");
  List<String> hashPartitions = new ArrayList<>();
  hashPartitions.add("stringrowkey");
  hashPartitions.add("timestamprowkey");
  CreateTableOptions thisTableOptions = new CreateTableOptions()
      .setNumReplicas(1)
      .addHashPartitions(hashPartitions,HASH_BUCKETS_SIZE_FOR_ALL_HASH_COL)
      .setRangePartitionColumns(rangeKeys);
  int stepsize = Integer.MAX_VALUE / SPLIT_COUNT_FOR_INT_ROW_KEY;
  int splitBoundary = stepsize;
  Schema schema = buildSchemaForUnitTestsTable();
  for ( int i = 0; i < SPLIT_COUNT_FOR_INT_ROW_KEY; i++) {
    PartialRow splitRowBoundary = schema.newPartialRow();
    splitRowBoundary.addInt("introwkey",splitBoundary);
    thisTableOptions = thisTableOptions.addSplitRow(splitRowBoundary);
    splitBoundary += stepsize;
  }
  try {
    client.createTable(tableName, schema,thisTableOptions);
  } catch (KuduException e) {
    LOG.error("Error while creating table for unit tests " + e.getMessage(), e);
    throw e;
  }

}
 
Example #17
Source File: KuduInputOperatorCommons.java    From attic-apex-malhar with Apache License 2.0
public void addTestDataRows(int numRowsInEachPartition) throws Exception
{
  int intRowKeyStepsize = Integer.MAX_VALUE / SPLIT_COUNT_FOR_INT_ROW_KEY;
  int splitBoundaryForIntRowKey = intRowKeyStepsize;
  int[] inputrowkeyPartitionEntries = new int[SPLIT_COUNT_FOR_INT_ROW_KEY + 1];
  // setting the int keys that will fall in the range of all partitions
  for ( int i = 0; i < SPLIT_COUNT_FOR_INT_ROW_KEY; i++) {
    inputrowkeyPartitionEntries[i] = splitBoundaryForIntRowKey + 3; // 3 to fall into the partition next to boundary
    splitBoundaryForIntRowKey += intRowKeyStepsize;
  }
  inputrowkeyPartitionEntries[SPLIT_COUNT_FOR_INT_ROW_KEY] = splitBoundaryForIntRowKey + 3;
  AbstractKuduPartitionScanner<UnitTestTablePojo,InputOperatorControlTuple> scannerForAddingRows =
      unitTestStepwiseScanInputOperator.getScanner();
  ApexKuduConnection aCurrentConnection = scannerForAddingRows.getConnectionPoolForThreads().get(0);
  KuduSession aSessionForInserts = aCurrentConnection.getKuduClient().newSession();
  KuduTable currentTable = aCurrentConnection.getKuduTable();
  long seedValueForTimestampRowKey = 0L; // constant to allow for data landing on first partition for unit tests
  for ( int i = 0; i <= SPLIT_COUNT_FOR_INT_ROW_KEY; i++) { // range key iterator
    int intRowKeyBaseValue = inputrowkeyPartitionEntries[i] + i;
    for ( int k = 0; k < 2; k++) { // hash key iterator . The table defines two hash partitions
      long timestampRowKeyValue = seedValueForTimestampRowKey + k; // to avoid spilling to another tablet
      String stringRowKeyValue = "" + timestampRowKeyValue + k; // to avoid spilling to another tablet randomly
      for ( int y = 0; y < numRowsInEachPartition; y++) {
        Upsert aNewRow = currentTable.newUpsert();
        PartialRow rowValue  = aNewRow.getRow();
        // Start assigning row keys below the current split boundary.
        rowValue.addInt("introwkey",intRowKeyBaseValue - y - 1);
        rowValue.addString("stringrowkey",stringRowKeyValue);
        rowValue.addLong("timestamprowkey",timestampRowKeyValue);
        rowValue.addLong("longdata",(seedValueForTimestampRowKey + y));
        rowValue.addString("stringdata", ("" + seedValueForTimestampRowKey + y));
        OperationResponse response = aSessionForInserts.apply(aNewRow);
      }
    }
  }
  List<OperationResponse> insertResponse = aSessionForInserts.flush();
  aSessionForInserts.close();
  Thread.sleep(2000); // Sleep to allow for scans to complete
}
 
Example #18
Source File: KuduPageSink.java    From presto-kudu with Apache License 2.0
private void appendColumn(PartialRow row, Page page, int position, int channel, int destChannel) {
    Block block = page.getBlock(channel);
    Type type = columnTypes.get(destChannel);
    if (block.isNull(position)) {
        row.setNull(destChannel);
    } else if (TIMESTAMP.equals(type)) {
        row.addLong(destChannel, type.getLong(block, position) * 1000);
    } else if (REAL.equals(type)) {
        row.addFloat(destChannel, intBitsToFloat((int) type.getLong(block, position)));
    } else if (BIGINT.equals(type)) {
        row.addLong(destChannel, type.getLong(block, position));
    } else if (INTEGER.equals(type)) {
        row.addInt(destChannel, (int) type.getLong(block, position));
    } else if (SMALLINT.equals(type)) {
        row.addShort(destChannel, (short) type.getLong(block, position));
    } else if (TINYINT.equals(type)) {
        row.addByte(destChannel, (byte) type.getLong(block, position));
    } else if (BOOLEAN.equals(type)) {
        row.addBoolean(destChannel, type.getBoolean(block, position));
    } else if (DOUBLE.equals(type)) {
        row.addDouble(destChannel, type.getDouble(block, position));
    } else if (isVarcharType(type)) {
        Type originalType = originalColumnTypes.get(destChannel);
        if (DATE.equals(originalType)) {
            SqlDate date = (SqlDate) originalType.getObjectValue(connectorSession, block, position);
            LocalDateTime ldt = LocalDateTime.ofEpochSecond(TimeUnit.DAYS.toSeconds(date.getDays()), 0, ZoneOffset.UTC);
            byte[] bytes = ldt.format(DateTimeFormatter.ISO_LOCAL_DATE).getBytes(Charsets.UTF_8);
            row.addStringUtf8(destChannel, bytes);
        } else {
            row.addString(destChannel, type.getSlice(block, position).toStringUtf8());
        }
    } else if (VARBINARY.equals(type)) {
        row.addBinary(destChannel, type.getSlice(block, position).toByteBuffer());
    } else if (type instanceof DecimalType) {
        SqlDecimal sqlDecimal = (SqlDecimal) type.getObjectValue(connectorSession, block, position);
        row.addDecimal(destChannel, sqlDecimal.toBigDecimal());
    } else {
        throw new UnsupportedOperationException("Type is not supported: " + type);
    }
}
 
Example #19
Source File: Tables.java    From kudu-ts with Apache License 2.0
/**
 * The {@code metrics} table is hash partitioned on the {@code metric} and
 * {@code tagset_id} columns, and range partitioned on the {@code time}
 * column. The hash partitioning allows writes and scans at the current time
 * to be evenly distributed over the cluster. Range partitioning on time
 * allows whole tablets to be pruned based on the time constraint, and allows
 * old data to be dropped if desired.
 * @param options the create options
 * @param numTabletServers the number of tablet servers
 * @return the tags table create options
 */
static CreateTableOptions metricsCreateTableOptions(CreateOptions options,
                                                    int numTabletServers) {
  CreateTableOptions create = new CreateTableOptions();
  create.setNumReplicas(options.getNumReplicas());
  create.addHashPartitions(ImmutableList.of("metric", "tagset_id"),
                           options.getNumMetricsHashBuckets(numTabletServers));
  create.setRangePartitionColumns(ImmutableList.of("time"));
  for (Long time : options.getMetricsSplits()) {
    PartialRow split = METRICS_SCHEMA.newPartialRow();
    split.addLong(METRICS_TIME_INDEX, time);
    create.addSplitRow(split);
  }
  return create;
}
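As a companion to the kudu-ts options above, here is a hedged, standalone sketch showing the same combination of hash and range partitioning against the plain Kudu client API. The schema, bucket count, split times, table name, and master address are all hypothetical.

import java.util.Arrays;

import org.apache.kudu.ColumnSchema;
import org.apache.kudu.Schema;
import org.apache.kudu.Type;
import org.apache.kudu.client.CreateTableOptions;
import org.apache.kudu.client.KuduClient;
import org.apache.kudu.client.PartialRow;

public class MetricsLikeTableSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical schema mirroring the layout described above: hash columns
        // (metric, tagset_id) plus a range column (time), all part of the primary key.
        Schema schema = new Schema(Arrays.asList(
                new ColumnSchema.ColumnSchemaBuilder("metric", Type.STRING).key(true).build(),
                new ColumnSchema.ColumnSchemaBuilder("tagset_id", Type.INT32).key(true).build(),
                new ColumnSchema.ColumnSchemaBuilder("time", Type.INT64).key(true).build(),
                new ColumnSchema.ColumnSchemaBuilder("value", Type.DOUBLE).nullable(false).build()));

        CreateTableOptions create = new CreateTableOptions()
                .setNumReplicas(1)
                .addHashPartitions(Arrays.asList("metric", "tagset_id"), 4) // hypothetical bucket count
                .setRangePartitionColumns(Arrays.asList("time"));

        // One split row per day over a hypothetical window (times in milliseconds).
        for (long time : new long[] {86400000L, 172800000L}) {
            PartialRow split = schema.newPartialRow();
            split.addLong("time", time);
            create.addSplitRow(split);
        }

        try (KuduClient client = new KuduClient.KuduClientBuilder("localhost:7051").build()) {
            client.createTable("metrics_sketch", schema, create);
        }
    }
}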
 
Example #20
Source File: KuduTestUtils.java    From beam with Apache License 2.0
@Override
public Operation apply(TableAndRecord<Long> input) {
  Upsert upsert = input.getTable().newUpsert();
  PartialRow row = upsert.getRow();
  row.addLong(COL_ID, input.getRecord());
  row.addString(COL_NAME, input.getRecord() + ": name");
  return upsert;
}
 
Example #21
Source File: KuduRow.java    From geowave with Apache License 2.0
@Override
public void populatePartialRow(final PartialRow partialRow) {
  populatePartialRowPrimaryKey(partialRow);
  partialRow.addBinary(KuduField.GW_FIELD_MASK_KEY.getFieldName(), fieldMask);
  partialRow.addBinary(KuduField.GW_VALUE_KEY.getFieldName(), value);
  partialRow.addByte(KuduField.GW_NUM_DUPLICATES_KEY.getFieldName(), (byte) numDuplicates);
}
 
Example #22
Source File: KuduRow.java    From geowave with Apache License 2.0
@Override
public void populatePartialRowPrimaryKey(final PartialRow partialRow) {
  partialRow.addBinary(KuduField.GW_PARTITION_ID_KEY.getFieldName(), partitionKey);
  partialRow.addShort(KuduField.GW_ADAPTER_ID_KEY.getFieldName(), adapterId);
  partialRow.addBinary(KuduField.GW_SORT_KEY.getFieldName(), sortKey);
  partialRow.addBinary(KuduField.GW_DATA_ID_KEY.getFieldName(), dataId);
  partialRow.addBinary(KuduField.GW_FIELD_VISIBILITY_KEY.getFieldName(), fieldVisibility);
  partialRow.addBinary(KuduField.GW_NANO_TIME_KEY.getFieldName(), nanoTime);
}
 
Example #23
Source File: TypeHelper.java    From presto-kudu with Apache License 2.0
public static NullableValue getColumnValue(Type type, PartialRow row, int i) {
    if (row.isNull(i)) {
        return NullableValue.asNull(type);
    } else {
        if (type instanceof VarcharType) {
            return NullableValue.of(type, utf8Slice(row.getString(i)));
        } else if (type == TimestampType.TIMESTAMP) {
            return NullableValue.of(type, row.getLong(i) / 1000);
        } else if (type == BigintType.BIGINT) {
            return NullableValue.of(type, row.getLong(i));
        } else if (type == IntegerType.INTEGER) {
            return NullableValue.of(type, row.getInt(i));
        } else if (type == SmallintType.SMALLINT) {
            return NullableValue.of(type, row.getShort(i));
        } else if (type == TinyintType.TINYINT) {
            return NullableValue.of(type, row.getByte(i));
        } else if (type == DoubleType.DOUBLE) {
            return NullableValue.of(type, row.getDouble(i));
        } else if (type == RealType.REAL) {
            return NullableValue.of(type, (long) floatToRawIntBits(row.getFloat(i)));
        } else if (type == BooleanType.BOOLEAN) {
            return NullableValue.of(type, row.getBoolean(i));
        } else if (type instanceof VarbinaryType) {
            return NullableValue.of(type, wrappedBuffer(row.getBinary(i)));
        } else if (type instanceof DecimalType) {
            return NullableValue.of(type, row.getDecimal(i));
        } else {
            throw new IllegalStateException("Handling of type " + type + " is not implemented");
        }
    }
}
 
Example #24
Source File: KuduMetadataWriter.java    From geowave with Apache License 2.0
@Override
public void write(final GeoWaveMetadata metadata) {
  try {
    final Insert insert = operations.getTable(tableName).newInsert();
    final PartialRow partialRow = insert.getRow();
    final KuduMetadataRow row = new KuduMetadataRow(metadata);
    row.populatePartialRow(partialRow);
    final OperationResponse resp = session.apply(insert);
    if (resp.hasRowError()) {
      LOGGER.error("Encountered error while writing metadata: {}", resp.getRowError());
    }
  } catch (final KuduException e) {
    LOGGER.error("Kudu error when writing metadata", e);
  }
}
 
Example #25
Source File: KuduMetadataRow.java    From geowave with Apache License 2.0
@Override
public void populatePartialRow(final PartialRow partialRow) {
  populatePartialRowPrimaryKey(partialRow);
  partialRow.addBinary(
      KuduMetadataField.GW_VISIBILITY_KEY.getFieldName(),
      visibility == null ? KuduUtils.EMPTY_KEY : visibility);
  partialRow.addBinary(
      KuduMetadataField.GW_VALUE_KEY.getFieldName(),
      value == null ? KuduUtils.EMPTY_KEY : value);
}
 
Example #26
Source File: KuduMetadataRow.java    From geowave with Apache License 2.0
@Override
public void populatePartialRowPrimaryKey(final PartialRow partialRow) {
  partialRow.addBinary(KuduMetadataField.GW_PRIMARY_ID_KEY.getFieldName(), primaryId);
  partialRow.addBinary(
      KuduMetadataField.GW_SECONDARY_ID_KEY.getFieldName(),
      secondaryId == null ? KuduUtils.EMPTY_KEY : secondaryId);
  partialRow.addBinary(KuduMetadataField.GW_TIMESTAMP_KEY.getFieldName(), timestamp);
}
 
Example #27
Source File: RowHelper.java    From presto-kudu with Apache License 2.0
public static void copyPrimaryKey(Schema schema, RowResult from, PartialRow to) {
    for (int i = 0; i < schema.getPrimaryKeyColumnCount(); i++) {
        switch (schema.getColumnByIndex(i).getType()) {
            case STRING:
                to.addStringUtf8(i, from.getString(i).getBytes(Charsets.UTF_8));
                break;
            case INT64:
            case UNIXTIME_MICROS:
                to.addLong(i, from.getLong(i));
                break;
            case INT32:
                to.addInt(i, from.getInt(i));
                break;
            case INT16:
                to.addShort(i, from.getShort(i));
                break;
            case INT8:
                to.addByte(i, from.getByte(i));
                break;
            case DOUBLE:
                to.addDouble(i, from.getDouble(i));
                break;
            case FLOAT:
                to.addFloat(i, from.getFloat(i));
                break;
            case BOOL:
                to.addBoolean(i, from.getBoolean(i));
                break;
            case BINARY:
                to.addBinary(i, from.getBinary(i));
                break;
            default:
                throw new IllegalStateException("Unknown type " + schema.getColumnByIndex(i).getType()
                        + " for column " + schema.getColumnByIndex(i).getName());
        }
    }
}
 
Example #28
Source File: TestPutKudu.java    From nifi with Apache License 2.0
private PartialRow buildPartialRow(Long id, String name, Short age, String kuduIdName, String recordIdName, String airport_code, Boolean lowercaseFields) {
    final Schema kuduSchema = new Schema(Arrays.asList(
        new ColumnSchema.ColumnSchemaBuilder(kuduIdName, Type.INT64).key(true).build(),
        new ColumnSchema.ColumnSchemaBuilder("name", Type.STRING).nullable(true).build(),
        new ColumnSchema.ColumnSchemaBuilder("age", Type.INT16).nullable(false).build(),
        new ColumnSchema.ColumnSchemaBuilder("updated_at", Type.UNIXTIME_MICROS).nullable(false).build(),
        new ColumnSchema.ColumnSchemaBuilder("score", Type.DECIMAL).nullable(true).typeAttributes(
            new ColumnTypeAttributes.ColumnTypeAttributesBuilder().precision(9).scale(0).build()
        ).build(),
        new ColumnSchema.ColumnSchemaBuilder("airport_code", Type.VARCHAR).nullable(true).typeAttributes(
                new ColumnTypeAttributes.ColumnTypeAttributesBuilder().length(3).build()
        ).build()));

    final RecordSchema schema = new SimpleRecordSchema(Arrays.asList(
        new RecordField(recordIdName, RecordFieldType.BIGINT.getDataType()),
        new RecordField("name", RecordFieldType.STRING.getDataType()),
        new RecordField("age", RecordFieldType.SHORT.getDataType()),
        new RecordField("updated_at", RecordFieldType.TIMESTAMP.getDataType()),
        new RecordField("score", RecordFieldType.LONG.getDataType()),
        new RecordField("airport_code", RecordFieldType.STRING.getDataType())));

    Map<String, Object> values = new HashMap<>();
    PartialRow row = kuduSchema.newPartialRow();
    values.put(recordIdName, id);
    values.put("name", name);
    values.put("age", age);
    values.put("updated_at", new Timestamp(System.currentTimeMillis()));
    values.put("score", 10000L);
    values.put("airport_code", airport_code);
    processor.buildPartialRow(
        kuduSchema,
        row,
        new MapRecord(schema, values),
        schema.getFieldNames(),
        true,
        lowercaseFields
    );
    return row;
}
 
Example #29
Source File: RowHelper.java    From presto with Apache License 2.0
public static void copyPrimaryKey(Schema schema, RowResult from, PartialRow to)
{
    for (int i = 0; i < schema.getPrimaryKeyColumnCount(); i++) {
        switch (schema.getColumnByIndex(i).getType()) {
            case STRING:
                to.addStringUtf8(i, from.getString(i).getBytes(StandardCharsets.UTF_8));
                break;
            case INT64:
            case UNIXTIME_MICROS:
                to.addLong(i, from.getLong(i));
                break;
            case INT32:
                to.addInt(i, from.getInt(i));
                break;
            case INT16:
                to.addShort(i, from.getShort(i));
                break;
            case INT8:
                to.addByte(i, from.getByte(i));
                break;
            case DOUBLE:
                to.addDouble(i, from.getDouble(i));
                break;
            case FLOAT:
                to.addFloat(i, from.getFloat(i));
                break;
            case BOOL:
                to.addBoolean(i, from.getBoolean(i));
                break;
            case BINARY:
                to.addBinary(i, from.getBinary(i));
                break;
            default:
                throw new IllegalStateException("Unknown type " + schema.getColumnByIndex(i).getType()
                        + " for column " + schema.getColumnByIndex(i).getName());
        }
    }
}
 
Example #30
Source File: RowHelper.java    From presto with Apache License 2.0
public static void copyPrimaryKey(Schema schema, PartialRow from, PartialRow to)
{
    for (int i = 0; i < schema.getPrimaryKeyColumnCount(); i++) {
        switch (schema.getColumnByIndex(i).getType()) {
            case STRING:
                to.addStringUtf8(i, from.getString(i).getBytes(StandardCharsets.UTF_8));
                break;
            case INT64:
            case UNIXTIME_MICROS:
                to.addLong(i, from.getLong(i));
                break;
            case INT32:
                to.addInt(i, from.getInt(i));
                break;
            case INT16:
                to.addShort(i, from.getShort(i));
                break;
            case INT8:
                to.addByte(i, from.getByte(i));
                break;
            case DOUBLE:
                to.addDouble(i, from.getDouble(i));
                break;
            case FLOAT:
                to.addFloat(i, from.getFloat(i));
                break;
            case BOOL:
                to.addBoolean(i, from.getBoolean(i));
                break;
            case BINARY:
                to.addBinary(i, from.getBinary(i));
                break;
            default:
                throw new IllegalStateException("Unknown type " + schema.getColumnByIndex(i).getType()
                        + " for column " + schema.getColumnByIndex(i).getName());
        }
    }
}