Java Code Examples for org.apache.kafka.connect.data.SchemaBuilder#float64()

The following examples show how to use org.apache.kafka.connect.data.SchemaBuilder#float64(). Each example is taken from an open-source project; the source file and license are noted above the code.
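Before the project examples, here is a minimal, self-contained sketch of what SchemaBuilder.float64() returns and how the builder is typically refined; the schema name and default value are made up for illustration:

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;

public class Float64SchemaExample {
    public static void main(String[] args) {
        // float64() returns a SchemaBuilder for Schema.Type.FLOAT64;
        // refine it and call build() to obtain an immutable Schema.
        Schema priceSchema = SchemaBuilder.float64()
                .optional()
                .defaultValue(0.0d)
                .name("com.example.Price") // hypothetical schema name
                .build();

        System.out.println(priceSchema.type());         // FLOAT64
        System.out.println(priceSchema.isOptional());   // true
        System.out.println(priceSchema.defaultValue()); // 0.0
    }
}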
Example 1
Source File: OracleValueConverters.java    From debezium-incubator with Apache License 2.0
@Override
public SchemaBuilder schemaBuilder(Column column) {
    logger.debug("Building schema for column {} of type {} named {} with constraints ({},{})",
            column.name(),
            column.jdbcType(),
            column.typeName(),
            column.length(),
            column.scale());

    switch (column.jdbcType()) {
        // Oracle's FLOAT is not a Java float but a NUMERIC without a scale
        case Types.FLOAT:
            return variableScaleSchema(column);
        case Types.NUMERIC:
            return getNumericSchema(column);
        case OracleTypes.BINARY_FLOAT:
            return SchemaBuilder.float32();
        case OracleTypes.BINARY_DOUBLE:
            return SchemaBuilder.float64();
        case OracleTypes.TIMESTAMPTZ:
        case OracleTypes.TIMESTAMPLTZ:
            return ZonedTimestamp.builder();
        case OracleTypes.INTERVALYM:
        case OracleTypes.INTERVALDS:
            return MicroDuration.builder();
        default:
            return super.schemaBuilder(column);
    }
}
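Callers of schemaBuilder(column) usually finish the returned builder before building the schema. A minimal sketch, assuming Debezium's Column type exposes isOptional() (the surrounding code is illustrative, not part of OracleValueConverters):

// Hypothetical caller: refine the builder with column metadata, then build.
SchemaBuilder builder = schemaBuilder(column);
if (column.isOptional()) {
    builder.optional();
}
Schema schema = builder.build();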
 
Example 2
Source File: CloudPubSubSinkTaskTest.java    From pubsub with Apache License 2.0
/** Tests that put() handles records whose value schema is a primitive type rather than BYTES. */
@Test
public void testPutPrimitives() {
  task.start(props);
  SinkRecord record8 =
      new SinkRecord(null, -1, null, null, SchemaBuilder.int8(), (byte) 5, -1);
  SinkRecord record16 =
      new SinkRecord(null, -1, null, null, SchemaBuilder.int16(), (short) 5, -1);
  SinkRecord record32 =
      new SinkRecord(null, -1, null, null, SchemaBuilder.int32(), (int) 5, -1);
  SinkRecord record64 =
      new SinkRecord(null, -1, null, null, SchemaBuilder.int64(), (long) 5, -1);
  SinkRecord recordFloat32 =
      new SinkRecord(null, -1, null, null, SchemaBuilder.float32(), (float) 8, -1);
  SinkRecord recordFloat64 =
      new SinkRecord(null, -1, null, null, SchemaBuilder.float64(), (double) 8, -1);
  SinkRecord recordBool =
      new SinkRecord(null, -1, null, null, SchemaBuilder.bool(), true, -1);
  SinkRecord recordString =
      new SinkRecord(null, -1, null, null, SchemaBuilder.string(), "Test put.", -1);
  List<SinkRecord> list = new ArrayList<>();
  list.add(record8);
  list.add(record16);
  list.add(record32);
  list.add(record64);
  list.add(recordFloat32);
  list.add(recordFloat64);
  list.add(recordBool);
  list.add(recordString);
  task.put(list);
}
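A sink that ultimately needs raw bytes has to encode each primitive value above explicitly. A minimal sketch of the FLOAT64 case (this helper is hypothetical, not part of the pubsub connector):

import java.nio.ByteBuffer;

import org.apache.kafka.connect.data.Schema;

public class PrimitiveEncoder {
    // Hypothetical helper: encode a FLOAT64 value as 8 big-endian bytes.
    public static byte[] toBytes(Schema schema, Object value) {
        if (schema.type() == Schema.Type.FLOAT64) {
            return ByteBuffer.allocate(8).putDouble((Double) value).array();
        }
        throw new IllegalArgumentException("Unsupported schema type: " + schema.type());
    }
}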
 
Example 3
Source File: MsSqlTableMetadataProvider.java    From kafka-connect-cdc-mssql with Apache License 2.0
Schema generateSchema(ResultSet resultSet,
                      final ChangeKey changeKey,
                      final String columnName) throws SQLException {
  boolean optional = resultSet.getBoolean(2);
  String dataType = resultSet.getString(3);
  int scale = resultSet.getInt(4);
  SchemaBuilder builder;

  log.trace("{}: columnName='{}' dataType='{}' scale={} optional={}", changeKey, columnName, dataType, scale, optional);

  switch (dataType) {
    case "bigint":
      builder = SchemaBuilder.int64();
      break;
    case "bit":
      builder = SchemaBuilder.bool();
      break;
    case "char":
    case "varchar":
    case "text":
    case "nchar":
    case "nvarchar":
    case "ntext":
    case "uniqueidentifier":
      builder = SchemaBuilder.string();
      break;
    case "smallmoney":
    case "money":
    case "decimal":
    case "numeric":
      builder = Decimal.builder(scale);
      break;
    case "binary":
    case "image":
    case "varbinary":
      builder = SchemaBuilder.bytes();
      break;
    case "date":
      builder = Date.builder();
      break;
    case "datetime":
    case "datetime2":
    case "smalldatetime":
      builder = Timestamp.builder();
      break;
    case "time":
      builder = Time.builder();
      break;
    case "int":
      builder = SchemaBuilder.int32();
      break;
    case "smallint":
      builder = SchemaBuilder.int16();
      break;
    case "tinyint":
      builder = SchemaBuilder.int8();
      break;
    case "real":
      builder = SchemaBuilder.float32();
      break;
    case "float":
      builder = SchemaBuilder.float64();
      break;
    default:
      throw new DataException(
          String.format("Could not process (dataType = '%s', optional = %s, scale = %d) for %s.",
              dataType, optional, scale, changeKey
          )
      );
  }

  log.trace("{}: columnName='{}' schema.type='{}' schema.name='{}'", changeKey, columnName, builder.type(), builder.name());

  builder.parameters(
      ImmutableMap.of(Change.ColumnValue.COLUMN_NAME, columnName)
  );

  if (optional) {
    builder.optional();
  }

  return builder.build();
}
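For a SQL Server float column, the method above is equivalent to building the schema directly. A sketch with a hypothetical column name ("price" is made up; Change.ColumnValue.COLUMN_NAME comes from the example's project):

// Equivalent schema for an optional "float" column named "price":
Schema schema = SchemaBuilder.float64()
        .parameter(Change.ColumnValue.COLUMN_NAME, "price")
        .optional()
        .build();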
 
Example 4
Source File: DataUtilityTest.java    From kinesis-kafka-connector with Apache License 2.0
@Test
public void parseDoubleValueTest() {
	Schema schema = SchemaBuilder.float64();
	ByteBuffer actual = DataUtility.parseValue(schema, (double) 2);
	ByteBuffer expected = ByteBuffer.allocate(8).putDouble((double) 2);

	// Rewind both buffers before comparing: after a write, position == limit,
	// so ByteBuffer.equals() would compare zero remaining bytes and pass vacuously.
	actual.rewind();
	expected.rewind();

	Assert.assertEquals(expected, actual);
}