Java Code Examples for org.apache.kafka.connect.data.Schema#OPTIONAL_INT64_SCHEMA

The following examples show how to use org.apache.kafka.connect.data.Schema#OPTIONAL_INT64_SCHEMA. Each example notes the source file, project, and license it was taken from.
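Before the project examples, here is a minimal, self-contained sketch (class and field names are illustrative) of the property they all rely on: a Struct field declared with Schema.OPTIONAL_INT64_SCHEMA may be left null and still pass validation, whereas the non-optional Schema.INT64_SCHEMA requires a value.

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;

public class OptionalInt64Example {
  public static void main(String[] args) {
    Schema valueSchema = SchemaBuilder.struct().name("example.Value")
        .field("id", Schema.INT64_SCHEMA)                 // required
        .field("offset", Schema.OPTIONAL_INT64_SCHEMA)    // may be null
        .build();

    Struct value = new Struct(valueSchema)
        .put("id", 42L);
    // "offset" is never set, so it stays null.

    value.validate();                         // passes: null is allowed for the optional INT64 field
    System.out.println(value.get("offset"));  // prints: null
  }
}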
Example 1
Source File: StringParserTest.java    From connect-utils with Apache License 2.0
@Test
public void nullableTests() {
  final Schema[] schemas = new Schema[]{
      Schema.OPTIONAL_BOOLEAN_SCHEMA,
      Schema.OPTIONAL_FLOAT32_SCHEMA,
      Schema.OPTIONAL_FLOAT64_SCHEMA,
      Schema.OPTIONAL_INT8_SCHEMA,
      Schema.OPTIONAL_INT16_SCHEMA,
      Schema.OPTIONAL_INT32_SCHEMA,
      Schema.OPTIONAL_INT64_SCHEMA,
      Schema.OPTIONAL_STRING_SCHEMA,
      Decimal.builder(1).optional().build(),
      Timestamp.builder().optional().build(),
      Date.builder().optional().build(),
      Time.builder().optional().build(),
  };

  for (Schema schema : schemas) {
    Object actual = this.parser.parseString(schema, null);
    assertNull(actual);
  }

}
 
Example 2
Source File: UnivocityFileReader.java    From kafka-connect-fs with Apache License 2.0
private Schema strToSchema(String dataType) {
    switch (DataType.valueOf(dataType.trim().toUpperCase())) {
        case BYTE:
            return dataTypeMappingError && !allowNulls ? Schema.INT8_SCHEMA : Schema.OPTIONAL_INT8_SCHEMA;
        case SHORT:
            return dataTypeMappingError && !allowNulls ? Schema.INT16_SCHEMA : Schema.OPTIONAL_INT16_SCHEMA;
        case INT:
            return dataTypeMappingError && !allowNulls ? Schema.INT32_SCHEMA : Schema.OPTIONAL_INT32_SCHEMA;
        case LONG:
            return dataTypeMappingError && !allowNulls ? Schema.INT64_SCHEMA : Schema.OPTIONAL_INT64_SCHEMA;
        case FLOAT:
            return dataTypeMappingError && !allowNulls ? Schema.FLOAT32_SCHEMA : Schema.OPTIONAL_FLOAT32_SCHEMA;
        case DOUBLE:
            return dataTypeMappingError && !allowNulls ? Schema.FLOAT64_SCHEMA : Schema.OPTIONAL_FLOAT64_SCHEMA;
        case BOOLEAN:
            return dataTypeMappingError && !allowNulls ? Schema.BOOLEAN_SCHEMA : Schema.OPTIONAL_BOOLEAN_SCHEMA;
        case BYTES:
            return dataTypeMappingError && !allowNulls ? Schema.BYTES_SCHEMA : Schema.OPTIONAL_BYTES_SCHEMA;
        case STRING:
        default:
            return dataTypeMappingError && !allowNulls ? Schema.STRING_SCHEMA : Schema.OPTIONAL_STRING_SCHEMA;
    }
}
 
Example 3
Source File: SchemaTest.java    From schema-registry-transfer-smt with Apache License 2.0
@Test
public void testNonByteTypeSchemas() {
    Schema[] schemas = new Schema[]{
            // Boolean
            Schema.BOOLEAN_SCHEMA,
            Schema.OPTIONAL_BOOLEAN_SCHEMA,
            // Integers
            Schema.INT8_SCHEMA,
            Schema.INT16_SCHEMA,
            Schema.INT32_SCHEMA,
            Schema.INT64_SCHEMA,
            Schema.OPTIONAL_INT8_SCHEMA,
            Schema.OPTIONAL_INT16_SCHEMA,
            Schema.OPTIONAL_INT32_SCHEMA,
            Schema.OPTIONAL_INT64_SCHEMA,
            // Floats
            Schema.FLOAT32_SCHEMA,
            Schema.FLOAT64_SCHEMA,
            Schema.OPTIONAL_FLOAT32_SCHEMA,
            Schema.OPTIONAL_FLOAT64_SCHEMA,
            // String
            Schema.STRING_SCHEMA,
            Schema.OPTIONAL_STRING_SCHEMA,
            // Struct with a field of bytes
            SchemaBuilder.struct().name("record").
                    field("foo", Schema.BYTES_SCHEMA)
                    .build(),
            SchemaBuilder.struct().name("record").
                    field("foo", Schema.OPTIONAL_BYTES_SCHEMA)
                    .build(),
            // map<bytes, bytes>
            SchemaBuilder.map(Schema.BYTES_SCHEMA, Schema.OPTIONAL_BYTES_SCHEMA).build(),
            // array<bytes>
            SchemaBuilder.array(Schema.OPTIONAL_BYTES_SCHEMA).build()
    };

    for (Schema s : schemas) {
        assertFalse(ConnectSchemaUtil.isBytesSchema(s));
    }
}
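The test only asserts the negative case, so the project's actual ConnectSchemaUtil implementation is not shown here. A check consistent with these assertions might look like the following illustrative sketch (not the project's real code):

import org.apache.kafka.connect.data.Schema;

final class BytesSchemaCheck {
    // Illustrative sketch consistent with the assertions above; this is not
    // the project's actual ConnectSchemaUtil.isBytesSchema implementation.
    static boolean isBytesSchema(Schema schema) {
        // Only a top-level BYTES schema counts; structs, maps, and arrays that
        // merely contain bytes fields do not.
        return schema != null && schema.type() == Schema.Type.BYTES;
    }
}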
 
Example 4
Source File: JsonNodeTest.java    From connect-utils with Apache License 2.0
@Test
public void nullableTests() throws IOException {
  final Schema[] schemas = new Schema[]{
      Schema.OPTIONAL_BOOLEAN_SCHEMA,
      Schema.OPTIONAL_FLOAT32_SCHEMA,
      Schema.OPTIONAL_FLOAT64_SCHEMA,
      Schema.OPTIONAL_INT8_SCHEMA,
      Schema.OPTIONAL_INT16_SCHEMA,
      Schema.OPTIONAL_INT32_SCHEMA,
      Schema.OPTIONAL_INT64_SCHEMA,
      Schema.OPTIONAL_STRING_SCHEMA,
      Decimal.builder(1).optional().build(),
      Timestamp.builder().optional().build(),
      Date.builder().optional().build(),
      Time.builder().optional().build(),
  };

  for (Schema schema : schemas) {
    JsonNode inputNode = null;
    Object actual = this.parser.parseJsonNode(schema, inputNode);
    assertNull(actual);
    inputNode = objectMapper.readTree("{\"foo\": null}");
    inputNode = inputNode.findValue("foo");
    actual = this.parser.parseJsonNode(schema, inputNode);
    assertNull(actual);
  }

}
 
Example 5
Source File: OracleSourceConnectorUtils.java    From kafka-connect-oracle with Apache License 2.0
protected void loadTable(String owner,String tableName,String operation) throws SQLException{
  log.info("Getting dictionary details for table : {}",tableName);
  //SchemaBuilder dataSchemaBuiler = SchemaBuilder.struct().name((config.getDbNameAlias()+DOT+owner+DOT+tableName+DOT+"Value").toLowerCase());
  SchemaBuilder dataSchemaBuiler = SchemaBuilder.struct().name("value");
  String mineTableColsSql=OracleConnectorSQL.TABLE_WITH_COLS;
  if (config.getMultitenant()){
    mineTableColsSql=OracleConnectorSQL.TABLE_WITH_COLS_CDB;
  }
  mineTableColsSql=mineTableColsSql.replace("$TABLE_OWNER$", owner).replace("$TABLE_NAME$", tableName);
  
  /*if (config.getMultitenant()) {
	  mineTableCols=dbConn.prepareCall(sql.getContainerDictionarySQL());
  } else {
      mineTableCols=dbConn.prepareCall(sql.getDictionarySQL());
  }
  mineTableCols.setString(ConnectorSQL.PARAMETER_OWNER, owner);
  mineTableCols.setString(ConnectorSQL.PARAMETER_TABLE_NAME, tableName);*/
  mineTableCols = dbConn.prepareCall(mineTableColsSql);
  mineTableColsResultSet=mineTableCols.executeQuery();
  if (!mineTableColsResultSet.isBeforeFirst()) {
	  // TODO: consider throwing up here, or an NPE will be thrown in OracleSourceTask.poll()
      log.warn("mineTableCols has no results for {}.{}", owner, tableName);
  }
  while(mineTableColsResultSet.next()){
    String columnName = mineTableColsResultSet.getString(COLUMN_NAME_FIELD);
    Boolean nullable = mineTableColsResultSet.getString(NULLABLE_FIELD).equals("Y") ? true:false;
    String dataType = mineTableColsResultSet.getString(DATA_TYPE_FIELD);
    if (dataType.contains(TIMESTAMP_TYPE)) dataType=TIMESTAMP_TYPE;
    int dataLength = mineTableColsResultSet.getInt(DATA_LENGTH_FIELD);
    int dataScale = mineTableColsResultSet.getInt(DATA_SCALE_FIELD);
    int dataPrecision = mineTableColsResultSet.getInt(DATA_PRECISION_FIELD);
    Boolean pkColumn = mineTableColsResultSet.getInt(PK_COLUMN_FIELD)==1 ? true:false;
    Boolean uqColumn = mineTableColsResultSet.getInt(UQ_COLUMN_FIELD)==1 ? true:false;
    Schema columnSchema = null;       
             
    switch (dataType){
      case NUMBER_TYPE:
      {                        
        if (dataScale>0 || dataPrecision == 0){              
          columnSchema = nullable ? Schema.OPTIONAL_FLOAT64_SCHEMA  : Schema.FLOAT64_SCHEMA;                            
        }else{
          switch (dataPrecision){
            case 1:
            case 2:              
              columnSchema = nullable ? Schema.OPTIONAL_INT8_SCHEMA : Schema.INT8_SCHEMA;
              break;                
            case 3:
            case 4:
              columnSchema = nullable ? Schema.OPTIONAL_INT16_SCHEMA : Schema.INT16_SCHEMA;
              break;
            case 5:
            case 6:
            case 7:
            case 8:
            case 9:
              columnSchema = nullable ? Schema.OPTIONAL_INT32_SCHEMA : Schema.INT32_SCHEMA;
              break;
            default:
              columnSchema = nullable ? Schema.OPTIONAL_INT64_SCHEMA : Schema.INT64_SCHEMA;
              break;
          }
        }
        break;
      }
      case "CHAR":
      case "VARCHAR":
      case "VARCHAR2":
      case "NCHAR":
      case "NVARCHAR":
      case "NVARCHAR2":          
      case "LONG":
      case "CLOB":
      {            
        columnSchema = nullable ? Schema.OPTIONAL_STRING_SCHEMA : Schema.STRING_SCHEMA;            
        break;
      }
      case DATE_TYPE:
      case TIMESTAMP_TYPE:
      {            
        columnSchema = nullable ? OPTIONAL_TIMESTAMP_SCHEMA : TIMESTAMP_SCHEMA;           
        break;
      }          
      default:                        
        columnSchema = nullable ? Schema.OPTIONAL_STRING_SCHEMA : Schema.STRING_SCHEMA;            
        break;
    }
    dataSchemaBuiler.field(columnName,columnSchema);
    com.ecer.kafka.connect.oracle.models.Column column = new com.ecer.kafka.connect.oracle.models.Column(owner, tableName, columnName, nullable, dataType, dataLength, dataScale, pkColumn, uqColumn,columnSchema);
    String keyTabCols = owner+DOT+tableName+DOT+columnName;
    tabColsMap.put(keyTabCols, column); 
    log.debug("tabColsMap entry added: {} = {}", keyTabCols, column.toString());
  }
  Schema tSchema = dataSchemaBuiler.optional().build();
  tableSchema.put(owner+DOT+tableName, tSchema);
  mineTableColsResultSet.close();
  mineTableCols.close();      
}
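The NUMBER branch above applies the usual precision-and-scale mapping: a scale greater than zero or an unspecified precision becomes FLOAT64, precision up to 2 becomes INT8, up to 4 INT16, up to 9 INT32, and anything wider becomes INT64, with the optional variant chosen for nullable columns. A compact standalone restatement of that branch (class and method names are illustrative):

import org.apache.kafka.connect.data.Schema;

final class OracleNumberMapping {
    // Illustrative restatement of the NUMBER case in loadTable() above.
    static Schema numberSchema(int precision, int scale, boolean nullable) {
        if (scale > 0 || precision == 0) {
            return nullable ? Schema.OPTIONAL_FLOAT64_SCHEMA : Schema.FLOAT64_SCHEMA;
        } else if (precision <= 2) {
            return nullable ? Schema.OPTIONAL_INT8_SCHEMA : Schema.INT8_SCHEMA;
        } else if (precision <= 4) {
            return nullable ? Schema.OPTIONAL_INT16_SCHEMA : Schema.INT16_SCHEMA;
        } else if (precision <= 9) {
            return nullable ? Schema.OPTIONAL_INT32_SCHEMA : Schema.INT32_SCHEMA;
        } else {
            return nullable ? Schema.OPTIONAL_INT64_SCHEMA : Schema.INT64_SCHEMA;
        }
    }
}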
 
Example 6
Source File: JsonFileReader.java    From kafka-connect-fs with Apache License 2.0
private static Schema extractSchema(JsonNode jsonNode) {
    switch (jsonNode.getNodeType()) {
        case BOOLEAN:
            return Schema.OPTIONAL_BOOLEAN_SCHEMA;
        case NUMBER:
            if (jsonNode.isShort()) {
                return Schema.OPTIONAL_INT8_SCHEMA;
            } else if (jsonNode.isInt()) {
                return Schema.OPTIONAL_INT32_SCHEMA;
            } else if (jsonNode.isLong()) {
                return Schema.OPTIONAL_INT64_SCHEMA;
            } else if (jsonNode.isFloat()) {
                return Schema.OPTIONAL_FLOAT32_SCHEMA;
            } else if (jsonNode.isDouble()) {
                return Schema.OPTIONAL_FLOAT64_SCHEMA;
            } else if (jsonNode.isBigInteger()) {
                return Schema.OPTIONAL_INT64_SCHEMA;
            } else if (jsonNode.isBigDecimal()) {
                return Schema.OPTIONAL_FLOAT64_SCHEMA;
            } else {
                return Schema.OPTIONAL_FLOAT64_SCHEMA;
            }
        case STRING:
            return Schema.OPTIONAL_STRING_SCHEMA;
        case BINARY:
            return Schema.OPTIONAL_BYTES_SCHEMA;
        case ARRAY:
            Iterable<JsonNode> elements = jsonNode::elements;
            Schema arraySchema = StreamSupport.stream(elements.spliterator(), false)
                    .findFirst().map(JsonFileReader::extractSchema)
                    .orElse(SchemaBuilder.struct().build());
            return SchemaBuilder.array(arraySchema).build();
        case OBJECT:
            SchemaBuilder builder = SchemaBuilder.struct();
            jsonNode.fields()
                    .forEachRemaining(field -> builder.field(field.getKey(), extractSchema(field.getValue())));
            return builder.build();
        default:
            return SchemaBuilder.struct().optional().build();
    }
}
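As a usage note, the NUMBER branch above relies on Jackson's node introspection: a JSON integer that does not fit in 32 bits is parsed as a long-valued node, so isLong() is true and the reader picks Schema.OPTIONAL_INT64_SCHEMA. A small sketch run against Jackson directly, since extractSchema() is private to the reader (class name is illustrative):

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class JsonNumberWidth {
    public static void main(String[] args) throws Exception {
        JsonNode node = new ObjectMapper().readTree("{\"count\": 4294967296}").get("count");
        // Jackson parses the value as a long because it exceeds the int range,
        // so the NUMBER branch above would return Schema.OPTIONAL_INT64_SCHEMA.
        System.out.println(node.isInt());   // false
        System.out.println(node.isLong());  // true
    }
}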