Java Code Examples for org.apache.kafka.connect.data.SchemaBuilder#field()

The following examples show how to use org.apache.kafka.connect.data.SchemaBuilder#field(). They are drawn from open-source projects; the source file, project, and license are noted above each example.
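As a quick orientation before the examples, here is a minimal, self-contained sketch of the method: field(name, schema) registers a field on a struct builder and returns the builder, so calls chain; build() then produces the immutable Schema. The SchemaBuilderFieldDemo class and the com.example.User name are invented for illustration.

import org.apache.kafka.connect.data.Field;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;

public class SchemaBuilderFieldDemo {
  public static void main(String[] args) {
    // field(name, schema) adds a field and returns the builder, so calls chain.
    Schema userSchema = SchemaBuilder.struct()
        .name("com.example.User")
        .field("id", Schema.INT64_SCHEMA)
        .field("email", Schema.OPTIONAL_STRING_SCHEMA)
        .build();

    // Fields keep their insertion order and are addressable by name or index.
    for (Field field : userSchema.fields()) {
      System.out.println(field.index() + ": " + field.name() + " -> " + field.schema().type());
    }

    // A Struct created from the schema is validated against those fields.
    Struct user = new Struct(userSchema)
        .put("id", 42L)
        .put("email", "user@example.com");
    user.validate();
  }
}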
Example 1
Source File: LogicalPlanner.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
private ProjectNode buildProjectNode(final Schema inputSchema, final PlanNode sourcePlanNode) {
  SchemaBuilder projectionSchema = SchemaBuilder.struct();
  ExpressionTypeManager expressionTypeManager = new ExpressionTypeManager(
      inputSchema,
      functionRegistry
  );
  for (int i = 0; i < analysis.getSelectExpressions().size(); i++) {
    Expression expression = analysis.getSelectExpressions().get(i);
    String alias = analysis.getSelectExpressionAlias().get(i);

    Schema expressionType = expressionTypeManager.getExpressionType(expression);

    projectionSchema = projectionSchema.field(alias, expressionType);
  }

  return new ProjectNode(
      new PlanNodeId("Project"),
      sourcePlanNode,
      projectionSchema,
      analysis.getSelectExpressions()
  );
}
 
Example 2
Source File: TupleTypeDeserializer.java    From debezium-incubator with Apache License 2.0
@Override
public SchemaBuilder getSchemaBuilder(AbstractType<?> abstractType) {
    TupleType tupleType = (TupleType) abstractType;
    List<AbstractType<?>> tupleInnerTypes = tupleType.allTypes();

    String recordName = createTupleName(tupleInnerTypes);

    SchemaBuilder schemaBuilder = SchemaBuilder.struct().name(recordName);

    for (int i = 0; i < tupleInnerTypes.size(); i++) {
        AbstractType<?> innerType = tupleInnerTypes.get(i);
        schemaBuilder.field(createFieldNameForIndex(i), CassandraTypeDeserializer.getSchemaBuilder(innerType).build());
    }

    return schemaBuilder;
}
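The pattern above, building an inner schema and passing the built Schema to field(), generalizes to any nesting depth. A minimal, self-contained sketch; the Point and Line names are invented for illustration:

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;

public class NestedStructSketch {
  public static void main(String[] args) {
    // An inner struct schema, built once and reused.
    Schema point = SchemaBuilder.struct().name("Point")
        .field("x", Schema.FLOAT64_SCHEMA)
        .field("y", Schema.FLOAT64_SCHEMA)
        .build();

    // field() accepts any Schema, so a built struct nests directly.
    Schema line = SchemaBuilder.struct().name("Line")
        .field("start", point)
        .field("end", point)
        .build();

    System.out.println(line.field("start").schema().fields());
  }
}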
 
Example 3
Source File: SchemaKStream.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
Pair<Schema, SelectValueMapper> createSelectValueMapperAndSchema(
    final List<Pair<String, Expression>> expressionPairList
) {
  try {
    final CodeGenRunner codeGenRunner = new CodeGenRunner(schema, functionRegistry);
    final SchemaBuilder schemaBuilder = SchemaBuilder.struct();
    final List<ExpressionMetadata> expressionEvaluators = new ArrayList<>();
    for (Pair<String, Expression> expressionPair : expressionPairList) {
      final ExpressionMetadata expressionEvaluator =
          codeGenRunner.buildCodeGenFromParseTree(expressionPair.getRight());
      schemaBuilder.field(expressionPair.getLeft(), expressionEvaluator.getExpressionType());
      expressionEvaluators.add(expressionEvaluator);
    }
    return new Pair<>(schemaBuilder.build(), new SelectValueMapper(
        genericRowValueTypeEnforcer,
        expressionPairList,
        expressionEvaluators
    ));
  } catch (Exception e) {
    throw new KsqlException("Code generation failed for SelectValueMapper", e);
  }
}
 
Example 4
Source File: DataConverter.java    From kafka-mysql-connector with Apache License 2.0
private static void addFieldSchema(Table table, int columnNumber,
        SchemaBuilder builder) {
    ColumnDef def = table.getColumnList().get(columnNumber);
    String columnName = def.getName();
    ColumnType type = def.getType();
    switch (type) {
    case TINYINT:
        builder.field(columnName, Schema.INT16_SCHEMA);
        break;
    case INT:
        builder.field(columnName, Schema.INT32_SCHEMA);
        break;
    case CHAR:
        builder.field(columnName, Schema.STRING_SCHEMA);
        break;
    case BIGINT:
        builder.field(columnName, Schema.INT64_SCHEMA);
        break;
    default:
        throw new RuntimeException("unsupported column type: " + type);
    }
}
 
Example 5
Source File: AstBuilder.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
private StructuredDataSource getResultDatasource(Select select, Table into) {

    SchemaBuilder dataSource = SchemaBuilder.struct().name(into.toString());
    for (SelectItem selectItem : select.getSelectItems()) {
      if (selectItem instanceof SingleColumn) {
        SingleColumn singleColumn = (SingleColumn) selectItem;
        String fieldName = singleColumn.getAlias().get();
        dataSource = dataSource.field(fieldName, Schema.BOOLEAN_SCHEMA);
      }
    }

    KsqlTopic ksqlTopic = new KsqlTopic(into.getName().toString(), into.getName().toString(), null);
    StructuredDataSource resultStream =
        new KsqlStream(
            "AstBuilder-Into",
            into.getName().toString(),
            dataSource.schema(),
            dataSource.fields().get(0),
            null,
            ksqlTopic
        );
    return resultStream;
  }
 
Example 6
Source File: SchemaUtil.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
public static Schema buildSchemaWithAlias(final Schema schema, final String alias) {
  SchemaBuilder newSchema = SchemaBuilder.struct().name(schema.name());
  for (Field field : schema.fields()) {
    newSchema.field((alias + "." + field.name()), field.schema());
  }
  return newSchema;
}
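A quick usage sketch of the helper above, assuming the SchemaUtil class from this example and the usual org.apache.kafka.connect.data imports are available; the ORDERS schema and the "O" alias are invented:

Schema orders = SchemaBuilder.struct().name("ORDERS")
    .field("ORDERID", Schema.INT64_SCHEMA)
    .field("ITEMID", Schema.STRING_SCHEMA)
    .build();

Schema aliased = SchemaUtil.buildSchemaWithAlias(orders, "O");
// aliased now has fields "O.ORDERID" and "O.ITEMID", each keeping
// the schema of the original field.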
 
Example 7
Source File: RowData.java    From debezium-incubator with Apache License 2.0
/**
 * Assemble the Kafka connect {@link Schema} for the "after" field of the change event
 * based on the Cassandra table schema.
 * @param tm metadata of a table that contains the Cassandra table schema
 * @return a schema for the "after" field of a change event
 */
static Schema rowSchema(TableMetadata tm) {
    SchemaBuilder schemaBuilder = SchemaBuilder.struct().name(Record.AFTER);
    for (ColumnMetadata cm : tm.getColumns()) {
        Schema optionalCellSchema = CellData.cellSchema(cm, true);
        if (optionalCellSchema != null) {
            schemaBuilder.field(cm.getName(), optionalCellSchema);
        }
    }
    return schemaBuilder.build();
}
 
Example 8
Source File: AbstractSchemaGenerator.java    From kafka-connect-spooldir with Apache License 2.0
void addField(SchemaBuilder builder, String name, Schema.Type schemaType) {
  log.trace("addField() - name = {} schemaType = {}", name, schemaType);
  builder.field(
      name,
      SchemaBuilder.type(schemaType).optional().build()
  );
}
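SchemaBuilder.type(...) as used above selects a builder for a Schema.Type that is only known at runtime, which suits schema inference from inspected data. A minimal, self-contained sketch; the class name and the sampled type are invented:

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;

public class DynamicTypeSketch {
  public static void main(String[] args) {
    // The concrete type arrives as a value, e.g. from sampling a CSV column.
    Schema.Type inferredType = Schema.Type.INT32;

    // Equivalent to SchemaBuilder.int32().optional().build(), but dynamic.
    Schema optionalInt = SchemaBuilder.type(inferredType).optional().build();

    System.out.println(optionalInt.type() + " optional=" + optionalInt.isOptional());
  }
}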
 
Example 9
Source File: UserTypeDeserializer.java    From debezium-incubator with Apache License 2.0
@Override
public SchemaBuilder getSchemaBuilder(AbstractType<?> abstractType) {
    UserType userType = (UserType) abstractType;
    SchemaBuilder schemaBuilder = SchemaBuilder.struct().name(userType.keyspace + "." + userType.getNameAsString());
    List<org.apache.cassandra.cql3.FieldIdentifier> fieldIdentifiers = userType.fieldNames();
    List<AbstractType<?>> fieldTypes = userType.fieldTypes();
    for (int i = 0; i < fieldIdentifiers.size(); i++) {
        Schema fieldSchema = CassandraTypeDeserializer.getSchemaBuilder(fieldTypes.get(i)).build();
        schemaBuilder.field(fieldIdentifiers.get(i).toString(), fieldSchema);
    }
    return schemaBuilder;
}
 
Example 10
Source File: PatternRename.java    From kafka-connect-transform-common with Apache License 2.0
@Override
protected SchemaAndValue processStruct(R record, Schema inputSchema, Struct inputStruct) {
  final SchemaBuilder outputSchemaBuilder = SchemaBuilder.struct();
  outputSchemaBuilder.name(inputSchema.name());
  outputSchemaBuilder.doc(inputSchema.doc());
  if (null != inputSchema.defaultValue()) {
    outputSchemaBuilder.defaultValue(inputSchema.defaultValue());
  }
  if (null != inputSchema.parameters() && !inputSchema.parameters().isEmpty()) {
    outputSchemaBuilder.parameters(inputSchema.parameters());
  }
  if (inputSchema.isOptional()) {
    outputSchemaBuilder.optional();
  }
  Map<String, String> fieldMappings = new HashMap<>(inputSchema.fields().size());
  for (final Field inputField : inputSchema.fields()) {
    log.trace("process() - Processing field '{}'", inputField.name());
    final Matcher fieldMatcher = this.config.pattern.matcher(inputField.name());
    final String outputFieldName;
    if (fieldMatcher.find()) {
      outputFieldName = fieldMatcher.replaceAll(this.config.replacement);
    } else {
      outputFieldName = inputField.name();
    }
    log.trace("process() - Mapping field '{}' to '{}'", inputField.name(), outputFieldName);
    fieldMappings.put(inputField.name(), outputFieldName);
    outputSchemaBuilder.field(outputFieldName, inputField.schema());
  }
  final Schema outputSchema = outputSchemaBuilder.build();
  final Struct outputStruct = new Struct(outputSchema);
  for (Map.Entry<String, String> entry : fieldMappings.entrySet()) {
    final String inputField = entry.getKey(), outputField = entry.getValue();
    log.trace("process() - Copying '{}' to '{}'", inputField, outputField);
    final Object value = inputStruct.get(inputField);
    outputStruct.put(outputField, value);
  }
  return new SchemaAndValue(outputSchema, outputStruct);
}
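The rename step itself is plain java.util.regex: if the pattern matches anywhere in the field name, replaceAll rewrites it; otherwise the name passes through unchanged. A standalone sketch of just that step; the pattern, replacement, and field names are invented:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class FieldRenameSketch {
  public static void main(String[] args) {
    Pattern pattern = Pattern.compile("^legacy_");
    String replacement = "";

    for (String name : new String[]{"legacy_id", "email"}) {
      Matcher matcher = pattern.matcher(name);
      // find() + replaceAll mirrors the transform above: rename on match,
      // keep the original name otherwise.
      String renamed = matcher.find() ? matcher.replaceAll(replacement) : name;
      System.out.println(name + " -> " + renamed);  // legacy_id -> id, email -> email
    }
  }
}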
 
Example 11
Source File: SchemaUtil.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
/**
 * Rename field names to be consistent with the internal column names.
 */
public static Schema getAvroSerdeKsqlSchema(Schema schema) {
  SchemaBuilder schemaBuilder = SchemaBuilder.struct();
  for (Field field : schema.fields()) {
    schemaBuilder.field(field.name().replace(".", "_"), field.schema());
  }

  return schemaBuilder.build();
}
 
Example 12
Source File: SchemaUtil.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
public static Schema removeImplicitRowTimeRowKeyFromSchema(Schema schema) {
  SchemaBuilder schemaBuilder = SchemaBuilder.struct();
  for (Field field : schema.fields()) {
    String fieldName = field.name();
    fieldName = fieldName.substring(fieldName.indexOf('.') + 1);
    if (!fieldName.equalsIgnoreCase(SchemaUtil.ROWTIME_NAME)
        && !fieldName.equalsIgnoreCase(SchemaUtil.ROWKEY_NAME)) {
      schemaBuilder.field(fieldName, field.schema());
    }
  }
  return schemaBuilder.build();
}
 
Example 13
Source File: SchemaNameToTopicTest.java    From kafka-connect-transform-common with Apache License 2.0
Schema exampleSchema(List<String> fieldNames, final int version) {
  SchemaBuilder builder = SchemaBuilder.struct()
      .name(this.getClass().getName());
  for (String fieldName : fieldNames) {
    builder.field(fieldName, Schema.STRING_SCHEMA);
  }
  builder.version(version);
  return builder.build();
}
 
Example 14
Source File: SerDeUtil.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
public static Schema getSchemaFromAvro(String avroSchemaString) {
  org.apache.avro.Schema.Parser parser = new org.apache.avro.Schema.Parser();
  org.apache.avro.Schema avroSchema = parser.parse(avroSchemaString);

  SchemaBuilder inferredSchema = SchemaBuilder.struct().name(avroSchema.getName());
  for (org.apache.avro.Schema.Field avroField: avroSchema.getFields()) {
    inferredSchema.field(avroField.name(), getKsqlSchemaForAvroSchema(avroField.schema()));
  }

  return inferredSchema.build();
}
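A usage sketch for the converter above, assuming the SerDeUtil class from this example is on the classpath; the Avro schema string is invented:

String avro = "{\"type\":\"record\",\"name\":\"Order\",\"fields\":["
    + "{\"name\":\"id\",\"type\":\"long\"},"
    + "{\"name\":\"item\",\"type\":\"string\"}]}";

Schema connectSchema = SerDeUtil.getSchemaFromAvro(avro);
// connectSchema is a struct named "Order" with fields "id" and "item",
// each mapped through getKsqlSchemaForAvroSchema.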
 
Example 15
Source File: AggregateNode.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
private Schema buildAggregateSchema(
    final Schema schema,
    final FunctionRegistry functionRegistry,
    final InternalSchema internalSchema
) {
  final SchemaBuilder schemaBuilder = SchemaBuilder.struct();
  final List<Field> fields = schema.fields();
  for (int i = 0; i < getRequiredColumnList().size(); i++) {
    schemaBuilder.field(fields.get(i).name(), fields.get(i).schema());
  }
  for (int aggFunctionVarSuffix = 0;
       aggFunctionVarSuffix < getFunctionList().size(); aggFunctionVarSuffix++) {
    String udafName = getFunctionList().get(aggFunctionVarSuffix).getName()
        .getSuffix();
    KsqlAggregateFunction aggregateFunction = functionRegistry.getAggregateFunction(
        udafName,
        internalSchema.getInternalExpressionList(
            getFunctionList().get(aggFunctionVarSuffix).getArguments()),
        schema
    );
    schemaBuilder.field(
        AggregateExpressionRewriter.AGGREGATE_FUNCTION_VARIABLE_PREFIX
        + aggFunctionVarSuffix,
        aggregateFunction.getReturnType()
    );
  }

  return schemaBuilder.build();
}
 
Example 16
Source File: AggregateNode.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
private Map<Integer, KsqlAggregateFunction> createAggValToFunctionMap(
    final SchemaKStream aggregateArgExpanded,
    final SchemaBuilder aggregateSchema,
    final KudafInitializer initializer,
    final int initialUdafIndex,
    final FunctionRegistry functionRegistry,
    final InternalSchema internalSchema
) {
  try {
    int udafIndexInAggSchema = initialUdafIndex;
    final Map<Integer, KsqlAggregateFunction> aggValToAggFunctionMap = new HashMap<>();
    for (FunctionCall functionCall : getFunctionList()) {
      KsqlAggregateFunction aggregateFunctionInfo = functionRegistry
          .getAggregateFunction(functionCall.getName().toString(),
                                internalSchema.getInternalExpressionList(
                                    functionCall.getArguments()),
                                aggregateArgExpanded.getSchema()
          );
      KsqlAggregateFunction aggregateFunction = aggregateFunctionInfo.getInstance(
          internalSchema.getInternalNameToIndexMap(),
          internalSchema.getInternalExpressionList(functionCall.getArguments())
      );

      aggValToAggFunctionMap.put(udafIndexInAggSchema++, aggregateFunction);
      initializer.addAggregateIntializer(aggregateFunction.getInitialValueSupplier());

      aggregateSchema.field("AGG_COL_"
                            + udafIndexInAggSchema, aggregateFunction.getReturnType());
    }
    return aggValToAggFunctionMap;
  } catch (final Exception e) {
    throw new KsqlException(
        String.format(
            "Failed to create aggregate val to function map. expressionNames:%s",
            internalSchema.getInternalNameToIndexMap()
        ),
        e
    );
  }
}
 
Example 17
Source File: NormalizeSchemaTest.java    From kafka-connect-transform-common with Apache License 2.0
Schema exampleSchema(List<String> fieldNames, final int version) {
  SchemaBuilder builder = SchemaBuilder.struct()
      .name(this.getClass().getName());
  for (String fieldName : fieldNames) {
    builder.field(fieldName, Schema.STRING_SCHEMA);
  }
  builder.version(version);
  return builder.build();
}
 
Example 18
Source File: LogicalPlanner.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
private AggregateNode buildAggregateNode(
    final Schema inputSchema,
    final PlanNode sourcePlanNode
) {
  SchemaBuilder aggregateSchema = SchemaBuilder.struct();
  ExpressionTypeManager expressionTypeManager = new ExpressionTypeManager(
      inputSchema,
      functionRegistry
  );
  for (int i = 0; i < analysis.getSelectExpressions().size(); i++) {
    Expression expression = analysis.getSelectExpressions().get(i);
    String alias = analysis.getSelectExpressionAlias().get(i);

    Schema expressionType = expressionTypeManager.getExpressionType(expression);

    aggregateSchema = aggregateSchema.field(alias, expressionType);
  }

  return new AggregateNode(
      new PlanNodeId("Aggregate"),
      sourcePlanNode,
      aggregateSchema,
      analysis.getGroupByExpressions(),
      analysis.getWindowExpression(),
      aggregateAnalysis.getAggregateFunctionArguments(),
      aggregateAnalysis.getFunctionList(),
      aggregateAnalysis.getRequiredColumnsList(),
      aggregateAnalysis.getFinalSelectExpressions(),
      aggregateAnalysis.getHavingExpression()
  );
}
 
Example 19
Source File: MetaStoreFixture.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
public static MetaStore getNewMetaStore() {

    final MetadataTimestampExtractionPolicy timestampExtractionPolicy
        = new MetadataTimestampExtractionPolicy();
    final MetaStore metaStore = new MetaStoreImpl();

    SchemaBuilder schemaBuilder1 = SchemaBuilder.struct()
        .field("ROWTIME", SchemaBuilder.INT64_SCHEMA)
        .field("ROWKEY", SchemaBuilder.INT64_SCHEMA)
        .field("COL0", SchemaBuilder.INT64_SCHEMA)
        .field("COL1", SchemaBuilder.STRING_SCHEMA)
        .field("COL2", SchemaBuilder.STRING_SCHEMA)
        .field("COL3", SchemaBuilder.FLOAT64_SCHEMA)
        .field("COL4", SchemaBuilder.array(SchemaBuilder.FLOAT64_SCHEMA))
        .field("COL5", SchemaBuilder.map(SchemaBuilder.STRING_SCHEMA, SchemaBuilder.FLOAT64_SCHEMA));

    KsqlTopic ksqlTopic1 = new KsqlTopic("TEST1", "test1", new KsqlJsonTopicSerDe());

    KsqlStream ksqlStream = new KsqlStream("sqlexpression",
        "TEST1",
        schemaBuilder1,
        schemaBuilder1.field("COL0"),
        timestampExtractionPolicy,
        ksqlTopic1);

    metaStore.putTopic(ksqlTopic1);
    metaStore.putSource(ksqlStream);

    SchemaBuilder schemaBuilder2 = SchemaBuilder.struct()
        .field("COL0", SchemaBuilder.INT64_SCHEMA)
        .field("COL1", SchemaBuilder.STRING_SCHEMA)
        .field("COL2", SchemaBuilder.STRING_SCHEMA)
        .field("COL3", SchemaBuilder.FLOAT64_SCHEMA)
        .field("COL4", SchemaBuilder.BOOLEAN_SCHEMA);

    KsqlTopic ksqlTopic2 = new KsqlTopic("TEST2", "test2", new KsqlJsonTopicSerDe());
    KsqlTable ksqlTable = new KsqlTable(
        "sqlexpression",
        "TEST2",
        schemaBuilder2,
        schemaBuilder2.field("COL0"),
        timestampExtractionPolicy,
        ksqlTopic2,
        "TEST2",
        false);

    metaStore.putTopic(ksqlTopic2);
    metaStore.putSource(ksqlTable);

    SchemaBuilder schemaBuilderOrders = SchemaBuilder.struct()
        .field("ORDERTIME", SchemaBuilder.INT64_SCHEMA)
        .field("ORDERID", SchemaBuilder.STRING_SCHEMA)
        .field("ITEMID", SchemaBuilder.STRING_SCHEMA)
        .field("ORDERUNITS", SchemaBuilder.FLOAT64_SCHEMA);

    KsqlTopic ksqlTopicOrders = new KsqlTopic("ORDERS_TOPIC", "orders_topic", new KsqlJsonTopicSerDe());

    KsqlStream ksqlStreamOrders = new KsqlStream(
        "sqlexpression",
        "ORDERS",
        schemaBuilderOrders,
        schemaBuilderOrders.field("ORDERTIME"),
        timestampExtractionPolicy,
        ksqlTopicOrders);

    metaStore.putTopic(ksqlTopicOrders);
    metaStore.putSource(ksqlStreamOrders);

    return metaStore;
  }
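Two details in this fixture are easy to miss: SchemaBuilder implements Schema, so an unbuilt builder can be passed to field() directly (as with COL4 and COL5 above), and collection schemas come from SchemaBuilder.array(...) and SchemaBuilder.map(...). A minimal, self-contained sketch; the class and field names are invented:

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;

public class CollectionFieldsSketch {
  public static void main(String[] args) {
    Schema schema = SchemaBuilder.struct()
        .field("scores", SchemaBuilder.array(Schema.FLOAT64_SCHEMA).build())
        .field("attrs", SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.FLOAT64_SCHEMA).build())
        .build();

    // Array and map schemas expose their element/key/value schemas.
    System.out.println(schema.field("scores").schema().valueSchema().type()); // FLOAT64
    System.out.println(schema.field("attrs").schema().keySchema().type());    // STRING
  }
}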
 
Example 20
Source File: OracleSourceConnectorUtils.java    From kafka-connect-oracle with Apache License 2.0
protected void loadTable(String owner,String tableName,String operation) throws SQLException{
  log.info("Getting dictionary details for table : {}",tableName);
  //SchemaBuilder dataSchemaBuilder = SchemaBuilder.struct().name((config.getDbNameAlias()+DOT+owner+DOT+tableName+DOT+"Value").toLowerCase());
  SchemaBuilder dataSchemaBuilder = SchemaBuilder.struct().name("value");
  String mineTableColsSql=OracleConnectorSQL.TABLE_WITH_COLS;
  if (config.getMultitenant()){
    mineTableColsSql=OracleConnectorSQL.TABLE_WITH_COLS_CDB;
  }
  mineTableColsSql=mineTableColsSql.replace("$TABLE_OWNER$", owner).replace("$TABLE_NAME$", tableName);
  
  /*if (config.getMultitenant()) {
	  mineTableCols=dbConn.prepareCall(sql.getContainerDictionarySQL());
  } else {
      mineTableCols=dbConn.prepareCall(sql.getDictionarySQL());
  }
  mineTableCols.setString(ConnectorSQL.PARAMETER_OWNER, owner);
  mineTableCols.setString(ConnectorSQL.PARAMETER_TABLE_NAME, tableName);*/
  mineTableCols = dbConn.prepareCall(mineTableColsSql);
  mineTableColsResultSet=mineTableCols.executeQuery();
  if (!mineTableColsResultSet.isBeforeFirst()) {
	  // TODO: consider throwing up here, or an NPE will be thrown in OracleSourceTask.poll()
      log.warn("mineTableCols has no results for {}.{}", owner, tableName);
  }
  while(mineTableColsResultSet.next()){
    String columnName = mineTableColsResultSet.getString(COLUMN_NAME_FIELD);
    boolean nullable = "Y".equals(mineTableColsResultSet.getString(NULLABLE_FIELD));
    String dataType = mineTableColsResultSet.getString(DATA_TYPE_FIELD);
    if (dataType.contains(TIMESTAMP_TYPE)) dataType = TIMESTAMP_TYPE;
    int dataLength = mineTableColsResultSet.getInt(DATA_LENGTH_FIELD);
    int dataScale = mineTableColsResultSet.getInt(DATA_SCALE_FIELD);
    int dataPrecision = mineTableColsResultSet.getInt(DATA_PRECISION_FIELD);
    boolean pkColumn = mineTableColsResultSet.getInt(PK_COLUMN_FIELD) == 1;
    boolean uqColumn = mineTableColsResultSet.getInt(UQ_COLUMN_FIELD) == 1;
    Schema columnSchema = null;
             
    switch (dataType){
      case NUMBER_TYPE:
      {                        
        if (dataScale>0 || dataPrecision == 0){              
          columnSchema = nullable ? Schema.OPTIONAL_FLOAT64_SCHEMA  : Schema.FLOAT64_SCHEMA;                            
        }else{
          switch (dataPrecision){
            case 1:
            case 2:              
              columnSchema = nullable ? Schema.OPTIONAL_INT8_SCHEMA : Schema.INT8_SCHEMA;
              break;                
            case 3:
            case 4:
              columnSchema = nullable ? Schema.OPTIONAL_INT16_SCHEMA : Schema.INT16_SCHEMA;
              break;
            case 5:
            case 6:
            case 7:
            case 8:
            case 9:
              columnSchema = nullable ? Schema.OPTIONAL_INT32_SCHEMA : Schema.INT32_SCHEMA;
              break;
            default:
              columnSchema = nullable ? Schema.OPTIONAL_INT64_SCHEMA : Schema.INT64_SCHEMA;
              break;
          }
        }
        break;
      }
      case "CHAR":
      case "VARCHAR":
      case "VARCHAR2":
      case "NCHAR":
      case "NVARCHAR":
      case "NVARCHAR2":          
      case "LONG":
      case "CLOB":
      {            
        columnSchema = nullable ? Schema.OPTIONAL_STRING_SCHEMA : Schema.STRING_SCHEMA;            
        break;
      }
      case DATE_TYPE:
      case TIMESTAMP_TYPE:
      {            
        columnSchema = nullable ? OPTIONAL_TIMESTAMP_SCHEMA : TIMESTAMP_SCHEMA;           
        break;
      }          
      default:                        
        columnSchema = nullable ? Schema.OPTIONAL_STRING_SCHEMA : Schema.STRING_SCHEMA;            
        break;
    }
    dataSchemaBuilder.field(columnName, columnSchema);
    com.ecer.kafka.connect.oracle.models.Column column = new com.ecer.kafka.connect.oracle.models.Column(owner, tableName, columnName, nullable, dataType, dataLength, dataScale, pkColumn, uqColumn, columnSchema);
    String keyTabCols = owner+DOT+tableName+DOT+columnName;
    tabColsMap.put(keyTabCols, column);
    log.debug("tabColsMap entry added: {} = {}", keyTabCols, column);
  }
  Schema tSchema = dataSchemaBuilder.optional().build();
  tableSchema.put(owner+DOT+tableName, tSchema);
  mineTableColsResultSet.close();
  mineTableCols.close();      
}