Java Code Examples for org.apache.kafka.connect.data.Schema

The following examples show how to use org.apache.kafka.connect.data.Schema. These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source Project: pubsub   Source File: CloudPubSubSourceTaskTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Asserts that two {@link SourceRecord}s are equivalent.
 *
 * <p>Necessary because a record's value may contain a {@code byte[]}, and
 * {@code SourceRecord.equals} compares arrays by reference rather than by content.
 *
 * @param sr1 the expected record
 * @param sr2 the actual record
 */
public void assertRecordsEqual(SourceRecord sr1, SourceRecord sr2) {
  assertEquals(sr1.key(), sr2.key());
  assertEquals(sr1.keySchema(), sr2.keySchema());
  assertEquals(sr1.valueSchema(), sr2.valueSchema());
  assertEquals(sr1.topic(), sr2.topic());

  // Use equals() rather than == so a logically-identical bytes schema instance also
  // matches, not only the shared Schema.BYTES_SCHEMA singleton.
  if (Schema.BYTES_SCHEMA.equals(sr1.valueSchema())) {
    // Raw byte[] payload: compare contents, not references.
    assertArrayEquals((byte[]) sr1.value(), (byte[]) sr2.value());
  } else {
    for (Field f : sr1.valueSchema().fields()) {
      if (f.name().equals(ConnectorUtils.KAFKA_MESSAGE_CPS_BODY_FIELD)) {
        // The message body field holds bytes; all other fields are compared as strings.
        assertArrayEquals(((Struct) sr1.value()).getBytes(f.name()),
                          ((Struct) sr2.value()).getBytes(f.name()));
      } else {
        assertEquals(((Struct) sr1.value()).getString(f.name()),
                     ((Struct) sr2.value()).getString(f.name()));
      }
    }
  }
}
 
Example 2
/**
 * Verifies the inferred result types of UDF expressions: FLOOR, CEIL, ABS and RANDOM
 * expressions map to FLOAT64, while ROUND(...)+12 yields INT64.
 */
@Test
public void testUDFExpr() throws Exception {
    String simpleQuery = "SELECT FLOOR(col3), CEIL(col3*3), ABS(col0+1.34), RANDOM()+10, ROUND(col3*2)+12 FROM test1;";
    Analysis analysis = analyzeQuery(simpleQuery);
    ExpressionTypeManager expressionTypeManager = new ExpressionTypeManager(schema,
                                                                            functionRegistry);
    Schema exprType0 = expressionTypeManager.getExpressionType(analysis.getSelectExpressions().get(0));
    Schema exprType1 = expressionTypeManager.getExpressionType(analysis.getSelectExpressions().get(1));
    Schema exprType2 = expressionTypeManager.getExpressionType(analysis.getSelectExpressions().get(2));
    Schema exprType3 = expressionTypeManager.getExpressionType(analysis.getSelectExpressions().get(3));
    Schema exprType4 = expressionTypeManager.getExpressionType(analysis.getSelectExpressions().get(4));

    // assertEquals reports the actual type on failure, unlike assertTrue on '=='.
    Assert.assertEquals(Schema.Type.FLOAT64, exprType0.type());
    Assert.assertEquals(Schema.Type.FLOAT64, exprType1.type());
    Assert.assertEquals(Schema.Type.FLOAT64, exprType2.type());
    Assert.assertEquals(Schema.Type.FLOAT64, exprType3.type());
    Assert.assertEquals(Schema.Type.INT64, exprType4.type());
}
 
Example 3
Source Project: pubsub   Source File: CloudPubSubSourceTaskTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that polling yields a keyed record when the Cloud Pub/Sub message carries the
 * {@link #KAFKA_MESSAGE_KEY_ATTRIBUTE} attribute.
 */
@Test
public void testPollWithMessageKeyAttribute() throws Exception {
  task.start(props);

  // Stub the subscriber to hand back a single message tagged with the key attribute.
  Map<String, String> messageAttributes = new HashMap<>();
  messageAttributes.put(KAFKA_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE);
  ReceivedMessage message = createReceivedMessage(ACK_ID1, CPS_MESSAGE, messageAttributes);
  PullResponse pullResponse = PullResponse.newBuilder().addReceivedMessages(message).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(pullResponse);

  List<SourceRecord> polled = task.poll();

  // No ack should be sent at poll time.
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, polled.size());
  SourceRecord expectedRecord =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  assertRecordsEqual(expectedRecord, polled.get(0));
}
 
Example 4
Source Project: streamx   Source File: TopicPartitionWriterTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that the committed files found under the topic directory match the expected
 * paths and that each file's Avro contents match the expected record structs.
 *
 * @param expectedFiles the committed file paths expected on disk
 * @param records       expected record structs, indexed by file position
 * @param schema        the Connect schema used to convert records for comparison
 * @throws IOException if the storage layer cannot be traversed or read
 */
private void verify(Set<Path> expectedFiles, Struct[] records, Schema schema) throws IOException {
  Path path = new Path(FileUtils.topicDirectory(url, topicsDir, TOPIC));
  FileStatus[] statuses = FileUtils.traverse(storage, path, new CommittedFileFilter());
  assertEquals(expectedFiles.size(), statuses.length);
  int index = 0;
  for (FileStatus status : statuses) {
    Path filePath = status.getPath();
    assertTrue(expectedFiles.contains(status.getPath()));
    Collection<Object> avroRecords = schemaFileReader.readData(conf, filePath);
    assertEquals(3, avroRecords.size());
    // NOTE(review): every record read from this file is compared against the same
    // records[index]; presumably each file holds 3 copies of one struct — confirm
    // against the fixture that prepares the data.
    for (Object avroRecord: avroRecords) {
      assertEquals(avroData.fromConnectData(schema, records[index]), avroRecord);
    }
    index++;
  }
}
 
Example 5
/**
 * Drops the record (returns null) when any configured string field's value matches the
 * configured pattern; otherwise passes the record through unchanged.
 */
R filter(R record, Struct struct) {
  for (Field field : struct.schema().fields()) {
    // Only configured fields of STRING type are eligible for pattern matching.
    if (!this.config.fields.contains(field.name())) {
      continue;
    }
    if (field.schema().type() != Schema.Type.STRING) {
      continue;
    }
    String value = struct.getString(field.name());
    if (value == null) {
      continue;
    }
    if (this.config.pattern.matcher(value).matches()) {
      return null;
    }
  }
  return record;
}
 
Example 6
/**
 * Verifies that SchemaUtil maps each Connect schema to the expected Java class,
 * including array and map schemas built via SchemaBuilder.
 */
@Test
public void testGetJavaType() {
  // Use Class<?> instead of the raw Class type to avoid unchecked-usage warnings.
  Class<?> booleanClazz = SchemaUtil.getJavaType(Schema.BOOLEAN_SCHEMA);
  Class<?> intClazz = SchemaUtil.getJavaType(Schema.INT32_SCHEMA);
  Class<?> longClazz = SchemaUtil.getJavaType(Schema.INT64_SCHEMA);
  Class<?> doubleClazz = SchemaUtil.getJavaType(Schema.FLOAT64_SCHEMA);
  Class<?> stringClazz = SchemaUtil.getJavaType(Schema.STRING_SCHEMA);
  Class<?> arrayClazz = SchemaUtil.getJavaType(SchemaBuilder.array(Schema.FLOAT64_SCHEMA));
  Class<?> mapClazz = SchemaUtil.getJavaType(SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.FLOAT64_SCHEMA));

  // assertEquals gives an expected-vs-actual failure message, unlike assertTrue(equals).
  Assert.assertEquals("java.lang.Boolean", booleanClazz.getCanonicalName());
  Assert.assertEquals("java.lang.Integer", intClazz.getCanonicalName());
  Assert.assertEquals("java.lang.Long", longClazz.getCanonicalName());
  Assert.assertEquals("java.lang.Double", doubleClazz.getCanonicalName());
  Assert.assertEquals("java.lang.String", stringClazz.getCanonicalName());
  Assert.assertEquals("java.lang.Double[]", arrayClazz.getCanonicalName());
  Assert.assertEquals("java.util.HashMap", mapClazz.getCanonicalName());
}
 
Example 7
Source Project: connect-utils   Source File: DecimalTypeParser.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Parses a JSON node into a decimal value for the given schema.
 *
 * <p>Numeric nodes are rescaled to the schema's decimal scale; textual nodes are delegated
 * to the string parser. Any other node type is rejected.
 */
@Override
public Object parseJsonNode(JsonNode input, Schema schema) {
  if (input.isNumber()) {
    return input.decimalValue().setScale(scale(schema));
  }
  if (input.isTextual()) {
    return parseString(input.textValue(), schema);
  }
  throw new UnsupportedOperationException(
      String.format(
          "Could not parse '%s' to %s",
          input,
          this.expectedClass().getSimpleName()
      )
  );
}
 
Example 8
/**
 * Builds a SinkRecord whose key and value are the UTF-8 bytes of the given strings.
 */
private SinkRecord createRecord(final String topic,
                                final int partition,
                                final String key,
                                final String value,
                                final int offset,
                                final long timestamp) {
    final byte[] keyBytes = key.getBytes(StandardCharsets.UTF_8);
    final byte[] valueBytes = value.getBytes(StandardCharsets.UTF_8);
    return new SinkRecord(
        topic,
        partition,
        Schema.BYTES_SCHEMA,
        keyBytes,
        Schema.BYTES_SCHEMA,
        valueBytes,
        offset,
        timestamp,
        TimestampType.CREATE_TIME);
}
 
Example 9
Source Project: connect-utils   Source File: Plugin.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Renders the reStructuredText cross-reference link for this schema entry.
 * Map and array types embed the links of their key/value schemas.
 */
@Value.Derived
default String getRefLink() {
  if (Schema.Type.MAP == getType()) {
    return ":ref:`schema-map`" + " < " + ":ref:`" + key().getSchemaLink()
        + "` , :ref:`" + value().getSchemaLink() + "` > ";
  }
  if (Schema.Type.ARRAY == getType()) {
    return ":ref:`schema-array`" + " < " + ":ref:`" + value().getSchemaLink() + "` >";
  }
  return ":ref:`" + getSchemaLink() + "`";
}
 
Example 10
/**
 * Creates a CSAS stream projecting ROWKEY, ROWTIME, and ITEMID with a WHERE filter, then
 * consumes the result topic and checks that exactly the single matching row (ITEM_8)
 * comes back with its key and produce-time timestamp projected into the row.
 *
 * @param resultStream      name of the stream created by the CSAS statement
 * @param inputStreamName   the source stream to select from
 * @param dataSourceSerDe   serde used when consuming the result topic
 * @param recordMetadataMap produce-time metadata keyed by record key; supplies the
 *                          expected timestamp for key "8"
 * @throws Exception if the statement fails or consumption times out
 */
private void testSelectProjectKeyTimestamp(String resultStream,
                                           String inputStreamName,
                                           DataSource.DataSourceSerDe dataSourceSerDe,
                                           Map<String, RecordMetadata> recordMetadataMap)
    throws Exception {

  ksqlContext.sql(String.format("CREATE STREAM %s AS SELECT ROWKEY AS RKEY, ROWTIME "
                                + "AS RTIME, ITEMID FROM %s WHERE ORDERUNITS > 20 AND ITEMID = "
                                + "'ITEM_8';", resultStream, inputStreamName));

  Schema resultSchema = ksqlContext.getMetaStore().getSource(resultStream).getSchema();

  Map<String, GenericRow> results = testHarness.consumeData(resultStream, resultSchema ,
                                                            dataProvider.data().size(),
                                                            new StringDeserializer(),
                                                            IntegrationTestHarness
                                                                .RESULTS_POLL_MAX_TIME_MS,
                                                            dataSourceSerDe);

  // Expected row: two leading nulls (ROWTIME/ROWKEY columns of the GenericRow), then the
  // projected key "8", the produce-time timestamp, and the item id.
  Map<String, GenericRow> expectedResults =
      Collections.singletonMap("8",
                               new GenericRow(
                                   Arrays.asList(null,
                                                 null,
                                                 "8",
                                                 recordMetadataMap.get("8").timestamp(),
                                                 "ITEM_8")));

  assertThat(results, equalTo(expectedResults));
}
 
Example 11
Source Project: connect-utils   Source File: StructHelper.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Convenience overload that builds a six-field Struct, taking one
 * (name, type, optional, value) tuple per field and delegating to the
 * list-based factory.
 */
public static Struct struct(
    String name,
    String f1, Schema.Type t1, boolean o1, Object v1,
    String f2, Schema.Type t2, boolean o2, Object v2,
    String f3, Schema.Type t3, boolean o3, Object v3,
    String f4, Schema.Type t4, boolean o4, Object v4,
    String f5, Schema.Type t5, boolean o5, Object v5,
    String f6, Schema.Type t6, boolean o6, Object v6
) {
  return struct(name, Arrays.asList(
      FieldState.of(f1, t1, o1, v1),
      FieldState.of(f2, t2, o2, v2),
      FieldState.of(f3, t3, o3, v3),
      FieldState.of(f4, t4, o4, v4),
      FieldState.of(f5, t5, o5, v5),
      FieldState.of(f6, t6, o6, v6)));
}
 
Example 12
Source Project: connect-utils   Source File: StringParserTest.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Generates one dynamic test per (schema, input, expected) case, covering the primitive
 * schemas at their numeric extremes, empty and plain strings, and Decimal schemas at
 * scales 3 through 29.
 */
@TestFactory
Stream<DynamicTest> parseString() {
  List<TestCase> tests = new ArrayList<>();

  // Boxed-primitive constructors (new Double(...), etc.) are deprecated since Java 9;
  // use toString/valueOf factories instead.
  of(tests, Schema.FLOAT64_SCHEMA, Double.toString(Double.MAX_VALUE), Double.valueOf(Double.MAX_VALUE));
  of(tests, Schema.FLOAT64_SCHEMA, Double.toString(Double.MIN_VALUE), Double.valueOf(Double.MIN_VALUE));

  of(tests, Schema.INT8_SCHEMA, Byte.toString(Byte.MAX_VALUE), Byte.valueOf(Byte.MAX_VALUE));
  of(tests, Schema.INT8_SCHEMA, Byte.toString(Byte.MIN_VALUE), Byte.valueOf(Byte.MIN_VALUE));

  of(tests, Schema.INT16_SCHEMA, Short.toString(Short.MAX_VALUE), Short.valueOf(Short.MAX_VALUE));
  of(tests, Schema.INT16_SCHEMA, Short.toString(Short.MIN_VALUE), Short.valueOf(Short.MIN_VALUE));

  of(tests, Schema.INT32_SCHEMA, Integer.toString(Integer.MAX_VALUE), Integer.valueOf(Integer.MAX_VALUE));
  of(tests, Schema.INT32_SCHEMA, Integer.toString(Integer.MIN_VALUE), Integer.valueOf(Integer.MIN_VALUE));

  of(tests, Schema.INT64_SCHEMA, Long.toString(Long.MAX_VALUE), Long.valueOf(Long.MAX_VALUE));
  of(tests, Schema.INT64_SCHEMA, Long.toString(Long.MIN_VALUE), Long.valueOf(Long.MIN_VALUE));

  of(tests, Schema.STRING_SCHEMA, "", "");
  of(tests, Schema.STRING_SCHEMA, "mirror", "mirror");

  // Decimal parsing must honor the schema's scale across a range of scales.
  for (int scale = 3; scale < 30; scale++) {
    Schema schema = Decimal.schema(scale);
    of(tests, schema, "12345", new BigDecimal("12345").setScale(scale));
    of(tests, schema, "0", new BigDecimal("0").setScale(scale));
    of(tests, schema, "-12345.001", new BigDecimal("-12345.001").setScale(scale));
  }

  return tests.stream().map(testCase -> dynamicTest(testCase.toString(), () -> {
    final Object actual = parser.parseString(testCase.schema, testCase.input);
    assertEquals(testCase.expected, actual);
  }));
}
 
Example 13
/**
 * Converts an Avro record schema, given as a JSON string, into a Connect struct schema
 * by mapping each Avro field to its KSQL-schema equivalent.
 */
public static Schema getSchemaFromAvro(String avroSchemaString) {
  org.apache.avro.Schema avroSchema =
      new org.apache.avro.Schema.Parser().parse(avroSchemaString);

  SchemaBuilder structBuilder = SchemaBuilder.struct().name(avroSchema.getName());
  avroSchema.getFields().forEach(
      avroField -> structBuilder.field(avroField.name(),
                                       getKsqlSchemaForAvroSchema(avroField.schema())));
  return structBuilder.build();
}
 
Example 14
Source Project: apicurio-registry   Source File: AvroData.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Infers the Connect schema type for schemaless data from the runtime Java type of the
 * value. Returns null for a null value; rejects any unsupported type.
 */
private static Schema.Type schemaTypeForSchemalessJavaType(Object javaValue) {
    if (javaValue == null) {
        return null;
    }
    // Primitive wrappers first, then strings, then container types.
    if (javaValue instanceof Byte) {
        return Schema.Type.INT8;
    }
    if (javaValue instanceof Short) {
        return Schema.Type.INT16;
    }
    if (javaValue instanceof Integer) {
        return Schema.Type.INT32;
    }
    if (javaValue instanceof Long) {
        return Schema.Type.INT64;
    }
    if (javaValue instanceof Float) {
        return Schema.Type.FLOAT32;
    }
    if (javaValue instanceof Double) {
        return Schema.Type.FLOAT64;
    }
    if (javaValue instanceof Boolean) {
        return Schema.Type.BOOLEAN;
    }
    if (javaValue instanceof String) {
        return Schema.Type.STRING;
    }
    if (javaValue instanceof Collection) {
        return Schema.Type.ARRAY;
    }
    if (javaValue instanceof Map) {
        return Schema.Type.MAP;
    }
    throw new DataException("Unknown Java type for schemaless data: " + javaValue.getClass());
}
 
Example 15
@Test
public void shouldCreateCorrectSchema() {
  // The stream's schema should expose exactly these three columns, in this order.
  final List<Field> expectedFields = Arrays.asList(
      new Field("COL0", 0, Schema.INT64_SCHEMA),
      new Field("COL2", 1, Schema.STRING_SCHEMA),
      new Field("COL3", 2, Schema.FLOAT64_SCHEMA));
  assertThat(stream.getSchema().fields(), equalTo(expectedFields));
}
 
Example 16
/**
 * Returns a copy of this stream whose schema additionally carries the implicit
 * row-time and row-key columns; all other attributes are carried over unchanged.
 */
@Override
public StructuredDataSource cloneWithTimeKeyColumns() {
  final Schema schemaWithTimeKey = SchemaUtil.addImplicitRowTimeRowKeyToSchema(schema);
  return new KsqlStream(
      sqlExpression,
      dataSourceName,
      schemaWithTimeKey,
      keyField,
      timestampExtractionPolicy,
      ksqlTopic);
}
 
Example 17
@Test
public void shouldBuildCorrectAggregateSchema() {
  // The aggregate schema: group-by column first, then the two generated aggregate columns.
  final SchemaKStream aggregated = build();
  assertThat(aggregated.getSchema().fields(), equalTo(Arrays.asList(
      new Field("COL0", 0, Schema.INT64_SCHEMA),
      new Field("KSQL_COL_1", 1, Schema.FLOAT64_SCHEMA),
      new Field("KSQL_COL_2", 2, Schema.INT64_SCHEMA))));
}
 
Example 18
/**
 * Maps a Connect field to its SQL type name, expanding ARRAY element types and MAP
 * key/value types (e.g. "ARRAY[...]", "MAP[...,...]").
 *
 * @param field the Connect field to describe
 * @return the mapped type name looked up from TYPE_MAP
 */
public static String getSchemaFieldName(Field field) {
  // Hoist the schema lookup: the original re-read field.schema() up to four times.
  Schema schema = field.schema();
  switch (schema.type()) {
    case ARRAY:
      return "ARRAY[" + TYPE_MAP.get(schema.valueSchema().type().name()) + "]";
    case MAP:
      return "MAP[" + TYPE_MAP.get(schema.keySchema().type().name()) + ","
          + TYPE_MAP.get(schema.valueSchema().type().name()) + "]";
    default:
      return TYPE_MAP.get(schema.type().name());
  }
}
 
Example 19
/**
 * Handle a valid deletion event resulted from a partition-level deletion by converting Cassandra representation
 * of this event into a {@link Record} object and queue the record to {@link ChangeEventQueue}. A valid deletion
 * event means a partition only has a single row, this implies there are no clustering keys.
 *
 * The steps are:
 *      (1) Populate the "source" field for this event
 *      (2) Fetch the cached key/value schemas from {@link SchemaHolder}
 *      (3) Populate the "after" field for this event
 *          a. populate partition columns
 *          b. populate regular columns with null values
 *      (4) Assemble a {@link Record} object from the populated data and queue the record
 */
private void handlePartitionDeletion(PartitionUpdate pu, OffsetPosition offsetPosition, KeyspaceTable keyspaceTable) {
    try {
        SchemaHolder.KeyValueSchema keyValueSchema = schemaHolder.getOrUpdateKeyValueSchema(keyspaceTable);
        Schema keySchema = keyValueSchema.keySchema();
        Schema valueSchema = keyValueSchema.valueSchema();

        RowData after = new RowData();
        populatePartitionColumns(after, pu);

        // For partition deletions, the PartitionUpdate only specifies the partition key, it does not
        // contain any info on regular (non-partition) columns, as if they were not modified. In order
        // to differentiate deleted columns from unmodified columns, we populate the deleted columns
        // with null value and timestamps
        TableMetadata tableMetadata = keyValueSchema.tableMetadata();
        List<ColumnMetadata> clusteringColumns = tableMetadata.getClusteringColumns();
        if (!clusteringColumns.isEmpty()) {
            throw new CassandraConnectorSchemaException("Uh-oh... clustering key should not exist for partition deletion");
        }
        // NOTE(review): removeAll mutates the list returned by getColumns(); confirm the
        // metadata object hands back a defensive copy rather than its internal list.
        List<ColumnMetadata> columns = tableMetadata.getColumns();
        columns.removeAll(tableMetadata.getPartitionKey());
        // Loop-invariant: the partition-deletion timestamp is the same for every column.
        long deletionTs = pu.deletionInfo().getPartitionDeletion().markedForDeleteAt();
        for (ColumnMetadata cm : columns) {
            CellData cellData = new CellData(cm.getName(), null, deletionTs, CellData.ColumnType.REGULAR);
            after.addCell(cellData);
        }

        recordMaker.delete(DatabaseDescriptor.getClusterName(), offsetPosition, keyspaceTable, false,
                Conversions.toInstantFromMicros(pu.maxTimestamp()), after, keySchema, valueSchema,
                MARK_OFFSET, queue::enqueue);
    }
    catch (Exception e) {
        // Pass the exception as the final argument (no placeholder) so SLF4J logs the full
        // stack trace; the previous "Reason: {}" placeholder consumed it as a plain argument.
        LOGGER.error("Fail to delete partition at {}", offsetPosition, e);
    }
}
 
Example 20
Source Project: connect-utils   Source File: SchemaKey.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Compares this key with another object. A raw Connect {@link Schema} is accepted and
 * converted via {@code of(...)} before comparison; otherwise only {@code SchemaKey}
 * instances can compare equal.
 *
 * NOTE(review): accepting Schema here breaks equals() symmetry — schema.equals(schemaKey)
 * will never be true. Confirm callers rely on this one-way comparison before changing it.
 */
@Override
public boolean equals(Object obj) {
  if (obj instanceof Schema) {
    return equals(of((Schema) obj));
  }

  if (!(obj instanceof SchemaKey)) {
    return false;
  }

  // Delegate equality to compareTo, keeping equals and compareTo consistent.
  return 0 == compareTo((SchemaKey) obj);
}
 
Example 21
Source Project: kafka-mysql-connector   Source File: MySqlSourceTaskTest.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Verifies that a MySQL BIGINT column is mapped to Connect's INT64 schema.
 */
@Test
public void testBigint() throws InterruptedException, IOException, SQLException {
    // TODO: add boundary-condition coverage and signed/unsigned variants
    // http://dev.mysql.com/doc/refman/5.7/en/integer-types.html
    final long expectedValue = 1844674407370955160L;
    final String insertSql = "insert into test.users (bigintcol) values (1844674407370955160);";

    testSchemaType("bigintcol", "bigint", Schema.INT64_SCHEMA, expectedValue, insertSql);
}
 
Example 22
/**
 * Verifies the change-tracking query generated for a table with a composite primary key:
 * both key columns must be selected from CHANGETABLE and joined back to the base table.
 */
@Test
public void multiplePrimaryKey() throws SQLException {
  TableMetadataProvider.TableMetadata metadata = tableMetadata(
      ImmutableMap.of(
          "first_key", Schema.STRING_SCHEMA,
          "second_key", Schema.STRING_SCHEMA,
          "field_one", Schema.STRING_SCHEMA,
          "field_two", Schema.STRING_SCHEMA
      ),
      "first_key", "second_key"
  );

  MsSqlQueryBuilder queryBuilder = new MsSqlQueryBuilder(this.connection);
  final String actualSql = queryBuilder.changeTrackingStatementQuery(metadata);

  final String expectedSql = "SELECT " +
      "[ct].[sys_change_version] AS [__metadata_sys_change_version], " +
      "[ct].[sys_change_creation_version] AS [__metadata_sys_change_creation_version], " +
      "[ct].[sys_change_operation] AS [__metadata_sys_change_operation], " +
      "[ct].[first_key], " +
      "[ct].[second_key], " +
      "[u].[field_one], " +
      "[u].[field_two] " +
      "FROM [dbo].[users] AS [u] " +
      "RIGHT OUTER JOIN " +
      "CHANGETABLE(CHANGES [dbo].[users], ?) AS [ct] " +
      "ON " +
      "[ct].[first_key] = [u].[first_key] AND " +
      "[ct].[second_key] = [u].[second_key]";

  assertEquals(expectedSql, actualSql, "Query should match.");
}
 
Example 23
Source Project: apicurio-registry   Source File: AvroData.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Converts an Avro default value to its Connect representation, then applies any
 * logical-type conversion declared on the Connect schema.
 */
private Object defaultValueFromAvro(Schema schema,
                                    org.apache.avro.Schema avroSchema,
                                    Object value,
                                    ToConnectContext toConnectContext) {
    // Convert the raw primitive default first, then promote it to the logical form.
    return toConnectLogical(
        schema,
        defaultValueFromAvroWithoutLogical(schema, avroSchema, value, toConnectContext));
}
 
Example 24
/**
 * Renders a boolean literal as Java source text, paired with the BOOLEAN schema.
 */
@Override
protected Pair<String, Schema> visitBooleanLiteral(
    final BooleanLiteral node,
    final Boolean unmangleNames
) {
  final String literalText = String.valueOf(node.getValue());
  return new Pair<>(literalText, Schema.BOOLEAN_SCHEMA);
}
 
Example 25
/**
 * Creates a GenericRow deserializer matching the given serde format.
 *
 * @throws KsqlException when the format is not supported
 */
private Deserializer<GenericRow> getDeserializer(Schema schema,
                                                 DataSource.DataSourceSerDe dataSourceSerDe) {
  final Deserializer<GenericRow> deserializer;
  switch (dataSourceSerDe) {
    case JSON:
      deserializer = new KsqlJsonDeserializer(schema);
      break;
    case AVRO:
      deserializer = new KsqlGenericRowAvroDeserializer(schema,
                                                        this.schemaRegistryClient,
                                                        false);
      break;
    case DELIMITED:
      deserializer = new KsqlDelimitedDeserializer(schema);
      break;
    default:
      throw new KsqlException("Format not supported: " + dataSourceSerDe);
  }
  return deserializer;
}
 
Example 26
Source Project: kafka-connect-transform-common   Source File: ToJsonTest.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Applies the ToJson transformation to a struct-valued sink record and checks that a
 * non-null transformed record is produced.
 *
 * NOTE(review): expectedSchema/expectedStruct are built but never asserted against the
 * transformation output — this test only checks non-null. Confirm whether content
 * assertions were intended here.
 */
@Test
public void struct() {
  this.transformation.configure(ImmutableMap.of());
  final Schema inputSchema = SchemaBuilder.struct()
      .field("FIRST_NAME", Schema.STRING_SCHEMA)
      .field("LAST_NAME", Schema.STRING_SCHEMA)
      .build();
  final Schema expectedSchema = SchemaBuilder.struct()
      .field("first_name", Schema.STRING_SCHEMA)
      .field("last_name", Schema.STRING_SCHEMA)
      .build();
  final Struct inputStruct = new Struct(inputSchema)
      .put("FIRST_NAME", "test")
      .put("LAST_NAME", "user");
  final Struct expectedStruct = new Struct(expectedSchema)
      .put("first_name", "test")
      .put("last_name", "user");

  final SinkRecord inputRecord = new SinkRecord(
      "topic",
      1,
      null,
      null,
      inputSchema,
      inputStruct,
      1L
  );

  final SinkRecord transformedRecord = this.transformation.apply(inputRecord);
  assertNotNull(transformedRecord, "transformedRecord should not be null.");
}
 
Example 27
Source Project: streamx   Source File: ParquetHiveUtilTest.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Verifies Hive schema evolution for a Parquet-backed table: creates the table from the
 * initial schema, checks its column names against the metastore, alters it to a new
 * schema, then runs a Hive SELECT and compares each row against the expected values
 * (NULL for the column absent in the original data).
 *
 * NOTE(review): the expected values and the 6-row count come from prepareData and the
 * connector's flush behavior — confirm against the fixture if flush_size changes.
 */
@Test
public void testAlterSchema() throws Exception {
  prepareData(TOPIC, PARTITION);
  Partitioner partitioner = HiveTestUtils.getPartitioner();
  Schema schema = createSchema();
  hive.createTable(hiveDatabase, TOPIC, schema, partitioner);

  String location = "partition=" + String.valueOf(PARTITION);
  hiveMetaStore.addPartition(hiveDatabase, TOPIC, location);

  // The metastore's column list must mirror the Connect schema's field names, in order.
  List<String> expectedColumnNames = new ArrayList<>();
  for (Field field: schema.fields()) {
    expectedColumnNames.add(field.name());
  }

  Table table = hiveMetaStore.getTable(hiveDatabase, TOPIC);
  List<String> actualColumnNames = new ArrayList<>();
  for (FieldSchema column: table.getSd().getCols()) {
    actualColumnNames.add(column.getName());
  }

  assertEquals(expectedColumnNames, actualColumnNames);

  Schema newSchema = createNewSchema();

  hive.alterSchema(hiveDatabase, TOPIC, newSchema);

  String[] expectedResult = {"true", "12", "12", "12.2", "12.2", "NULL", "12"};
  String result = HiveTestUtils.runHive(hiveExec, "SELECT * from " + TOPIC);
  String[] rows = result.split("\n");
  // Only 6 of the 7 records should have been delivered due to flush_size = 3
  assertEquals(6, rows.length);
  for (String row: rows) {
    String[] parts = HiveTestUtils.parseOutput(row);
    for (int j = 0; j < expectedResult.length; ++j) {
      assertEquals(expectedResult[j], parts[j]);
    }
  }
}
 
Example 28
/**
 * Builds the Oracle "source" info block schema on top of the common source schema:
 * schema and table names are required fields, while txid, SCN, and LCR position are
 * declared optional.
 */
public OracleSourceInfoStructMaker(String connector, String version, CommonConnectorConfig connectorConfig) {
    super(connector, version, connectorConfig);
    schema = commonSchemaBuilder()
            .name("io.debezium.connector.oracle.Source")
            .field(SourceInfo.SCHEMA_NAME_KEY, Schema.STRING_SCHEMA)
            .field(SourceInfo.TABLE_NAME_KEY, Schema.STRING_SCHEMA)
            .field(SourceInfo.TXID_KEY, Schema.OPTIONAL_STRING_SCHEMA)
            .field(SourceInfo.SCN_KEY, Schema.OPTIONAL_INT64_SCHEMA)
            .field(SourceInfo.LCR_POSITION_KEY, Schema.OPTIONAL_STRING_SCHEMA)
            .build();
}
 
Example 29
Source Project: connect-utils   Source File: SchemaBuilders.java    License: Apache License 2.0 5 votes vote down vote up
public static SchemaBuilder of(Schema schema, Collection<String> excludeFields) {

    Set<String> exclude = null != excludeFields ? ImmutableSet.copyOf(excludeFields) : ImmutableSet.of();
    SchemaBuilder builder;

    if (Schema.Type.ARRAY == schema.type()) {
      builder = SchemaBuilder.array(schema.valueSchema());
    } else if (Schema.Type.MAP == schema.type()) {
      builder = SchemaBuilder.map(schema.keySchema(), schema.valueSchema());
    } else {
      builder = SchemaBuilder.type(schema.type());
    }

    if (schema.isOptional()) {
      builder.optional();
    }
    if (!Strings.isNullOrEmpty(schema.name())) {
      builder.name(schema.name());
    }
    if (!Strings.isNullOrEmpty(schema.doc())) {
      builder.doc(schema.doc());
    }
    builder.version(schema.version());

    if (null != schema.parameters()) {
      builder.parameters(schema.parameters());
    }

    if (Schema.Type.STRUCT == schema.type()) {
      schema.fields()
          .stream()
          .filter(field -> !exclude.contains(field.name()))
          .forEach(field -> builder.field(field.name(), field.schema()));
    }

    return builder;
  }
 
Example 30
/**
 * Generates the Java source text for a comparison expression. Both operands are processed
 * recursively; the comparison template is chosen from the left operand's schema type:
 * strings and booleans get type-specific comparisons, MAP and ARRAY are rejected, and
 * all other (scalar) types use the generic comparison.
 *
 * @return the generated expression text paired with the BOOLEAN result schema
 * @throws KsqlException when the left operand is a MAP or ARRAY
 */
@Override
protected Pair<String, Schema> visitComparisonExpression(
    ComparisonExpression node,
    Boolean unmangleNames
) {
  Pair<String, Schema> left = process(node.getLeft(), unmangleNames);
  Pair<String, Schema> right = process(node.getRight(), unmangleNames);

  // nullCheckPrefix supplies the leading part of the format template; the type-specific
  // visit methods append the actual comparison fragment.
  String exprFormat = nullCheckPrefix(node.getType());
  switch (left.getRight().type()) {
    case STRING:
      exprFormat += visitStringComparisonExpression(node.getType());
      break;
    case MAP:
      throw new KsqlException("Cannot compare MAP values");
    case ARRAY:
      throw new KsqlException("Cannot compare ARRAY values");
    case BOOLEAN:
      exprFormat += visitBooleanComparisonExpression(node.getType());
      break;
    default:
      exprFormat += visitScalarComparisonExpression(node.getType());
      break;
  }
  // Substitute the generated operand expressions into the template and parenthesize.
  String expr = "(" + String.format(exprFormat, left.getLeft(), right.getLeft()) + ")";
  return new Pair<>(expr, Schema.BOOLEAN_SCHEMA);
}