Java Code Examples for com.fasterxml.jackson.databind.ObjectMapper#registerSubtypes()

The following examples show how to use com.fasterxml.jackson.databind.ObjectMapper#registerSubtypes(). Each example notes the open-source project and source file it comes from, along with that project's license.
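To ground the examples, here is a minimal, self-contained sketch of what registerSubtypes() does: it binds concrete subtypes to a polymorphic base type at runtime, optionally under an explicit logical name via NamedType, as an alternative to listing every subtype statically with @JsonSubTypes. The Shape and Circle classes below are illustrative placeholders, not taken from any of the projects that follow; the method also has overloads accepting Class<?> varargs and, in newer Jackson versions, a Collection of classes.

import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.jsontype.NamedType;

public class RegisterSubtypesDemo {

    // The base type carries the type-id configuration; subtypes are supplied at runtime.
    @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
    interface Shape {}

    static class Circle implements Shape {
        public double radius;
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // Bind Circle to the logical name "circle"; without this registration,
        // reading a Shape would fail with an "unknown type id" error.
        mapper.registerSubtypes(new NamedType(Circle.class, "circle"));

        String json = mapper.writeValueAsString(new Circle());
        System.out.println(json); // {"type":"circle","radius":0.0}

        Shape shape = mapper.readValue(json, Shape.class);
        System.out.println(shape.getClass().getSimpleName()); // Circle
    }
}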
Example 1
Source File: ObjectMapperResolver.java    From clouditor with Apache License 2.0
public static void configureObjectMapper(ObjectMapper mapper) {
  mapper.registerModule(new JavaTimeModule());
  mapper.disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS);
  mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);
  mapper.disable(SerializationFeature.FAIL_ON_EMPTY_BEANS);
  mapper.enable(SerializationFeature.INDENT_OUTPUT);
  mapper.setSerializationInclusion(Include.NON_NULL);
  mapper.setConfig(mapper.getSerializationConfig().withView(ApiOnly.class));

  // register all subtypes of CloudAccount
  for (var type : REFLECTIONS_SUBTYPE_SCANNER.getSubTypesOf(CloudAccount.class)) {
    mapper.registerSubtypes(type);
  }

  // set injectable value to null
  var values = new InjectableValues.Std();
  values.addValue("hash", null);

  mapper.setInjectableValues(values);
}
 
Example 2
Source File: ConfigurationJSONParser.java    From pnc with Apache License 2.0
private <T> T getModuleGroup(ObjectMapper mapper, String configContent, Class<T> type)
        throws IOException, ConfigurationParseException {
    mapper.registerSubtypes(type);
    ModuleConfigJson jsonConfig = mapper.readValue(configContent, ModuleConfigJson.class);

    for (AbstractModuleGroup group : jsonConfig.getConfigs()) {
        if (type.isInstance(group)) {
            // checked alternative to the original unchecked (T) cast
            return type.cast(group);
        }
    }
    throw new ConfigurationParseException("Config group could not be parsed");
}
 
Example 3
Source File: JacksonUtil.java    From jasperreports with GNU Lesser General Public License v3.0
/**
 * Resolves the class named by the given mapping and registers it as a Jackson
 * subtype under the mapping's logical name.
 */
private static void register(ObjectMapper mapper, JacksonMapping mapping)
{
	try
	{
		Class<?> clazz = Class.forName(mapping.getClassName());
		mapper.registerSubtypes(new NamedType(clazz, mapping.getName()));
	}
	catch (ClassNotFoundException e)
	{
		throw new JRRuntimeException(e);
	}
}
 
Example 4
Source File: EsLogPointerJsonTest.java    From logsniffer with GNU Lesser General Public License v3.0
@Before
public void setUp() {
	jsonMapper = new ObjectMapper();
	jsonMapper.configure(MapperFeature.USE_STATIC_TYPING, true);
	jsonMapper.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false);
	jsonMapper.configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, true);
	jsonMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
	jsonMapper.registerSubtypes(LogEntry.class);
	final SimpleModule esModule = new SimpleModule();
	esModule.addSerializer(LogPointer.class, new EsLogPointerSerializer());
	esModule.addDeserializer(LogPointer.class, new EsLogPointerDeserializer());
	esModule.addDeserializer(JsonLogPointer.class, new EsLogPointerDeserializer());
	jsonMapper.registerModule(esModule);
}
 
Example 5
Source File: BenderConfig.java    From bender with Apache License 2.0
public BenderSchema(File schemaFile) {
  ObjectMapper objectMapper = new ObjectMapper();
  objectMapper.registerSubtypes(BenderConfig.subtypes);
  try {
    this.schema = objectMapper.readTree(schemaFile);
  } catch (IOException e) {
    throw new ConfigurationException("unable to load schema file", e);
  }
}
 
Example 6
Source File: DistributedTableMetadataManagerTest.java    From foxtrot with Apache License 2.0
@Before
public void setUp() throws Exception {
    objectMapper = new ObjectMapper();
    objectMapper.registerSubtypes(GroupResponse.class);

    this.dataStore = Mockito.mock(DataStore.class);

    EmailConfig emailConfig = new EmailConfig();
    emailConfig.setHost("127.0.0.1");

    HazelcastConnection hazelcastConnection = Mockito.mock(HazelcastConnection.class);
    when(hazelcastConnection.getHazelcast()).thenReturn(hazelcastInstance);
    when(hazelcastConnection.getHazelcastConfig()).thenReturn(new Config());
    hazelcastConnection.start();

    this.distributedTableMetadataManager = new DistributedTableMetadataManager(hazelcastConnection, elasticsearchConnection,
            objectMapper, new CardinalityConfig()
    );
    distributedTableMetadataManager.start();

    tableDataStore = hazelcastInstance.getMap("tablemetadatamap");
    List<IndexerEventMutator> mutators = Lists.newArrayList(new LargeTextNodeRemover(objectMapper,
            TextNodeRemoverConfiguration.builder().build()));
    this.queryStore = new ElasticsearchQueryStore(distributedTableMetadataManager, elasticsearchConnection, dataStore, mutators, objectMapper,
            new CardinalityConfig()
    );
}
 
Example 7
Source File: DTOTest.java    From james-project with Apache License 2.0
@BeforeEach
void setUp() {
    objectMapper = new ObjectMapper();
    objectMapper.registerModule(new Jdk8Module());
    objectMapper.setSerializationInclusion(JsonInclude.Include.NON_ABSENT);
    objectMapper.registerSubtypes(new NamedType(QuotaThresholdChangedEventDTO.class, "quota-threshold-change"));
}
 
Example 8
Source File: VideosContainerResourceTest.java    From data-transfer-project with Apache License 2.0
@Test
public void verifySerializeDeserialize() throws Exception {
  ObjectMapper objectMapper = new ObjectMapper();
  objectMapper.registerSubtypes(VideosContainerResource.class);

  List<VideoAlbum> albums =
          ImmutableList.of(new VideoAlbum("id1", "album1", "This is a fake album"));

  List<VideoObject> videos =
          ImmutableList.of(
                  new VideoObject("Vid1", "http://example.com/1.mp4", "A video", "video/mp4", "v1", "id1",
                          false),
                  new VideoObject(
                          "Vid2", "https://example.com/2.mpeg", "A 2. video", "video/mpeg", "v2", "id1", false));
  
  ContainerResource data = new VideosContainerResource(albums, videos);

  String serialized = objectMapper.writeValueAsString(data);

  ContainerResource deserializedModel =
          objectMapper.readValue(serialized, ContainerResource.class);

  Truth.assertThat(deserializedModel).isNotNull();
  Truth.assertThat(deserializedModel).isInstanceOf(VideosContainerResource.class);
  VideosContainerResource deserialized = (VideosContainerResource) deserializedModel;
  Truth.assertThat(deserialized.getAlbums()).hasSize(1);
  Truth.assertThat(deserialized.getVideos()).hasSize(2);
}
 
Example 9
Source File: PhotosContainerResourceTest.java    From data-transfer-project with Apache License 2.0
@Test
public void verifySerializeDeserialize() throws Exception {
  ObjectMapper objectMapper = new ObjectMapper();
  objectMapper.registerSubtypes(PhotosContainerResource.class);

  List<PhotoAlbum> albums =
      ImmutableList.of(new PhotoAlbum("id1", "album1", "This is a fake album"));

  List<PhotoModel> photos =
      ImmutableList.of(
          new PhotoModel("Pic1", "http://fake.com/1.jpg", "A pic", "image/jpg", "p1", "id1",
              false),
          new PhotoModel(
              "Pic2", "https://fake.com/pic.png", "fine art", "image/png", "p2", "id1", false));

  ContainerResource data = new PhotosContainerResource(albums, photos);

  String serialized = objectMapper.writeValueAsString(data);

  ContainerResource deserializedModel =
      objectMapper.readValue(serialized, ContainerResource.class);

  Truth.assertThat(deserializedModel).isNotNull();
  Truth.assertThat(deserializedModel).isInstanceOf(PhotosContainerResource.class);
  PhotosContainerResource deserialized = (PhotosContainerResource) deserializedModel;
  Truth.assertThat(deserialized.getAlbums()).hasSize(1);
  Truth.assertThat(deserialized.getPhotos()).hasSize(2);
}
 
Example 10
Source File: MailContainerResourceTest.java    From data-transfer-project with Apache License 2.0
@Test
public void verifySerializeDeserialize() throws Exception {
  ObjectMapper objectMapper = new ObjectMapper();
  objectMapper.registerSubtypes(MailContainerResource.class);

  List<MailContainerModel> containers =
      ImmutableList.of(
          new MailContainerModel("id1", "container1"),
          new MailContainerModel("id2", "container2"));

  List<MailMessageModel> messages =
      ImmutableList.of(
          new MailMessageModel("foo", ImmutableList.of("1")),
          new MailMessageModel("bar", ImmutableList.of("1", "2'")));

  ContainerResource data = new MailContainerResource(containers, messages);

  String serialized = objectMapper.writeValueAsString(data);

  ContainerResource deserializedModel =
      objectMapper.readValue(serialized, ContainerResource.class);

  Truth.assertThat(deserializedModel).isNotNull();
  Truth.assertThat(deserializedModel).isInstanceOf(MailContainerResource.class);
  MailContainerResource deserialized = (MailContainerResource) deserializedModel;
  Truth.assertThat(deserialized.getMessages()).hasSize(2);
  Truth.assertThat(deserialized.getFolders()).hasSize(2);
}
 
Example 11
Source File: JsonGenericSerializer.java    From james-project with Apache License 2.0
private ObjectMapper buildObjectMapper(Set<? extends DTOModule<?, ?>> modules) {
    ObjectMapper objectMapper = new ObjectMapper()
        .registerModule(new Jdk8Module())
        .registerModule(new JavaTimeModule())
        .registerModule(new GuavaModule())
        .setSerializationInclusion(JsonInclude.Include.NON_ABSENT)
        .disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS)
        .enable(DeserializationFeature.FAIL_ON_READING_DUP_TREE_KEY);
    NamedType[] namedTypes = modules.stream()
        .map(module -> new NamedType(module.getDTOClass(), module.getDomainObjectType()))
        .toArray(NamedType[]::new);
    objectMapper.registerSubtypes(namedTypes);
    return objectMapper;
}
 
Example 12
Source File: AuthDataSerializationTest.java    From data-transfer-project with Apache License 2.0
@Before
public void setUp() {
  objectMapper = new ObjectMapper();
  objectMapper.registerSubtypes(
      CookiesAndUrlAuthData.class,
      TokenAuthData.class,
      TokensAndUrlAuthData.class,
      TokenSecretAuthData.class);
}
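Examples like this one register subtypes by Class alone, without a NamedType. In that case Jackson derives the logical type name from a @JsonTypeName annotation on the subtype, falling back to the unqualified class name. Here is a minimal sketch of that behavior; AuthData and TokenData are illustrative placeholders, not the real data-transfer-project classes:

import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.fasterxml.jackson.annotation.JsonTypeName;
import com.fasterxml.jackson.databind.ObjectMapper;

public class ClassOnlyRegistrationDemo {

    @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
    interface AuthData {}

    @JsonTypeName("token") // without this annotation, the name would default to "TokenData"
    static class TokenData implements AuthData {
        public String token = "abc";
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        mapper.registerSubtypes(TokenData.class); // logical name taken from @JsonTypeName

        String json = mapper.writeValueAsString(new TokenData());
        AuthData roundTripped = mapper.readValue(json, AuthData.class);
        System.out.println(json + " -> " + roundTripped.getClass().getSimpleName());
    }
}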
 
Example 13
Source File: ModuleConfigJsonTest.java    From pnc with Apache License 2.0
@Test
public void deserializationTest() throws JsonParseException, JsonMappingException, IOException {
    ObjectMapper mapper = new ObjectMapper();
    PncConfigProvider<AuthenticationModuleConfig> pncProvider = new PncConfigProvider<>(
            AuthenticationModuleConfig.class);
    pncProvider.registerProvider(mapper);
    mapper.registerSubtypes(PNCModuleGroup.class);
    ModuleConfigJson config = mapper.readValue(loadConfig("testConfigNoSpaces.json"), ModuleConfigJson.class);

    assertNotNull(config);
    assertEquals(1, config.getConfigs().size());
    PNCModuleGroup group = (PNCModuleGroup) config.getConfigs().get(0);
    assertEquals(2, group.getConfigs().size());
}
 
Example 14
Source File: PhysicalPlanReader.java    From Bats with Apache License 2.0
public PhysicalPlanReader(DrillConfig config, ScanResult scanResult, LogicalPlanPersistence lpPersistance,
                          final DrillbitEndpoint endpoint, final StoragePluginRegistry pluginRegistry) {

  ObjectMapper lpMapper = lpPersistance.getMapper();

  // Endpoint serializer/deserializer.
  SimpleModule serDeModule = new SimpleModule("PhysicalOperatorModule")
      .addSerializer(DrillbitEndpoint.class, new DrillbitEndpointSerDe.Se())
      .addDeserializer(DrillbitEndpoint.class, new DrillbitEndpointSerDe.De())
      .addSerializer(MajorType.class, new MajorTypeSerDe.Se())
      .addDeserializer(MajorType.class, new MajorTypeSerDe.De())
      .addDeserializer(DynamicPojoRecordReader.class,
          new StdDelegatingDeserializer<>(new DynamicPojoRecordReader.Converter(lpMapper)))
      .addSerializer(Path.class, new PathSerDe.Se());

  lpMapper.registerModule(serDeModule);
  Set<Class<? extends PhysicalOperator>> subTypes = PhysicalOperatorUtil.getSubTypes(scanResult);
  subTypes.forEach(lpMapper::registerSubtypes);
  lpMapper.registerSubtypes(DynamicPojoRecordReader.class);
  InjectableValues injectables = new InjectableValues.Std()
      .addValue(StoragePluginRegistry.class, pluginRegistry)
      .addValue(DrillbitEndpoint.class, endpoint);

  this.mapper = lpMapper;
  this.physicalPlanReader = mapper.readerFor(PhysicalPlan.class).with(injectables);
  this.operatorReader = mapper.readerFor(PhysicalOperator.class).with(injectables);
  this.logicalPlanReader = mapper.readerFor(LogicalPlan.class).with(injectables);
}
 
Example 15
Source File: AbstractConfigProvider.java    From pnc with Apache License 2.0
public void registerProvider(ObjectMapper mapper) {
    for (ProviderNameType<T> providerNameType : moduleConfigs) {
        mapper.registerSubtypes(new NamedType(providerNameType.getType(), providerNameType.getTypeName()));
    }
}
 
Example 16
Source File: TestKafkaSinkV2.java    From suro with Apache License 2.0
@Test
public void testDefaultParameters() throws IOException {
    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME,
                    "--replication-factor", "2", "--partitions", "1"}));
    String description = "{\n" +
            "    \"type\": \"kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"request.required.acks\": 1\n" +
            "}";

    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafka"));
    KafkaSinkV2 sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
    sink.open();
    // create and send test messages to Kafka
    Iterator<Message> msgIterator = new MessageSetReader(createMessageSet(TOPIC_NAME, 2)).iterator();
    HashSet<String> sentPayloads = new HashSet<String>(); // track sent messages for comparison later
    while (msgIterator.hasNext()) {
        StringMessage next = new StringMessage(msgIterator.next());
        sink.writeTo(next); // send
        sentPayloads.add( new String( next.getMessage().getPayload() ) ); // record
    }
    sink.close();
    assertEquals(sink.getNumOfPendingMessages(), 0);
    System.out.println(sink.getStat());

    // get the leader
    Option<Object> leaderOpt = ZkUtils.getLeaderForPartition(zk.getZkClient(), TOPIC_NAME, 0);
    assertTrue("Leader for topic new-topic partition 0 should exist", leaderOpt.isDefined());
    int leader = (Integer) leaderOpt.get();

    KafkaConfig config;
    if (leader == kafkaServer.getServer(0).config().brokerId()) {
        config = kafkaServer.getServer(0).config();
    } else {
        config = kafkaServer.getServer(1).config();
    }
    // get data back from Kafka
    SimpleConsumer consumer = new SimpleConsumer(config.hostName(), config.port(), 100000, 100000, "clientId");
    FetchResponse response = consumer.fetch(new FetchRequestBuilder().addFetch(TOPIC_NAME, 0, 0, 100000).build());

    List<MessageAndOffset> messageSet = Lists.newArrayList(response.messageSet(TOPIC_NAME, 0).iterator());
    assertEquals("Should have fetched 2 messages", 2, messageSet.size());

    for (int i = 0; i < messageSet.size(); i++) {
        // ensure that each received message was one that was sent
        String receivedPayload = new String(extractMessage(messageSet, i));
        System.out.println("Got message: " + receivedPayload);
        // assertTrue, not a bare assert: JVM assertions are disabled unless run with -ea
        assertTrue(sentPayloads.remove(receivedPayload));
    }
    assertEquals(sentPayloads.size(), 0); // all sent messages should have been received
}
 
Example 17
Source File: IntegrationTest.java    From kayenta with Apache License 2.0
private void configureObjectMapper(ObjectMapper objectMapper) {
  objectMapper.registerSubtypes(AtlasCanaryMetricSetQueryConfig.class);
}
 
Example 18
Source File: JsonUtils.java    From spring-credhub with Apache License 2.0
private static void registerSubtypes(ObjectMapper objectMapper, List<NamedType> subtypes) {
	objectMapper.registerSubtypes(subtypes.toArray(new NamedType[] {}));
}
 
Example 19
Source File: TestKafkaSinkV2.java    From suro with Apache License 2.0
@Test
public void testMultithread() throws IOException {
    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_MULTITHREAD,
                    "--replication-factor", "2", "--partitions", "1"}));
    String description = "{\n" +
            "    \"type\": \"kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"request.required.acks\": 1,\n" +
            "    \"batchSize\": 10,\n" +
            "    \"jobQueueSize\": 3\n" +
            "}";

    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafka"));
    KafkaSinkV2 sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
    sink.open();
    int msgCount = 10000;
    for (int i = 0; i < msgCount; ++i) {
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
                .put("key", Integer.toString(i))
                .put("value", "message:" + i).build();
        sink.writeTo(new DefaultMessageContainer(
                new Message(TOPIC_NAME_MULTITHREAD, jsonMapper.writeValueAsBytes(msgMap)),
                jsonMapper));
    }
    assertTrue(sink.getNumOfPendingMessages() > 0);
    sink.close();
    System.out.println(sink.getStat());
    assertEquals(sink.getNumOfPendingMessages(), 0);

    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
            createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid_multhread"));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC_NAME_MULTITHREAD, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_MULTITHREAD).get(0);
    for (int i = 0; i < msgCount; ++i) {
        stream.iterator().next();
    }

    try {
        stream.iterator().next();
        fail();
    } catch (ConsumerTimeoutException e) {
        //this is expected
        consumer.shutdown();
    }
}
 
Example 20
Source File: TestKafkaSink.java    From suro with Apache License 2.0
@Test
public void testConfigBackwardCompatible() throws IOException {
    int numPartitions = 9;

    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_BACKWARD_COMPAT,
                    "--replication-factor", "2", "--partitions", Integer.toString(numPartitions)}));
    String keyTopicMap = String.format("   \"keyTopicMap\": {\n" +
            "        \"%s\": \"key\"\n" +
            "    }", TOPIC_NAME_BACKWARD_COMPAT);

    String description1 = "{\n" +
            "    \"type\": \"Kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"ack\": 1,\n" +
            "     \"compression.type\": \"snappy\",\n" +
            keyTopicMap + "\n" +
            "}";
    String description2 = "{\n" +
            "    \"type\": \"Kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"request.required.acks\": 1,\n" +
            "     \"compression.codec\": \"snappy\",\n" +
            keyTopicMap + "\n" +
            "}";

    // setup sinks, both old and new versions
    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSink.class, "Kafka"));
    jsonMapper.setInjectableValues(new InjectableValues() {
        @Override
        public Object findInjectableValue(Object valueId, DeserializationContext ctxt, BeanProperty forProperty, Object beanInstance) {
            if (valueId.equals(KafkaRetentionPartitioner.class.getName())) {
                return new KafkaRetentionPartitioner();
            } else {
                return null;
            }
        }
    });
    KafkaSink sink1 = jsonMapper.readValue(description1, new TypeReference<Sink>(){});
    KafkaSink sink2 = jsonMapper.readValue(description2, new TypeReference<Sink>(){});
    sink1.open();
    sink2.open();
    List<Sink> sinks = new ArrayList<Sink>();
    sinks.add(sink1);
    sinks.add(sink2);

    // setup Kafka consumer (to read back messages)
    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
            createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid"));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC_NAME_BACKWARD_COMPAT, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
            consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_BACKWARD_COMPAT).get(0);

    // Send 20 test messages, using the old and new Kafka sinks.
    // Retrieve the messages and ensure that they are identical and sent to the same partition.
    Random rand = new Random();
    int messageCount = 20;
    for (int i = 0; i < messageCount; ++i) {
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
                .put("key", new Long( rand.nextLong() ) )
                .put("value", "message:" + i).build();

        // send message to both sinks
        for( Sink sink : sinks ){
            sink.writeTo(new DefaultMessageContainer(
                    new Message(TOPIC_NAME_BACKWARD_COMPAT, jsonMapper.writeValueAsBytes(msgMap)),
                    jsonMapper));
        }

        // read two copies of message back from Kafka and check that partitions and data match
        MessageAndMetadata<byte[], byte[]> msgAndMeta1 = stream.iterator().next();
        MessageAndMetadata<byte[], byte[]> msgAndMeta2 = stream.iterator().next();
        System.out.println( "iteration: "+i+" partition1: "+msgAndMeta1.partition() );
        System.out.println( "iteration: "+i+" partition2: "+msgAndMeta2.partition() );
        assertEquals(msgAndMeta1.partition(), msgAndMeta2.partition());
        String msg1Str = new String( msgAndMeta1.message() );
        String msg2Str = new String( msgAndMeta2.message() );
        System.out.println( "iteration: "+i+" message1: "+msg1Str );
        System.out.println( "iteration: "+i+" message2: "+msg2Str );
        assertEquals(msg1Str, msg2Str);
    }

    // close sinks
    sink1.close();
    sink2.close();
    // close consumer
    try {
        stream.iterator().next();
        fail(); // there should be no data left to consume
    } catch (ConsumerTimeoutException e) {
        //this is expected
        consumer.shutdown();
    }
}