org.apache.flink.streaming.api.functions.sink.SinkContextUtil Java Examples

The following examples show how to use org.apache.flink.streaming.api.functions.sink.SinkContextUtil. You can vote up the examples you find useful or vote down the ones you don't, and you can go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example #1
Source File: CassandraConnectorITCase.java    From Flink-CEPplus with Apache License 2.0 6 votes vote down vote up
/**
 * Writes a batch of Scala tuples through the at-least-once Cassandra sink and
 * verifies that the table afterwards contains exactly the written records.
 */
@Test
public void testCassandraScalaTupleAtLeastSink() throws Exception {
	CassandraScalaProductSink<scala.Tuple3<String, Integer, Integer>> sink =
		new CassandraScalaProductSink<>(injectTableName(INSERT_DATA_QUERY), builder);

	// Build 20 records with unique ids so every write targets a distinct row.
	final int recordCount = 20;
	List<scala.Tuple3<String, Integer, Integer>> records = new ArrayList<>(recordCount);
	for (int i = 0; i < recordCount; i++) {
		records.add(new scala.Tuple3<>(UUID.randomUUID().toString(), i, 0));
	}

	try {
		sink.open(new Configuration());
		for (scala.Tuple3<String, Integer, Integer> record : records) {
			sink.invoke(record, SinkContextUtil.forTimestamp(0));
		}
	} finally {
		// Always release the sink's Cassandra resources.
		sink.close();
	}

	// Read everything back: the row count must match what was written.
	ResultSet rs = session.execute(injectTableName(SELECT_DATA_QUERY));
	List<com.datastax.driver.core.Row> rows = rs.all();
	Assert.assertEquals(records.size(), rows.size());

	// Cross each returned row off the list; an empty list means a 1:1 match.
	for (com.datastax.driver.core.Row row : rows) {
		records.remove(new scala.Tuple3<>(row.getString("id"), row.getInt("counter"), row.getInt("batch_id")));
	}
	Assert.assertEquals(0, records.size());
}
 
Example #2
Source File: CassandraConnectorITCase.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Writes a batch of Scala tuples through the at-least-once Cassandra sink and
 * verifies that the table afterwards contains exactly the written records.
 */
@Test
public void testCassandraScalaTupleAtLeastSink() throws Exception {
	CassandraScalaProductSink<scala.Tuple3<String, Integer, Integer>> sink =
		new CassandraScalaProductSink<>(injectTableName(INSERT_DATA_QUERY), builder);

	// Build 20 records with unique ids so every write targets a distinct row.
	final int recordCount = 20;
	List<scala.Tuple3<String, Integer, Integer>> records = new ArrayList<>(recordCount);
	for (int i = 0; i < recordCount; i++) {
		records.add(new scala.Tuple3<>(UUID.randomUUID().toString(), i, 0));
	}

	try {
		sink.open(new Configuration());
		for (scala.Tuple3<String, Integer, Integer> record : records) {
			sink.invoke(record, SinkContextUtil.forTimestamp(0));
		}
	} finally {
		// Always release the sink's Cassandra resources.
		sink.close();
	}

	// Read everything back: the row count must match what was written.
	ResultSet rs = session.execute(injectTableName(SELECT_DATA_QUERY));
	List<com.datastax.driver.core.Row> rows = rs.all();
	Assert.assertEquals(records.size(), rows.size());

	// Cross each returned row off the list; an empty list means a 1:1 match.
	for (com.datastax.driver.core.Row row : rows) {
		records.remove(new scala.Tuple3<>(row.getString("id"), row.getInt("counter"), row.getInt("batch_id")));
	}
	Assert.assertEquals(0, records.size());
}
 
Example #3
Source File: CassandraConnectorITCase.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Writes a batch of Scala tuples through the at-least-once Cassandra sink and
 * verifies that the table afterwards contains exactly the written records.
 */
@Test
public void testCassandraScalaTupleAtLeastSink() throws Exception {
	CassandraScalaProductSink<scala.Tuple3<String, Integer, Integer>> sink =
		new CassandraScalaProductSink<>(injectTableName(INSERT_DATA_QUERY), builder);

	// Build 20 records with unique ids so every write targets a distinct row.
	final int recordCount = 20;
	List<scala.Tuple3<String, Integer, Integer>> records = new ArrayList<>(recordCount);
	for (int i = 0; i < recordCount; i++) {
		records.add(new scala.Tuple3<>(UUID.randomUUID().toString(), i, 0));
	}

	try {
		sink.open(new Configuration());
		for (scala.Tuple3<String, Integer, Integer> record : records) {
			sink.invoke(record, SinkContextUtil.forTimestamp(0));
		}
	} finally {
		// Always release the sink's Cassandra resources.
		sink.close();
	}

	// Read everything back: the row count must match what was written.
	ResultSet rs = session.execute(injectTableName(SELECT_DATA_QUERY));
	List<com.datastax.driver.core.Row> rows = rs.all();
	Assert.assertEquals(records.size(), rows.size());

	// Cross each returned row off the list; an empty list means a 1:1 match.
	for (com.datastax.driver.core.Row row : rows) {
		records.remove(new scala.Tuple3<>(row.getString("id"), row.getInt("counter"), row.getInt("batch_id")));
	}
	Assert.assertEquals(0, records.size());
}
 
Example #4
Source File: RMQSinkTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * A record sent through a plain RMQ sink must be serialized once and its
 * bytes published on the default ("") exchange under the configured queue.
 */
@Test
public void invokePublishBytesToQueue() throws Exception {
	RMQSink<String> sink = createRMQSink();

	sink.invoke(MESSAGE_STR, SinkContextUtil.forTimestamp(0));

	// The record passes through the serialization schema ...
	verify(serializationSchema).serialize(MESSAGE_STR);
	// ... and the serialized bytes are published to the queue.
	verify(channel).basicPublish("", QUEUE_NAME, null, MESSAGE);
}
 
Example #5
Source File: RMQSinkTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * With publish options and a return handler, the immediate flag must be
 * forwarded unchanged to {@code basicPublish}.
 */
@Test
public void invokePublishBytesToQueueWithOptionsImmediateReturnHandler() throws Exception {
	// mandatory = false, immediate = true
	RMQSink<String> sink = createRMQSinkWithOptionsAndReturnHandler(false, true);

	sink.invoke(MESSAGE_STR, SinkContextUtil.forTimestamp(0));

	verify(serializationSchema).serialize(MESSAGE_STR);
	// Exchange, routing key, flags and properties all come from the options.
	verify(channel).basicPublish(EXCHANGE, ROUTING_KEY, false, true,
			publishOptions.computeProperties(""), MESSAGE);
}
 
Example #6
Source File: RMQSinkTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Without logFailuresOnly, a broker-side failure while publishing with
 * options must surface from invoke() as a RuntimeException.
 */
@Test(expected = RuntimeException.class)
public void exceptionDuringWithOptionsPublishingIsNotIgnored() throws Exception {
	RMQSink<String> sink = createRMQSinkWithOptions(false, false);

	// Make the channel fail on publish; invoke() must propagate the failure.
	doThrow(IOException.class).when(channel).basicPublish(EXCHANGE, ROUTING_KEY, false, false,
			publishOptions.computeProperties(""), MESSAGE);
	sink.invoke("msg", SinkContextUtil.forTimestamp(0));
}
 
Example #7
Source File: RMQSinkTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * With logFailuresOnly enabled, a broker-side publish failure must be
 * logged and swallowed instead of failing the sink.
 */
@Test
public void exceptionDuringWithOptionsPublishingIsIgnoredIfLogFailuresOnly() throws Exception {
	RMQSink<String> sink = createRMQSinkWithOptions(false, false);
	sink.setLogFailuresOnly(true);

	// Publishing fails, but invoke() must still complete without throwing.
	doThrow(IOException.class).when(channel).basicPublish(EXCHANGE, ROUTING_KEY, false, false,
			publishOptions.computeProperties(""), MESSAGE);
	sink.invoke("msg", SinkContextUtil.forTimestamp(0));
}
 
Example #8
Source File: PrintSinkFunctionTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * A default {@link PrintSinkFunction} with parallelism 1 must write to
 * System.out with no prefix.
 */
@Test
public void testPrintSinkStdOut() throws Exception {
	PrintSinkFunction<String> printSink = new PrintSinkFunction<>();
	printSink.setRuntimeContext(new MockStreamingRuntimeContext(false, 1, 0));

	printSink.open(new Configuration());
	try {
		printSink.invoke("hello world!", SinkContextUtil.forTimestamp(0));

		assertEquals("Print to System.out", printSink.toString());
		assertEquals("hello world!" + line, arrayOutputStream.toString());
	} finally {
		// Close in a finally block so a failing assertion cannot leak the
		// open sink into subsequent tests.
		printSink.close();
	}
}
 
Example #9
Source File: PrintSinkFunctionTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * A {@link PrintSinkFunction} constructed with {@code true} must write to
 * System.err with no prefix (parallelism 1).
 */
@Test
public void testPrintSinkStdErr() throws Exception {
	PrintSinkFunction<String> printSink = new PrintSinkFunction<>(true);
	printSink.setRuntimeContext(new MockStreamingRuntimeContext(false, 1, 0));
	printSink.open(new Configuration());
	try {
		printSink.invoke("hello world!", SinkContextUtil.forTimestamp(0));

		assertEquals("Print to System.err", printSink.toString());
		assertEquals("hello world!" + line, arrayErrorStream.toString());
	} finally {
		// Close in a finally block so a failing assertion cannot leak the
		// open sink into subsequent tests.
		printSink.close();
	}
}
 
Example #10
Source File: PrintSinkFunctionTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * With parallelism > 1, the sink must prefix each record with the 1-based
 * subtask index (subtask 1 of 2 prints "2> ").
 */
@Test
public void testPrintSinkWithPrefix() throws Exception {
	PrintSinkFunction<String> printSink = new PrintSinkFunction<>();
	printSink.setRuntimeContext(new MockStreamingRuntimeContext(false, 2, 1));
	printSink.open(new Configuration());
	try {
		printSink.invoke("hello world!", SinkContextUtil.forTimestamp(0));

		assertEquals("Print to System.out", printSink.toString());
		assertEquals("2> hello world!" + line, arrayOutputStream.toString());
	} finally {
		// Close in a finally block so a failing assertion cannot leak the
		// open sink into subsequent tests.
		printSink.close();
	}
}
 
Example #11
Source File: PrintSinkFunctionTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * With a sink identifier and parallelism > 1, the output prefix must
 * combine both: "mySink:2> ".
 */
@Test
public void testPrintSinkWithIdentifierAndPrefix() throws Exception {
	PrintSinkFunction<String> printSink = new PrintSinkFunction<>("mySink", false);
	printSink.setRuntimeContext(new MockStreamingRuntimeContext(false, 2, 1));
	printSink.open(new Configuration());
	try {
		printSink.invoke("hello world!", SinkContextUtil.forTimestamp(0));

		assertEquals("Print to System.out", printSink.toString());
		assertEquals("mySink:2> hello world!" + line, arrayOutputStream.toString());
	} finally {
		// Close in a finally block so a failing assertion cannot leak the
		// open sink into subsequent tests.
		printSink.close();
	}
}
 
Example #12
Source File: PrintSinkFunctionTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * With a sink identifier but parallelism 1, only the identifier is used as
 * prefix: "mySink> ".
 */
@Test
public void testPrintSinkWithIdentifierButNoPrefix() throws Exception {
	PrintSinkFunction<String> printSink = new PrintSinkFunction<>("mySink", false);
	printSink.setRuntimeContext(new MockStreamingRuntimeContext(false, 1, 0));
	printSink.open(new Configuration());
	try {
		printSink.invoke("hello world!", SinkContextUtil.forTimestamp(0));

		assertEquals("Print to System.out", printSink.toString());
		assertEquals("mySink> hello world!" + line, arrayOutputStream.toString());
	} finally {
		// Close in a finally block so a failing assertion cannot leak the
		// open sink into subsequent tests.
		printSink.close();
	}
}
 
Example #13
Source File: CassandraConnectorITCase.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * With ignoreNullFields enabled, two partial writes to the same key (each
 * leaving a different column null) must merge into one complete record.
 */
@Test
public void testCassandraScalaTuplePartialColumnUpdate() throws Exception {
	CassandraSinkBaseConfig config = CassandraSinkBaseConfig.newBuilder().setIgnoreNullFields(true).build();
	CassandraScalaProductSink<scala.Tuple3<String, Integer, Integer>> sink =
		new CassandraScalaProductSink<>(injectTableName(INSERT_DATA_QUERY), builder, config);

	final String id = UUID.randomUUID().toString();
	final Integer counter = 1;
	final Integer batchId = 0;

	// Two writes for the same primary key, each carrying one null column.
	scala.Tuple3<String, Integer, Integer> firstPartial = new scala.Tuple3<>(id, counter, null);
	scala.Tuple3<String, Integer, Integer> secondPartial = new scala.Tuple3<>(id, null, batchId);

	try {
		sink.open(new Configuration());
		sink.invoke(firstPartial, SinkContextUtil.forTimestamp(0));
		sink.invoke(secondPartial, SinkContextUtil.forTimestamp(0));
	} finally {
		sink.close();
	}

	ResultSet rs = session.execute(injectTableName(SELECT_DATA_QUERY));
	List<com.datastax.driver.core.Row> rows = rs.all();
	Assert.assertEquals(1, rows.size());
	// Nulls were ignored, so the single row must carry all three values.
	for (com.datastax.driver.core.Row row : rows) {
		Assert.assertEquals(new scala.Tuple3<>(id, counter, batchId),
			new scala.Tuple3<>(row.getString("id"), row.getInt("counter"), row.getInt("batch_id")));
	}
}
 
Example #14
Source File: FlinkKafkaProducerBaseTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Tests that the partition list handed to a custom partitioner is
 * determinate: out-of-order partition metadata from Kafka must be presented
 * to the partitioner as a sorted partition-id array.
 */
@SuppressWarnings("unchecked")
@Test
public void testPartitionerInvokedWithDeterminatePartitionList() throws Exception {
	FlinkKafkaPartitioner<String> partitioner = mock(FlinkKafkaPartitioner.class);

	RuntimeContext runtimeContext = mock(StreamingRuntimeContext.class);
	when(runtimeContext.getIndexOfThisSubtask()).thenReturn(0);
	when(runtimeContext.getNumberOfParallelSubtasks()).thenReturn(1);

	// Kafka reports its four partitions in scrambled order.
	List<PartitionInfo> partitionMetadata = new ArrayList<>(4);
	for (int partitionId : new int[] {3, 1, 0, 2}) {
		partitionMetadata.add(new PartitionInfo(DummyFlinkKafkaProducer.DUMMY_TOPIC, partitionId, null, null, null));
	}

	final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>(
		FakeStandardProducerConfig.get(), new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), partitioner);
	producer.setRuntimeContext(runtimeContext);

	final KafkaProducer kafkaProducer = producer.getMockKafkaProducer();
	when(kafkaProducer.partitionsFor(anyString())).thenReturn(partitionMetadata);
	when(kafkaProducer.metrics()).thenReturn(null);

	producer.open(new Configuration());
	verify(partitioner, times(1)).open(0, 1);

	producer.invoke("foobar", SinkContextUtil.forTimestamp(0));
	// Regardless of metadata order, the partitioner sees ids sorted ascending.
	verify(partitioner, times(1)).partition(
		"foobar", null, "foobar".getBytes(), DummyFlinkKafkaProducer.DUMMY_TOPIC, new int[] {0, 1, 2, 3});
}
 
Example #15
Source File: RMQSinkTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * With publish options and a return handler, the mandatory flag must be
 * forwarded unchanged to {@code basicPublish}.
 */
@Test
public void invokePublishBytesToQueueWithOptionsMandatoryReturnHandler() throws Exception {
	// mandatory = true, immediate = false
	RMQSink<String> sink = createRMQSinkWithOptionsAndReturnHandler(true, false);

	sink.invoke(MESSAGE_STR, SinkContextUtil.forTimestamp(0));

	verify(serializationSchema).serialize(MESSAGE_STR);
	// Exchange, routing key, flags and properties all come from the options.
	verify(channel).basicPublish(EXCHANGE, ROUTING_KEY, true, false,
			publishOptions.computeProperties(""), MESSAGE);
}
 
Example #16
Source File: RMQSinkTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Without logFailuresOnly, a broker-side failure while publishing must
 * surface from invoke() as a RuntimeException.
 */
@Test(expected = RuntimeException.class)
public void exceptionDuringPublishingIsNotIgnored() throws Exception {
	RMQSink<String> sink = createRMQSink();

	// Make the channel fail on publish; invoke() must propagate the failure.
	doThrow(IOException.class).when(channel).basicPublish("", QUEUE_NAME, null, MESSAGE);
	sink.invoke("msg", SinkContextUtil.forTimestamp(0));
}
 
Example #17
Source File: RMQSinkTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * With logFailuresOnly enabled, a broker-side publish failure must be
 * logged and swallowed instead of failing the sink.
 */
@Test
public void exceptionDuringPublishingIsIgnoredIfLogFailuresOnly() throws Exception {
	RMQSink<String> sink = createRMQSink();
	sink.setLogFailuresOnly(true);

	// Publishing fails, but invoke() must still complete without throwing.
	doThrow(IOException.class).when(channel).basicPublish("", QUEUE_NAME, null, MESSAGE);
	sink.invoke("msg", SinkContextUtil.forTimestamp(0));
}
 
Example #18
Source File: RMQSinkTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * With publish options, the sink must publish to the configured exchange
 * and routing key with the computed message properties.
 */
@Test
public void invokePublishBytesToQueueWithOptions() throws Exception {
	// mandatory = false, immediate = false
	RMQSink<String> sink = createRMQSinkWithOptions(false, false);

	sink.invoke(MESSAGE_STR, SinkContextUtil.forTimestamp(0));

	verify(serializationSchema).serialize(MESSAGE_STR);
	// Exchange, routing key, flags and properties all come from the options.
	verify(channel).basicPublish(EXCHANGE, ROUTING_KEY, false, false,
			publishOptions.computeProperties(""), MESSAGE);
}
 
Example #19
Source File: RMQSinkTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * With publish options and a return handler, the mandatory flag must be
 * forwarded unchanged to {@code basicPublish}.
 */
@Test
public void invokePublishBytesToQueueWithOptionsMandatoryReturnHandler() throws Exception {
	// mandatory = true, immediate = false
	RMQSink<String> sink = createRMQSinkWithOptionsAndReturnHandler(true, false);

	sink.invoke(MESSAGE_STR, SinkContextUtil.forTimestamp(0));

	verify(serializationSchema).serialize(MESSAGE_STR);
	// Exchange, routing key, flags and properties all come from the options.
	verify(channel).basicPublish(EXCHANGE, ROUTING_KEY, true, false,
			publishOptions.computeProperties(""), MESSAGE);
}
 
Example #20
Source File: RMQSinkTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * With publish options and a return handler, the immediate flag must be
 * forwarded unchanged to {@code basicPublish}.
 */
@Test
public void invokePublishBytesToQueueWithOptionsImmediateReturnHandler() throws Exception {
	// mandatory = false, immediate = true
	RMQSink<String> sink = createRMQSinkWithOptionsAndReturnHandler(false, true);

	sink.invoke(MESSAGE_STR, SinkContextUtil.forTimestamp(0));

	verify(serializationSchema).serialize(MESSAGE_STR);
	// Exchange, routing key, flags and properties all come from the options.
	verify(channel).basicPublish(EXCHANGE, ROUTING_KEY, false, true,
			publishOptions.computeProperties(""), MESSAGE);
}
 
Example #21
Source File: RMQSinkTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Without logFailuresOnly, a broker-side failure while publishing with
 * options must surface from invoke() as a RuntimeException.
 */
@Test(expected = RuntimeException.class)
public void exceptionDuringWithOptionsPublishingIsNotIgnored() throws Exception {
	RMQSink<String> sink = createRMQSinkWithOptions(false, false);

	// Make the channel fail on publish; invoke() must propagate the failure.
	doThrow(IOException.class).when(channel).basicPublish(EXCHANGE, ROUTING_KEY, false, false,
			publishOptions.computeProperties(""), MESSAGE);
	sink.invoke("msg", SinkContextUtil.forTimestamp(0));
}
 
Example #22
Source File: RMQSinkTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * With logFailuresOnly enabled, a broker-side publish failure must be
 * logged and swallowed instead of failing the sink.
 */
@Test
public void exceptionDuringWithOptionsPublishingIsIgnoredIfLogFailuresOnly() throws Exception {
	RMQSink<String> sink = createRMQSinkWithOptions(false, false);
	sink.setLogFailuresOnly(true);

	// Publishing fails, but invoke() must still complete without throwing.
	doThrow(IOException.class).when(channel).basicPublish(EXCHANGE, ROUTING_KEY, false, false,
			publishOptions.computeProperties(""), MESSAGE);
	sink.invoke("msg", SinkContextUtil.forTimestamp(0));
}
 
Example #23
Source File: PrintSinkFunctionTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * A default {@link PrintSinkFunction} with parallelism 1 must write to
 * System.out with no prefix.
 */
@Test
public void testPrintSinkStdOut() throws Exception {
	PrintSinkFunction<String> printSink = new PrintSinkFunction<>();
	printSink.setRuntimeContext(new MockStreamingRuntimeContext(false, 1, 0));

	printSink.open(new Configuration());
	try {
		printSink.invoke("hello world!", SinkContextUtil.forTimestamp(0));

		assertEquals("Print to System.out", printSink.toString());
		assertEquals("hello world!" + line, arrayOutputStream.toString());
	} finally {
		// Close in a finally block so a failing assertion cannot leak the
		// open sink into subsequent tests.
		printSink.close();
	}
}
 
Example #24
Source File: PrintSinkFunctionTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * A {@link PrintSinkFunction} constructed with {@code true} must write to
 * System.err with no prefix (parallelism 1).
 */
@Test
public void testPrintSinkStdErr() throws Exception {
	PrintSinkFunction<String> printSink = new PrintSinkFunction<>(true);
	printSink.setRuntimeContext(new MockStreamingRuntimeContext(false, 1, 0));
	printSink.open(new Configuration());
	try {
		printSink.invoke("hello world!", SinkContextUtil.forTimestamp(0));

		assertEquals("Print to System.err", printSink.toString());
		assertEquals("hello world!" + line, arrayErrorStream.toString());
	} finally {
		// Close in a finally block so a failing assertion cannot leak the
		// open sink into subsequent tests.
		printSink.close();
	}
}
 
Example #25
Source File: PrintSinkFunctionTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * With parallelism > 1, the sink must prefix each record with the 1-based
 * subtask index (subtask 1 of 2 prints "2> ").
 */
@Test
public void testPrintSinkWithPrefix() throws Exception {
	PrintSinkFunction<String> printSink = new PrintSinkFunction<>();
	printSink.setRuntimeContext(new MockStreamingRuntimeContext(false, 2, 1));
	printSink.open(new Configuration());
	try {
		printSink.invoke("hello world!", SinkContextUtil.forTimestamp(0));

		assertEquals("Print to System.out", printSink.toString());
		assertEquals("2> hello world!" + line, arrayOutputStream.toString());
	} finally {
		// Close in a finally block so a failing assertion cannot leak the
		// open sink into subsequent tests.
		printSink.close();
	}
}
 
Example #26
Source File: PrintSinkFunctionTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * With a sink identifier and parallelism > 1, the output prefix must
 * combine both: "mySink:2> ".
 */
@Test
public void testPrintSinkWithIdentifierAndPrefix() throws Exception {
	PrintSinkFunction<String> printSink = new PrintSinkFunction<>("mySink", false);
	printSink.setRuntimeContext(new MockStreamingRuntimeContext(false, 2, 1));
	printSink.open(new Configuration());
	try {
		printSink.invoke("hello world!", SinkContextUtil.forTimestamp(0));

		assertEquals("Print to System.out", printSink.toString());
		assertEquals("mySink:2> hello world!" + line, arrayOutputStream.toString());
	} finally {
		// Close in a finally block so a failing assertion cannot leak the
		// open sink into subsequent tests.
		printSink.close();
	}
}
 
Example #27
Source File: PrintSinkFunctionTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * With a sink identifier but parallelism 1, only the identifier is used as
 * prefix: "mySink> ".
 */
@Test
public void testPrintSinkWithIdentifierButNoPrefix() throws Exception {
	PrintSinkFunction<String> printSink = new PrintSinkFunction<>("mySink", false);
	printSink.setRuntimeContext(new MockStreamingRuntimeContext(false, 1, 0));
	printSink.open(new Configuration());
	try {
		printSink.invoke("hello world!", SinkContextUtil.forTimestamp(0));

		assertEquals("Print to System.out", printSink.toString());
		assertEquals("mySink> hello world!" + line, arrayOutputStream.toString());
	} finally {
		// Close in a finally block so a failing assertion cannot leak the
		// open sink into subsequent tests.
		printSink.close();
	}
}
 
Example #28
Source File: RMQSinkTest.java    From Flink-CEPplus with Apache License 2.0 5 votes vote down vote up
/**
 * With logFailuresOnly enabled, a broker-side publish failure must be
 * logged and swallowed instead of failing the sink.
 */
@Test
public void exceptionDuringWithOptionsPublishingIsIgnoredIfLogFailuresOnly() throws Exception {
	RMQSink<String> sink = createRMQSinkWithOptions(false, false);
	sink.setLogFailuresOnly(true);

	// Publishing fails, but invoke() must still complete without throwing.
	doThrow(IOException.class).when(channel).basicPublish(EXCHANGE, ROUTING_KEY, false, false,
			publishOptions.computeProperties(""), MESSAGE);
	sink.invoke("msg", SinkContextUtil.forTimestamp(0));
}
 
Example #29
Source File: FlinkKafkaProducerBaseTest.java    From Flink-CEPplus with Apache License 2.0 5 votes vote down vote up
/**
 * Tests that the partition list handed to a custom partitioner is
 * determinate: out-of-order partition metadata from Kafka must be presented
 * to the partitioner as a sorted partition-id array.
 */
@SuppressWarnings("unchecked")
@Test
public void testPartitionerInvokedWithDeterminatePartitionList() throws Exception {
	FlinkKafkaPartitioner<String> partitioner = mock(FlinkKafkaPartitioner.class);

	RuntimeContext runtimeContext = mock(StreamingRuntimeContext.class);
	when(runtimeContext.getIndexOfThisSubtask()).thenReturn(0);
	when(runtimeContext.getNumberOfParallelSubtasks()).thenReturn(1);

	// Kafka reports its four partitions in scrambled order.
	List<PartitionInfo> partitionMetadata = new ArrayList<>(4);
	for (int partitionId : new int[] {3, 1, 0, 2}) {
		partitionMetadata.add(new PartitionInfo(DummyFlinkKafkaProducer.DUMMY_TOPIC, partitionId, null, null, null));
	}

	final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>(
		FakeStandardProducerConfig.get(), new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), partitioner);
	producer.setRuntimeContext(runtimeContext);

	final KafkaProducer kafkaProducer = producer.getMockKafkaProducer();
	when(kafkaProducer.partitionsFor(anyString())).thenReturn(partitionMetadata);
	when(kafkaProducer.metrics()).thenReturn(null);

	producer.open(new Configuration());
	verify(partitioner, times(1)).open(0, 1);

	producer.invoke("foobar", SinkContextUtil.forTimestamp(0));
	// Regardless of metadata order, the partitioner sees ids sorted ascending.
	verify(partitioner, times(1)).partition(
		"foobar", null, "foobar".getBytes(), DummyFlinkKafkaProducer.DUMMY_TOPIC, new int[] {0, 1, 2, 3});
}
 
Example #30
Source File: RMQSinkTest.java    From Flink-CEPplus with Apache License 2.0 5 votes vote down vote up
/**
 * A record sent through a plain RMQ sink must be serialized once and its
 * bytes published on the default ("") exchange under the configured queue.
 */
@Test
public void invokePublishBytesToQueue() throws Exception {
	RMQSink<String> sink = createRMQSink();

	sink.invoke(MESSAGE_STR, SinkContextUtil.forTimestamp(0));

	// The record passes through the serialization schema ...
	verify(serializationSchema).serialize(MESSAGE_STR);
	// ... and the serialized bytes are published to the queue.
	verify(channel).basicPublish("", QUEUE_NAME, null, MESSAGE);
}