org.apache.kafka.test.TestUtils Java Examples
The following examples show how to use org.apache.kafka.test.TestUtils.
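Most of the examples below revolve around TestUtils.waitForCondition, which repeatedly polls a TestCondition (a functional interface, so a lambda works) until it returns true or the timeout elapses, at which point an AssertionError carrying the supplied message is thrown. A minimal sketch of the pattern, with a hypothetical background task standing in for the asynchronous work a real test would await:

import org.apache.kafka.test.TestUtils;

public class WaitForConditionSketch {
  public static void main(String[] args) throws InterruptedException {
    // Hypothetical asynchronous work whose completion must be awaited.
    final boolean[] done = {false};
    new Thread(() -> done[0] = true).start();

    // Poll until the condition holds or 10 seconds pass; on timeout the
    // supplied message ends up in the thrown AssertionError.
    TestUtils.waitForCondition(() -> done[0], 10_000L, "operation did not complete in time");
  }
}

Examples #3, #4, and #18 use the two-argument overload that omits the timeout and falls back to TestUtils' default maximum wait.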
Example #1
Source File: EmbeddedKafka.java From mongo-kafka with Apache License 2.0
/**
 * Deletes multiple topics and blocks until all topics got deleted.
 *
 * @param duration the max time to wait for the topics to be deleted (does not block if {@code <= 0})
 * @param topics the names of the topics
 */
public void deleteTopicsAndWait(final Duration duration, final String... topics)
    throws InterruptedException {
  for (final String topic : topics) {
    try {
      broker.deleteTopic(topic);
    } catch (final UnknownTopicOrPartitionException e) {
      // The topic is already gone; nothing left to delete.
    }
  }
  if (!duration.isNegative()) {
    TestUtils.waitForCondition(
        new TopicsDeletedCondition(topics),
        duration.toMillis(),
        format("Topics not deleted after %s milliseconds.", duration.toMillis()));
  }
}
Example #2
Source File: VersionCheckerIntegrationTest.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void testMetricsAgent() throws InterruptedException, IOException {
  KsqlVersionCheckerAgent versionCheckerAgent = new KsqlVersionCheckerAgent(false);
  Properties versionCheckProps = new Properties();
  versionCheckProps.setProperty(
      BaseSupportConfig.CONFLUENT_SUPPORT_METRICS_ENDPOINT_SECURE_ENABLE_CONFIG, "false");
  versionCheckProps.setProperty(
      BaseSupportConfig.CONFLUENT_SUPPORT_PROXY_CONFIG,
      "http://localhost:" + proxyPort);

  versionCheckerAgent.start(KsqlModuleType.LOCAL_CLI, versionCheckProps);

  TestUtils.waitForCondition(() -> {
    try {
      clientAndProxy.verify(request().withPath("/ksql/anon").withMethod("POST"));
      return true;
    } catch (AssertionError e) {
      return false;
    }
  }, 30000, "Version not submitted");
}
Example #3
Source File: JsonFormatTest.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void testSinkProperties() throws Exception {
  final String streamName = "SinkPropertiesStream".toUpperCase();
  final int resultPartitionCount = 3;
  final String queryString = String.format(
      "CREATE STREAM %s WITH (PARTITIONS = %d) AS SELECT * FROM %s;",
      streamName, resultPartitionCount, inputStream);

  executePersistentQuery(queryString);

  TestUtils.waitForCondition(
      () -> topicClient.isTopicExists(streamName),
      "Wait for async topic creation");

  assertThat(
      topicClient.describeTopics(ImmutableList.of(streamName)).get(streamName).partitions(),
      hasSize(3));
  assertThat(topicClient.getTopicCleanupPolicy(streamName),
      equalTo(KafkaTopicClient.TopicCleanupPolicy.DELETE));
}
Example #4
Source File: JsonFormatTest.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void testTableSinkCleanupProperty() throws Exception {
  final String tableName = "SinkCleanupTable".toUpperCase();
  final int resultPartitionCount = 3;
  final String queryString = String.format(
      "CREATE TABLE %s AS SELECT * FROM %s;", tableName, usersTable);

  executePersistentQuery(queryString);

  TestUtils.waitForCondition(
      () -> topicClient.isTopicExists(tableName),
      "Wait for async topic creation");

  assertThat(topicClient.getTopicCleanupPolicy(tableName),
      equalTo(KafkaTopicClient.TopicCleanupPolicy.COMPACT));
}
Example #5
Source File: IntegrationTestHarness.java From ksql-fork-with-deep-learning-function with Apache License 2.0
public void start() throws Exception {
  embeddedKafkaCluster = new EmbeddedSingleNodeKafkaCluster();
  embeddedKafkaCluster.start();

  Map<String, Object> configMap = new HashMap<>();
  configMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, embeddedKafkaCluster.bootstrapServers());
  configMap.put("application.id", "KSQL");
  configMap.put("commit.interval.ms", 0);
  configMap.put("cache.max.bytes.buffering", 0);
  configMap.put("auto.offset.reset", "earliest");
  configMap.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());

  this.ksqlConfig = new KsqlConfig(configMap);
  this.adminClient = AdminClient.create(ksqlConfig.getKsqlAdminClientConfigProps());
  this.topicClient = new KafkaTopicClientImpl(adminClient);
}
Example #6
Source File: KafkaStreamsYellingIntegrationTest.java From kafka-streams-in-action with Apache License 2.0
@Before
public void setUp() {
  Properties properties = StreamsTestUtils.getStreamsConfig("integrationTest",
      EMBEDDED_KAFKA.bootstrapServers(),
      STRING_SERDE_CLASSNAME,
      STRING_SERDE_CLASSNAME,
      new Properties());
  properties.put(IntegrationTestUtils.INTERNAL_LEAVE_GROUP_ON_CLOSE, true);

  streamsConfig = new StreamsConfig(properties);

  producerConfig = TestUtils.producerConfig(EMBEDDED_KAFKA.bootstrapServers(),
      StringSerializer.class, StringSerializer.class);
  consumerConfig = TestUtils.consumerConfig(EMBEDDED_KAFKA.bootstrapServers(),
      StringDeserializer.class, StringDeserializer.class);
}
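TestUtils.producerConfig and TestUtils.consumerConfig, as used above, simply assemble a Properties object pre-populated with the bootstrap servers and the (de)serializer classes, so a test client can be built in one line. A minimal sketch, assuming a broker at localhost:9092 and a hypothetical topic named test-topic:

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.test.TestUtils;

public class ProducerConfigSketch {
  public static void main(String[] args) {
    // Ready-made producer Properties: bootstrap servers plus key/value serializers.
    Properties props = TestUtils.producerConfig("localhost:9092",
        StringSerializer.class, StringSerializer.class);
    try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
      producer.send(new ProducerRecord<>("test-topic", "key", "value"));
    }
  }
}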
Example #7
Source File: ServerOptionsTest.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void shouldOverrideFilePropertiesWithSystemProperties() throws IOException {
  final Properties sysProperties = new Properties();
  sysProperties.setProperty("bootstrap.servers", "blah:9092");
  sysProperties.setProperty("listeners", "http://localhost:8088");

  final File propsFile = TestUtils.tempFile();
  try (final PrintWriter writer = new PrintWriter(new FileWriter(propsFile))) {
    writer.println("bootstrap.servers=localhost:9092");
    writer.println("listeners=http://some-server");
    writer.println("num.stream.threads=1");
  }

  final ServerOptions options = ServerOptions.parse(propsFile.getPath());
  final Properties properties = options.loadProperties(() -> sysProperties);

  assertThat(properties.getProperty("bootstrap.servers"), equalTo("blah:9092"));
  assertThat(properties.getProperty("listeners"), equalTo("http://localhost:8088"));
  assertThat(properties.get("num.stream.threads"), equalTo("1"));
}
Example #8
Source File: KeyStoreUtil.java From ksql-fork-with-deep-learning-function with Apache License 2.0
/**
 * Write the supplied store to a temporary file.
 *
 * @param name the name of the store being written.
 * @param base64EncodedStore the base64-encoded store content.
 * @return the path to the temporary store file.
 */
static Path createTemporaryStore(final String name, final String base64EncodedStore) {
  try {
    final byte[] decoded = Base64.getDecoder().decode(base64EncodedStore);
    final File tempFile = TestUtils.tempFile();
    Files.write(tempFile.toPath(), decoded);
    final Path path = tempFile.toPath();
    System.out.println("Wrote temporary " + name + " for testing: " + path);
    return path;
  } catch (Exception e) {
    throw new RuntimeException("Failed to create temporary store", e);
  }
}
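Several examples here (Examples #5, #7, #8, #15, #17, and #24) lean on TestUtils.tempFile() and TestUtils.tempDirectory(), which create scratch locations under the system temp directory (exposed as TestUtils.IO_TMP_DIR, which Example #16 uses as a safety check before purging state). A minimal sketch; note that cleanup-on-exit behavior has varied across Kafka versions, so tests that care should delete these paths explicitly:

import java.io.File;
import java.io.IOException;
import org.apache.kafka.test.TestUtils;

public class TempLocationSketch {
  public static void main(String[] args) throws IOException {
    // Scratch file and directory for state dirs, config files, key stores, etc.
    File scratchFile = TestUtils.tempFile();
    File scratchDir = TestUtils.tempDirectory();
    System.out.println(scratchFile.getAbsolutePath());
    System.out.println(scratchDir.getAbsolutePath());
  }
}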
Example #9
Source File: IntegrationTestUtils.java From ksql-fork-with-deep-learning-function with Apache License 2.0
public static void waitUntilMetadataIsPropagated(final List<KafkaServer> servers,
                                                 final String topic,
                                                 final int partition,
                                                 final long timeout) throws InterruptedException {
  TestUtils.waitForCondition(new TestCondition() {
    @Override
    public boolean conditionMet() {
      for (final KafkaServer server : servers) {
        final MetadataCache metadataCache = server.apis().metadataCache();
        final Option<UpdateMetadataRequest.PartitionState> partitionInfo =
            metadataCache.getPartitionInfo(topic, partition);
        if (partitionInfo.isEmpty()) {
          return false;
        }
        final UpdateMetadataRequest.PartitionState metadataPartitionState = partitionInfo.get();
        if (!Request.isValidBrokerId(metadataPartitionState.basePartitionState.leader)) {
          return false;
        }
      }
      return true;
    }
  }, timeout, "metadata for topic=" + topic + " partition=" + partition
      + " not propagated to all brokers");
}
Example #10
Source File: IntegrationTestUtils.java From ksql-fork-with-deep-learning-function with Apache License 2.0
/**
 * Wait until enough data (value records) has been consumed.
 *
 * @param consumerConfig     Kafka Consumer configuration
 * @param topic              Topic to consume from
 * @param expectedNumRecords Minimum number of expected records
 * @param waitTime           Upper bound of waiting time in milliseconds
 * @return all the records consumed
 * @throws InterruptedException if the calling thread is interrupted while waiting
 * @throws AssertionError if the given wait time elapses before enough records arrive
 */
public static <V> List<V> waitUntilMinValuesRecordsReceived(final Properties consumerConfig,
                                                            final String topic,
                                                            final int expectedNumRecords,
                                                            final long waitTime)
    throws InterruptedException {
  final List<V> accumData = new ArrayList<>();
  try (final Consumer<Object, V> consumer = createConsumer(consumerConfig)) {
    final TestCondition valuesRead = new TestCondition() {
      @Override
      public boolean conditionMet() {
        final List<V> readData = readValues(topic, consumer, waitTime, expectedNumRecords);
        accumData.addAll(readData);
        return accumData.size() >= expectedNumRecords;
      }
    };
    final String conditionDetails = "Expecting " + expectedNumRecords + " records from topic "
        + topic + " while only received " + accumData.size() + ": " + accumData;
    TestUtils.waitForCondition(valuesRead, waitTime, conditionDetails);
  }
  return accumData;
}
Example #11
Source File: IntegrationTestUtils.java From ksql-fork-with-deep-learning-function with Apache License 2.0
/**
 * Wait until enough data (key-value records) has been consumed.
 *
 * @param consumerConfig     Kafka Consumer configuration
 * @param topic              Topic to consume from
 * @param expectedNumRecords Minimum number of expected records
 * @param waitTime           Upper bound of waiting time in milliseconds
 * @return all the records consumed
 * @throws InterruptedException if the calling thread is interrupted while waiting
 * @throws AssertionError if the given wait time elapses before enough records arrive
 */
public static <K, V> List<KeyValue<K, V>> waitUntilMinKeyValueRecordsReceived(final Properties consumerConfig,
                                                                              final String topic,
                                                                              final int expectedNumRecords,
                                                                              final long waitTime)
    throws InterruptedException {
  final List<KeyValue<K, V>> accumData = new ArrayList<>();
  try (final Consumer<K, V> consumer = createConsumer(consumerConfig)) {
    final TestCondition valuesRead = new TestCondition() {
      @Override
      public boolean conditionMet() {
        final List<KeyValue<K, V>> readData = readKeyValues(topic, consumer, waitTime, expectedNumRecords);
        accumData.addAll(readData);
        return accumData.size() >= expectedNumRecords;
      }
    };
    final String conditionDetails = "Expecting " + expectedNumRecords + " records from topic "
        + topic + " while only received " + accumData.size() + ": " + accumData;
    TestUtils.waitForCondition(valuesRead, waitTime, conditionDetails);
  }
  return accumData;
}
Example #12
Source File: KafkaStreamsLiveTest.java From tutorials with MIT License
@Test @Ignore("it needs to have kafka broker running on local") public void shouldTestKafkaStreams() throws InterruptedException { //given String inputTopic = "inputTopic"; Properties streamsConfiguration = new Properties(); streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-live-test"); streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000); streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); // Use a temporary directory for storing state, which will be automatically removed after the test. streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath()); //when KStreamBuilder builder = new KStreamBuilder(); KStream<String, String> textLines = builder.stream(inputTopic); Pattern pattern = Pattern.compile("\\W+", Pattern.UNICODE_CHARACTER_CLASS); KTable<String, Long> wordCounts = textLines .flatMapValues(value -> Arrays.asList(pattern.split(value.toLowerCase()))) .groupBy((key, word) -> word) .count(); wordCounts.foreach((word, count) -> System.out.println("word: " + word + " -> " + count)); String outputTopic = "outputTopic"; final Serde<String> stringSerde = Serdes.String(); final Serde<Long> longSerde = Serdes.Long(); wordCounts.to(stringSerde, longSerde, outputTopic); KafkaStreams streams = new KafkaStreams(builder, streamsConfiguration); streams.start(); //then Thread.sleep(30000); streams.close(); }
Example #13
Source File: KafkaConnectEmbedded.java From camel-kafka-connector with Apache License 2.0
@Override
public void initializeConnectorBlocking(ConnectorPropertyFactory propertyFactory, Integer expectedTaskNumber)
    throws InterruptedException {
  initializeConnector(propertyFactory);

  TestUtils.waitForCondition(() -> {
    ConnectorStateInfo connectorStateInfo = null;
    do {
      connectorStateInfo = cluster.connectorStatus(connectorName);
      Thread.sleep(20L);
    } while (connectorStateInfo == null);

    return connectorStateInfo.tasks().size() >= expectedTaskNumber
        && connectorStateInfo.connector().state().equals(AbstractStatus.State.RUNNING.toString())
        && connectorStateInfo.tasks().stream()
            .allMatch(s -> s.state().equals(AbstractStatus.State.RUNNING.toString()));
  }, 30000L, "The connector " + connectorName + " did not start within a reasonable time");
}
Example #14
Source File: KafkaStreamsLiveTest.java From tutorials with MIT License
@Test @Ignore("it needs to have kafka broker running on local") public void shouldTestKafkaStreams() throws InterruptedException { // given String inputTopic = "inputTopic"; Properties streamsConfiguration = new Properties(); streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-live-test"); streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000); streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); // Use a temporary directory for storing state, which will be automatically removed after the test. streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath()); // when StreamsBuilder builder = new StreamsBuilder(); KStream<String, String> textLines = builder.stream(inputTopic); Pattern pattern = Pattern.compile("\\W+", Pattern.UNICODE_CHARACTER_CLASS); KTable<String, Long> wordCounts = textLines.flatMapValues(value -> Arrays.asList(pattern.split(value.toLowerCase()))).groupBy((key, word) -> word).count(); textLines.foreach((word, count) -> System.out.println("word: " + word + " -> " + count)); String outputTopic = "outputTopic"; final Serde<String> stringSerde = Serdes.String(); final Serde<String> longSerde = Serdes.String(); textLines.to(outputTopic, Produced.with(stringSerde,longSerde)); KafkaStreams streams = new KafkaStreams(new Topology(), streamsConfiguration); streams.start(); // then Thread.sleep(30000); streams.close(); }
Example #15
Source File: StandaloneExecutorTest.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@Before
public void before() throws IOException {
  final String queriesFile = TestUtils.tempFile().getPath();
  executor = new StandaloneExecutor(engine, queriesFile);

  try (final FileOutputStream out = new FileOutputStream(queriesFile)) {
    out.write(query.getBytes(StandardCharsets.UTF_8));
  }
}
Example #16
Source File: IntegrationTestUtils.java From ksql-fork-with-deep-learning-function with Apache License 2.0
/**
 * Removes local state stores. Useful to reset state in-between integration test runs.
 *
 * @param streamsConfiguration Streams configuration settings
 */
public static void purgeLocalStreamsState(final Properties streamsConfiguration) throws IOException {
  final String tmpDir = TestUtils.IO_TMP_DIR.getPath();
  final String path = streamsConfiguration.getProperty(StreamsConfig.STATE_DIR_CONFIG);
  if (path != null) {
    final File node = Paths.get(path).normalize().toFile();
    // Only purge state when it's under java.io.tmpdir. This is a safety net to prevent
    // accidentally deleting important local directory trees.
    if (node.getAbsolutePath().startsWith(tmpDir)) {
      Utils.delete(new File(node.getAbsolutePath()));
    }
  }
}
Example #17
Source File: EmbeddedSingleNodeKafkaCluster.java From ksql-fork-with-deep-learning-function with Apache License 2.0
private static void createServerJaasConfig() {
  try {
    final String jaasConfigContent = createJaasConfigContent();
    final File jaasConfig = TestUtils.tempFile();
    Files.write(jaasConfigContent, jaasConfig, StandardCharsets.UTF_8);

    System.setProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM, jaasConfig.getAbsolutePath());
    System.setProperty(JaasUtils.ZK_SASL_CLIENT, "false");
  } catch (final Exception e) {
    throw new RuntimeException(e);
  }
}
Example #18
Source File: SecureIntegrationTest.java From ksql-fork-with-deep-learning-function with Apache License 2.0
private void assertCanRunKsqlQuery(final String queryString, final Object... args) throws Exception {
  executePersistentQuery(queryString, args);

  TestUtils.waitForCondition(
      () -> topicClient.isTopicExists(this.outputTopic),
      "Wait for async topic creation");

  final TopicConsumer consumer = new TopicConsumer(SECURE_CLUSTER);
  consumer.verifyRecordsReceived(outputTopic, greaterThan(0));
}
Example #19
Source File: EndToEndIntegrationTest.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void shouldSelectAllFromDerivedStream() throws Exception {
  executeStatement(
      "CREATE STREAM pageviews_female"
          + " AS SELECT %s.userid AS userid, pageid, regionid, gender "
          + " FROM %s "
          + " LEFT JOIN %s ON %s.userid = %s.userid"
          + " WHERE gender = 'FEMALE';",
      userTable, pageViewStream, userTable, pageViewStream, userTable);

  final QueuedQueryMetadata queryMetadata = executeQuery("SELECT * from pageviews_female;");

  List<KeyValue<String, GenericRow>> results = new ArrayList<>();
  BlockingQueue<KeyValue<String, GenericRow>> rowQueue = queryMetadata.getRowQueue();

  // From the mock data, we expect exactly 3 page views from female users.
  List<String> expectedPages = Arrays.asList("PAGE_2", "PAGE_5", "PAGE_5");
  List<String> expectedUsers = Arrays.asList("USER_2", "USER_0", "USER_2");
  List<String> actualPages = new ArrayList<>();
  List<String> actualUsers = new ArrayList<>();

  TestUtils.waitForCondition(() -> {
    try {
      log.debug("polling from pageviews_female");
      KeyValue<String, GenericRow> nextRow = rowQueue.poll(8000, TimeUnit.MILLISECONDS);
      if (nextRow != null) {
        results.add(nextRow);
      } else {
        // If we didn't receive any records on the output topic for 8 seconds, it probably means
        // that the join failed because the table data wasn't populated when the stream data was
        // consumed. We should just repopulate the stream data to try the join again.
        log.warn("repopulating data in {} because the join returned empty results.", pageViewTopic);
        testHarness.publishTestData(pageViewTopic, pageViewDataProvider, System.currentTimeMillis());
      }
    } catch (Exception e) {
      log.error("Got exception when polling from pageviews_female", e);
    }
    return 3 <= results.size();
  }, 30000, "Could not consume any records from " + pageViewTopic + " for 30 seconds");

  for (KeyValue<String, GenericRow> result : results) {
    List<Object> columns = result.value.getColumns();
    log.debug("pageview join: {}", columns);
    assertEquals(6, columns.size());
    String user = (String) columns.get(2);
    actualUsers.add(user);
    String page = (String) columns.get(3);
    actualPages.add(page);
  }

  assertEquals(expectedPages, actualPages);
  assertEquals(expectedUsers, actualUsers);
}
Example #20
Source File: JoinIntTest.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void shouldUseTimeStampFieldFromStream() throws Exception {
  final String queryString = String.format(
      "CREATE STREAM JOINED AS SELECT ORDERID, ITEMID, ORDERUNITS, DESCRIPTION FROM %s LEFT JOIN"
          + " %s on %s.ITEMID = %s.ID WHERE %s.ITEMID = 'ITEM_1';"
          + "CREATE STREAM OUTPUT AS SELECT ORDERID, DESCRIPTION, ROWTIME AS RT FROM JOINED;",
      orderStreamNameAvro, itemTableNameAvro,
      orderStreamNameAvro, itemTableNameAvro,
      orderStreamNameAvro);

  ksqlContext.sql(queryString);

  final String outputStream = "OUTPUT";
  Schema resultSchema = ksqlContext.getMetaStore().getSource(outputStream).getSchema();

  Map<String, GenericRow> expectedResults = Collections.singletonMap("ITEM_1",
      new GenericRow(Arrays.asList(null, null, "ORDER_1", "home cinema", 1)));

  final Map<String, GenericRow> results = new HashMap<>();
  TestUtils.waitForCondition(() -> {
    results.putAll(testHarness.consumeData(outputStream, resultSchema, 1,
        new StringDeserializer(),
        IntegrationTestHarness.RESULTS_POLL_MAX_TIME_MS,
        DataSource.DataSourceSerDe.AVRO));
    final boolean success = results.equals(expectedResults);
    if (!success) {
      try {
        // The join may not be triggered the first time around due to the order in which the
        // consumer pulls the records back, so we publish again to make the stream trigger
        // the join.
        testHarness.publishTestData(orderStreamTopicAvro, orderDataProvider, now,
            DataSource.DataSourceSerDe.AVRO);
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }
    return success;
  }, 120000, "failed to complete join correctly");
}
Example #21
Source File: JoinIntTest.java From ksql-fork-with-deep-learning-function with Apache License 2.0
private void shouldLeftJoinOrderAndItems(String testStreamName,
                                         String orderStreamTopic,
                                         String orderStreamName,
                                         String itemTableName,
                                         DataSource.DataSourceSerDe dataSourceSerDe) throws Exception {
  final String queryString = String.format(
      "CREATE STREAM %s AS SELECT ORDERID, ITEMID, ORDERUNITS, DESCRIPTION FROM %s LEFT JOIN"
          + " %s on %s.ITEMID = %s.ID WHERE %s.ITEMID = 'ITEM_1' ;",
      testStreamName, orderStreamName, itemTableName,
      orderStreamName, itemTableName, orderStreamName);

  ksqlContext.sql(queryString);

  Schema resultSchema = ksqlContext.getMetaStore().getSource(testStreamName).getSchema();

  Map<String, GenericRow> expectedResults = Collections.singletonMap("ITEM_1",
      new GenericRow(Arrays.asList(null, null, "ORDER_1", "ITEM_1", 10.0, "home cinema")));

  final Map<String, GenericRow> results = new HashMap<>();
  TestUtils.waitForCondition(() -> {
    results.putAll(testHarness.consumeData(testStreamName, resultSchema, 1,
        new StringDeserializer(),
        IntegrationTestHarness.RESULTS_POLL_MAX_TIME_MS,
        dataSourceSerDe));
    final boolean success = results.equals(expectedResults);
    if (!success) {
      try {
        // The join may not be triggered the first time around due to the order in which the
        // consumer pulls the records back, so we publish again to make the stream trigger
        // the join.
        testHarness.publishTestData(orderStreamTopic, orderDataProvider, now, dataSourceSerDe);
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }
    return success;
  }, IntegrationTestHarness.RESULTS_POLL_MAX_TIME_MS * 2 + 30000,
      "failed to complete join correctly");
}
Example #22
Source File: WindowingIntTest.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void shouldAggregateHoppingWindow() throws Exception {
  testHarness.publishTestData(topicName, dataProvider, now);

  final String streamName = "HOPPING_AGGTEST";
  final String queryString = String.format(
      "CREATE TABLE %s AS SELECT %s FROM ORDERS WINDOW %s WHERE ITEMID = 'ITEM_1' GROUP BY ITEMID;",
      streamName,
      "ITEMID, COUNT(ITEMID), SUM(ORDERUNITS), SUM(ORDERUNITS * 10)",
      "HOPPING ( SIZE 10 SECONDS, ADVANCE BY 5 SECONDS)");

  ksqlContext.sql(queryString);

  Schema resultSchema = ksqlContext.getMetaStore().getSource(streamName).getSchema();
  final GenericRow expected = new GenericRow(
      Arrays.asList(null, null, "ITEM_1", 2 /* 2 x items */, 20.0, 200.0));

  final Map<String, GenericRow> results = new HashMap<>();
  TestUtils.waitForCondition(() -> {
    final Map<Windowed<String>, GenericRow> windowedResults = testHarness.consumeData(
        streamName, resultSchema, 1,
        new TimeWindowedDeserializer<>(new StringDeserializer()), 1000);
    updateResults(results, windowedResults);
    final GenericRow actual = results.get("ITEM_1");
    return expected.equals(actual);
  }, 60000, "didn't receive correct results within timeout");

  AdminClient adminClient = AdminClient.create(testHarness.ksqlConfig.getKsqlStreamConfigProps());
  KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);

  Set<String> topicBeforeCleanup = topicClient.listTopicNames();
  assertThat("Expected to have 5 topics instead have : " + topicBeforeCleanup.size(),
      topicBeforeCleanup.size(), equalTo(5));

  QueryMetadata queryMetadata = ksqlContext.getRunningQueries().iterator().next();
  queryMetadata.close();

  Set<String> topicsAfterCleanUp = topicClient.listTopicNames();
  assertThat("Expected to see 3 topics after clean up but seeing " + topicsAfterCleanUp.size(),
      topicsAfterCleanUp.size(), equalTo(3));
  assertThat(topicClient.getTopicCleanupPolicy(streamName),
      equalTo(KafkaTopicClient.TopicCleanupPolicy.DELETE));
}
Example #23
Source File: AppEmbeddedTest.java From Kafka-Streams-Real-time-Stream-Processing with The Unlicense
@Test
@DisplayName("End to End flow testing with embedded cluster")
void testImpressionFlow() throws ExecutionException, InterruptedException {
  // Setup data for Impressions
  List<KeyValue<String, AdImpression>> impList = new ArrayList<>();
  impList.add(KeyValue.pair("100001", new AdImpression()
      .withImpressionID("100001").withCampaigner("ABC Ltd")));
  impList.add(KeyValue.pair("100002", new AdImpression()
      .withImpressionID("100002").withCampaigner("ABC Ltd")));

  // Setup a producer for Impressions
  Properties impProperties = TestUtils.producerConfig(
      kafkaCluster.bootstrapServers(),
      AppSerdes.String().serializer().getClass(),
      AppSerdes.AdImpression().serializer().getClass());

  IntegrationTestUtils.produceKeyValuesSynchronously(
      AppConfigs.impressionTopic, impList, impProperties, Time.SYSTEM);

  // Setup data for Clicks
  List<KeyValue<String, AdClick>> clkList = new ArrayList<>();
  clkList.add(KeyValue.pair("100001", new AdClick()
      .withImpressionID("100001").withCampaigner("ABC Ltd")));

  // Setup a producer for Clicks
  Properties clkProperties = TestUtils.producerConfig(
      kafkaCluster.bootstrapServers(),
      AppSerdes.String().serializer().getClass(),
      AppSerdes.AdClick().serializer().getClass());

  IntegrationTestUtils.produceKeyValuesSynchronously(
      AppConfigs.clicksTopic, clkList, clkProperties, Time.SYSTEM);

  Properties serdeProps = new Properties();
  serdeProps.put("specific.class.name", CampaignPerformance.class);

  Properties cmpProperties = TestUtils.consumerConfig(
      kafkaCluster.bootstrapServers(),
      CONSUMER_GROUP,
      AppSerdes.String().deserializer().getClass(),
      AppSerdes.CampaignPerformance().deserializer().getClass(),
      serdeProps);

  List<KeyValue<String, CampaignPerformance>> outputList =
      IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(
          cmpProperties, AppConfigs.outputTopic, 1);

  outputList.forEach(record -> {
    logger.info(record.value);
    assertAll(
        () -> assertEquals("ABC Ltd", record.value.getCampaigner()),
        () -> assertEquals("2", record.value.getAdImpressions().toString()),
        () -> assertEquals("1", record.value.getAdClicks().toString()));
  });
}
Example #24
Source File: RocksDBCacheTest.java From kcache with Apache License 2.0
@Before
public void setUp() {
  dir = TestUtils.tempDirectory();
  RocksDBCache = getRocksDBCache();
}
Example #25
Source File: MockKafkaProducer.java From samza with Apache License 2.0
public MockKafkaProducer(int numNodes, String topicName, int numPartitions) {
  this.cluster = TestUtils.clusterWith(numNodes, topicName, numPartitions);
}
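TestUtils.clusterWith(numNodes, topicName, numPartitions) fabricates in-memory Cluster metadata with the given node count and a single topic; no real brokers are involved, which is why it pairs well with test doubles such as MockProducer. A minimal sketch using hypothetical names:

import org.apache.kafka.common.Cluster;
import org.apache.kafka.test.TestUtils;

public class ClusterWithSketch {
  public static void main(String[] args) {
    // Purely in-memory metadata: one node hosting "test-topic" with 3 partitions.
    Cluster cluster = TestUtils.clusterWith(1, "test-topic", 3);
    System.out.println(cluster.partitionsForTopic("test-topic").size()); // prints 3
  }
}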
Example #26
Source File: WindowingIntTest.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void shouldAggregateTumblingWindow() throws Exception {
  testHarness.publishTestData(topicName, dataProvider, now);

  final String streamName = "TUMBLING_AGGTEST";
  final String queryString = String.format(
      "CREATE TABLE %s AS SELECT %s FROM ORDERS WINDOW %s WHERE ITEMID = 'ITEM_1' GROUP BY ITEMID;",
      streamName,
      "ITEMID, COUNT(ITEMID), SUM(ORDERUNITS), SUM(ORDERUNITS * 10)/COUNT(*)",
      "TUMBLING ( SIZE 10 SECONDS)");

  ksqlContext.sql(queryString);

  Schema resultSchema = ksqlContext.getMetaStore().getSource(streamName).getSchema();
  final GenericRow expected = new GenericRow(
      Arrays.asList(null, null, "ITEM_1", 2 /* 2 x items */, 20.0, 100.0));

  final Map<String, GenericRow> results = new HashMap<>();
  TestUtils.waitForCondition(() -> {
    final Map<Windowed<String>, GenericRow> windowedResults = testHarness.consumeData(
        streamName, resultSchema, 1,
        new TimeWindowedDeserializer<>(new StringDeserializer()), MAX_POLL_PER_ITERATION);
    updateResults(results, windowedResults);
    final GenericRow actual = results.get("ITEM_1");
    return expected.equals(actual);
  }, 60000, "didn't receive correct results within timeout");

  AdminClient adminClient = AdminClient.create(testHarness.ksqlConfig.getKsqlStreamConfigProps());
  KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);

  Set<String> topicBeforeCleanup = topicClient.listTopicNames();
  assertThat("Expected to have 5 topics instead have : " + topicBeforeCleanup.size(),
      topicBeforeCleanup.size(), equalTo(5));

  QueryMetadata queryMetadata = ksqlContext.getRunningQueries().iterator().next();
  queryMetadata.close();

  Set<String> topicsAfterCleanUp = topicClient.listTopicNames();
  assertThat("Expected to see 3 topics after clean up but seeing " + topicsAfterCleanUp.size(),
      topicsAfterCleanUp.size(), equalTo(3));
  assertThat(topicClient.getTopicCleanupPolicy(streamName),
      equalTo(KafkaTopicClient.TopicCleanupPolicy.DELETE));
}
Example #27
Source File: WindowingIntTest.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void shouldAggregateWithNoWindow() throws Exception {
  testHarness.publishTestData(topicName, dataProvider, now);

  final String streamName = "NOWINDOW_AGGTEST";
  final String queryString = String.format(
      "CREATE TABLE %s AS SELECT %s FROM ORDERS WHERE ITEMID = 'ITEM_1' GROUP BY ITEMID;",
      streamName,
      "ITEMID, COUNT(ITEMID), SUM(ORDERUNITS), SUM(KEYVALUEMAP['key2']/2)");

  ksqlContext.sql(queryString);

  Schema resultSchema = ksqlContext.getMetaStore().getSource(streamName).getSchema();
  final GenericRow expected = new GenericRow(
      Arrays.asList(null, null, "ITEM_1", 2 /* 2 x items */, 20.0, 2.0));

  final Map<String, GenericRow> results = new HashMap<>();
  TestUtils.waitForCondition(() -> {
    final Map<String, GenericRow> aggregateResults = testHarness.consumeData(
        streamName, resultSchema, 1, new StringDeserializer(), MAX_POLL_PER_ITERATION);
    final GenericRow actual = aggregateResults.get("ITEM_1");
    return expected.equals(actual);
  }, 60000, "didn't receive correct results within timeout");

  AdminClient adminClient = AdminClient.create(testHarness.ksqlConfig.getKsqlStreamConfigProps());
  KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);

  Set<String> topicBeforeCleanup = topicClient.listTopicNames();
  assertThat("Expected to have 5 topics instead have : " + topicBeforeCleanup.size(),
      topicBeforeCleanup.size(), equalTo(5));

  QueryMetadata queryMetadata = ksqlContext.getRunningQueries().iterator().next();
  queryMetadata.close();

  Set<String> topicsAfterCleanUp = topicClient.listTopicNames();
  assertThat("Expected to see 3 topics after clean up but seeing " + topicsAfterCleanUp.size(),
      topicsAfterCleanUp.size(), equalTo(3));
  assertThat(topicClient.getTopicCleanupPolicy(streamName),
      equalTo(KafkaTopicClient.TopicCleanupPolicy.COMPACT));
}
Example #28
Source File: WindowingIntTest.java From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void shouldAggregateSessionWindow() throws Exception {
  testHarness.publishTestData(topicName, dataProvider, now);

  final String streamName = "SESSION_AGGTEST";
  final String queryString = String.format(
      "CREATE TABLE %s AS SELECT %s FROM ORDERS WINDOW %s GROUP BY ORDERID;",
      streamName,
      "ORDERID, COUNT(*), SUM(ORDERUNITS)",
      "SESSION (10 SECONDS)");

  ksqlContext.sql(queryString);

  Schema resultSchema = ksqlContext.getMetaStore().getSource(streamName).getSchema();
  GenericRow expectedResults = new GenericRow(
      Arrays.asList(null, null, "ORDER_6", 6, 420.0));

  final Map<String, GenericRow> results = new HashMap<>();
  TestUtils.waitForCondition(() -> {
    final Map<Windowed<String>, GenericRow> windowedResults = testHarness.consumeData(
        streamName, resultSchema, datasetOneMetaData.size(),
        new TimeWindowedDeserializer<>(new StringDeserializer()), 1000);
    updateResults(results, windowedResults);
    final GenericRow actual = results.get("ORDER_6");
    return expectedResults.equals(actual) && results.size() == 6;
  }, 60000, "didn't receive correct results within timeout");

  AdminClient adminClient = AdminClient.create(testHarness.ksqlConfig.getKsqlStreamConfigProps());
  KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);

  Set<String> topicBeforeCleanup = topicClient.listTopicNames();
  assertThat("Expected to have 5 topics instead have : " + topicBeforeCleanup.size(),
      topicBeforeCleanup.size(), equalTo(5));

  QueryMetadata queryMetadata = ksqlContext.getRunningQueries().iterator().next();
  queryMetadata.close();

  Set<String> topicsAfterCleanUp = topicClient.listTopicNames();
  assertThat("Expected to see 3 topics after clean up but seeing " + topicsAfterCleanUp.size(),
      topicsAfterCleanUp.size(), equalTo(3));
  assertThat(topicClient.getTopicCleanupPolicy(streamName),
      equalTo(KafkaTopicClient.TopicCleanupPolicy.DELETE));
}