Java Code Examples for org.apache.kafka.test.TestUtils#waitForCondition()

The following examples show how to use org.apache.kafka.test.TestUtils#waitForCondition(). Each example is drawn from an open-source project; the source file, originating project, and license are noted in the header above it.
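Across the examples, the call shape is the same: waitForCondition(TestCondition, long, String) repeatedly evaluates the condition (TestCondition is a functional interface, so a lambda works) until it returns true or the timeout in milliseconds elapses; on timeout it throws an AssertionError carrying the supplied detail message, and it throws InterruptedException if the waiting thread is interrupted. An overload without the timeout argument, used in a few examples below, falls back to a default maximum wait. A minimal, self-contained sketch (the condition here is illustrative, not taken from any of the projects below):

import org.apache.kafka.test.TestUtils;

public class WaitForConditionSketch {
  public static void main(String[] args) throws InterruptedException {
    final long start = System.currentTimeMillis();

    // Re-evaluate the lambda until it returns true or 10 seconds pass; on
    // timeout an AssertionError with the detail message below is thrown.
    TestUtils.waitForCondition(
        () -> System.currentTimeMillis() - start > 1_000,
        10_000,
        "condition was not met within 10 seconds");
  }
}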
Example 1
Source File: EmbeddedKafka.java    From mongo-kafka with Apache License 2.0
/**
 * Deletes multiple topics and blocks until all of them have been deleted.
 *
 * @param duration the maximum time to wait for the topics to be deleted (the call does not
 *     block if the duration is negative)
 * @param topics the names of the topics
 */
public void deleteTopicsAndWait(final Duration duration, final String... topics)
    throws InterruptedException {
  for (final String topic : topics) {
    try {
      broker.deleteTopic(topic);
    } catch (final UnknownTopicOrPartitionException e) {
      // the topic may already be gone; nothing to do
    }
  }

  if (!duration.isNegative()) {
    TestUtils.waitForCondition(
        new TopicsDeletedCondition(topics),
        duration.toMillis(),
        format("Topics not deleted after %s milliseconds.", duration.toMillis()));
  }
}
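A hedged usage sketch for the helper above, assuming a running EmbeddedKafka instance named embeddedKafka (the instance name and topic names are illustrative):

// Delete two test topics and block for up to 30 seconds until the deletion completes.
embeddedKafka.deleteTopicsAndWait(Duration.ofSeconds(30), "test-topic-a", "test-topic-b");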
 
Example 2
Source File: VersionCheckerIntegrationTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void testMetricsAgent() throws InterruptedException, IOException {
  KsqlVersionCheckerAgent versionCheckerAgent = new KsqlVersionCheckerAgent(false);
  Properties versionCheckProps = new Properties();
  versionCheckProps.setProperty(BaseSupportConfig
      .CONFLUENT_SUPPORT_METRICS_ENDPOINT_SECURE_ENABLE_CONFIG, "false");
  versionCheckProps.setProperty(
      BaseSupportConfig.CONFLUENT_SUPPORT_PROXY_CONFIG,
      "http://localhost:" + proxyPort
  );
  versionCheckerAgent.start(KsqlModuleType.LOCAL_CLI, versionCheckProps);

  TestUtils.waitForCondition(() -> {
    try {
      clientAndProxy.verify(request().withPath("/ksql/anon").withMethod("POST"));
      return true;
    } catch (AssertionError e) {
      return false;
    }
  }, 30000, "Version not submitted");
}
 
Example 3
Source File: JsonFormatTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void testSinkProperties() throws Exception {
  final String streamName = "SinkPropertiesStream".toUpperCase();
  final int resultPartitionCount = 3;
  final String queryString = String.format("CREATE STREAM %s WITH (PARTITIONS = %d) AS SELECT * "
          + "FROM %s;",
      streamName, resultPartitionCount, inputStream);

  executePersistentQuery(queryString);

  TestUtils.waitForCondition(
      () -> topicClient.isTopicExists(streamName),
      "Wait for async topic creation"
  );

  assertThat(
      topicClient.describeTopics(ImmutableList.of(streamName)).get(streamName).partitions(),
      hasSize(resultPartitionCount));
  assertThat(topicClient.getTopicCleanupPolicy(streamName), equalTo(
      KafkaTopicClient.TopicCleanupPolicy.DELETE));
}
 
Example 4
Source File: JsonFormatTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void testTableSinkCleanupProperty() throws Exception {
  final String tableName = "SinkCleanupTable".toUpperCase();
  final String queryString = String.format("CREATE TABLE %s AS SELECT * "
                                           + "FROM %s;",
                                           tableName, usersTable);
  executePersistentQuery(queryString);

  TestUtils.waitForCondition(
      () -> topicClient.isTopicExists(tableName),
      "Wait for async topic creation"
  );

  assertThat(topicClient.getTopicCleanupPolicy(tableName), equalTo(
      KafkaTopicClient.TopicCleanupPolicy.COMPACT));
}
 
Example 5
Source File: IntegrationTestUtils.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
/**
 * Wait until enough data (key-value records) has been consumed.
 *
 * @param consumerConfig     Kafka Consumer configuration
 * @param topic              Topic to consume from
 * @param expectedNumRecords Minimum number of expected records
 * @param waitTime           Upper bound on the wait time, in milliseconds
 * @return All the records consumed; the list is empty if none were received
 * @throws InterruptedException if the calling thread is interrupted while waiting
 * @throws AssertionError       if the given wait time elapses before enough records arrive
 */
public static <K, V> List<KeyValue<K, V>> waitUntilMinKeyValueRecordsReceived(final Properties consumerConfig,
                                                                              final String topic,
                                                                              final int expectedNumRecords,
                                                                              final long waitTime) throws InterruptedException {
  final List<KeyValue<K, V>> accumData = new ArrayList<>();
  try (final Consumer<K, V> consumer = createConsumer(consumerConfig)) {
    final TestCondition valuesRead = () -> {
      final List<KeyValue<K, V>> readData =
              readKeyValues(topic, consumer, waitTime, expectedNumRecords);
      accumData.addAll(readData);
      return accumData.size() >= expectedNumRecords;
    };
    // Note: this message is built before the wait begins, so the size and contents
    // it reports are those captured at call time, not at the moment of timeout.
    final String conditionDetails =
            "Expecting " + expectedNumRecords + " records from topic " + topic +
                    " while only received " + accumData.size() + ": " + accumData;
    TestUtils.waitForCondition(valuesRead, waitTime, conditionDetails);
  }
  return accumData;
}
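Because conditionDetails above is evaluated eagerly, a timeout will always report the record count captured before the wait began (see the note in the code). Newer Kafka versions also provide a waitForCondition overload that accepts a Supplier<String>, deferring message construction until the condition has actually failed; a minimal sketch, assuming that overload is available in the Kafka test artifact on the classpath:

// Build the failure message lazily so it reflects the state at timeout.
TestUtils.waitForCondition(
    valuesRead,
    waitTime,
    () -> "Expecting " + expectedNumRecords + " records from topic " + topic
        + " while only received " + accumData.size() + ": " + accumData);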
 
Example 6
Source File: IntegrationTestUtils.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
/**
 * Wait until enough data (value records) has been consumed.
 *
 * @param consumerConfig     Kafka Consumer configuration
 * @param topic              Topic to consume from
 * @param expectedNumRecords Minimum number of expected records
 * @param waitTime           Upper bound on the wait time, in milliseconds
 * @return All the records consumed; the list is empty if none were received
 * @throws InterruptedException if the calling thread is interrupted while waiting
 * @throws AssertionError       if the given wait time elapses before enough records arrive
 */
public static <V> List<V> waitUntilMinValuesRecordsReceived(final Properties consumerConfig,
                                                            final String topic,
                                                            final int expectedNumRecords,
                                                            final long waitTime) throws InterruptedException {
  final List<V> accumData = new ArrayList<>();
  try (final Consumer<Object, V> consumer = createConsumer(consumerConfig)) {
    final TestCondition valuesRead = () -> {
      final List<V> readData =
              readValues(topic, consumer, waitTime, expectedNumRecords);
      accumData.addAll(readData);
      return accumData.size() >= expectedNumRecords;
    };
    // As above, this message is built before the wait begins, so the reported size
    // and contents are those captured at call time.
    final String conditionDetails =
            "Expecting " + expectedNumRecords + " records from topic " + topic +
                    " while only received " + accumData.size() + ": " + accumData;
    TestUtils.waitForCondition(valuesRead, waitTime, conditionDetails);
  }
  return accumData;
}
 
Example 7
Source File: IntegrationTestUtils.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
public static void waitUntilMetadataIsPropagated(final List<KafkaServer> servers,
                                                 final String topic,
                                                 final int partition,
                                                 final long timeout) throws InterruptedException {
  TestUtils.waitForCondition(() -> {
    for (final KafkaServer server : servers) {
      final MetadataCache metadataCache = server.apis().metadataCache();
      final Option<UpdateMetadataRequest.PartitionState> partitionInfo =
              metadataCache.getPartitionInfo(topic, partition);
      if (partitionInfo.isEmpty()) {
        return false;
      }
      final UpdateMetadataRequest.PartitionState metadataPartitionState = partitionInfo.get();
      if (!Request.isValidBrokerId(metadataPartitionState.basePartitionState.leader)) {
        return false;
      }
    }
    return true;
  }, timeout, "metadata for topic=" + topic + " partition=" + partition + " not propagated to all brokers");
}
 
Example 8
Source File: KafkaConnectEmbedded.java    From camel-kafka-connector with Apache License 2.0
@Override
public void initializeConnectorBlocking(ConnectorPropertyFactory propertyFactory, Integer expectedTaskNumber) throws InterruptedException {
    initializeConnector(propertyFactory);
    TestUtils.waitForCondition(() -> {
        // Poll the connector status until the cluster reports it; the status is
        // null while the connector is still being created.
        ConnectorStateInfo connectorStateInfo = null;
        do {
            connectorStateInfo = cluster.connectorStatus(connectorName);
            Thread.sleep(20L);
        } while (connectorStateInfo == null);
        return connectorStateInfo.tasks().size() >= expectedTaskNumber
                && connectorStateInfo.connector().state().equals(AbstractStatus.State.RUNNING.toString())
                && connectorStateInfo.tasks().stream().allMatch(s -> s.state().equals(AbstractStatus.State.RUNNING.toString()));
    }, 30000L, "The connector " + connectorName + " did not start within a reasonable time");
}
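The inner do/while re-implements the polling that waitForCondition already performs, and because it loops until the status is non-null it can overrun the 30-second budget. A hedged alternative sketch that folds the null check into the condition itself, so a missing status simply reads as "not ready yet" and the single timeout governs the whole wait:

TestUtils.waitForCondition(() -> {
    final ConnectorStateInfo info = cluster.connectorStatus(connectorName);
    if (info == null) {
        return false; // connector not registered yet; waitForCondition will retry
    }
    return info.tasks().size() >= expectedTaskNumber
            && info.connector().state().equals(AbstractStatus.State.RUNNING.toString())
            && info.tasks().stream().allMatch(s -> s.state().equals(AbstractStatus.State.RUNNING.toString()));
}, 30000L, "The connector " + connectorName + " did not start within a reasonable time");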
 
Example 9
Source File: SecureIntegrationTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
private void assertCanRunKsqlQuery(final String queryString,
                                   final Object... args) throws Exception {
  executePersistentQuery(queryString, args);

  TestUtils.waitForCondition(
      () -> topicClient.isTopicExists(this.outputTopic),
      "Wait for async topic creation"
  );

  final TopicConsumer consumer = new TopicConsumer(SECURE_CLUSTER);
  consumer.verifyRecordsReceived(outputTopic, greaterThan(0));
}
 
Example 10
Source File: WindowingIntTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void shouldAggregateWithNoWindow() throws Exception {

  testHarness.publishTestData(topicName, dataProvider, now);


  final String streamName = "NOWINDOW_AGGTEST";

  final String queryString = String.format(
      "CREATE TABLE %s AS SELECT %s FROM ORDERS WHERE ITEMID = 'ITEM_1' GROUP BY ITEMID;",
      streamName,
      "ITEMID, COUNT(ITEMID), SUM(ORDERUNITS), SUM(KEYVALUEMAP['key2']/2)"
  );

  ksqlContext.sql(queryString);

  Schema resultSchema = ksqlContext.getMetaStore().getSource(streamName).getSchema();

  final GenericRow expected =
      new GenericRow(Arrays.asList(null, null, "ITEM_1", 2 /* two ITEM_1 records */, 20.0, 2.0));

  TestUtils.waitForCondition(() -> {
    final Map<String, GenericRow> aggregateResults = testHarness.consumeData(
        streamName, resultSchema, 1, new StringDeserializer(), MAX_POLL_PER_ITERATION);
    final GenericRow actual = aggregateResults.get("ITEM_1");
    return expected.equals(actual);
  }, 60000, "didn't receive correct results within timeout");

  AdminClient adminClient = AdminClient.create(testHarness.ksqlConfig.getKsqlStreamConfigProps());
  KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);

  Set<String> topicBeforeCleanup = topicClient.listTopicNames();

  assertThat("Expected 5 topics before cleanup but found " + topicBeforeCleanup.size(),
             topicBeforeCleanup.size(), equalTo(5));
  QueryMetadata queryMetadata = ksqlContext.getRunningQueries().iterator().next();

  queryMetadata.close();
  Set<String> topicsAfterCleanUp = topicClient.listTopicNames();

  assertThat("Expected 3 topics after cleanup but found " + topicsAfterCleanUp.size(),
             topicsAfterCleanUp.size(), equalTo(3));
  assertThat(topicClient.getTopicCleanupPolicy(streamName), equalTo(
      KafkaTopicClient.TopicCleanupPolicy.COMPACT));
}
 
Example 11
Source File: WindowingIntTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void shouldAggregateTumblingWindow() throws Exception {

  testHarness.publishTestData(topicName, dataProvider, now);


  final String streamName = "TUMBLING_AGGTEST";

  final String queryString = String.format(
          "CREATE TABLE %s AS SELECT %s FROM ORDERS WINDOW %s WHERE ITEMID = 'ITEM_1' GROUP BY ITEMID;",
          streamName,
          "ITEMID, COUNT(ITEMID), SUM(ORDERUNITS), SUM(ORDERUNITS * 10)/COUNT(*)",
          "TUMBLING ( SIZE 10 SECONDS)"
  );

  ksqlContext.sql(queryString);

  Schema resultSchema = ksqlContext.getMetaStore().getSource(streamName).getSchema();

  final GenericRow expected =
      new GenericRow(Arrays.asList(null, null, "ITEM_1", 2 /* two ITEM_1 records */, 20.0, 100.0));

  final Map<String, GenericRow> results = new HashMap<>();
  TestUtils.waitForCondition(() -> {
    final Map<Windowed<String>, GenericRow> windowedResults = testHarness.consumeData(
        streamName, resultSchema, 1, new TimeWindowedDeserializer<>(new StringDeserializer()),
        MAX_POLL_PER_ITERATION);
    updateResults(results, windowedResults);
    final GenericRow actual = results.get("ITEM_1");
    return expected.equals(actual);
  }, 60000, "didn't receive correct results within timeout");

  AdminClient adminClient = AdminClient.create(testHarness.ksqlConfig.getKsqlStreamConfigProps());
  KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);

  Set<String> topicBeforeCleanup = topicClient.listTopicNames();

  assertThat("Expected 5 topics before cleanup but found " + topicBeforeCleanup.size(),
             topicBeforeCleanup.size(), equalTo(5));
  QueryMetadata queryMetadata = ksqlContext.getRunningQueries().iterator().next();

  queryMetadata.close();
  Set<String> topicsAfterCleanUp = topicClient.listTopicNames();

  assertThat("Expected 3 topics after cleanup but found " + topicsAfterCleanUp.size(),
             topicsAfterCleanUp.size(), equalTo(3));
  assertThat(topicClient.getTopicCleanupPolicy(streamName), equalTo(
      KafkaTopicClient.TopicCleanupPolicy.DELETE));
}
 
Example 12
Source File: WindowingIntTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void shouldAggregateHoppingWindow() throws Exception {

  testHarness.publishTestData(topicName, dataProvider, now);


  final String streamName = "HOPPING_AGGTEST";

  final String queryString = String.format(
          "CREATE TABLE %s AS SELECT %s FROM ORDERS WINDOW %s WHERE ITEMID = 'ITEM_1' GROUP BY ITEMID;",
          streamName,
          "ITEMID, COUNT(ITEMID), SUM(ORDERUNITS), SUM(ORDERUNITS * 10)",
          "HOPPING ( SIZE 10 SECONDS, ADVANCE BY 5 SECONDS)"
  );

  ksqlContext.sql(queryString);

  Schema resultSchema = ksqlContext.getMetaStore().getSource(streamName).getSchema();


  final GenericRow expected =
      new GenericRow(Arrays.asList(null, null, "ITEM_1", 2 /* two ITEM_1 records */, 20.0, 200.0));

  final Map<String, GenericRow> results = new HashMap<>();
  TestUtils.waitForCondition(() -> {
    final Map<Windowed<String>, GenericRow> windowedResults = testHarness.consumeData(
        streamName, resultSchema, 1, new TimeWindowedDeserializer<>(new StringDeserializer()),
        1000);
    updateResults(results, windowedResults);
    final GenericRow actual = results.get("ITEM_1");
    return expected.equals(actual);
  }, 60000, "didn't receive correct results within timeout");

  AdminClient adminClient = AdminClient.create(testHarness.ksqlConfig.getKsqlStreamConfigProps());
  KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);

  Set<String> topicBeforeCleanup = topicClient.listTopicNames();

  assertThat("Expected 5 topics before cleanup but found " + topicBeforeCleanup.size(),
             topicBeforeCleanup.size(), equalTo(5));
  QueryMetadata queryMetadata = ksqlContext.getRunningQueries().iterator().next();

  queryMetadata.close();
  Set<String> topicsAfterCleanUp = topicClient.listTopicNames();

  assertThat("Expected 3 topics after cleanup but found " + topicsAfterCleanUp.size(),
             topicsAfterCleanUp.size(), equalTo(3));
  assertThat(topicClient.getTopicCleanupPolicy(streamName), equalTo(
      KafkaTopicClient.TopicCleanupPolicy.DELETE));
}
 
Example 13
Source File: JoinIntTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
private void shouldLeftJoinOrderAndItems(String testStreamName,
                                         String orderStreamTopic,
                                         String orderStreamName,
                                         String itemTableName,
                                         DataSource.DataSourceSerDe dataSourceSerDe)
    throws Exception {

  final String queryString = String.format(
          "CREATE STREAM %s AS SELECT ORDERID, ITEMID, ORDERUNITS, DESCRIPTION FROM %s LEFT JOIN"
          + " %s on %s.ITEMID = %s.ID WHERE %s.ITEMID = 'ITEM_1' ;",
          testStreamName,
          orderStreamName,
          itemTableName,
          orderStreamName,
          itemTableName,
          orderStreamName);

  ksqlContext.sql(queryString);

  Schema resultSchema = ksqlContext.getMetaStore().getSource(testStreamName).getSchema();

  Map<String, GenericRow> expectedResults =
      Collections.singletonMap("ITEM_1",
                               new GenericRow(Arrays.asList(
                                   null,
                                   null,
                                   "ORDER_1",
                                   "ITEM_1",
                                   10.0,
                                   "home cinema")));

  final Map<String, GenericRow> results = new HashMap<>();
  TestUtils.waitForCondition(() -> {
    results.putAll(testHarness.consumeData(testStreamName,
                                           resultSchema,
                                           1,
                                           new StringDeserializer(),
                                           IntegrationTestHarness.RESULTS_POLL_MAX_TIME_MS,
                                           dataSourceSerDe));
    final boolean success = results.equals(expectedResults);
    if (!success) {
      try {
        // The join may not fire the first time around, due to the order in which the
        // consumer pulls the records back, so we publish again to make the stream
        // trigger the join.
        testHarness.publishTestData(orderStreamTopic, orderDataProvider, now, dataSourceSerDe);
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }
    return success;
  }, IntegrationTestHarness.RESULTS_POLL_MAX_TIME_MS * 2 + 30000,
      "failed to complete join correctly");
}
 
Example 14
Source File: JoinIntTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void shouldUseTimeStampFieldFromStream() throws Exception {
  final String queryString = String.format(
      "CREATE STREAM JOINED AS SELECT ORDERID, ITEMID, ORDERUNITS, DESCRIPTION FROM %s LEFT JOIN"
          + " %s on %s.ITEMID = %s.ID WHERE %s.ITEMID = 'ITEM_1';"
          + "CREATE STREAM OUTPUT AS SELECT ORDERID, DESCRIPTION, ROWTIME AS RT FROM JOINED;",
      orderStreamNameAvro,
      itemTableNameAvro,
      orderStreamNameAvro,
      itemTableNameAvro,
      orderStreamNameAvro);

  ksqlContext.sql(queryString);

  final String outputStream = "OUTPUT";
  Schema resultSchema = ksqlContext.getMetaStore().getSource(outputStream).getSchema();

  Map<String, GenericRow> expectedResults =
      Collections.singletonMap("ITEM_1",
          new GenericRow(Arrays.asList(
              null,
              null,
              "ORDER_1",
              "home cinema",
              1)));

  final Map<String, GenericRow> results = new HashMap<>();
  TestUtils.waitForCondition(() -> {
    results.putAll(testHarness.consumeData(outputStream,
        resultSchema,
        1,
        new StringDeserializer(),
        IntegrationTestHarness.RESULTS_POLL_MAX_TIME_MS,
        DataSource.DataSourceSerDe.AVRO));
    final boolean success = results.equals(expectedResults);
    if (!success) {
      try {
        // The join may not fire the first time around, due to the order in which the
        // consumer pulls the records back, so we publish again to make the stream
        // trigger the join.
        testHarness.publishTestData(orderStreamTopicAvro, orderDataProvider, now, DataSource.DataSourceSerDe.AVRO);
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }
    return success;
  }, 120000, "failed to complete join correctly");
}
 
Example 15
Source File: EndToEndIntegrationTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void shouldSelectAllFromDerivedStream() throws Exception {

  executeStatement(
      "CREATE STREAM pageviews_female"
      + " AS SELECT %s.userid AS userid, pageid, regionid, gender "
      + " FROM %s "
      + " LEFT JOIN %s ON %s.userid = %s.userid"
      + " WHERE gender = 'FEMALE';",
      userTable, pageViewStream, userTable, pageViewStream,
      userTable);

  final QueuedQueryMetadata queryMetadata = executeQuery(
      "SELECT * from pageviews_female;");

  List<KeyValue<String, GenericRow>> results = new ArrayList<>();
  BlockingQueue<KeyValue<String, GenericRow>> rowQueue = queryMetadata.getRowQueue();

  // From the mock data, we expect exactly 3 page views from female users.
  List<String> expectedPages = Arrays.asList("PAGE_2", "PAGE_5", "PAGE_5");
  List<String> expectedUsers = Arrays.asList("USER_2", "USER_0", "USER_2");
  List<String> actualPages = new ArrayList<>();
  List<String> actualUsers = new ArrayList<>();

  TestUtils.waitForCondition(() -> {
    try {
      log.debug("polling from pageviews_female");
      KeyValue<String, GenericRow> nextRow = rowQueue.poll(8000, TimeUnit.MILLISECONDS);
      if (nextRow != null) {
        results.add(nextRow);
      } else {
        // If we didn't receive any records on the output topic for 8 seconds, the join
        // probably failed because the table data wasn't yet populated when the stream
        // data was consumed. Repopulate the stream data to try the join again.
        log.warn("repopulating data in {} because the join returned empty results.",
                 pageViewTopic);
        testHarness
            .publishTestData(pageViewTopic, pageViewDataProvider, System.currentTimeMillis());
      }
    } catch (Exception e) {
      log.error("Got exception when polling from pageviews_female", e);
    }
    return 3 <= results.size();
  }, 30000, "Did not receive 3 records from pageviews_female within 30 seconds");

  for (KeyValue<String, GenericRow> result : results) {
    List<Object> columns = result.value.getColumns();
    log.debug("pageview join: {}", columns);

    assertEquals(6, columns.size());
    String user = (String) columns.get(2);
    actualUsers.add(user);

    String page = (String) columns.get(3);
    actualPages.add(page);
  }

  assertEquals(expectedPages, actualPages);
  assertEquals(expectedUsers, actualUsers);
}
 
Example 16
Source File: WindowingIntTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void shouldAggregateSessionWindow() throws Exception {

  testHarness.publishTestData(topicName, dataProvider, now);


  final String streamName = "SESSION_AGGTEST";

  final String queryString = String.format(
          "CREATE TABLE %s AS SELECT %s FROM ORDERS WINDOW %s GROUP BY ORDERID;",
          streamName,
          "ORDERID, COUNT(*), SUM(ORDERUNITS)",
          "SESSION (10 SECONDS)"
  );

  ksqlContext.sql(queryString);

  Schema resultSchema = ksqlContext.getMetaStore().getSource(streamName).getSchema();


  GenericRow expectedResults =
      new GenericRow(Arrays.asList(null, null, "ORDER_6", 6 /* count for ORDER_6 */, 420.0));

  final Map<String, GenericRow> results = new HashMap<>();

  TestUtils.waitForCondition(() -> {
    final Map<Windowed<String>, GenericRow> windowedResults = testHarness.consumeData(
        streamName, resultSchema, datasetOneMetaData.size(),
        new TimeWindowedDeserializer<>(new StringDeserializer()), 1000);
    updateResults(results, windowedResults);
    final GenericRow actual = results.get("ORDER_6");
    return expectedResults.equals(actual) && results.size() == 6;
  }, 60000, "didn't receive correct results within timeout");

  AdminClient adminClient = AdminClient.create(testHarness.ksqlConfig.getKsqlStreamConfigProps());
  KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);

  Set<String> topicBeforeCleanup = topicClient.listTopicNames();

  assertThat("Expected 5 topics before cleanup but found " + topicBeforeCleanup.size(),
             topicBeforeCleanup.size(), equalTo(5));
  QueryMetadata queryMetadata = ksqlContext.getRunningQueries().iterator().next();

  queryMetadata.close();
  Set<String> topicsAfterCleanUp = topicClient.listTopicNames();

  assertThat("Expected 3 topics after cleanup but found " + topicsAfterCleanUp.size(),
             topicsAfterCleanUp.size(), equalTo(3));
  assertThat(topicClient.getTopicCleanupPolicy(streamName), equalTo(
      KafkaTopicClient.TopicCleanupPolicy.DELETE));
}