org.apache.kafka.common.errors.UnknownTopicOrPartitionException Java Examples

The following examples show how to use org.apache.kafka.common.errors.UnknownTopicOrPartitionException. Each example notes the original project and source file it was taken from.
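Most of the examples below share one underlying pattern: call an AdminClient method, wait on the returned future, and unwrap the ExecutionException to check whether its cause is an UnknownTopicOrPartitionException. The sketch below shows that pattern in isolation; it is not taken from any of the projects listed here, and the class and method names (TopicExistence, topicExists) are illustrative only.

import java.util.Collections;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;

public final class TopicExistence {

  private TopicExistence() {
  }

  /**
   * Returns true if the topic is known to the cluster, false if the broker reports
   * UnknownTopicOrPartitionException; any other failure is rethrown.
   */
  public static boolean topicExists(AdminClient admin, String topic)
      throws InterruptedException, ExecutionException {
    try {
      admin.describeTopics(Collections.singleton(topic)).all().get();
      return true;
    } catch (ExecutionException e) {
      // The admin client wraps the real failure cause in an ExecutionException.
      if (e.getCause() instanceof UnknownTopicOrPartitionException) {
        return false;
      }
      throw e;
    }
  }
}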
Example #1
Source File: EmbeddedKafka.java    From mongo-kafka with Apache License 2.0
/**
 * Deletes multiple topics and blocks until all of them have been deleted.
 *
 * @param duration the maximum time to wait for the topics to be deleted (does not block if
 *     {@code <= 0})
 * @param topics the names of the topics to delete
 */
public void deleteTopicsAndWait(final Duration duration, final String... topics)
    throws InterruptedException {
  for (final String topic : topics) {
    try {
      broker.deleteTopic(topic);
    } catch (final UnknownTopicOrPartitionException e) {
      // the topic was already gone (or never existed); nothing to delete
    }
  }

  if (!duration.isNegative()) {
    TestUtils.waitForCondition(
        new TopicsDeletedCondition(topics),
        duration.toMillis(),
        format("Topics not deleted after %s milli seconds.", duration.toMillis()));
  }
}
 
Example #2
Source File: KafkaAdminClient.java    From df_data_service with Apache License 2.0
/**
 * Describes one or more topics on the Kafka broker and prints their metadata.
 *
 * @param BOOTSTRAP_SERVERS_HOST_PORT the bootstrap servers (host:port) of the Kafka cluster.
 * @param topicsName a comma-separated list of topic names.
 */
public static void describeTopics(String BOOTSTRAP_SERVERS_HOST_PORT, String topicsName) {
    AdminClient adminClient = createAdminClient(BOOTSTRAP_SERVERS_HOST_PORT);
    // describe the given topics; a topic that does not exist surfaces as an UnknownTopicOrPartitionException
    DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Arrays.asList(topicsName.split(",")));
    try {
        describeTopicsResult.all().get().forEach((key, value) -> {
            System.out.println("Key : " + key + " Value : " + value);
        });
        // real failure cause is wrapped inside the raised ExecutionException
    } catch (ExecutionException | InterruptedException e) {
        if (e.getCause() instanceof UnknownTopicOrPartitionException) {
            System.err.println("Topic does not exist!");
        } else if (e.getCause() instanceof TimeoutException) {
            System.err.println("Request timed out!");
        }
        e.printStackTrace();
    } finally {
        adminClient.close();
    }
}
 
Example #3
Source File: KafkaAdminClient.java    From df_data_service with Apache License 2.0
/**
 * Deletes one or more topics on the Kafka broker.
 *
 * @param BOOTSTRAP_SERVERS_HOST_PORT the bootstrap servers (host:port) of the Kafka cluster.
 * @param topicsName a comma-separated list of topic names.
 */
public static void deleteTopics(String BOOTSTRAP_SERVERS_HOST_PORT, String topicsName) {
    AdminClient adminClient = createAdminClient(BOOTSTRAP_SERVERS_HOST_PORT);
    // delete the given topics; a topic that does not exist surfaces as an UnknownTopicOrPartitionException
    DeleteTopicsResult deleteTopicsResult = adminClient.deleteTopics(Arrays.asList(topicsName.split(",")));
    try {
        deleteTopicsResult.all().get();
        // real failure cause is wrapped inside the raised ExecutionException
    } catch (ExecutionException | InterruptedException e) {
        if (e.getCause() instanceof UnknownTopicOrPartitionException) {
            System.err.println("Topic does not exist!");
        } else if (e.getCause() instanceof TimeoutException) {
            System.err.println("Request timed out!");
        }
        e.printStackTrace();
    } finally {
        adminClient.close();
    }
}
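A related pattern, which Example #1 applies by ignoring the exception, is to treat UnknownTopicOrPartitionException during deletion as success: if the topic is already gone, there is nothing left to delete. The sketch below assumes a plain Kafka AdminClient; the class and method names (IdempotentTopicDelete, deleteIfExists) are illustrative only.

import java.util.Collections;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;

public final class IdempotentTopicDelete {

  private IdempotentTopicDelete() {
  }

  /** Deletes the topic, treating "topic does not exist" as success. */
  public static void deleteIfExists(AdminClient adminClient, String topic)
      throws InterruptedException, ExecutionException {
    try {
      adminClient.deleteTopics(Collections.singleton(topic)).all().get();
    } catch (ExecutionException e) {
      // The broker's UnknownTopicOrPartitionException arrives wrapped in an ExecutionException.
      if (!(e.getCause() instanceof UnknownTopicOrPartitionException)) {
        throw e;
      }
      // The topic did not exist, so the delete is a no-op.
    }
  }
}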
 
Example #4
Source File: TopicAdmin.java    From kafka-message-tool with MIT License
private static boolean topicExistsCheckWithClusterQuery(String topicName,
                                                        org.apache.kafka.clients.admin.AdminClient
                                                            kafkaClientsAdminClient) throws Exception {

    try {
        final DescribeTopicsResult result = kafkaClientsAdminClient.describeTopics(singleton(topicName));
        result.all().get(ApplicationConstants.FUTURE_GET_TIMEOUT_MS, TimeUnit.MILLISECONDS);
        return true;
    } catch (ExecutionException e) {
        if (Throwables.getRootCause(e) instanceof UnknownTopicOrPartitionException) {
            return false;
        } else {
            throw e;
        }
    }
}
 
Example #5
Source File: KafkaImpl.java    From strimzi-kafka-operator with Apache License 2.0
/**
 * Completes the returned Future on the Vert.x event loop
 * with the topic config obtained from the Kafka AdminClient API.
 * The Future completes with a null result if a topic with the given {@code topicName} does not exist.
 */
@Override
public Future<TopicMetadata> topicMetadata(TopicName topicName) {
    LOGGER.debug("Getting metadata for topic {}", topicName);
    ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, topicName.toString());
    Future<TopicDescription> topicDescriptionFuture = mapFuture(adminClient.describeTopics(
            singleton(topicName.toString())).values().get(topicName.toString()));
    Future<Config> configFuture = mapFuture(adminClient.describeConfigs(
            singleton(resource)).values().get(resource));
    return CompositeFuture.all(topicDescriptionFuture, configFuture)
    .map(compositeFuture ->
        new TopicMetadata(compositeFuture.resultAt(0), compositeFuture.resultAt(1)))
        .recover(error -> {
            if (error instanceof UnknownTopicOrPartitionException) {
                return Future.succeededFuture(null);
            } else {
                return Future.failedFuture(error);
            }
        });
}
 
Example #6
Source File: KafkaAvailabilityTest.java    From strimzi-kafka-operator with Apache License 2.0
@Test
public void testCanRollThrowsExceptionWhenDescribeConfigsThrows(VertxTestContext context) {
    KSB ksb = new KSB()
            .addNewTopic("A", false)
                .addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "1")
                .addNewPartition(0)
                    .replicaOn(0, 1, 2)
                    .leader(0)
                    .isr(0, 1, 2)
                .endPartition()
            .endTopic()
            .addNewTopic("B", false)
                .addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "1")
                .addNewPartition(0)
                    .replicaOn(0, 1, 2)
                    .leader(1)
                    .isr(0, 1, 2)
                .endPartition()
            .endTopic()

            .addBroker(3)
            .describeConfigsResult(new ConfigResource(ConfigResource.Type.TOPIC, "A"), new UnknownTopicOrPartitionException());

    KafkaAvailability kafkaAvailability = new KafkaAvailability(ksb.ac());

    Checkpoint a = context.checkpoint(ksb.brokers.size());
    for (Integer brokerId : ksb.brokers.keySet()) {
        if (brokerId <= 2) {
            kafkaAvailability.canRoll(brokerId).onComplete(context.failing(e -> context.verify(() -> {
                assertThat(e, instanceOf(UnknownTopicOrPartitionException.class));
                a.flag();
            })));
        } else {
            kafkaAvailability.canRoll(brokerId).onComplete(context.succeeding(canRoll -> a.flag()));
        }
    }
}
 
Example #7
Source File: KafkaTopicRepository.java    From nakadi with MIT License
private static boolean isExceptionShouldLeadToReset(@Nullable final Exception exception) {
    if (null == exception) {
        return false;
    }
    return Stream.of(NotLeaderForPartitionException.class, UnknownTopicOrPartitionException.class,
            org.apache.kafka.common.errors.TimeoutException.class, NetworkException.class,
            UnknownServerException.class)
            .anyMatch(clazz -> clazz.isAssignableFrom(exception.getClass()));
}
 
Example #8
Source File: KafkaImplTest.java    From strimzi-kafka-operator with Apache License 2.0
@Test
public void testDeleteDeleteTimeout(VertxTestContext testContext) {
    Admin admin = mock(Admin.class);
    mockDescribeTopics(admin, singletonMap("test", Either.ofRight(new UnknownTopicOrPartitionException())));
    mockDeleteTopics(admin, singletonMap("test", Either.ofRight(new TimeoutException())));

    KafkaImpl impl = new KafkaImpl(admin, vertx);
    impl.deleteTopic(new TopicName("test")).onComplete(testContext.failing(error -> testContext.verify(() -> {
        assertTrue(error instanceof TimeoutException);
        testContext.completeNow();
    })));
}
 
Example #9
Source File: KafkaImplTest.java    From strimzi-kafka-operator with Apache License 2.0
@Test
public void testDelete(VertxTestContext testContext) {
    Admin admin = mock(Admin.class);
    mockDescribeTopics(admin, singletonMap("test", Either.ofRight(new UnknownTopicOrPartitionException())));
    mockDeleteTopics(admin, singletonMap("test", Either.ofLeft(null)));

    KafkaImpl impl = new KafkaImpl(admin, vertx);
    impl.deleteTopic(new TopicName("test")).onComplete(testContext.succeeding(error -> testContext.verify(() -> {
        testContext.completeNow();
    })));
}
 
Example #10
Source File: KafkaImplTest.java    From strimzi-kafka-operator with Apache License 2.0
@Test
public void testTopicMetadataDescribeConfigsNotFound(VertxTestContext testContext) {
    Admin admin = mock(Admin.class);
    mockDescribeTopics(admin, singletonMap("test", Either.ofLeft(mock(TopicDescription.class))));
    mockDescribeConfigs(admin, singletonMap(
            new ConfigResource(ConfigResource.Type.TOPIC, "test"),
            Either.ofRight(new UnknownTopicOrPartitionException())));

    KafkaImpl impl = new KafkaImpl(admin, vertx);
    impl.topicMetadata(new TopicName("test")).onComplete(testContext.succeeding(topicMetadata -> testContext.verify(() -> {
        assertNull(topicMetadata);
        testContext.completeNow();
    })));
}
 
Example #11
Source File: KafkaImplTest.java    From strimzi-kafka-operator with Apache License 2.0
@Test
public void testTopicMetadataDescribeTopicNotFound(VertxTestContext testContext) {
    Admin admin = mock(Admin.class);
    mockDescribeTopics(admin, singletonMap("test", Either.ofRight(new UnknownTopicOrPartitionException())));
    mockDescribeConfigs(admin, singletonMap(
            new ConfigResource(ConfigResource.Type.TOPIC, "test"),
            Either.ofLeft(mock(Config.class))));

    KafkaImpl impl = new KafkaImpl(admin, vertx);
    impl.topicMetadata(new TopicName("test")).onComplete(testContext.succeeding(topicMetadata -> testContext.verify(() -> {
        assertNull(topicMetadata);
        testContext.completeNow();
    })));
}
 
Example #12
Source File: KafkaImplTest.java    From strimzi-kafka-operator with Apache License 2.0
@Test
public void testTopicMetadataBothNotFound(VertxTestContext testContext) {
    Admin admin = mock(Admin.class);
    mockDescribeTopics(admin, singletonMap("test", Either.ofRight(new UnknownTopicOrPartitionException())));
    mockDescribeConfigs(admin, singletonMap(
            new ConfigResource(ConfigResource.Type.TOPIC, "test"),
            Either.ofRight(new UnknownTopicOrPartitionException())));

    KafkaImpl impl = new KafkaImpl(admin, vertx);
    impl.topicMetadata(new TopicName("test")).onComplete(testContext.succeeding(topicMetadata -> testContext.verify(() -> {
        assertNull(topicMetadata);
        testContext.completeNow();
    })));
}
 
Example #13
Source File: KafkaRoadConsumer.java    From data-highway with Apache License 2.0
@Override
public void init(long initialRequest, RebalanceListener rebalanceListener) {
  long maxPollRecords = min(max(initialRequest, minMaxPollRecords), maxMaxPollRecords);
  properties.setProperty("max.poll.records", Long.toString(maxPollRecords));
  consumer = createConsumer();
  try {
    consumer.subscribe(singletonList(topic), new KafkaRebalanceListener(rebalanceListener));
  } catch (UnknownTopicOrPartitionException e) {
    consumer.close();
    throw new RuntimeException("Unknown topic: " + topic, e);
  }
}
 
Example #14
Source File: KafkaAvailabilityTest.java    From strimzi-kafka-operator with Apache License 2.0
@Test
public void testCanRollThrowsExceptionWhenTopicDescribeThrows(VertxTestContext context) {
    KSB ksb = new KSB()
            .addNewTopic("A", false)
                .addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "1")
                .addNewPartition(0)
                    .replicaOn(0, 1, 2)
                    .leader(0)
                    .isr(0, 1, 2)
                .endPartition()
            .endTopic()
            .addNewTopic("B", false)
                .addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "1")
                .addNewPartition(0)
                    .replicaOn(0, 1, 2)
                    .leader(1)
                    .isr(0, 1, 2)
                .endPartition()
            .endTopic()

            .addBroker(3)
            .describeTopicsResult("A", new UnknownTopicOrPartitionException());

    KafkaAvailability kafkaAvailability = new KafkaAvailability(ksb.ac());

    Checkpoint a = context.checkpoint(ksb.brokers.size());
    for (Integer brokerId : ksb.brokers.keySet()) {
        kafkaAvailability.canRoll(brokerId).onComplete(context.failing(e -> context.verify(() -> {
            assertThat(e, instanceOf(UnknownTopicOrPartitionException.class));
            a.flag();
        })));
    }
}
 
Example #15
Source File: ProduceService.java    From kafka-monitor with Apache License 2.0
@Override
public synchronized void start() {
  if (_running.compareAndSet(false, true)) {
    try {
      KafkaFuture<Map<String, TopicDescription>> topicDescriptionsFuture = _adminClient.describeTopics(Collections.singleton(_topic)).all();
      Map<String, TopicDescription> topicDescriptions = topicDescriptionsFuture.get();
      int partitionNum = topicDescriptions.get(_topic).partitions().size();
      initializeStateForPartitions(partitionNum);
      _handleNewPartitionsExecutor.scheduleWithFixedDelay(new NewPartitionHandler(), 1, 30, TimeUnit.SECONDS);
      LOG.info("{}/ProduceService started", _name);
    } catch (InterruptedException | UnknownTopicOrPartitionException | ExecutionException e) {
      LOG.error("Exception occurred while starting produce service for topic: {}", _topic, e);
    }
  }
}
 
Example #16
Source File: AutoCreateTopicDisabledTests.java    From spring-cloud-stream-binder-kafka with Apache License 2.0
@Test
public void testAutoCreateTopicDisabledFailsOnProducerIfTopicNonExistentOnBroker()
		throws Throwable {

	KafkaProperties kafkaProperties = new TestKafkaProperties();
	kafkaProperties.setBootstrapServers(Collections
			.singletonList(embeddedKafka.getEmbeddedKafka().getBrokersAsString()));

	KafkaBinderConfigurationProperties configurationProperties = new KafkaBinderConfigurationProperties(
			kafkaProperties);
	// disable auto create topic on the binder.
	configurationProperties.setAutoCreateTopics(false);
	// reduce the wait time on the producer blocking operations.
	configurationProperties.getConfiguration().put("max.block.ms", "3000");

	KafkaTopicProvisioner provisioningProvider = new KafkaTopicProvisioner(
			configurationProperties, kafkaProperties);
	SimpleRetryPolicy simpleRetryPolicy = new SimpleRetryPolicy(1);
	final RetryTemplate metadataRetryOperations = new RetryTemplate();
	metadataRetryOperations.setRetryPolicy(simpleRetryPolicy);
	provisioningProvider.setMetadataRetryOperations(metadataRetryOperations);

	KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(
			configurationProperties, provisioningProvider);

	final String testTopicName = "nonExistent" + System.currentTimeMillis();

	ExtendedProducerProperties<KafkaProducerProperties> properties = new ExtendedProducerProperties<>(
			new KafkaProducerProperties());

	expectedException.expect(BinderException.class);
	expectedException.expectCause(isA(UnknownTopicOrPartitionException.class));

	binder.bindProducer(testTopicName, new DirectChannel(), properties);

}
 
Example #17
Source File: AutoCreateTopicDisabledTests.java    From spring-cloud-stream-binder-kafka with Apache License 2.0
@Test
public void testAutoCreateTopicDisabledFailsOnConsumerIfTopicNonExistentOnBroker()
		throws Throwable {

	KafkaProperties kafkaProperties = new TestKafkaProperties();
	kafkaProperties.setBootstrapServers(Collections
			.singletonList(embeddedKafka.getEmbeddedKafka().getBrokersAsString()));
	KafkaBinderConfigurationProperties configurationProperties = new KafkaBinderConfigurationProperties(
			kafkaProperties);
	// disable auto create topic on the binder.
	configurationProperties.setAutoCreateTopics(false);

	KafkaTopicProvisioner provisioningProvider = new KafkaTopicProvisioner(
			configurationProperties, kafkaProperties);
	provisioningProvider.setMetadataRetryOperations(new RetryTemplate());

	KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(
			configurationProperties, provisioningProvider);

	final String testTopicName = "nonExistent" + System.currentTimeMillis();

	ExtendedConsumerProperties<KafkaConsumerProperties> properties = new ExtendedConsumerProperties<>(
			new KafkaConsumerProperties());

	expectedException.expect(BinderException.class);
	expectedException.expectCause(isA(UnknownTopicOrPartitionException.class));
	binder.createConsumerEndpoint(() -> testTopicName, "group", properties);
}
 
Example #18
Source File: TopicEnsure.java    From common-docker with Apache License 2.0
public boolean topicExists(TopicSpec spec, Integer timeOut) throws Exception {
  try {
    DescribeTopicsResult topicDescribeResult = adminClient.describeTopics(
        Collections.singletonList(spec.name()), new DescribeTopicsOptions().timeoutMs(timeOut)
    );
    topicDescribeResult.all().get().get(spec.name());
  } catch (ExecutionException e) {
    if (e.getCause() instanceof UnknownTopicOrPartitionException) {
      return false;
    } else {
      throw e;
    }
  }
  return true;
}
 
Example #19
Source File: KafkaTopicClientImplTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
@SuppressWarnings("unchecked")
private static DescribeTopicsResult describeTopicReturningUnknownPartitionException() {
  DescribeTopicsResult describeTopicsResult = niceMock(DescribeTopicsResult.class);
  expect(describeTopicsResult.all())
      .andReturn(failedFuture(new UnknownTopicOrPartitionException("Topic doesn't exist")));
  replay(describeTopicsResult);
  return describeTopicsResult;
}
 
Example #20
Source File: NotificationEventListenerKafkaIntegrationTest.java    From stream-registry with Apache License 2.0
private Optional<TopicDescription> obtainTopicDescription(AdminClient client, String topic) throws ExecutionException, InterruptedException {
  try {
    log.info("Verifying existence of topic {}", topic);

    return Optional.ofNullable(client.describeTopics(Collections.singleton(topic)).all().get().get(topic));
  } catch (ExecutionException exception) {
    if (exception.getCause() != null && exception.getCause() instanceof UnknownTopicOrPartitionException) {
      return Optional.empty();
    } else throw exception;
  }
}
 
Example #21
Source File: KafkaSetupHandler.java    From stream-registry with Apache License 2.0
private Optional<TopicDescription> obtainTopicDescription(AdminClient client, String topic) throws ExecutionException, InterruptedException {
  try {
    log.info("Verifying existence of topic {}", topic);

    return Optional.ofNullable(client.describeTopics(Collections.singleton(topic)).all().get().get(topic));
  } catch (ExecutionException exception) {
    if (exception.getCause() != null && exception.getCause() instanceof UnknownTopicOrPartitionException) {
      return Optional.empty();
    } else {
      throw exception;
    }
  }
}
 
Example #22
Source File: TopicOperatorTest.java    From strimzi-kafka-operator with Apache License 2.0
@Test
public void testOnKafkaTopicRemoved_UnknownTopicOrPartitionException(VertxTestContext context) {
    Exception deleteTopicException = new UnknownTopicOrPartitionException();
    Exception storeException = null;
    resourceRemoved(context, deleteTopicException, storeException);
}
 
Example #23
Source File: KafkaCache.java    From kcache with Apache License 2.0
private void verifyTopic(AdminClient admin) throws CacheInitializationException,
    InterruptedException, ExecutionException, TimeoutException {
    log.info("Validating topic {}", topic);

    Set<String> topics = Collections.singleton(topic);
    Map<String, TopicDescription> topicDescription;
    try {
        topicDescription = admin.describeTopics(topics).all().get(initTimeout, TimeUnit.MILLISECONDS);
    } catch (ExecutionException e) {
        if (e.getCause() instanceof UnknownTopicOrPartitionException) {
            log.warn("Could not verify existing topic.");
            return;
        } else {
            throw e;
        }
    }

    TopicDescription description = topicDescription.get(topic);
    final int numPartitions = description.partitions().size();
    if (numPartitions < desiredNumPartitions) {
        log.warn("The number of partitions for the topic "
            + topic
            + " is less than the desired value of "
            + desiredNumPartitions
            + ".");
    }

    if (description.partitions().get(0).replicas().size() < desiredReplicationFactor) {
        log.warn("The replication factor of the topic "
            + topic
            + " is less than the desired one of "
            + desiredReplicationFactor
            + ". If this is a production environment, it's crucial to add more brokers and "
            + "increase the replication factor of the topic.");
    }

    ConfigResource topicResource = new ConfigResource(ConfigResource.Type.TOPIC, topic);

    Map<ConfigResource, Config> configs =
        admin.describeConfigs(Collections.singleton(topicResource)).all()
            .get(initTimeout, TimeUnit.MILLISECONDS);
    Config topicConfigs = configs.get(topicResource);
    String retentionPolicy = topicConfigs.get(TopicConfig.CLEANUP_POLICY_CONFIG).value();
    if (!TopicConfig.CLEANUP_POLICY_COMPACT.equals(retentionPolicy)) {
        String message = "The retention policy of the topic " + topic + " is not 'compact'. "
            + "You must configure the topic to 'compact' cleanup policy to avoid Kafka "
            + "deleting your data after a week. "
            + "Refer to Kafka documentation for more details on cleanup policies.";
        if (requireCompact) {
            log.error(message);
            throw new CacheInitializationException("The retention policy of the topic " + topic
                + " is incorrect. Expected cleanup.policy to be "
                + "'compact' but it is " + retentionPolicy);
        } else {
            log.warn(message);
        }
    }
}
 
Example #24
Source File: KafkaExceptionMapperTest.java    From rest-utils with Apache License 2.0
@Test
public void testKafkaExceptions() {
  // exceptions mapped in KafkaExceptionMapper
  verifyMapperResponse(new BrokerNotAvailableException("some message"), Status.SERVICE_UNAVAILABLE,
      BROKER_NOT_AVAILABLE_ERROR_CODE);

  verifyMapperResponse(new InvalidReplicationFactorException("some message"), Status.BAD_REQUEST,
      KAFKA_BAD_REQUEST_ERROR_CODE);
  verifyMapperResponse(new SecurityDisabledException("some message"), Status.BAD_REQUEST,
      KAFKA_BAD_REQUEST_ERROR_CODE);
  verifyMapperResponse(new UnsupportedVersionException("some message"), Status.BAD_REQUEST,
      KAFKA_BAD_REQUEST_ERROR_CODE);
  verifyMapperResponse(new InvalidPartitionsException("some message"), Status.BAD_REQUEST,
      KAFKA_BAD_REQUEST_ERROR_CODE);
  verifyMapperResponse(new InvalidRequestException("some message"), Status.BAD_REQUEST,
      KAFKA_BAD_REQUEST_ERROR_CODE);
  verifyMapperResponse(new UnknownServerException("some message"), Status.BAD_REQUEST,
      KAFKA_BAD_REQUEST_ERROR_CODE);
  verifyMapperResponse(new UnknownTopicOrPartitionException("some message"), Status.NOT_FOUND,
      KAFKA_UNKNOWN_TOPIC_PARTITION_CODE);
  verifyMapperResponse(new PolicyViolationException("some message"), Status.BAD_REQUEST,
      KAFKA_BAD_REQUEST_ERROR_CODE);
  verifyMapperResponse(new TopicExistsException("some message"), Status.BAD_REQUEST,
      KAFKA_BAD_REQUEST_ERROR_CODE);
  verifyMapperResponse(new InvalidConfigurationException("some message"), Status.BAD_REQUEST,
      KAFKA_BAD_REQUEST_ERROR_CODE);

  // test a couple of retriable exceptions
  verifyMapperResponse(new NotCoordinatorException("some message"), Status.INTERNAL_SERVER_ERROR,
      KAFKA_RETRIABLE_ERROR_ERROR_CODE);
  verifyMapperResponse(new NotEnoughReplicasException("some message"), Status.INTERNAL_SERVER_ERROR,
      KAFKA_RETRIABLE_ERROR_ERROR_CODE);

  // test a couple of other KafkaException subtypes
  verifyMapperResponse(new CommitFailedException(), Status.INTERNAL_SERVER_ERROR,
      KAFKA_ERROR_ERROR_CODE);
  verifyMapperResponse(new ConcurrentTransactionsException("some message"), Status.INTERNAL_SERVER_ERROR,
      KAFKA_ERROR_ERROR_CODE);

  // test a few general exceptions
  verifyMapperResponse(new NullPointerException("some message"), Status.INTERNAL_SERVER_ERROR,
      Status.INTERNAL_SERVER_ERROR.getStatusCode());
  verifyMapperResponse(new IllegalArgumentException("some message"), Status.INTERNAL_SERVER_ERROR,
      Status.INTERNAL_SERVER_ERROR.getStatusCode());
}