Java Code Examples for kafka.javaapi.consumer.ConsumerConnector#shutdown()

The following examples show how to use kafka.javaapi.consumer.ConsumerConnector#shutdown(). They are taken from open-source projects; the source file, project, and license are noted above each example.
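ConsumerConnector is the old high-level, ZooKeeper-based consumer API from Kafka 0.8.x; shutdown() stops its fetcher threads and releases its ZooKeeper connection, which is why the examples below typically call commitOffsets() first and guard the call with a finally block. As orientation, here is a minimal sketch of the usual create / consume / commit / shutdown lifecycle. It is not taken from any of the projects listed below; the ZooKeeper address, group id, topic name, and timeout are placeholder values.

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.ConsumerTimeoutException;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

public class ConsumerShutdownSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("zookeeper.connect", "localhost:2181");  // placeholder ZooKeeper address
        props.put("group.id", "example-group");            // placeholder group id
        props.put("consumer.timeout.ms", "5000");          // make hasNext() give up instead of blocking forever

        ConsumerConnector connector =
                Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
        try {
            // One stream for the placeholder topic "example-topic".
            Map<String, List<KafkaStream<byte[], byte[]>>> streams =
                    connector.createMessageStreams(Collections.singletonMap("example-topic", 1));
            ConsumerIterator<byte[], byte[]> it = streams.get("example-topic").get(0).iterator();
            while (it.hasNext()) {
                System.out.println(new String(it.next().message()));
            }
        } catch (ConsumerTimeoutException expected) {
            // No more messages within the timeout; fall through to shutdown.
        } finally {
            connector.commitOffsets();  // persist consumed offsets (redundant if auto-commit is enabled)
            connector.shutdown();       // stop fetcher threads and disconnect from ZooKeeper
        }
    }
}

Calling shutdown() exactly once per connector, even on the error path, avoids leaking the ZooKeeper session and the background fetcher threads; committing first keeps the group's offsets current so a restarted consumer does not reprocess messages.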
Example 1
Source File: Kafka08Fetcher.java    From indexr with Apache License 2.0
@Override
public synchronized void close() throws IOException {
    logger.debug("Stop kafka fetcher. [topic: {}]", topics);
    ConsumerConnector connector = this.connector;
    this.connector = null;
    if (connector != null) {
        connector.commitOffsets();
        connector.shutdown();
    }

    IOUtil.closeQuietly(eventItr);
    // Some events may still exist in the buffer; try to save them.
    List<byte[]> remaining = new ArrayList<>();
    try {
        while (eventItr.hasNext()) {
            remaining.add(eventItr.next());
        }
    } catch (Exception e) {
        // Ignore
    }
    eventItr = null;
    if (!remaining.isEmpty()) {
        this.remaining = remaining;
    }
}
 
Example 2
Source File: KafkaDistributed.java    From jlogstash-input-plugin with Apache License 2.0
@Override
public void release() {
	try {
		for(ConsumerConnector consumer : consumerConnMap.values()){
			consumer.commitOffsets(true);
			consumer.shutdown();
		}
		for(ExecutorService executor : executorMap.values()){
			executor.shutdownNow();
		}

		if(scheduleExecutor != null){
			scheduleExecutor.shutdownNow();
		}

		this.zkDistributed.realse();
	} catch (Exception e) {
		// TODO Auto-generated catch block
		logger.error(ExceptionUtil.getErrorMessage(e));
	}
}
 
Example 3
Source File: KafkaDistributed.java    From jlogstash-input-plugin with Apache License 2.0
public void reconnConsumer(String topicName){
		
		// Stop the connector for this topic
		ConsumerConnector consumerConn = consumerConnMap.get(topicName);
		consumerConn.commitOffsets(true);
		consumerConn.shutdown();
		consumerConnMap.remove(topicName);
		
		// Stop the stream-consuming threads for this topic
		ExecutorService es = executorMap.get(topicName);
		es.shutdownNow();
		executorMap.remove(topicName);

		Properties prop = geneConsumerProp();
		ConsumerConnector newConsumerConn = kafka.consumer.Consumer
				.createJavaConsumerConnector(new ConsumerConfig(prop));
		consumerConnMap.put(topicName, newConsumerConn);

		addNewConsumer(topicName, topic.get(topicName));
}
 
Example 4
Source File: Kafka.java    From jlogstash-input-plugin with Apache License 2.0
public void reconnConsumer(String topicName){
	
	// Stop the connector for this topic
	ConsumerConnector consumerConn = consumerConnMap.get(topicName);
	consumerConn.commitOffsets(true);
	consumerConn.shutdown();
	consumerConnMap.remove(topicName);
	
	// Stop the stream-consuming threads for this topic
	ExecutorService es = executorMap.get(topicName);
	es.shutdownNow();	
	executorMap.remove(topicName);
	
	Properties prop = geneConsumerProp();
	ConsumerConnector newConsumerConn = kafka.consumer.Consumer
			.createJavaConsumerConnector(new ConsumerConfig(prop));
	consumerConnMap.put(topicName, newConsumerConn);
	
	addNewConsumer(topicName, topic.get(topicName));
}
 
Example 5
Source File: Kafka.java    From jlogstash-input-plugin with Apache License 2.0
@Override
public void release() {
	
	for(ConsumerConnector consumer : consumerConnMap.values()){
		consumer.commitOffsets(true);
		consumer.shutdown();
	}
	
	for(ExecutorService executor : executorMap.values()){
		executor.shutdownNow();
	}
	
	scheduleExecutor.shutdownNow();
}
 
Example 6
Source File: HighLevelConsumerExample.java    From pulsar with Apache License 2.0
private static void consumeMessage(Arguments arguments) {

        Properties properties = new Properties();
        properties.put("zookeeper.connect", arguments.serviceUrl);
        properties.put("group.id", arguments.groupName);
        properties.put("consumer.id", "cons1");
        properties.put("auto.commit.enable", Boolean.toString(!arguments.autoCommitDisable));
        properties.put("auto.commit.interval.ms", "100");
        properties.put("queued.max.message.chunks", "100");

        ConsumerConfig conSConfig = new ConsumerConfig(properties);
        ConsumerConnector connector = Consumer.createJavaConsumerConnector(conSConfig);
        Map<String, Integer> topicCountMap = Collections.singletonMap(arguments.topicName, 2);
        Map<String, List<KafkaStream<String, Tweet>>> streams = connector.createMessageStreams(topicCountMap,
                new StringDecoder(null), new Tweet.TestDecoder());

        int count = 0;
        while (count < arguments.totalMessages || arguments.totalMessages == -1) {
            for (int i = 0; i < streams.size(); i++) {
                List<KafkaStream<String, Tweet>> kafkaStreams = streams.get(arguments.topicName);
                for (KafkaStream<String, Tweet> kafkaStream : kafkaStreams) {
                    for (MessageAndMetadata<String, Tweet> record : kafkaStream) {
                        log.info("Received tweet: {}-{}", record.message().userName, record.message().message);
                        count++;
                    }
                }
            }
        }

        connector.shutdown();

        log.info("successfully consumed message {}", count);
    }
 
Example 7
Source File: AlertKafkaPublisherTest.java    From eagle with Apache License 2.0
private static void consumeWithOutput(final List<String> outputMessages) {
    Thread t = new Thread(new Runnable() {
        @Override
        public void run() {
            Properties props = new Properties();
            props.put("group.id", "B");
            props.put("zookeeper.connect", "127.0.0.1:" + + TEST_KAFKA_ZOOKEEPER_PORT);
            props.put("zookeeper.session.timeout.ms", "4000");
            props.put("zookeeper.sync.time.ms", "2000");
            props.put("auto.commit.interval.ms", "1000");
            props.put("auto.offset.reset", "smallest");

            ConsumerConnector jcc = null;
            try {
                ConsumerConfig ccfg = new ConsumerConfig(props);
                jcc = Consumer.createJavaConsumerConnector(ccfg);
                Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
                topicCountMap.put(TEST_TOPIC_NAME, 1);
                Map<String, List<KafkaStream<byte[], byte[]>>> topicMap = jcc.createMessageStreams(topicCountMap);
                KafkaStream<byte[], byte[]> cstrm = topicMap.get(TEST_TOPIC_NAME).get(0);
                for (MessageAndMetadata<byte[], byte[]> mm : cstrm) {
                    String message = new String(mm.message());
                    outputMessages.add(message);

                    try {
                        Thread.sleep(5000);
                    } catch (InterruptedException e) {
                    }
                }
            } finally {
                if (jcc != null) {
                    jcc.shutdown();
                }
            }
        }
    });
    t.start();
}
 
Example 8
Source File: HighlevelKafkaConsumer.java    From attic-apex-malhar with Apache License 2.0
@Override
public void close()
{
  if (standardConsumer != null && standardConsumer.values() != null) {
    for (ConsumerConnector consumerConnector : standardConsumer.values()) {
      consumerConnector.shutdown();
    }
  }
  if (consumerThreadExecutor != null) {
    consumerThreadExecutor.shutdown();
  }
}
 
Example 9
Source File: IngestFromKafkaDriver.java    From geowave with Apache License 2.0
public <T> void consumeFromTopic(
    final String formatPluginName,
    final GeoWaveAvroFormatPlugin<T, ?> avroFormatPlugin,
    final KafkaIngestRunData ingestRunData,
    final List<String> queue) {

  final ConsumerConnector consumer = buildKafkaConsumer();
  if (consumer == null) {
    throw new RuntimeException(
        "Kafka consumer connector is null, unable to create message streams");
  }
  try {
    LOGGER.debug(
        "Kafka consumer setup for format ["
            + formatPluginName
            + "] against topic ["
            + formatPluginName
            + "]");
    final Map<String, Integer> topicCount = new HashMap<>();
    topicCount.put(formatPluginName, 1);

    final Map<String, List<KafkaStream<byte[], byte[]>>> consumerStreams =
        consumer.createMessageStreams(topicCount);
    final List<KafkaStream<byte[], byte[]>> streams = consumerStreams.get(formatPluginName);

    queue.remove(formatPluginName);
    consumeMessages(formatPluginName, avroFormatPlugin, ingestRunData, streams.get(0));
  } finally {
    consumer.shutdown();
  }
}
 
Example 10
Source File: MessageResource.java    From dropwizard-kafka-http with Apache License 2.0
@GET
@Timed
public Response consume(
        @QueryParam("topic") String topic,
        @QueryParam("timeout") Integer timeout
) {
    if (Strings.isNullOrEmpty(topic))
        return Response.status(400)
                .entity(new String[]{"Undefined topic"})
                .build();

    Properties props = (Properties) consumerCfg.clone();
    if (timeout != null) props.put("consumer.timeout.ms", "" + timeout);

    ConsumerConfig config = new ConsumerConfig(props);
    ConsumerConnector connector = Consumer.createJavaConsumerConnector(config);

    Map<String, Integer> streamCounts = Collections.singletonMap(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(streamCounts);
    KafkaStream<byte[], byte[]> stream = streams.get(topic).get(0);

    List<Message> messages = new ArrayList<>();
    try {
        for (MessageAndMetadata<byte[], byte[]> messageAndMetadata : stream)
            messages.add(new Message(messageAndMetadata));
    } catch (ConsumerTimeoutException ignore) {
    } finally {
        connector.commitOffsets();
        connector.shutdown();
    }

    return Response.ok(messages).build();
}
 
Example 11
Source File: NativeKafkaWithStringDecoderTest.java    From hermes with Apache License 2.0
@Test
public void testNative() throws IOException, InterruptedException, ExecutionException {
	String topic = "kafka.SimpleTextTopic";
	int msgNum = 200;
	final CountDownLatch countDown = new CountDownLatch(msgNum);

	Properties producerProps = new Properties();
	// Producer
	producerProps.put("bootstrap.servers", "");
	producerProps.put("value.serializer", StringSerializer.class.getCanonicalName());
	producerProps.put("key.serializer", StringSerializer.class.getCanonicalName());
	// Consumer
	Properties consumerProps = new Properties();
	consumerProps.put("zookeeper.connect", "");
	consumerProps.put("group.id", "GROUP_" + topic);

	final List<String> actualResult = new ArrayList<String>();
	final List<String> expectedResult = new ArrayList<String>();

	ConsumerConnector consumerConnector = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumerProps));
	Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
	topicCountMap.put(topic, 1);
	final List<KafkaStream<String, String>> streams = consumerConnector.createMessageStreams(topicCountMap,
	      new StringDecoder(null), new StringDecoder(null)).get(topic);
	for (final KafkaStream<String, String> stream : streams) {
		new Thread() {
			public void run() {
				for (MessageAndMetadata<String, String> msgAndMetadata : stream) {
					try {
						System.out.println("received: " + msgAndMetadata.message());
						actualResult.add(msgAndMetadata.message());
						countDown.countDown();
					} catch (Exception e) {
						e.printStackTrace();
					}
				}
			}
		}.start();
	}

	KafkaProducer<String, String> producer = new KafkaProducer<String, String>(producerProps);
	int i = 0;
	while (i < msgNum) {
		ProducerRecord<String, String> data = new ProducerRecord<String, String>(topic, "test-message" + i++);
		Future<RecordMetadata> send = producer.send(data);
		send.get();
		if (send.isDone()) {
			System.out.println("sending: " + data.value());
			expectedResult.add(data.value());
		}
	}

	countDown.await();

	Assert.assertArrayEquals(expectedResult.toArray(), actualResult.toArray());

	consumerConnector.shutdown();
	producer.close();
}
 
Example 12
Source File: TestKafkaSink.java    From suro with Apache License 2.0
@Test
public void testConfigBackwardCompatible() throws IOException {
    int numPartitions = 9;

    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_BACKWARD_COMPAT,
                    "--replication-factor", "2", "--partitions", Integer.toString(numPartitions)}));
    String keyTopicMap = String.format("   \"keyTopicMap\": {\n" +
            "        \"%s\": \"key\"\n" +
            "    }", TOPIC_NAME_BACKWARD_COMPAT);

    String description1 = "{\n" +
            "    \"type\": \"Kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"ack\": 1,\n" +
            "     \"compression.type\": \"snappy\",\n" +
            keyTopicMap + "\n" +
            "}";
    String description2 = "{\n" +
            "    \"type\": \"Kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"request.required.acks\": 1,\n" +
            "     \"compression.codec\": \"snappy\",\n" +
            keyTopicMap + "\n" +
            "}";

    // setup sinks, both old and new versions
    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSink.class, "Kafka"));
    jsonMapper.setInjectableValues(new InjectableValues() {
        @Override
        public Object findInjectableValue(Object valueId, DeserializationContext ctxt, BeanProperty forProperty, Object beanInstance) {
            if (valueId.equals(KafkaRetentionPartitioner.class.getName())) {
                return new KafkaRetentionPartitioner();
            } else {
                return null;
            }
        }
    });
    KafkaSink sink1 = jsonMapper.readValue(description1, new TypeReference<Sink>(){});
    KafkaSink sink2 = jsonMapper.readValue(description2, new TypeReference<Sink>(){});
    sink1.open();
    sink2.open();
    List<Sink> sinks = new ArrayList<Sink>();
    sinks.add(sink1);
    sinks.add(sink2);

    // setup Kafka consumer (to read back messages)
    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
            createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid"));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC_NAME_BACKWARD_COMPAT, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
            consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_BACKWARD_COMPAT).get(0);

    // Send 20 test messages, using the old and new Kafka sinks.
    // Retrieve the messages and ensure that they are identical and sent to the same partition.
    Random rand = new Random();
    int messageCount = 20;
    for (int i = 0; i < messageCount; ++i) {
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
                .put("key", new Long( rand.nextLong() ) )
                .put("value", "message:" + i).build();

        // send message to both sinks
        for( Sink sink : sinks ){
            sink.writeTo(new DefaultMessageContainer(
                    new Message(TOPIC_NAME_BACKWARD_COMPAT, jsonMapper.writeValueAsBytes(msgMap)),
                    jsonMapper));
        }

        // read two copies of message back from Kafka and check that partitions and data match
        MessageAndMetadata<byte[], byte[]> msgAndMeta1 = stream.iterator().next();
        MessageAndMetadata<byte[], byte[]> msgAndMeta2 = stream.iterator().next();
        System.out.println( "iteration: "+i+" partition1: "+msgAndMeta1.partition() );
        System.out.println( "iteration: "+i+" partition2: "+msgAndMeta2.partition() );
        assertEquals(msgAndMeta1.partition(), msgAndMeta2.partition());
        String msg1Str = new String( msgAndMeta1.message() );
        String msg2Str = new String( msgAndMeta2.message() );
        System.out.println( "iteration: "+i+" message1: "+msg1Str );
        System.out.println( "iteration: "+i+" message2: "+msg2Str );
        assertEquals(msg1Str, msg2Str);
    }

    // close sinks
    sink1.close();
    sink2.close();
    // close consumer
    try {
        stream.iterator().next();
        fail(); // there should be no data left to consume
    } catch (ConsumerTimeoutException e) {
        //this is expected
        consumer.shutdown();
    }
}
 
Example 13
Source File: TestKafkaSinkV2.java    From suro with Apache License 2.0
@Test
public void testMultithread() throws IOException {
    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_MULTITHREAD,
                    "--replication-factor", "2", "--partitions", "1"}));
    String description = "{\n" +
            "    \"type\": \"kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"request.required.acks\": 1,\n" +
            "    \"batchSize\": 10,\n" +
            "    \"jobQueueSize\": 3\n" +
            "}";

    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafka"));
    KafkaSinkV2 sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
    sink.open();
    int msgCount = 10000;
    for (int i = 0; i < msgCount; ++i) {
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
                .put("key", Integer.toString(i))
                .put("value", "message:" + i).build();
        sink.writeTo(new DefaultMessageContainer(
                new Message(TOPIC_NAME_MULTITHREAD, jsonMapper.writeValueAsBytes(msgMap)),
                jsonMapper));
    }
    assertTrue(sink.getNumOfPendingMessages() > 0);
    sink.close();
    System.out.println(sink.getStat());
    assertEquals(sink.getNumOfPendingMessages(), 0);

    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
            createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid_multhread"));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC_NAME_MULTITHREAD, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_MULTITHREAD).get(0);
    for (int i = 0; i < msgCount; ++i) {
        stream.iterator().next();
    }

    try {
        stream.iterator().next();
        fail();
    } catch (ConsumerTimeoutException e) {
        //this is expected
        consumer.shutdown();
    }
}
 
Example 14
Source File: TestKafkaSinkV2.java    From suro with Apache License 2.0
/** Tests backward compatibility with the old Kafka sink. */
@Test
public void testBackwardCompatability() throws Exception {
    int numPartitions = 9;

    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_BACKWARD_COMPAT,
                    "--replication-factor", "2", "--partitions", Integer.toString(numPartitions)}));
    String keyTopicMap = String.format("   \"keyTopicMap\": {\n" +
            "        \"%s\": \"key\"\n" +
            "    }", TOPIC_NAME_BACKWARD_COMPAT);

    String description1 = "{\n" +
        "    \"type\": \"kafkaV1\",\n" +
        "    \"client.id\": \"kafkasink\",\n" +
        "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
        "    \"ack\": 1,\n" +
        keyTopicMap + "\n" +
        "}";
    String description2 = "{\n" +
        "    \"type\": \"kafkaV2\",\n" +
        "    \"client.id\": \"kafkasink\",\n" +
        "    \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
        "    \"request.required.acks\": 1,\n" +
        keyTopicMap + "\n" +
        "}";

    // setup sinks, both old and new versions
    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSink.class, "kafkaV1"));
    jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafkaV2"));
    jsonMapper.setInjectableValues(new InjectableValues() {
        @Override
        public Object findInjectableValue(Object valueId, DeserializationContext ctxt, BeanProperty forProperty, Object beanInstance) {
            if (valueId.equals(KafkaRetentionPartitioner.class.getName())) {
                return new KafkaRetentionPartitioner();
            } else {
                return null;
            }
        }
    });
    KafkaSink sinkV1 = jsonMapper.readValue(description1, new TypeReference<Sink>(){});
    KafkaSinkV2 sinkV2 = jsonMapper.readValue(description2, new TypeReference<Sink>(){});
    sinkV1.open();
    sinkV2.open();
    List<Sink> sinks = new ArrayList<Sink>();
    sinks.add(sinkV1);
    sinks.add(sinkV2);

    // setup Kafka consumer (to read back messages)
    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
        createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid"));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC_NAME_BACKWARD_COMPAT, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = 
                                            consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_BACKWARD_COMPAT).get(0);

    // Send 20 test messages, using the old and new Kafka sinks.
    // Retrieve the messages and ensure that they are identical and sent to the same partition.
    Random rand = new Random();
    int messageCount = 20;
    for (int i = 0; i < messageCount; ++i) {
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
                .put("key", new Long( rand.nextLong() ) )
                .put("value", "message:" + i).build();

        // send message to both sinks
        for( Sink sink : sinks ){
          sink.writeTo(new DefaultMessageContainer(
                new Message(TOPIC_NAME_BACKWARD_COMPAT, jsonMapper.writeValueAsBytes(msgMap)),
                jsonMapper));
        }

        // read two copies of message back from Kafka and check that partitions and data match
        MessageAndMetadata<byte[], byte[]> msgAndMeta1 = stream.iterator().next();
        MessageAndMetadata<byte[], byte[]> msgAndMeta2 = stream.iterator().next();
        System.out.println( "iteration: "+i+" partition1: "+msgAndMeta1.partition() );
        System.out.println( "iteration: "+i+" partition2: "+msgAndMeta2.partition() );
        assertEquals(msgAndMeta1.partition(), msgAndMeta2.partition());
        String msg1Str = new String( msgAndMeta1.message() );
        String msg2Str = new String( msgAndMeta2.message() );
        System.out.println( "iteration: "+i+" message1: "+msg1Str );
        System.out.println( "iteration: "+i+" message2: "+msg2Str );
        assertEquals(msg1Str, msg2Str);
    }

    // close sinks
    sinkV1.close();
    sinkV2.close();
    // close consumer
    try {
        stream.iterator().next();
        fail(); // there should be no data left to consume
    } catch (ConsumerTimeoutException e) {
        //this is expected
        consumer.shutdown();
    }
}