Java Code Examples for org.apache.kafka.clients.producer.KafkaProducer#abortTransaction()

The following examples show how to use org.apache.kafka.clients.producer.KafkaProducer#abortTransaction(). They are taken from open source projects; the source file, project, and license are noted above each example.
Example 1
Source File: TransactionOnlySend.java    From kafka_book_demo with Apache License 2.0
public static void main(String[] args) {
    Properties properties = new Properties();
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
            StringSerializer.class.getName());
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
            StringSerializer.class.getName());
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
    properties.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, transactionId);

    KafkaProducer<String, String> producer = new KafkaProducer<>(properties);

    producer.initTransactions();
    producer.beginTransaction();

    try {
        // business logic: create the ProducerRecords
        ProducerRecord<String, String> record1 = new ProducerRecord<>(topic, "msg1");
        producer.send(record1);
        ProducerRecord<String, String> record2 = new ProducerRecord<>(topic, "msg2");
        producer.send(record2);
        ProducerRecord<String, String> record3 = new ProducerRecord<>(topic, "msg3");
        producer.send(record3);
        // some other processing logic
        producer.commitTransaction();
    } catch (ProducerFencedException e) {
        producer.abortTransaction();
    }
    producer.close();
}
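
Note: brokerList, transactionId, and topic are static fields defined elsewhere in the source class. The example aborts on ProducerFencedException, but the KafkaProducer javadoc treats fencing as fatal: the recommended pattern is to close the producer on ProducerFencedException (as well as on OutOfOrderSequenceException and AuthorizationException, both from org.apache.kafka.common.errors) and to abort only on other KafkaExceptions. A minimal sketch of that pattern, not part of the original source:

try {
    producer.beginTransaction();
    producer.send(new ProducerRecord<>(topic, "msg1"));
    producer.commitTransaction();
} catch (ProducerFencedException | OutOfOrderSequenceException | AuthorizationException e) {
    // fatal: another producer with the same transactional.id has started,
    // or the producer is in an unrecoverable state; close and recreate it
    producer.close();
} catch (KafkaException e) {
    // abortable error: roll back the transaction; the producer can be reused
    producer.abortTransaction();
}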
 
Example 2
Source File: TransactionConsumeTransformProduce.java    From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) {
    // initialize the consumer and producer
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(getConsumerProperties());
    consumer.subscribe(Collections.singletonList("topic-source"));
    KafkaProducer<String, String> producer = new KafkaProducer<>(getProducerProperties());
    // initialize transactions
    producer.initTransactions();
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
        if (!records.isEmpty()) {
            Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
            // begin the transaction
            producer.beginTransaction();
            try {
                for (TopicPartition partition : records.partitions()) {
                    List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
                    for (ConsumerRecord<String, String> record : partitionRecords) {
                        //do some logical processing.
                        ProducerRecord<String, String> producerRecord =
                                new ProducerRecord<>("topic-sink", record.key(), record.value());
                        // consume-transform-produce
                        producer.send(producerRecord);
                    }
                    long lastConsumedOffset =
                            partitionRecords.get(partitionRecords.size() - 1).offset();
                    offsets.put(partition, new OffsetAndMetadata(lastConsumedOffset + 1));
                }
                // commit the consumed offsets within the transaction
                producer.sendOffsetsToTransaction(offsets, "groupId");
                // commit the transaction
                producer.commitTransaction();
            } catch (ProducerFencedException e) {
                // log the exception
                // abort the transaction
                producer.abortTransaction();
            }
        }
    }
}
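
getConsumerProperties() and getProducerProperties() are not shown in this snippet. A minimal sketch of what a transactional consume-transform-produce setup typically requires, with placeholder broker address, group id, and transactional id; the essential settings are disabling consumer auto-commit (offsets are committed through the producer), reading only committed messages, and giving the producer a stable transactional.id:

private static Properties getConsumerProperties() {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "groupId");
    // offsets are committed via producer.sendOffsetsToTransaction(), not auto-commit
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    // only read messages from committed transactions
    props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    return props;
}

private static Properties getProducerProperties() {
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    // a stable transactional.id enables transactions and fences zombie producers
    props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionId");
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    return props;
}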
 
Example 3
Source File: TransactionalMessageProducer.java    From tutorials with MIT License
public static void main(String[] args) {
    KafkaProducer<String, String> producer = createKafkaProducer();

    producer.initTransactions();

    try {
        producer.beginTransaction();

        // send both messages within a single transaction
        Stream.of(DATA_MESSAGE_1, DATA_MESSAGE_2).forEach(s -> producer.send(
                new ProducerRecord<>("input", null, s)));

        producer.commitTransaction();
    } catch (KafkaException e) {
        // the transaction failed; roll back everything sent so far
        producer.abortTransaction();
    }
}
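
createKafkaProducer(), DATA_MESSAGE_1, and DATA_MESSAGE_2 are defined elsewhere in the tutorial's source. A minimal sketch of the factory method, assuming a local broker and a placeholder transactional id:

private static KafkaProducer<String, String> createKafkaProducer() {
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    // idempotence is required for (and implied by) transactional producers
    props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");
    props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "prod-1");  // placeholder id
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    return new KafkaProducer<>(props);
}

Note that the example never closes the producer; in production code it should be closed when done, for example with try-with-resources.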
 
Example 4
Source File: TransactionalWordCount.java    From tutorials with MIT License
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = createKafkaConsumer();
    KafkaProducer<String, String> producer = createKafkaProducer();

    producer.initTransactions();

    try {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(ofSeconds(60));

            // count the words in this batch
            Map<String, Integer> wordCountMap = records.records(new TopicPartition(INPUT_TOPIC, 0))
                    .stream()
                    .flatMap(record -> Stream.of(record.value().split(" ")))
                    .map(word -> Tuple.of(word, 1))
                    .collect(Collectors.toMap(Tuple::getKey, Tuple::getValue, Integer::sum));

            producer.beginTransaction();

            // send the per-word counts to the output topic
            wordCountMap.forEach((key, value) ->
                    producer.send(new ProducerRecord<>(OUTPUT_TOPIC, key, value.toString())));

            // collect the offsets to commit atomically with the sends
            Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = new HashMap<>();
            for (TopicPartition partition : records.partitions()) {
                List<ConsumerRecord<String, String>> partitionedRecords = records.records(partition);
                long offset = partitionedRecords.get(partitionedRecords.size() - 1).offset();
                offsetsToCommit.put(partition, new OffsetAndMetadata(offset + 1));
            }

            producer.sendOffsetsToTransaction(offsetsToCommit, CONSUMER_GROUP_ID);
            producer.commitTransaction();
        }
    } catch (KafkaException e) {
        // the transaction failed; roll back the sends and the offset commit
        producer.abortTransaction();
    }
}
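
INPUT_TOPIC, OUTPUT_TOPIC, CONSUMER_GROUP_ID, and the createKafkaConsumer()/createKafkaProducer() factories are defined elsewhere in the tutorial's source. Tuple is a small helper class from the tutorial, not part of the Kafka client API; a minimal sketch matching the calls above (Tuple.of, getKey, getValue):

public class Tuple {
    private final String key;
    private final Integer value;

    private Tuple(String key, Integer value) {
        this.key = key;
        this.value = value;
    }

    public static Tuple of(String key, Integer value) {
        return new Tuple(key, value);
    }

    public String getKey() { return key; }
    public Integer getValue() { return value; }
}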