import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class TestProducer {
    /**
     * Minimal Kafka producer demo: builds a configuration, creates a
     * {@link KafkaProducer} and sends a single String record. The settings
     * mirror the standard KafkaProducer Javadoc example.
     */
    public static void main(String[] args) {
         Properties props = new Properties();
         // NOTE(review): bootstrap.servers is empty — it must list at least one
         // broker (e.g. "host1:9092") or the producer cannot connect.
         props.put("bootstrap.servers", "");
         //The "all" setting we have specified will result in blocking on the full commit of the record, the slowest but most durable setting.
         props.put("acks", "all");
         // If a request fails the producer can automatically retry, but since
         // retries is set to 0 here it will not retry.
         props.put("retries", 0);

         //The producer maintains buffers of unsent records for each partition. 
         props.put("batch.size", 16384);
         // NOTE(review): empty config key — in the canonical Kafka example this
         // line is props.put("linger.ms", 1); confirm the intended key.
         props.put("", 1);
         props.put("buffer.memory", 33554432);
         //The key.serializer and value.serializer instruct how to turn the key and value objects the user provides with their ProducerRecord into bytes.
         props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
         props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

         Producer<String, String> producer = new KafkaProducer<String, String>(props);
         long startTime=System.currentTimeMillis();
         // Asynchronously sends one record to topic "test1", partition 1, with an
         // explicit timestamp, key "a" and value "b". The returned Future is
         // ignored, so send failures go unnoticed here.
         producer.send(new ProducerRecord<String, String>("test1",1,startTime,"a","b"));
         // NOTE(review): the producer is never closed — buffered records may be
         // lost and its background threads leak; call producer.close() on exit.
         // close();//Close this producer.
         //   close(long timeout, TimeUnit timeUnit); //This method waits up to timeout for the producer to complete the sending of all incomplete requests.
         //  flush() ; // forces all buffered records to be sent immediately
//         for(int i = 0; i < 100; i++){
//        	// writes round-robin across 4 partitions via i % 4
//             producer.send(new ProducerRecord<String, String>("foo",i%4, Integer.toString(i), Integer.toString(i)));
//             producer.close();
//         }