Java Code Examples for kafka.javaapi.producer.Producer

The following are top-voted examples showing how to use kafka.javaapi.producer.Producer, the producer class from the legacy Kafka 0.8.x client API. The examples are extracted from open source projects and ranked by user votes.
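Before the examples, here is a minimal, self-contained sketch of the typical lifecycle with this legacy API: configure, construct, send, close. The broker address and topic name below are placeholders, not values taken from any of the projects listed.

import java.util.Properties;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

public class MinimalProducerSketch {
    public static void main(String[] args) {
        // metadata.broker.list and serializer.class are the two essential settings
        Properties props = new Properties();
        props.put("metadata.broker.list", "localhost:9092");
        props.put("serializer.class", "kafka.serializer.StringEncoder");

        Producer<String, String> producer = new Producer<String, String>(new ProducerConfig(props));
        // KeyedMessage carries the topic, an optional key, and the payload
        producer.send(new KeyedMessage<String, String>("test-topic", "key-1", "hello kafka"));
        producer.close(); // flush and release network resources
    }
}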
Example 1
Project: Transwarp-Sample-Code   File: kafkaProducer.java   (7 votes)
/**
 * Read the configuration file, create a thread pool, and run the tasks.
 */
public void go() {
    Constant constant = new Constant();
    kafkaProperties kafkaProperties = new kafkaProperties();
    ProducerConfig config = new ProducerConfig(kafkaProperties.properties());

    ExecutorService executorService = Executors.newFixedThreadPool(Integer.parseInt(constant.THREAD_POOL_SIZE));

    String topic = constant.TOPIC_NAME;
    Task[] tasks = new Task[Integer.parseInt(constant.THREAD_NUM)];
    String[] folders = constant.FILE_FOLDERS.split(";");
    int batchSize = Integer.parseInt(constant.BATCH_SIZE);
    CopyOnWriteArrayList<String> fileList = addFiles(folders);

    for (int i = 0; i < tasks.length; ++i) {
        tasks[i] = new Task(i, topic, new Producer<String, String>(config), fileList, batchSize);
    }

    for (Task task : tasks) {
        executorService.execute(task);
    }
    executorService.shutdown();
}
 
Example 2
Project: storm-topology-examples   File: KafkaProducerTest.java   (7 votes)
public static void produceMessages(String brokerList, String topic, int msgCount, String msgPayload) throws JSONException, IOException {
    
    // Set producer properties and create the Producer
    ProducerConfig config = new ProducerConfig(setKafkaBrokerProps(brokerList));
    Producer<String, String> producer = new Producer<String, String>(config);

    LOG.info("KAFKA: Preparing To Send " + msgCount + " Events.");
    for (int i=0; i<msgCount; i++){

        // Create the JSON object
        JSONObject obj = new JSONObject();
        obj.put("id", String.valueOf(i));
        obj.put("msg", msgPayload);
        obj.put("dt", GenerateRandomDay.genRandomDay());
        String payload = obj.toString();

        KeyedMessage<String, String> data = new KeyedMessage<String, String>(topic, null, payload);
        producer.send(data);
        LOG.info("Sent message: " + data.toString());
    }
    LOG.info("KAFKA: Sent " + msgCount + " Events.");

    // Stop the producer
    producer.close();
}
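The setKafkaBrokerProps helper is not shown in this excerpt. A plausible minimal reconstruction, assuming it only wires up the broker list and a string serializer (hypothetical, not the project's actual code):

// Hypothetical reconstruction of the helper referenced above
private static Properties setKafkaBrokerProps(String brokerList) {
    Properties props = new Properties();
    props.put("metadata.broker.list", brokerList); // e.g. "host1:9092,host2:9092"
    props.put("serializer.class", "kafka.serializer.StringEncoder");
    props.put("request.required.acks", "1"); // wait for the leader's acknowledgement
    return props;
}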
 
Example 3
Project: DataProcessPlatformKafkaJavaSDK   File: KafkaProducer.java   (6 votes)
public KafkaProducer(){
    Properties props = new Properties();
    //Kafka broker list (host:port)
    props.put("metadata.broker.list", "192.168.1.116:9092");

    //Serializer class for message values
    props.put("serializer.class", "kafka.serializer.StringEncoder");
    //Serializer class for message keys
    props.put("key.serializer.class", "kafka.serializer.StringEncoder");

    //request.required.acks:
    // 0: the producer never waits for an acknowledgement from the broker. Lowest latency, weakest durability (some data will be lost when a server fails).
    // 1: the producer gets an acknowledgement after the leader replica has received the data. Better durability; only messages written to a now-dead leader but not yet replicated are lost.
    //-1: the producer gets an acknowledgement after all in-sync replicas have received the data. Best durability; no messages are lost as long as at least one in-sync replica remains.
    props.put("request.required.acks","-1");

    producer = new Producer<String, String>(new ProducerConfig(props));
}
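The constructor above only builds the producer. With request.required.acks set to -1, each send blocks until all in-sync replicas have acknowledged the write. A usage sketch (topic, key, and payload are placeholders):

// Hypothetical usage of the producer configured above
producer.send(new KeyedMessage<String, String>("demo-topic", "key-1", "payload"));
producer.close();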
 
Example 4
Project: MQTTKafkaBridge   File: Bridge.java   (6 votes)
private void connect(String serverURI, String clientId, String zkConnect) throws MqttException {
    mqtt = new MqttAsyncClient(serverURI, clientId);
    mqtt.setCallback(this);
    IMqttToken token = mqtt.connect();
    Properties props = new Properties();

    //Updated based on Kafka v0.8.1.1
    props.put("metadata.broker.list", "localhost:9092");
    props.put("serializer.class", "kafka.serializer.StringEncoder");
    props.put("partitioner.class", "example.producer.SimplePartitioner");
    props.put("request.required.acks", "1");

    ProducerConfig config = new ProducerConfig(props);
    kafkaProducer = new Producer<String, String>(config);
    token.waitForCompletion();
    logger.info("Connected to MQTT and Kafka");
}
 
Example 5
Project: jaf-examples   File: ProducerDemo.java   (6 votes)
public static void sendMulitThread() {
	Producer<String, String> producer = buildSyncProducer();
	Random random = new Random();
	List<Thread> produceThreads = IntStream.range(0, 20).mapToObj(i -> {
		return new Thread(() -> {
			final String threadName = Thread.currentThread().getName();
			for(int j = 0; j < 10000; j++) {
				sendMessage(producer, Constants.TOPIC_NAME, random.nextInt(10000) + "", threadName + " message " + j);
			}
		});
	}).peek(Thread::start).collect(toList());
	
	produceThreads.stream().forEach(t -> {
		try {
			t.join();
		} catch (Exception e) {
			e.printStackTrace();
		}
	});
	
	producer.close();
}
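The sendMessage helper is not included in the excerpt; given buildSyncProducer() shown later (Example 36), it is presumably a thin wrapper around send. A hypothetical sketch:

// Hypothetical reconstruction of the sendMessage helper used above
private static void sendMessage(Producer<String, String> producer,
                                String topic, String key, String message) {
    producer.send(new KeyedMessage<String, String>(topic, key, message));
}

Note that all 20 threads share one Producer instance; the old producer API is thread-safe, so this is the intended usage.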
 
Example 6
Project: jaf-examples   File: ProducerDemo.java   (6 votes)
private static Producer<String, String> buildAsyncProducer() {
	Properties props = new Properties();
	props.put("metadata.broker.list", Constants.BROKER_LIST);
	props.put("serializer.class", StringEncoder.class.getName());
	props.put("partitioner.class", HashPartitioner.class.getName());
	props.put("request.required.acks", "-1");
	props.put("producer.type", "async");  // 使用异步模式
	props.put("batch.num.messages", "3");  // 注意这里会3个消息一起提交
	props.put("queue.buffer.max.ms", "10000000");
	props.put("queue.buffering.max.messages", "1000000");
	props.put("queue.enqueue.timeout.ms", "20000000");
	
	ProducerConfig config = new ProducerConfig(props);
	Producer<String, String> produce = new Producer<>(config);
	return produce;
}
 
Example 7
Project: rb-bi   File: KafkaBolt.java   (6 votes)
@Override
public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
    //for backward compatibility.
    if(mapper == null) {
        this.mapper = new FieldNameBasedTupleToKafkaMapper<K,V>();
    }

    //for backward compatibility.
    if(topicSelector == null) {
        this.topicSelector = new DefaultTopicSelector((String) stormConf.get(TOPIC));
    }

    Map configMap = (Map) stormConf.get(KAFKA_BROKER_PROPERTIES);
    Properties properties = new Properties();
    properties.putAll(configMap);
    ProducerConfig config = new ProducerConfig(properties);
    producer = new Producer<K, V>(config);
    this.collector = collector;
}
 
Example 8
Project: whatsmars   File: KafkaProducer.java   (6 votes)
public static void main(String[] args) {
    Properties props = new Properties();
    props.put("metadata.broker.list", "127.0.0.1:9092");
    props.put("serializer.class", "kafka.serializer.StringEncoder");
    props.put("key.serializer.class", "kafka.serializer.StringEncoder");
    props.put("request.required.acks","-1");

    Producer<String, String> producer = new Producer<String, String>(new ProducerConfig(props));

    int messageNo = 100;
    final int COUNT = 1000;
    while (messageNo < COUNT) {
        String key = String.valueOf(messageNo);
        String data = "hello kafka message " + key;
        producer.send(new KeyedMessage<String, String>("TestTopic", key, data));
        System.out.println(data);
        messageNo++;
    }
    producer.close(); // flush and release resources before exiting
}
 
Example 9
Project: spark-streaming-direct-kafka   File: ProcessStreamingData.java   (6 votes)
public void execute(JavaPairRDD<String, byte[]> inputMessage) {
    JavaPairRDD<String, byte[]> partitionedRDD;
    if (config.getLocalMode())
        partitionedRDD = inputMessage;
    else {
        // Helps scale beyond number of input partitions in kafka
        partitionedRDD = inputMessage.repartition(config.getRepartitionCount());

    }

    partitionedRDD.foreachPartition(prdd -> {
        // You can choose binary or string encoder
        Producer validProducer = ConnectionManager.getKafkaSingletonConnectionWithBinaryEncoder(config);
        prdd.forEachRemaining(records -> {
            byte[] msg = records._2();
            try {
                // TODO: Add your logic here to process data
                // As default we are just publishing back to another kafka topic
                logger.info("Processing event=" + new String(msg));
                publishMessagesToKafka(validProducer, msg);
            } catch (Exception e){
                logger.error("Error processing message:" + msg);
            }
        });
    });
}
 
Example 10
Project: dmaap-framework   File: KafkaPublisher.java   (6 votes)
/**
 * Constructor: initializes the Kafka producer from the given settings.
 * 
 * @param settings
 * @throws rrNvReadable.missingReqdSetting
 */
public KafkaPublisher(@Qualifier("propertyReader") rrNvReadable settings) throws rrNvReadable.missingReqdSetting {
	//fSettings = settings;

	final Properties props = new Properties();
	/*transferSetting(fSettings, props, "metadata.broker.list", "localhost:9092");
	transferSetting(fSettings, props, "request.required.acks", "1");
	transferSetting(fSettings, props, "message.send.max.retries", "5");
	transferSetting(fSettings, props, "retry.backoff.ms", "150"); */
	String kafkaConnUrl = com.att.ajsc.filemonitor.AJSCPropertiesMap.getProperty(CambriaConstants.msgRtr_prop, "kafka.metadata.broker.list");
	System.out.println("kafkaConnUrl:- " + kafkaConnUrl);
	if (null == kafkaConnUrl) {
		kafkaConnUrl = "localhost:9092";
	}
	transferSetting(props, "metadata.broker.list", kafkaConnUrl);
	transferSetting(props, "request.required.acks", "1");
	transferSetting(props, "message.send.max.retries", "5");
	transferSetting(props, "retry.backoff.ms", "150");

	props.put("serializer.class", "kafka.serializer.StringEncoder");

	fConfig = new ProducerConfig(props);
	fProducer = new Producer<String, String>(fConfig);
}
 
Example 11
Project: kafka_jmeter   File: KafkaSampler.java   (6 votes)
@Override
public SampleResult sample(Entry entry) {
	SampleResult result = new SampleResult();
	result.setSampleLabel(getName());
	try {
		result.sampleStart();
		Producer<String, String> producer = getProducer();
		KeyedMessage<String, String> msg = new KeyedMessage<String, String>(getTopic(), getMessage());
		producer.send(msg);
		result.sampleEnd(); 
		result.setSuccessful(true);
		result.setResponseCodeOK();
	} catch (Exception e) {
		result.sampleEnd(); // stop stopwatch
		result.setSuccessful(false);
		result.setResponseMessage("Exception: " + e);
		// get stack trace as a String to return as document data
		java.io.StringWriter stringWriter = new java.io.StringWriter();
		e.printStackTrace(new java.io.PrintWriter(stringWriter));
		result.setResponseData(stringWriter.toString(), null);
		result.setDataType(org.apache.jmeter.samplers.SampleResult.TEXT);
		result.setResponseCode("FAILED");
	}
	return result;
}
 
Example 12
Project: another-rule-based-analytics-on-spark   File: KafkaDataProducer.java   (6 votes)
public static void main(String[] args) {
	String brokers = "localhost:9092";
	Producer<String, String> producer = KafkaProducer.getInstance(brokers).getProducer();

	KafkaDataProducer instance = new KafkaDataProducer();

	String topic = "test-topic";

	for (int i = 0; i < 100; i++) {
		String message = instance.get(i);
		KeyedMessage<String, String> keyedMessage = new KeyedMessage<String, String>(topic, "device001", message);
		producer.send(keyedMessage);
		System.out.println("message[" + (i + 1) + "] is sent.");
		try {
			Thread.sleep(1000);
		} catch (InterruptedException e) {
			e.printStackTrace();
		}
	}
}
 
Example 13
Project: another-rule-based-analytics-on-spark   File: KafkaProducer.java   (6 votes)
public static KafkaProducer getInstance(String brokerList) {
	long threadId = Thread.currentThread().getId();
	Producer<String, String> producer = _pool.get(threadId);
	System.out.println("producer:" + producer + ", thread:" + threadId);

	if (producer == null) {

		Preconditions.checkArgument(StringUtils.isNotBlank(brokerList), "kafka brokerList is blank...");

		// set properties
		Properties properties = new Properties();
		properties.put(METADATA_BROKER_LIST_KEY, brokerList);
		properties.put(SERIALIZER_CLASS_KEY, SERIALIZER_CLASS_VALUE);
		properties.put("kafka.message.CompressionCodec", "1");
		properties.put("client.id", "streaming-kafka-output");
		ProducerConfig producerConfig = new ProducerConfig(properties);

		producer = new Producer<String, String>(producerConfig);

		_pool.put(threadId, producer);
	}

	return instance;
}
 
Example 14
Project: twill   File: SimpleKafkaPublisher.java   (6 votes)
@Override
public ListenableFuture<Integer> send() {
  try {
    int size = messages.size();
    Producer<Integer, ByteBuffer> kafkaProducer = producer.get();
    if (kafkaProducer == null) {
      return Futures.immediateFailedFuture(new IllegalStateException("No kafka producer available."));
    }
    kafkaProducer.send(messages);
    return Futures.immediateFuture(size);
  } catch (Exception e) {
    return Futures.immediateFailedFuture(e);
  } finally {
    messages.clear();
  }
}
 
Example 15
Project: spark-kafka-streaming   File: KafkaDataProducer.java   (6 votes)
public static void main(String[] args) {
    Properties props = new Properties();
    props.put("serializer.class", "kafka.serializer.StringEncoder");
    props.put("metadata.broker.list", "localhost:9092");

    Producer<String,String> producer = new Producer<String, String>(new ProducerConfig(props));

    int number = 1;
    for(; number < MESSAGES_NUMBER; number++)
    {
        String messageStr =
                String.format("{\"message\": %d, \"uid\":\"%s\"}",
                        number, uId.get(rand.nextInt(uNum)));

        producer.send(new KeyedMessage<String, String>(SparkStreamingConsumer.KAFKA_TOPIC,
                null, messageStr));
        if (number % 10000 == 0)
            System.out.println("Messages pushed: " + number);
    }
    System.out.println("Messages pushed: " + number);
}
 
Example 16
Project: KafkaExample   File: ProducerDemo.java   (6 votes)
private static Producer<String, String> initProducer() {
    Properties props = new Properties();
    props.put("metadata.broker.list", BROKER_LIST);
    // props.put("serializer.class", "kafka.serializer.StringEncoder");
    props.put("serializer.class", StringEncoder.class.getName());
    props.put("partitioner.class", HashPartitioner.class.getName());
//    props.put("compression.codec", "0");
    props.put("producer.type", "sync");
    props.put("batch.num.messages", "1");
    props.put("queue.buffering.max.messages", "1000000");
    props.put("queue.enqueue.timeout.ms", "20000000");

    
    ProducerConfig config = new ProducerConfig(props);
    Producer<String, String> producer = new Producer<String, String>(config);
    return producer;
  }
 
Example 17
Project: kclient   File: KafkaProducer.java   (6 votes)
protected void init() {
	if (properties == null) {
		properties = new Properties();
		try {
			properties.load(Thread.currentThread().getContextClassLoader()
					.getResourceAsStream(propertiesFile));
		} catch (IOException e) {
			log.error("The properties file is not loaded.", e);
			throw new IllegalArgumentException(
					"The properties file is not loaded.", e);
		}
	}
	log.info("Producer properties:" + properties);

	ProducerConfig config = new ProducerConfig(properties);
	producer = new Producer<String, String>(config);
}
 
Example 18
Project: iot-traffic-monitor   File: IoTDataProducer.java   (6 votes)
public static void main(String[] args) throws Exception {
	//read config file
	Properties prop = PropertyFileReader.readPropertyFile();		
	String zookeeper = prop.getProperty("com.iot.app.kafka.zookeeper");
	String brokerList = prop.getProperty("com.iot.app.kafka.brokerlist");
	String topic = prop.getProperty("com.iot.app.kafka.topic");
	logger.info("Using Zookeeper=" + zookeeper + " ,Broker-list=" + brokerList + " and topic " + topic);

	// set producer properties
	Properties properties = new Properties();
	properties.put("zookeeper.connect", zookeeper);
	properties.put("metadata.broker.list", brokerList);
	properties.put("request.required.acks", "1");
	properties.put("serializer.class", "com.iot.app.kafka.util.IoTDataEncoder");
	//generate event
	Producer<String, IoTData> producer = new Producer<String, IoTData>(new ProducerConfig(properties));
	IoTDataProducer iotProducer = new IoTDataProducer();
	iotProducer.generateIoTEvent(producer,topic);		
}
 
Example 19
Project: metrics-kafka   File: KafkaReporter.java   (6 votes)
private KafkaReporter(MetricRegistry registry, String name,
		TimeUnit rateUnit, TimeUnit durationUnit, boolean showSamples, MetricFilter filter,
		String topic, ProducerConfig config, String prefix,
		String hostName, String ip) {
	super(registry, name, filter, rateUnit, durationUnit);
	this.topic = topic;
	this.config = config;
	this.prefix = prefix;
	this.hostName = hostName;
	this.ip = ip;
	
	this.mapper = new ObjectMapper().registerModule(
			new MetricsModule(rateUnit, durationUnit, showSamples));

	producer = new Producer<String, String>(config);

	kafkaExecutor = Executors
			.newSingleThreadExecutor(new ThreadFactoryBuilder()
					.setNameFormat("kafka-producer-%d").build());
}
 
Example 20
Project: iot-masterclass   File: KafkaEventCollector.java   (6 votes)
public KafkaEventCollector() {
  Properties properties = new Properties();
  try {
    properties.load(new FileInputStream(new File("/etc/storm_demo/config.properties")));
  } catch (Exception ex) {
    System.err.println("Unable to locate config file: /etc/storm_demo/config.properties");
    System.exit(1); // nonzero exit: cannot continue without the config file
  }

  props.put("metadata.broker.list", properties.getProperty("kafka.brokers"));
  props.put("serializer.class", "kafka.serializer.StringEncoder");
  props.put("request.required.acks", "1");

  try {
    ProducerConfig producerConfig = new ProducerConfig(props);
    kafkaProducer = new Producer<String, String>(producerConfig);
  } catch (Exception e) {
    logger.error("Error creating producer", e);
  }
}
 
Example 21
Project: iot-lab   File: KafkaEventCollector.java   (6 votes)
public KafkaEventCollector() {
  Properties properties = new Properties();
  try {
    properties.load(new FileInputStream(new File("/etc/storm_demo/config.properties")));
  } catch (Exception ex) {
    System.err.println("Unable to locate config file: /etc/storm_demo/config.properties");
    System.exit(1); // nonzero exit: cannot continue without the config file
  }

  props.put("metadata.broker.list", properties.getProperty("kafka.brokers"));
  props.put("serializer.class", "kafka.serializer.StringEncoder");
  props.put("request.required.acks", "1");

  try {
    ProducerConfig producerConfig = new ProducerConfig(props);
    kafkaProducer = new Producer<String, String>(producerConfig);
  } catch (Exception e) {
    logger.error("Error creating producer", e);
  }
}
 
Example 22
Project: benchmarkio   File: KafkaMessageProducer.java   (6 votes)
/**
 * metadata.broker.list => "broker1:9092,broker2:9092"
 */
public KafkaMessageProducer(final String metadataBrokerList, final String topic, final String message, final long numberOfMessagesToProduce, final String kafkaProducerType) {
    final Properties props = new Properties();

    props.put("metadata.broker.list",   metadataBrokerList);
    props.put("serializer.class",       "kafka.serializer.StringEncoder");
    props.put("request.required.acks",  "1");
    props.put("producer.type",          kafkaProducerType);

    final Producer<String, String> producer = new Producer<>(new ProducerConfig(props));

    this.producer                   = Preconditions.checkNotNull(producer);
    this.histogram                  = Histograms.create();
    this.topic                      = Preconditions.checkNotNull(topic);
    this.message                    = Preconditions.checkNotNull(message);
    this.numberOfMessagesToProduce  = Preconditions.checkNotNull(numberOfMessagesToProduce);
}
 
Example 23
Project: yuzhouwan   File: KafkaUtils.java   (6 votes)
static Producer<String, byte[]> createProducer() {
        Properties props = new Properties();
        try {
//            props.put("zk.connect", p.getProperty("kafka.zk.connect"));   // not need zk in new version
            props.put("key.serializer.class", p.getProperty("kafka.key.serializer.class"));
            props.put("serializer.class", p.getProperty("kafka.serializer.class"));
            props.put("metadata.broker.list", p.getProperty("kafka.metadata.broker.list"));
            props.put("request.required.acks", p.getProperty("kafka.request.required.acks"));
            props.put("producer.type", p.getProperty("kafka.async"));
            props.put("partitioner.class", PARTITIONER_CLASS_NAME);

            props.put("queue.buffering.max.ms", p.getProperty("kafka.queue.buffering.max.ms"));
            props.put("queue.buffering.max.messages", p.getProperty("kafka.queue.buffering.max.messages"));
            props.put("queue.enqueue.timeout.ms", p.getProperty("kafka.queue.enqueue.timeout.ms"));
            // 41,0000,0000 / 24 / 60 / 60 = 47454 / 24 = 1977
            props.put("batch.num.messages", p.getProperty("kafka.batch.num.messages"));
            props.put("send.buffer.bytes", p.getProperty("kafka.send.buffer.bytes"));
//            props.put("compression.type", "lz4");
        } catch (Exception e) {
            _log.error("Connect with kafka failed, error: {}!", e.getMessage());
            throw new RuntimeException(e);
        }
        _log.info("Connect with kafka successfully!");
        return new Producer<>(new ProducerConfig(props));
    }
 
Example 24
Project: druid-kafka-ext   File: TestKafkaProducer.java   (6 votes)
public TestKafkaProducer(String topic, String brokerList,
		KafkaMessageSerializer serializer, String partitioner) {
	this.topic = topic;
	this.serializer = serializer;
	Properties props = new Properties();
	props.put("metadata.broker.list", brokerList);
	props.put("zookeeper.session.timeout.ms", "5000");
	props.put("zookeeper.sync.time.ms", "2000");
	props.put("serializer.class", "kafka.serializer.StringEncoder");
//	props.put("key.serializer.class", "kafka.serializer.DefaultEncoder");
//	props.put("partitioner.class", "kafka.producer.DefaultPartitioner");
	//props.put("partitioner.class", partitioner);
	props.put("request.required.acks", "1");
	props.put("compression.codec", "snappy");
//	props.put("request.required.acks", "0");
	props.put("producer.type", "async");
	props.put("client.id", "testProducer");

	config = new ProducerConfig(props);
	//producer = new Producer<byte[], byte[]>(config);
	producer = new Producer<String, String>(config);
}
 
Example 25
Project: cep   File: KafkaSink.java   (6 votes)
/**
 * Creates a new KafkaSink.
 * This method initializes and starts a new Kafka producer that will be
 * used to produce messages to kafka topics.
 * @param properties KafkaSink properties. You should provide a kafka_brokers property
 *                   set to the Kafka host address. If none is provided, localhost will
 *                   be used.
 */
public KafkaSink(Map<String, Object> properties) {
    super(properties);
    // The producer config attributes
    Properties props = new Properties();
    if (properties != null && properties.get("kafka_brokers") != null) {
        props.put("metadata.broker.list", properties.get("kafka_brokers"));
    } else {
        props.put("metadata.broker.list", "127.0.0.1:9092");
    }
    props.put("serializer.class", "kafka.serializer.StringEncoder");
    props.put("request.required.acks", "1");
    props.put("message.send.max.retries", "60");
    props.put("retry.backoff.ms", "1000");
    props.put("producer.type", "async");
    props.put("queue.buffering.max.messages", "10000");
    props.put("queue.buffering.max.ms", "500");
    props.put("partitioner.class", "net.redborder.cep.sinks.kafka.SimplePartitioner");

    // Initialize the producer
    ProducerConfig config = new ProducerConfig(props);
    producer = new Producer<>(config);
}
 
Example 26
Project: ezbake-common-java   File: KafkaBroadcaster.java   (6 votes)
@Override
protected void prepare(Properties props, String groupId) {
    KafkaConfigurationHelper kafkaHelper = new KafkaConfigurationHelper(props);
    localProps = new Properties();
    localProps.put(GROUP_ID_PROP, groupId);
    localProps.put(METADATA_BROKER_LIST, kafkaHelper.getKafkaBrokerList());
    localProps.put(PRODUCER_TYPE, kafkaHelper.getKafkaProducerType());
    localProps.put(QUEUE_SIZE, kafkaHelper.getKafkaQueueSize());
    localProps.put(QUEUE_TIME, kafkaHelper.getKafkaQueueTime());
    localProps.put(ZOOKEEPERS, kafkaHelper.getKafkaZookeeper());
    localProps.put(ZOOKEEPER_SESSION_TIMEOUT, kafkaHelper.getKafkaZookeeperSessionTimeout());

    // This sets the consumer to start consuming messages with the smallest offset in Kafka if no offset
    // exists for this consumer
    localProps.put(AUTO_OFFSET_RESET, "smallest");

    // Initialize the Producer
    ProducerConfig producerConfig = new ProducerConfig(localProps);
    producer = new Producer<>(producerConfig);

    topicCache = "";
    broadcastTopics = Sets.newHashSet();
    topicsToListenTo = Sets.newHashSet();
}
 
Example 27
Project: easyframe-msg   File: KafkaHelper.java   (6 votes)
/** Send a single message [to the specified topic]
 * 
 * @param topicName topic name
 * @param msg message payload 
 * @param partKey partition key; if null, the message payload is used as the partition key
 * */
static void send(String topicName, String msg, Object partKey) {
	Producer<Object, String> producer = KafkaHelper.getProducer();
	
	KeyedMessage<Object, String> message = null;
	if (partKey == null) {
		// use the message payload itself as the partition key
		message = new KeyedMessage<Object, String>(topicName, null, msg, msg);
	} else {
		message = new KeyedMessage<Object, String>(topicName, null, partKey, msg);
	}
	
	// send to a single topic, synchronously or asynchronously, optionally partitioned by key
	long start = System.currentTimeMillis();
	producer.send(message);
	if(LOG.isDebugEnabled()){
		long end = System.currentTimeMillis();
		LOG.debug("Sent [" + message + "]" + ", cost = [" + (end-start) + "]");
	}
}
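KafkaHelper.getProducer() is not shown. A plausible sketch, assuming a lazily created producer shared by all callers (hypothetical; the property values are placeholders):

// Hypothetical reconstruction of KafkaHelper.getProducer()
private static volatile Producer<Object, String> producer;

static Producer<Object, String> getProducer() {
    if (producer == null) {
        synchronized (KafkaHelper.class) {
            if (producer == null) { // double-checked locking for lazy init
                Properties props = new Properties();
                props.put("metadata.broker.list", "localhost:9092");
                props.put("serializer.class", "kafka.serializer.StringEncoder");
                producer = new Producer<Object, String>(new ProducerConfig(props));
            }
        }
    }
    return producer;
}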
 
Example 28
Project: performance-test-harness-for-geoevent   File: KafkaEventProducer.java   (6 votes)
@Override
public synchronized void init(Config config) throws TestException {
    super.init(config);
    try {
        brokerList = config.getPropertyValue("brokerList", "localhost:9092");
        topic = config.getPropertyValue("topic", "default-topic");
        acks = config.getPropertyValue("requiredAcks", "1");

        Properties kprops = new Properties();
        kprops.put("metadata.broker.list", brokerList);
        kprops.put("serializer.class", "kafka.serializer.StringEncoder");
        kprops.put("request.required.acks", acks);
        kprops.put("partitioner.class", SimplePartitioner.class.getCanonicalName());
        kprops.put("producer.type", "async");
        kprops.put("queue.buffering.max.ms", "1000");
        kprops.put("batch.num.messages", "2000");

        ProducerConfig producerConfig = new ProducerConfig(kprops);

        producer = new Producer<String, String>(producerConfig);
    } catch (Throwable error) {
        throw new TestException(ImplMessages.getMessage("INIT_FAILURE", getClass().getName(), error.getMessage()), error);
    }
}
 
Example 29
Project: rekafka   File: Relay.java   (5 votes)
@Override
public void run() {
  long relayed = 0;
  
  LOG.info("Starting relay");
  final ConsumerConnector consumer = Consumer.createJavaConsumerConnector(createConsumerConfig());
  final KafkaStream<byte[], byte[]> stream = createConsumerStream(consumer);
  
  final Producer<byte[], byte[]> producer = new Producer<>(createProducerConfig());
  final ConsumerIterator<byte[], byte[]> it = stream.iterator();
  while (it.hasNext()) {
    final MessageAndMetadata<byte[], byte[]> rx = it.next();
    relayed++;
    if (LOG.isTraceEnabled()) LOG.trace("Relaying {}/{}: key={}, value={}",
                                        relayed,
                                        maxRecords != 0 ? maxRecords : "\u221E",
                                        new String(rx.key()),
                                        new String(rx.message()));
    final KeyedMessage<byte[], byte[]> tx = new KeyedMessage<>(config.sink.topic, rx.key(), rx.message());
    producer.send(tx);
    
    if (maxRecords != 0 && relayed >= maxRecords) {
      LOG.info("Shutting down");
      break;
    }
  }

  producer.close();
  consumer.shutdown();
}
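createProducerConfig() is not part of the excerpt. Since the relay forwards raw byte[] keys and values, a plausible sketch keeps the default byte[] encoder (hypothetical; the broker list is a placeholder):

// Hypothetical reconstruction of createProducerConfig()
private ProducerConfig createProducerConfig() {
    Properties props = new Properties();
    props.put("metadata.broker.list", "localhost:9092");
    // no serializer.class override: kafka.serializer.DefaultEncoder passes byte[] through
    props.put("request.required.acks", "1");
    return new ProducerConfig(props);
}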
 
Example 30
Project: wngn-jms-kafka   File: DeprecatedProducer.java   (5 votes)
protected DeprecatedProducer(String topic, Properties props) {
    this.props.putAll(props);
    this.props.put(ProducerConstants.SERIALIZER_CLASS, ProducerConstants.STEING_ENCODER_SERIALIZER);
    this.props.put(ProducerConstants.METEDATA_BROKER_LIST, ProducerConstants.BROKER_CLUSTER_LIST);
    this.producer = new Producer<Integer, String>(new ProducerConfig(this.props));
    this.topic = topic;
}
 
Example 31
Project: opentsdb-rpc-kafka   File: KafkaStorageExceptionHandler.java   (5 votes)
@Override
public void initialize(TSDB tsdb) {
  this.tsdb = tsdb;
  config = new KafkaRpcPluginConfig(tsdb.getConfig());

  setKafkaConfig();
  producer = new Producer<String, byte[]>(producer_config);
  LOG.info("Initialized kafka requeue publisher.");
}
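setKafkaConfig() and producer_config belong to the plugin and are not shown; the real values come from KafkaRpcPluginConfig. A rough sketch of what it might assemble (hypothetical; keys and values are placeholders, not the plugin's actual code):

// Hypothetical sketch only; the actual plugin derives these from KafkaRpcPluginConfig
private void setKafkaConfig() {
    Properties props = new Properties();
    props.put("metadata.broker.list", "localhost:9092");
    props.put("key.serializer.class", "kafka.serializer.StringEncoder"); // String keys
    // values stay byte[], handled by the default encoder
    props.put("request.required.acks", "1");
    producer_config = new ProducerConfig(props);
}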
 
Example 32
Project: Transwarp-Sample-Code   File: kafkaProducer.java   (5 votes)
Task(int num, String topic, Producer producer, CopyOnWriteArrayList<String> fileList, int batchSize) {
    this.num = num;
    this.topic = topic;
    this.producer = producer;
    this.fileList = fileList;
    this.batchSize = batchSize;
}
 
Example 33
Project: iotdb-jdbc   File: KafkaProducer.java   (5 votes)
private KafkaProducer() {
	Properties props = new Properties();

	props.put("metadata.broker.list", "127.0.0.1:9092");
	props.put("zk.connect", "127.0.0.1:2181");
	props.put("serializer.class", "kafka.serializer.StringEncoder");
	props.put("key.serializer.class", "kafka.serializer.StringEncoder");
	props.put("request.required.acks", "-1");

	//Producer instance
	producer = new Producer<String, String>(new ProducerConfig(props));
}
 
Example 34
Project: jaf-examples   File: ProducerDemo.java   (5 votes)
public static void syncProducerBatchSend() {
	Producer<String, String> producer = buildSyncProducer();
	IntStream.range(0, 9).forEach(x -> {
		sendMessage(producer, Constants.TOPIC_NAME, x + "", "message : syncProducerBatchSend " + x);
	});
	producer.close();
}
 
Example 35
Project: jaf-examples   File: ProducerDemo.java   (5 votes)
public static void asyncProducerBatchSend() {
	Producer<String, String> producer = buildAsyncProducer();
	IntStream.range(0, 20).forEach(x -> {
		sendMessage(producer, Constants.TOPIC_NAME, x + "", "message : asyncProducerBatchSend " + x);
	});
	producer.close();
}
 
Example 36
Project: jaf-examples   File: ProducerDemo.java   (5 votes)
private static Producer<String, String> buildSyncProducer() {
	Properties props = new Properties();
	props.put("metadata.broker.list", Constants.BROKER_LIST);
	props.put("serializer.class", StringEncoder.class.getName());
	props.put("partitioner.class", HashPartitioner.class.getName());
	props.put("producer.type", "sync");
	// Important: if unset this defaults to 0 (fire-and-forget, i.e. the async-producer style), so sends do not wait for the leader's ack
	props.put("request.required.acks", "-1");
	
	ProducerConfig config = new ProducerConfig(props);
	Producer<String, String> produce = new Producer<>(config);
	return produce;
}
 
Example 37
Project: rb-bi   File: TridentKafkaState.java   (5 votes)
public void prepare(Map stormConf) {
    Validate.notNull(mapper, "mapper can not be null");
    Validate.notNull(topicSelector, "topicSelector can not be null");
    Map configMap = (Map) stormConf.get(KAFKA_BROKER_PROPERTIES);
    Properties properties = new Properties();
    properties.putAll(configMap);
    ProducerConfig config = new ProducerConfig(properties);
    producer = new Producer(config);
}
 
Example 38
Project: SDA   File: AvroRdbmsDeviceInfoPublish.java   (5 votes)
public AvroRdbmsDeviceInfoPublish(String broker) {
	Properties props = new Properties();
	props.put("metadata.broker.list", broker); // use the broker list passed in
	props.put("serializer.class", "kafka.serializer.DefaultEncoder");
	props.put("partitioner.class", "kafka.producer.DefaultPartitioner");
	props.put("request.required.acks", "1");
	producer = new Producer<String, byte[]>(new ProducerConfig(props));
}
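With kafka.serializer.DefaultEncoder the payload is raw bytes, e.g. an Avro-serialized record. A usage sketch for the producer built above (the topic name and the serialization helper are placeholders):

// Hypothetical usage: send pre-serialized Avro bytes; with no key, a partition is chosen for you
byte[] avroBytes = serializeDeviceInfo(deviceInfo); // assumed helper producing Avro bytes
producer.send(new KeyedMessage<String, byte[]>("device-info-topic", avroBytes));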