org.apache.kafka.clients.producer.KafkaProducer Java Examples
The following examples show how to use
org.apache.kafka.clients.producer.KafkaProducer.
Each example is drawn from an open-source project; the source file, project, and license are noted above it.
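Before the individual examples, here is a minimal sketch of the pattern they all share: build a Properties map with the broker address and serializers, create the producer, send records, and close it. The broker address and topic name below are placeholders.

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class MinimalProducer {
    public static void main(String[] args) {
        Properties props = new Properties();
        // "localhost:9092" and "my-topic" are placeholders.
        props.put("bootstrap.servers", "localhost:9092");
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());
        // try-with-resources closes the producer, which flushes any buffered records.
        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("my-topic", "key", "value"));
        }
    }
}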
Example #1
Source File: KafkaProducerTest.java from java-study with Apache License 2.0
public KafkaProducerTest(String topicName) {
    Properties props = new Properties();
    props.put("bootstrap.servers", "master:9092,slave1:9092,slave2:9092");
    // acks=0: the producer does not wait for any acknowledgment from Kafka.
    // acks=1: the leader writes the record to its local log but does not wait for
    // acknowledgment from the other machines in the cluster.
    // acks=all: the leader waits for the full set of followers to sync. The record cannot
    // be lost unless every machine in the cluster fails; this is the strongest guarantee.
    props.put("acks", "all");
    // A value greater than 0 makes the client resend records whose send fails.
    props.put("retries", 0);
    // Records destined for the same partition are batched into fewer network requests,
    // improving both client and producer efficiency.
    props.put("batch.size", 16384);
    props.put("key.serializer", StringSerializer.class.getName());
    props.put("value.serializer", StringSerializer.class.getName());
    this.producer = new KafkaProducer<String, String>(props);
    this.topic = topicName;
}
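A constructor like this is usually paired with a send helper on the same wrapper class. A hypothetical sketch (the method name and error handling are assumptions, not part of the original source):

// Hypothetical companion method using the producer and topic fields set above.
public void send(String message) {
    producer.send(new ProducerRecord<String, String>(topic, message),
            (metadata, exception) -> {
                // The callback fires once the broker acknowledges (or rejects) the record.
                if (exception != null) {
                    exception.printStackTrace();
                }
            });
}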
Example #2
Source File: KafkaSSLChannelTest.java from kop with Apache License 2.0
public SslProducer(String topic, int port) {
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost" + ":" + port);
    props.put(ProducerConfig.CLIENT_ID_CONFIG, "DemoKafkaOnPulsarProducerSSL");
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

    // SSL client config
    props.put("security.protocol", "SSL");
    props.put("ssl.truststore.location", "./src/test/resources/ssl/certificate/broker.truststore.jks");
    props.put("ssl.truststore.password", "broker");
    // The default is "https"; it must be set to empty here to disable hostname verification.
    props.put("ssl.endpoint.identification.algorithm", "");

    producer = new KafkaProducer<>(props);
    this.topic = topic;
}
Example #3
Source File: GraphUtils.java from kafka-graphs with Apache License 2.0
public static <K, V> void edgesToTopic(
    InputStream inputStream,
    Parser<EdgeWithValue<K, V>> edgeParser,
    Serializer<V> valueSerializer,
    Properties props,
    String topic,
    int numPartitions,
    short replicationFactor
) throws IOException {
    ClientUtils.createTopic(topic, numPartitions, replicationFactor, props);
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
         Producer<Edge<K>, V> producer = new KafkaProducer<>(props, new KryoSerializer<>(), valueSerializer)) {
        String line;
        while ((line = reader.readLine()) != null) {
            EdgeWithValue<K, V> edge = edgeParser.parse(line);
            log.trace("read edge: ({}, {})", edge.source(), edge.target());
            ProducerRecord<Edge<K>, V> producerRecord =
                new ProducerRecord<>(topic, new Edge<>(edge.source(), edge.target()), edge.value());
            producer.send(producerRecord);
        }
        producer.flush();
    }
}
Example #4
Source File: TestProducer.java from xxhadoop with Apache License 2.0
public static void main(String[] args) {
    Properties properties = new Properties();
    // bin/kafka-topics.sh
    properties.put("zookeeper.connect", "node-01:2181,node-02:2181,node-03:2181");
    // kafka-console-producer.sh
    properties.put("metadata.broker.list", "node-02:9092,node-03:9092,node-04:9092");
    properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    // kafka-console-consumer.sh
    properties.put("bootstrap.servers", "node-02:9092,node-03:9092,node-04:9092");

    Producer<String, String> producer = new KafkaProducer<String, String>(properties);
    LOGGER.info("produce start...");
    for (int i = 0; i < 100; i++) {
        ProducerRecord<String, String> msg =
            new ProducerRecord<String, String>("order-r", "name", "Hello_XXX_" + i);
        producer.send(msg);
    }
    producer.close();
    LOGGER.info("produce end...");
}
Example #5
Source File: KafkaMetricsSample.java from micrometer with Apache License 2.0
public static void main(String[] args) throws Exception {
    EphemeralKafkaBroker broker = EphemeralKafkaBroker.create();
    broker.start();

    KafkaHelper kafkaHelper = KafkaHelper.createFor(broker);
    KafkaConsumer<String, String> consumer = kafkaHelper.createStringConsumer();
    KafkaProducer<String, String> producer = kafkaHelper.createStringProducer();

    MeterRegistry registry = SampleConfig.myMonitoringSystem();
    new KafkaClientMetrics(consumer).bindTo(registry);
    new KafkaClientMetrics(producer).bindTo(registry);

    consumer.subscribe(singletonList(TOPIC));

    Flux.interval(Duration.ofMillis(10))
        .doOnEach(n -> producer.send(new ProducerRecord<>(TOPIC, "hello", "world")))
        .subscribe();

    for (; ; ) {
        consumer.poll(Duration.ofMillis(100));
        consumer.commitAsync();
    }
}
Example #6
Source File: KafkaProducerUtil.java from java-study with Apache License 2.0
/**
 * Send a batch of messages to Kafka.
 *
 * @param listMsg   the messages to send
 * @param url       the broker address to send to
 * @param topicName the topic name
 * @param num       the number of messages per batch
 * @return whether the send succeeded
 * @throws Exception
 */
public static boolean sendMessage(List<String> listMsg, String url, String topicName, int num) throws Exception {
    KafkaProducer<String, String> producer = null;
    boolean flag = false;
    try {
        Properties props = init(url);
        producer = new KafkaProducer<String, String>(props);
        List<String> listMsg2 = new ArrayList<String>();
        for (int i = 1, j = listMsg.size(); i <= j; i++) {
            listMsg2.add(listMsg.get(i - 1));
            // Every num messages (or at the end of the list), send the batch as one record.
            if (i % num == 0 || i == j) {
                producer.send(new ProducerRecord<String, String>(topicName, listMsg2.toString()));
                listMsg2.clear();
            }
        }
        flag = true;
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        if (producer != null) {
            producer.close();
        }
    }
    return flag;
}
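A hypothetical invocation of this helper, sending 1,000 messages in batches of 100 (the broker address and topic name are placeholders):

List<String> messages = new ArrayList<>();
for (int i = 0; i < 1000; i++) {
    messages.add("message-" + i);
}
// Sends ten records, each containing the string form of a 100-element list.
boolean ok = KafkaProducerUtil.sendMessage(messages, "localhost:9092", "test-topic", 100);

Note the design choice: each batch is sent as a single record containing the list's toString() form, rather than as num individual records.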
Example #7
Source File: WebKafkaConsumerTest.java from kafka-webview with MIT License
public void publishDummyDataNumbers() {
    final String topic = "NumbersTopic";

    // Create publisher
    final Map<String, Object> config = new HashMap<>();
    config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
    config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
    config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

    final KafkaProducer<Integer, Integer> producer = new KafkaProducer<>(config);
    for (int value = 0; value < 10000; value++) {
        producer.send(new ProducerRecord<>(topic, value, value));
    }
    producer.flush();
    producer.close();
}
Example #8
Source File: PublishManager.java from onos with Apache License 2.0
@Override
public void start(KafkaServerConfig config) {
    if (kafkaProducer != null) {
        log.info("Producer has already started");
        return;
    }

    String bootstrapServer = new StringBuilder().append(config.getIpAddress()).append(":")
            .append(config.getPort()).toString();

    // Set Server Properties
    Properties prop = new Properties();
    prop.put("bootstrap.servers", bootstrapServer);
    prop.put("retries", config.getNumOfRetries());
    prop.put("max.in.flight.requests.per.connection", config.getMaxInFlightRequestsPerConnection());
    prop.put("request.required.acks", config.getAcksRequired());
    prop.put("key.serializer", config.getKeySerializer());
    prop.put("value.serializer", config.getValueSerializer());

    kafkaProducer = new KafkaProducer<>(prop);
    log.info("Kafka Producer has started.");
}
Example #9
Source File: ProducerDemo.java from KafkaExample with Apache License 2.0
public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put("bootstrap.servers", "kafka0:9092");
    props.put("acks", "all");
    props.put("retries", 3);
    props.put("batch.size", 16384);
    props.put("linger.ms", 1);
    props.put("buffer.memory", 33554432);
    props.put("key.serializer", StringSerializer.class.getName());
    props.put("value.serializer", StringSerializer.class.getName());
    props.put("partitioner.class", HashPartitioner.class.getName());
    props.put("interceptor.classes", EvenProducerInterceptor.class.getName());

    Producer<String, String> producer = new KafkaProducer<String, String>(props);
    for (int i = 0; i < 10; i++) {
        producer.send(new ProducerRecord<String, String>("topic1", Integer.toString(i), Integer.toString(i)));
    }
    producer.close();
}
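HashPartitioner here is a project-specific class that is not shown. A custom partitioner plugged in via partitioner.class implements org.apache.kafka.clients.producer.Partitioner; a minimal key-hash sketch (the class name and hashing scheme are assumptions, not the project's actual implementation):

import java.util.Map;
import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

public class SimpleHashPartitioner implements Partitioner {
    @Override
    public int partition(String topic, Object key, byte[] keyBytes,
                         Object value, byte[] valueBytes, Cluster cluster) {
        int numPartitions = cluster.partitionsForTopic(topic).size();
        // Mask the sign bit rather than using Math.abs, which overflows for Integer.MIN_VALUE.
        return (key == null ? 0 : (key.hashCode() & Integer.MAX_VALUE)) % numPartitions;
    }

    @Override
    public void configure(Map<String, ?> configs) { }

    @Override
    public void close() { }
}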
Example #10
Source File: KafkaClientFacotry.java from framework with Apache License 2.0
/**
 * Description: <br>
 *
 * @author 王伟<br>
 * @taskId <br>
 * @return <br>
 */
public static KafkaProducer<String, byte[]> getProducer() {
    synchronized (lock) {
        if (kafkaProducer == null) {
            Properties props = new Properties();
            props.put("bootstrap.servers", PropertyHolder.getProperty("message.kafka.bootstrap.servers"));
            props.put("acks", PropertyHolder.getProperty("message.kafka.acks", "all"));
            props.put("retries", PropertyHolder.getIntProperty("message.kafka.retries", 0));
            props.put("batch.size", PropertyHolder.getIntProperty("message.kafka.batch.size", BATCH_SIZE));
            props.put("linger.ms", PropertyHolder.getIntProperty("message.kafka.linger.ms", 1));
            props.put("buffer.memory", PropertyHolder.getLongProperty("message.kafka.buffer.memory", BUFFER_MEMORY));
            props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
            kafkaProducer = new KafkaProducer<String, byte[]>(props);
        }
    }
    return kafkaProducer;
}
Example #11
Source File: KafkaMsgProducer.java from iotplatform with Apache License 2.0
@PostConstruct
public void init() {
    properties.put("bootstrap.servers", "127.0.0.1:9092");
    properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    properties.put("acks", "-1");
    properties.put("retries", 0);
    properties.put("batch.size", 16384);
    properties.put("linger.ms", 0);
    properties.put("buffer.memory", 33554432);
    try {
        this.producer = new KafkaProducer<>(properties);
    } catch (Exception e) {
        log.error("Failed to start kafka producer", e);
        throw new RuntimeException(e);
    }
    log.info("Kafka Producer is started....");
}
Example #12
Source File: KafkaJunitRuleTest.java from kafka-junit with Apache License 2.0
@Test
public void testKafkaServerIsUp() {
    try (KafkaProducer<String, String> producer = kafkaRule.helper().createStringProducer()) {
        producer.send(new ProducerRecord<>(TOPIC, "keyA", "valueA"));
    }

    try (KafkaConsumer<String, String> consumer = kafkaRule.helper().createStringConsumer()) {
        consumer.subscribe(Lists.newArrayList(TOPIC));
        ConsumerRecords<String, String> records = consumer.poll(10000);
        assertThat(records).isNotNull();
        assertThat(records.isEmpty()).isFalse();

        ConsumerRecord<String, String> msg = records.iterator().next();
        assertThat(msg).isNotNull();
        assertThat(msg.key()).isEqualTo("keyA");
        assertThat(msg.value()).isEqualTo("valueA");
    }
}
Example #13
Source File: KafKaProducerAPITest.java from javabase with Apache License 2.0
public static void main(String[] args) throws ExecutionException, InterruptedException {
    KafkaProducer<Integer, String> producer = getProducer();
    // Build the ProducerRecord.
    ProducerRecord producerRecord = getProducerRecord();
    // Plain send:
    while (true) {
        Thread.sleep(500);
        //producer.send(producerRecord);
        // Send with a callback:
        producer.send(producerRecord, new CallBackAPI("send message"));
        log.info("----");
    }
    // Blocking send: get() waits for the result, so records are sent one at a time.
    //producer.send(producerRecord).get();
    // producer.close();
}
Example #14
Source File: KafkaDatasetOtherDelimTestIT.java from components with Apache License 2.0
@Before
public void init() throws TimeoutException {
    // There may exist topics other than these built-in topics (configured in pom.xml), but ignore them.
    // ----------------- Send sample data to TOPIC_IN start --------------------
    String testID = "sampleTest" + new Random().nextInt();
    List<Person> expectedPersons = Person.genRandomList(testID, 10);

    Properties props = new Properties();
    props.put("bootstrap.servers", BOOTSTRAP_HOST);
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

    Producer<Void, String> producer = new KafkaProducer<>(props);
    for (Person person : expectedPersons) {
        ProducerRecord<Void, String> message = new ProducerRecord<>(TOPIC_IN, person.toCSV(fieldDelimiter));
        producer.send(message);
    }
    producer.close();
    // ----------------- Send sample data to TOPIC_IN end --------------------
}
Example #15
Source File: IPLogProducer.java from Building-Data-Streaming-Applications-with-Apache-Kafka with MIT License
@Override
public void run() {
    PropertyReader propertyReader = new PropertyReader();

    Properties producerProps = new Properties();
    producerProps.put("bootstrap.servers", propertyReader.getPropertyValue("broker.list"));
    producerProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    producerProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    producerProps.put("auto.create.topics.enable", "true");

    KafkaProducer<String, String> ipProducer = new KafkaProducer<String, String>(producerProps);
    BufferedReader br = readFile();
    String oldLine = "";
    try {
        while ((oldLine = br.readLine()) != null) {
            String line = getNewRecordWithRandomIP(oldLine).replace("[", "").replace("]", "");
            ProducerRecord ipData =
                new ProducerRecord<String, String>(propertyReader.getPropertyValue("topic"), line);
            Future<RecordMetadata> recordMetadata = ipProducer.send(ipData);
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
    ipProducer.close();
}
Example #16
Source File: KafkaDatasetTestIT.java from components with Apache License 2.0
@Before
public void init() throws TimeoutException {
    // There may exist topics other than these built-in topics (configured in pom.xml), but ignore them.
    // ----------------- Send sample data to TOPIC_IN start --------------------
    String testID = "sampleTest" + new Random().nextInt();
    List<Person> expectedPersons = Person.genRandomList(testID, 10);

    Properties props = new Properties();
    props.put("bootstrap.servers", BOOTSTRAP_HOST);
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

    Producer<Void, String> producer = new KafkaProducer<>(props);
    for (Person person : expectedPersons) {
        ProducerRecord<Void, String> message = new ProducerRecord<>(TOPIC_IN, person.toCSV(";"));
        producer.send(message);
    }
    producer.close();
    // ----------------- Send sample data to TOPIC_IN end --------------------
}
Example #17
Source File: KafkaHelper.java from kafka-junit with Apache License 2.0
/**
 * Convenience method to produce a set of strings to the specified topic.
 *
 * @param topic  Topic to produce to
 * @param values Values to produce
 */
public void produceStrings(String topic, String... values) {
    try (KafkaProducer<String, String> producer = createStringProducer()) {
        Map<String, String> data = Arrays.stream(values)
                .collect(Collectors.toMap(k -> String.valueOf(k.hashCode()), Function.identity()));
        produce(topic, producer, data);
    }
}
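The produce(...) helper invoked above is not shown in this excerpt. A plausible sketch under the assumption that it simply sends each map entry as a keyed record and flushes (signature and behavior are assumptions):

// Hypothetical helper: each map key becomes the record key, each value the payload.
private void produce(String topic, KafkaProducer<String, String> producer, Map<String, String> data) {
    data.forEach((key, value) -> producer.send(new ProducerRecord<>(topic, key, value)));
    producer.flush();  // block until all buffered records have been sent
}

Using each value's hashCode as the key gives identical values the same key, and therefore the same partition under the default partitioner.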
Example #18
Source File: TestUtils.java from uReplicator with Apache License 2.0
public static void produceMessages(String bootstrapServer, String topicname, int messageCount,
        int numOfPartitions) {
    KafkaProducer producer = createProducer(bootstrapServer);
    for (int i = 0; i < messageCount; i++) {
        ProducerRecord<Byte[], Byte[]> record = new ProducerRecord(topicname, i % numOfPartitions, null,
                String.format("Test Value - %d", i).getBytes());
        producer.send(record);
    }
    producer.flush();
    producer.close();
}
Example #19
Source File: DbusHeartBeatBolt.java from DBus with Apache License 2.0
private Producer<String, String> createProducer(int taskId) throws Exception {
    Properties props = PropertiesHolder.getProperties(Constants.Properties.PRODUCER_CONFIG);
    Properties properties = new Properties();
    properties.putAll(props);
    properties.setProperty("client.id", this.topologyId + "_heartbeat_" + taskId);
    Producer<String, String> producer = new KafkaProducer<>(properties);
    return producer;
}
Example #20
Source File: KafkaSink.java from test-data-generator with Apache License 2.0
@Override
public void init(Map<String, String> props) {
    String brokerList = props.get(PropConst.BROKER_LIST);
    if (brokerList == null) {
        throw new IllegalArgumentException("Broker list is not specified");
    }
    Map<String, Object> configs = new HashMap<>();
    configs.put("bootstrap.servers", brokerList);
    producer = new KafkaProducer<>(configs);
    formatter = formatterFactory.buildFormatter(props);
}
Example #21
Source File: KafkaAdapter.java from mdw with Apache License 2.0
@Override
public Object openConnection() throws ConnectionException, AdapterException {
    synchronized (producerMap) {
        if (producerMap.get(bootstrap_servers) == null) {
            ClassLoader cl = ApplicationContext.setContextPackageClassLoader(getPackage());
            kafkaProducer = new KafkaProducer<>(producerProps);
            producerMap.put(bootstrap_servers, kafkaProducer);
            ApplicationContext.resetContextClassLoader(cl);
            return kafkaProducer;
        }
        else
            return producerMap.get(bootstrap_servers);
    }
}
Example #22
Source File: AccumuloCreatePeriodicPCJ.java from rya with Apache License 2.0
private static KafkaProducer<String, CommandNotification> createProducer(String bootStrapServers) {
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootStrapServers);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, CommandNotificationSerializer.class.getName());
    return new KafkaProducer<>(props);
}
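CommandNotificationSerializer is project-specific and not shown here. A custom value serializer generally implements org.apache.kafka.common.serialization.Serializer; a minimal hypothetical example for a simple string payload (class name and payload type are assumptions):

import java.nio.charset.StandardCharsets;
import java.util.Map;
import org.apache.kafka.common.serialization.Serializer;

public class NotificationSerializer implements Serializer<String> {
    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
        // No configuration needed for this sketch.
    }

    @Override
    public byte[] serialize(String topic, String data) {
        // Kafka treats a null return as a null record value.
        return data == null ? null : data.getBytes(StandardCharsets.UTF_8);
    }

    @Override
    public void close() { }
}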
Example #23
Source File: IntegrationTestHarness.java from samza with Apache License 2.0
/**
 * Starts a single Kafka broker and a single embedded ZooKeeper server in their own threads.
 * Sub-classes should invoke {@link #zkConnect()} and {@link #bootstrapUrl()} to
 * obtain the URLs (and ports) of the started ZooKeeper server and Kafka broker.
 */
@Before
@Override
public void setUp() {
    super.setUp();
    producer = new KafkaProducer<>(createProducerConfigs());
    consumer = new KafkaConsumer<>(createConsumerConfigs());

    Properties kafkaConfig = new Properties();
    kafkaConfig.setProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers());
    adminClient = AdminClient.create(kafkaConfig);

    systemAdmin = createSystemAdmin("kafka");
    systemAdmin.start();
}
Example #24
Source File: UvExampleUtil.java from flink-learning with Apache License 2.0
public static void writeToKafka() throws InterruptedException {
    Properties props = new Properties();
    props.put("bootstrap.servers", broker_list);
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    KafkaProducer producer = new KafkaProducer<String, String>(props);

    // Generate random numbers 0-9 as the appId.
    for (int i = 0; i < 10; i++) {
        String yyyyMMdd = new DateTime(System.currentTimeMillis()).toString("yyyyMMdd");
        int pageId = random.nextInt(10);   // randomly generated page id
        int userId = random.nextInt(100);  // randomly generated user id

        UserVisitWebEvent userVisitWebEvent = UserVisitWebEvent.builder()
                .id(UUID.randomUUID().toString())  // unique id of this log event
                .date(yyyyMMdd)                    // date
                .pageId(pageId)                    // page id
                .userId(Integer.toString(userId))  // user id
                .url("url/" + pageId)              // url of the page
                .build();

        // Serialize the object to JSON and send it to Kafka.
        ProducerRecord record = new ProducerRecord<String, String>(topic, null, null,
                GsonUtil.toJson(userVisitWebEvent));
        producer.send(record);
        System.out.println("Sent data: " + GsonUtil.toJson(userVisitWebEvent));
    }
    producer.flush();
}
Example #25
Source File: CanalKafkaProducer.java from canal with Apache License 2.0
@Override
public void init(Properties properties) {
    KafkaProducerConfig kafkaProducerConfig = new KafkaProducerConfig();
    this.mqProperties = kafkaProducerConfig;
    super.init(properties);

    // load properties
    this.loadKafkaProperties(properties);

    Properties kafkaProperties = new Properties();
    kafkaProperties.putAll(kafkaProducerConfig.getKafkaProperties());
    kafkaProperties.put("key.serializer", StringSerializer.class);
    if (kafkaProducerConfig.isKerberosEnabled()) {
        File krb5File = new File(kafkaProducerConfig.getKrb5File());
        File jaasFile = new File(kafkaProducerConfig.getJaasFile());
        if (krb5File.exists() && jaasFile.exists()) {
            // Configure Kerberos authentication; absolute paths are required.
            System.setProperty("java.security.krb5.conf", krb5File.getAbsolutePath());
            System.setProperty("java.security.auth.login.config", jaasFile.getAbsolutePath());
            System.setProperty("javax.security.auth.useSubjectCredsOnly", "false");
            kafkaProperties.put("security.protocol", "SASL_PLAINTEXT");
            kafkaProperties.put("sasl.kerberos.service.name", "kafka");
        } else {
            String errorMsg = "ERROR # The kafka kerberos configuration file does not exist! please check it";
            logger.error(errorMsg);
            throw new RuntimeException(errorMsg);
        }
    }
    kafkaProperties.put("value.serializer", KafkaMessageSerializer.class);
    producer = new KafkaProducer<>(kafkaProperties);
}
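The jaasFile referenced above is a standard JAAS login configuration. For a Kerberos-authenticated Kafka client it typically contains a KafkaClient section like the following sketch; the keytab path and principal are placeholders, not values from this project:

KafkaClient {
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    storeKey=true
    keyTab="/etc/security/keytabs/kafka_client.keytab"
    principal="kafka-client@EXAMPLE.COM";
};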
Example #26
Source File: KafkaOutput.java from envelope with Apache License 2.0
private void initialize() {
    Serializer<Row> keySerializer, valueSerializer;
    switch (serializerType) {
        case DELIMITED_SERIALIZER:
            keySerializer = new DelimitedSerializer();
            valueSerializer = new DelimitedSerializer();
            break;
        case AVRO_SERIALIZER:
            keySerializer = new AvroSerializer();
            valueSerializer = new AvroSerializer();
            break;
        default:
            throw new RuntimeException("Kafka output does not support serializer type: " + serializerType);
    }

    Map<String, ?> serializerConfiguration = getSerializerConfiguration();
    keySerializer.configure(serializerConfiguration, true);
    valueSerializer.configure(serializerConfiguration, false);

    Map<String, Object> producerProps = Maps.newHashMap();
    producerProps.put("bootstrap.servers", brokers);
    KafkaCommon.addCustomParams(producerProps, config);

    producer = new KafkaProducer<>(producerProps, keySerializer, valueSerializer);
    LOG.info("Producer initialized");
}
Example #27
Source File: KafkaClientUtil.java from javabase with Apache License 2.0
/**
 * Build a KafkaProducer.
 * @return
 */
private KafkaProducer<Integer, String> getProducer() {
    Properties properties = new Properties();
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
    // Classes for the key and value serializers.
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    return new KafkaProducer(properties);
}
Example #28
Source File: KafkaStorageIT.java from zipkin-storage-kafka with Apache License 2.0
@BeforeEach
void setUp() throws Exception {
    consumerConfig = new Properties();
    consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_BOOTSTRAP_SERVERS);
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "test");
    consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
    consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);

    assertThat(kafkaContainer.isRunning()).isTrue();

    traceTimeout = Duration.ofSeconds(5);
    int serverPort = randomPort();
    storageBuilder = KafkaStorage.newBuilder()
            .bootstrapServers(KAFKA_BOOTSTRAP_SERVERS)
            .storageStateDir("target/zipkin_" + System.currentTimeMillis())
            .hostname("localhost")
            .serverPort(serverPort);
    storageBuilder.spanAggregation.traceTimeout(traceTimeout);
    storage = (KafkaStorage) storageBuilder.build();

    server = Server.builder()
            .annotatedService("/storage/kafka", new KafkaStorageHttpService(storage))
            .http(serverPort)
            .build();
    server.start();

    Collection<NewTopic> newTopics = new ArrayList<>();
    newTopics.add(new NewTopic(storageBuilder.spanAggregation.spansTopic, 1, (short) 1));
    newTopics.add(new NewTopic(storageBuilder.spanAggregation.traceTopic, 1, (short) 1));
    newTopics.add(new NewTopic(storageBuilder.spanAggregation.dependencyTopic, 1, (short) 1));
    storage.getAdminClient().createTopics(newTopics).all().get();

    await().atMost(10, TimeUnit.SECONDS).until(() -> storage.check().ok());
    storage.checkResources();

    Properties producerConfig = new Properties();
    producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_BOOTSTRAP_SERVERS);
    tracesProducer = new KafkaProducer<>(producerConfig, new StringSerializer(), spansSerde.serializer());
    dependencyProducer = new KafkaProducer<>(producerConfig, new StringSerializer(),
            dependencyLinkSerde.serializer());
}
Example #29
Source File: KafkaComponent.java from metron with Apache License 2.0
/**
 * Write a collection of messages to a Kafka topic.
 *
 * @param topic The name of the Kafka topic.
 * @param messages The collection of messages to write.
 */
public void writeMessages(String topic, Collection<byte[]> messages) {
    try (KafkaProducer<String, byte[]> kafkaProducer = createProducer()) {
        for (byte[] message : messages) {
            kafkaProducer.send(new ProducerRecord<>(topic, message));
        }
    }
}
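The createProducer() factory is not shown in this excerpt. A plausible sketch matching the KafkaProducer<String, byte[]> type above (the broker-address field is an assumption, not part of the original source):

// Hypothetical factory for the producer used by writeMessages.
private KafkaProducer<String, byte[]> createProducer() {
    Properties props = new Properties();
    props.put("bootstrap.servers", brokerList);  // hypothetical field holding the broker addresses
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
    return new KafkaProducer<>(props);
}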
Example #30
Source File: ProducerDemoCallback.java from KafkaExample with Apache License 2.0
public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put("bootstrap.servers", "kafka0:9092");
    props.put("acks", "all");
    props.put("retries", 3);
    props.put("batch.size", 16384);
    props.put("linger.ms", 1);
    props.put("buffer.memory", 33554432);
    props.put("key.serializer", StringSerializer.class.getName());
    props.put("value.serializer", StringSerializer.class.getName());
    props.put("partitioner.class", HashPartitioner.class.getName());

    Producer<String, String> producer = new KafkaProducer<String, String>(props);
    for (int i = 0; i < 10; i++) {
        ProducerRecord record = new ProducerRecord<String, String>("topic1", Integer.toString(i), Integer.toString(i));
        // producer.send(record);
        // producer.send(record, new Callback() {
        //
        //     @Override
        //     public void onCompletion(RecordMetadata metadata, Exception exception) {
        //         System.out.printf("Send record partition:%d, offset:%d, keysize:%d, valuesize:%d %n",
        //                 metadata.partition(), metadata.offset(), metadata.serializedKeySize(),
        //                 metadata.serializedValueSize());
        //     }
        //
        // });
        producer.send(record, (metadata, exception) -> {
            if (metadata != null) {
                System.out.printf("Send record partition:%d, offset:%d, keysize:%d, valuesize:%d %n",
                        metadata.partition(), metadata.offset(), metadata.serializedKeySize(),
                        metadata.serializedValueSize());
            }
            if (exception != null) {
                exception.printStackTrace();
            }
        });
    }
    producer.close();
}