org.apache.kafka.common.serialization.StringSerializer Java Examples

The following examples show how to use org.apache.kafka.common.serialization.StringSerializer. Each example is taken from an open-source project; the source file, originating project, and license are noted above each snippet.
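Before the project examples, here is a minimal, self-contained sketch of the most common pattern they all share: registering StringSerializer for keys and values and sending a record. The broker address and topic name are placeholders, not values from any project below.

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class MinimalStringProducer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        // try-with-resources closes the producer, flushing any buffered records
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("demo-topic", "key", "value")); // placeholder topic
        }
    }
}
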
Example #1
Source File: KafkaProducerTest.java    From java-study with Apache License 2.0
public KafkaProducerTest(String topicName) {
	Properties props = new Properties();
	props.put("bootstrap.servers", "master:9092,slave1:9092,slave2:9092");
	// acks=0: the producer does not wait for any acknowledgement from Kafka.
	// acks=1: the leader writes the message to its local log but does not wait for acknowledgement from the other replicas.
	// acks=all: the leader waits until all followers have replicated the message. The message cannot be lost unless every broker in the cluster goes down; this is the strongest available guarantee.
	props.put("acks", "all");
	// If set to a value greater than 0, the client resends any message whose delivery failed.
	props.put("retries", 0);
	// When multiple records are sent to the same partition, the producer batches them into fewer network requests, improving efficiency for both the client and the server.
	props.put("batch.size", 16384);
	props.put("batch.size", 16384);
	props.put("key.serializer", StringSerializer.class.getName());
	props.put("value.serializer", StringSerializer.class.getName());
	this.producer = new KafkaProducer<String, String>(props);
	this.topic = topicName;
}
 
Example #2
Source File: WebKafkaConsumerTest.java    From kafka-webview with MIT License
public void publishDummyData() {
    final String topic = "TestTopic";

    // Create publisher
    final Map<String, Object> config = new HashMap<>();
    config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

    final KafkaProducer<String, String> producer = new KafkaProducer<>(config);
    for (int charCode = 65; charCode < 91; charCode++) {
        final char[] key = new char[1];
        key[0] = (char) charCode;

        producer.send(new ProducerRecord<>(topic, new String(key), new String(key)));
    }
    producer.flush();
    producer.close();
}
 
Example #3
Source File: ProducerExample.java    From pulsar with Apache License 2.0
public static void main(String[] args) {
    String topic = "persistent://public/default/test";

    Properties props = new Properties();
    props.put("bootstrap.servers", "pulsar://localhost:6650");
    props.put("key.serializer", IntegerSerializer.class.getName());
    props.put("value.serializer", StringSerializer.class.getName());

    Producer<Integer, String> producer = new KafkaProducer<>(props);

    for (int i = 0; i < 10; i++) {
        producer.send(new ProducerRecord<Integer, String>(topic, i, Integer.toString(i)));
        log.info("Message {} sent successfully", i);
    }

    producer.flush();
    producer.close();
}
 
Example #4
Source File: ProtostuffSerializer.java    From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) {
    String brokerList = "192.168.0.101:9092";
    String topic = "topic.serialization";
    Properties properties = new Properties();
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    // Custom ProtostuffSerializer as the value serializer
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ProtostuffSerializer.class.getName());
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);

    KafkaProducer<String, Company> producer = new KafkaProducer<>(properties);

    Company company = Company.builder().name("whirly").address("中国").build();
    ProducerRecord<String, Company> record = new ProducerRecord<>(topic, company);
    try {
        producer.send(record).get();
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        producer.close();
    }
}
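The ProtostuffSerializer registered above is the project's own class; its implementation is not shown on this page. For illustration only, a custom Kafka Serializer built on Protostuff generally looks like the following sketch. The Company type comes from the example; the schema handling is an assumption, not the project's actual code.

import java.util.Map;

import io.protostuff.LinkedBuffer;
import io.protostuff.ProtostuffIOUtil;
import io.protostuff.Schema;
import io.protostuff.runtime.RuntimeSchema;
import org.apache.kafka.common.serialization.Serializer;

// Illustrative sketch only; not the project's actual ProtostuffSerializer.
public class ProtostuffSerializer implements Serializer<Company> {

    private final Schema<Company> schema = RuntimeSchema.getSchema(Company.class);

    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
        // no configuration needed
    }

    @Override
    public byte[] serialize(String topic, Company data) {
        if (data == null) {
            return null;
        }
        LinkedBuffer buffer = LinkedBuffer.allocate(LinkedBuffer.DEFAULT_BUFFER_SIZE);
        try {
            // Serialize the object graph using a runtime-generated Protostuff schema
            return ProtostuffIOUtil.toByteArray(data, schema, buffer);
        } finally {
            buffer.clear();
        }
    }

    @Override
    public void close() {
        // nothing to release
    }
}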
 
Example #5
Source File: WordCountTopology.java    From KafkaExample with Apache License 2.0
public static void main(String[] args) throws IOException {
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-wordcount-processor");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka0:19092");
    props.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, "zookeeper0:12181/kafka");
    props.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
    props.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.Integer().getClass());
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    TopologyBuilder builder = new TopologyBuilder();
    builder.addSource("SOURCE", new StringDeserializer(), new StringDeserializer(), "words")
            .addProcessor("WordCountProcessor", WordCountProcessor::new, "SOURCE")
            .addStateStore(Stores.create("Counts").withStringKeys().withIntegerValues().inMemory().build(), "WordCountProcessor")
//          .connectProcessorAndStateStores("WordCountProcessor", "Counts")
            .addSink("SINK", "count", new StringSerializer(), new IntegerSerializer(), "WordCountProcessor");

    KafkaStreams stream = new KafkaStreams(builder, props);
    stream.start();
    System.in.read();
    stream.close();
    stream.cleanUp();
}
 
Example #6
Source File: KafkaStreamsYellingIntegrationTest.java    From kafka-streams-in-action with Apache License 2.0
@Before
public void setUp() {
    Properties properties = StreamsTestUtils.getStreamsConfig("integrationTest",
            EMBEDDED_KAFKA.bootstrapServers(),
            STRING_SERDE_CLASSNAME,
            STRING_SERDE_CLASSNAME,
            new Properties());
    properties.put(IntegrationTestUtils.INTERNAL_LEAVE_GROUP_ON_CLOSE, true);
    
    streamsConfig = new StreamsConfig(properties);

    producerConfig = TestUtils.producerConfig(EMBEDDED_KAFKA.bootstrapServers(),
            StringSerializer.class,
            StringSerializer.class);

    consumerConfig = TestUtils.consumerConfig(EMBEDDED_KAFKA.bootstrapServers(),
            StringDeserializer.class,
            StringDeserializer.class);
}
 
Example #7
Source File: PaverTollboothServiceConfiguration.java    From data-highway with Apache License 2.0
@Bean
public PatchSetEmitter roadModificationEmitter(
    @Value("${kafka.bootstrapServers}") String bootstrapServers,
    @Value("${kafka.road.modification.topic}") String topic,
    ObjectMapper mapper) {

  Map<String, Object> producerProps = new HashMap<>();
  producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
  producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
  producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
  producerProps.put(ProducerConfig.RETRIES_CONFIG, 1);

  Producer<String, String> kafkaProducer = new KafkaProducer<>(producerProps);

  return new KafkaPatchSetEmitter(topic, kafkaProducer, mapper);
}
 
Example #8
Source File: RunQueryCommandIT.java    From rya with Apache License 2.0
@Before
public void setup() {
    // Make sure the topic that the change log uses exists.
    final String changeLogTopic = KafkaTopics.queryChangeLogTopic("" + ryaInstance);
    kafka.createTopic(changeLogTopic);

    // Setup the QueryRepository used by the test.
    final Producer<?, QueryChange> queryProducer = KafkaTestUtil.makeProducer(kafka, StringSerializer.class, QueryChangeSerializer.class);
    final Consumer<?, QueryChange> queryConsumer = KafkaTestUtil.fromStartConsumer(kafka, StringDeserializer.class, QueryChangeDeserializer.class);
    final QueryChangeLog changeLog = new KafkaQueryChangeLog(queryProducer, queryConsumer, changeLogTopic);
    queryRepo = new InMemoryQueryRepository(changeLog, Scheduler.newFixedRateSchedule(0L, 5, TimeUnit.SECONDS));

    // Initialize the Statements Producer and the Results Consumer.
    stmtProducer = KafkaTestUtil.makeProducer(kafka, StringSerializer.class, VisibilityStatementSerializer.class);
    resultConsumer = KafkaTestUtil.fromStartConsumer(kafka, StringDeserializer.class, VisibilityBindingSetDeserializer.class);
}
 
Example #9
Source File: KafkaKeyValueProducerPusher.java    From incubator-gobblin with Apache License 2.0
public KafkaKeyValueProducerPusher(String brokers, String topic, Optional<Config> kafkaConfig) {
  this.closer = Closer.create();

  this.topic = topic;

  Properties props = new Properties();
  props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
  props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
  props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
  props.put(ProducerConfig.ACKS_CONFIG, "all");
  props.put(ProducerConfig.RETRIES_CONFIG, 3);
  // To guarantee ordered delivery, the maximum number of in-flight requests must be set to 1.
  props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 1);
  props.put(ProducerConfig.BLOCK_ON_BUFFER_FULL_CONFIG, true);

  // Add the Kafka-scoped config. If it specifies any of the settings above, they are overridden.
  if (kafkaConfig.isPresent()) {
    props.putAll(ConfigUtils.configToProperties(kafkaConfig.get()));
    this.numFuturesToBuffer = ConfigUtils.getLong(kafkaConfig.get(), MAX_NUM_FUTURES_TO_BUFFER_KEY, DEFAULT_MAX_NUM_FUTURES_TO_BUFFER);
  }

  this.producer = createProducer(props);
}
 
Example #10
Source File: FeatureStreamConfig.java    From feast with Apache License 2.0
@Bean
public KafkaTemplate<String, FeatureSetProto.FeatureSetSpec> specKafkaTemplate(
    FeastProperties feastProperties) {
  StreamProperties streamProperties = feastProperties.getStream();
  Map<String, Object> props = new HashMap<>();

  props.put(
      ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
      streamProperties.getOptions().getBootstrapServers());

  KafkaTemplate<String, FeatureSetProto.FeatureSetSpec> t =
      new KafkaTemplate<>(
          new DefaultKafkaProducerFactory<>(
              props, new StringSerializer(), new KafkaSerialization.ProtoSerializer<>()));
  t.setDefaultTopic(streamProperties.getSpecsOptions().getSpecsTopic());
  return t;
}
 
Example #11
Source File: PeriodicNotificationApplicationIT.java    From rya with Apache License 2.0
@Before
public void init() throws Exception {
    final String topic = rule.getKafkaTopicName();
    rule.createTopic(topic);

    //get user specified props and update with the embedded kafka bootstrap servers and rule generated topic
    props = getProps();
    props.setProperty(NOTIFICATION_TOPIC, topic);
    props.setProperty(KAFKA_BOOTSTRAP_SERVERS, bootstrapServers);
    conf = new PeriodicNotificationApplicationConfiguration(props);

    //create Kafka Producer
    kafkaProps = getKafkaProperties(conf);
    producer = new KafkaProducer<>(kafkaProps, new StringSerializer(), new CommandNotificationSerializer());

    //extract kafka specific properties from application config
    app = PeriodicNotificationApplicationFactory.getPeriodicApplication(conf);
    registrar = new KafkaNotificationRegistrationClient(conf.getNotificationTopic(), producer);
}
 
Example #12
Source File: KafkaLegacyClientIT.java    From apm-agent-java with Apache License 2.0
@BeforeClass
public static void setup() {
    // Confluent versions 5.3.x correspond to Kafka versions 2.3.x -
    // https://docs.confluent.io/current/installation/versions-interoperability.html#cp-and-apache-ak-compatibility
    kafka = new KafkaContainer("5.3.0");
    kafka.start();
    kafkaPort = kafka.getMappedPort(KafkaContainer.KAFKA_PORT);
    bootstrapServers = kafka.getBootstrapServers();
    consumerThread = new Consumer();
    consumerThread.start();
    replyConsumer = createKafkaConsumer();
    replyConsumer.subscribe(Collections.singletonList(REPLY_TOPIC));
    producer = new KafkaProducer<>(
        ImmutableMap.of(
            ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers,
            ProducerConfig.CLIENT_ID_CONFIG, UUID.randomUUID().toString(),
            // This should guarantee that records are batched, as long as they are sent within the configured duration
            ProducerConfig.LINGER_MS_CONFIG, 50
        ),
        new StringSerializer(),
        new StringSerializer()
    );
}
 
Example #13
Source File: KafkaProducer09IT.java    From datacollector with Apache License 2.0
@Test
public void testKafkaProducer09Write() throws IOException, StageException {

  final String message = "Hello StreamSets";

  HashMap<String, Object> kafkaProducerConfigs = new HashMap<>();
  kafkaProducerConfigs.put("retries", 0);
  kafkaProducerConfigs.put("batch.size", 100);
  kafkaProducerConfigs.put("linger.ms", 0);
  kafkaProducerConfigs.put(KafkaConstants.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
  kafkaProducerConfigs.put(KafkaConstants.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);

  String topic = getNextTopic();
  SdcKafkaProducer sdcKafkaProducer = createSdcKafkaProducer(port, kafkaProducerConfigs);
  sdcKafkaProducer.init();
  sdcKafkaProducer.enqueueMessage(topic, message.getBytes(), "0");
  sdcKafkaProducer.write(null);

  verify(topic, 1, "localhost:" + port, message);
}
 
Example #14
Source File: KafkaOperationRepositoryFactory.java    From eventapis with Apache License 2.0
public KafkaOperationRepository createKafkaOperationRepository(ObjectMapper objectMapper) {
    KafkaProducer<String, Operation> operationsKafka = new KafkaProducer<>(
            kafkaProperties.buildProducerProperties(),
            new StringSerializer(),
            new JsonSerializer<>(objectMapper)
    );
    KafkaProducer<String, PublishedEventWrapper> eventsKafka = new KafkaProducer<>(
            kafkaProperties.buildProducerProperties(),
            new StringSerializer(),
            new JsonSerializer<>(objectMapper)
    );
    return new KafkaOperationRepository(
            operationContext,
            userContext,
            operationsKafka,
            eventsKafka,
            kafkaProperties.getConsumer().getGroupId()
    );
}
 
Example #15
Source File: KafkaQueryChangeLogFactory.java    From rya with Apache License 2.0
/**
 * Creates an instance of {@link KafkaQueryChangeLog} using a new {@link Producer} and {@link Consumer}.
 *
 * @param bootstrapServers - Indicates which instance of Kafka that will be connected to. (not null)
 * @param topic - The topic the QueryChangeLog is persisted to. (not null)
 * @return A new instance of {@link KafkaQueryChangeLog}.
 */
public static KafkaQueryChangeLog make(
        final String bootstrapServers,
        final String topic) {
    requireNonNull(bootstrapServers);
    requireNonNull(topic);

    final Properties producerProperties = new Properties();
    producerProperties.setProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    producerProperties.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    producerProperties.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, QueryChangeSerializer.class.getName());

    final Properties consumerProperties = new Properties();
    consumerProperties.setProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    consumerProperties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    consumerProperties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, QueryChangeDeserializer.class.getName());

    final Producer<?, QueryChange> producer = new KafkaProducer<>(producerProperties);
    final Consumer<?, QueryChange> consumer = new KafkaConsumer<>(consumerProperties);
    return new KafkaQueryChangeLog(producer, consumer, topic);
}
 
Example #16
Source File: KafkaRepository.java    From kafka-service-broker with Apache License 2.0
private Map<String, Object> senderProperties() {
    Map<String, Object> props = new HashMap<>();

    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, info.getHosts());
    props.put(ProducerConfig.RETRIES_CONFIG, 0);
    props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
    props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
    props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);

    return props;
}
 
Example #17
Source File: Main.java    From ari-proxy with GNU Affero General Public License v3.0
private static void runAriEventProcessor(
		Config serviceConfig,
		ActorSystem system,
		ActorRef callContextProvider,
		ActorRef metricsService,
		Runnable applicationReplacedHandler) {
	// see: https://doc.akka.io/docs/akka/2.5.8/java/stream/stream-error.html#delayed-restarts-with-a-backoff-stage
	final Flow<Message, Message, NotUsed> restartWebsocketFlow = RestartFlow.withBackoff(
			Duration.ofSeconds(3), // min backoff
			Duration.ofSeconds(30), // max backoff
			0.2, // adds 20% "noise" to vary the intervals slightly
			() -> createWebsocketFlow(system, serviceConfig.getString(WEBSOCKET_URI))
	);

	final Source<Message, NotUsed> source = Source.<Message>maybe().viaMat(restartWebsocketFlow, Keep.right());

	final ProducerSettings<String, String> producerSettings = ProducerSettings
			.create(system, new StringSerializer(), new StringSerializer())
			.withBootstrapServers(serviceConfig.getConfig(KAFKA).getString(BOOTSTRAP_SERVERS));

	final Sink<ProducerRecord<String, String>, NotUsed> sink = Producer
			.plainSink(producerSettings)
			.mapMaterializedValue(done -> NotUsed.getInstance());

	final Run processingPipeline = WebsocketMessageToProducerRecordTranslator.eventProcessing()
			.on(system)
			.withHandler(applicationReplacedHandler)
			.withCallContextProvider(callContextProvider)
			.withMetricsService(metricsService)
			.from(source)
			.to(sink);

	Match(Try.of(() -> processingPipeline.run())).of(
			Case($Success($()), mat -> run(() -> system.log().debug("Successfully started ari event processor."))),
			Case($Failure($(instanceOf(KafkaException.class))), err -> run(() -> {
				system.log().error(err, "Failed to start ari event processor.");
				System.exit(-1);
			}))
	);
}
 
Example #18
Source File: CompositeTransactionConfiguration.java    From microservices-transactions-tcc with Apache License 2.0
@Bean
public Map<String, Object> kafkaProducerConfiguration() {
	Map<String, Object> props = new HashMap<>();
	// list of host:port pairs used for establishing the initial connections to the Kafka cluster
	props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBootstrapServers);
	props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
	props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);

	return props;
}
 
Example #19
Source File: ExactlyOnceE2E.java    From flink-statefun with Apache License 2.0
private static Producer<String, WrappedMessage> kafkaWrappedMessagesProducer(
    String bootstrapServers) {
  Properties props = new Properties();
  props.put("bootstrap.servers", bootstrapServers);

  return new KafkaProducer<>(
      props, new StringSerializer(), new KafkaProtobufSerializer<>(WrappedMessage.parser()));
}
 
Example #20
Source File: CanalKafkaProducer.java    From canal with Apache License 2.0
@Override
public void init(Properties properties) {
    KafkaProducerConfig kafkaProducerConfig = new KafkaProducerConfig();
    this.mqProperties = kafkaProducerConfig;
    super.init(properties);
    // load properties
    this.loadKafkaProperties(properties);

    Properties kafkaProperties = new Properties();
    kafkaProperties.putAll(kafkaProducerConfig.getKafkaProperties());
    kafkaProperties.put("key.serializer", StringSerializer.class);
    if (kafkaProducerConfig.isKerberosEnabled()) {
        File krb5File = new File(kafkaProducerConfig.getKrb5File());
        File jaasFile = new File(kafkaProducerConfig.getJaasFile());
        if (krb5File.exists() && jaasFile.exists()) {
            // Configure Kerberos authentication; absolute paths are required
            System.setProperty("java.security.krb5.conf", krb5File.getAbsolutePath());
            System.setProperty("java.security.auth.login.config", jaasFile.getAbsolutePath());
            System.setProperty("javax.security.auth.useSubjectCredsOnly", "false");
            kafkaProperties.put("security.protocol", "SASL_PLAINTEXT");
            kafkaProperties.put("sasl.kerberos.service.name", "kafka");
        } else {
            String errorMsg = "ERROR # The kafka kerberos configuration file does not exist! please check it";
            logger.error(errorMsg);
            throw new RuntimeException(errorMsg);
        }
    }
    kafkaProperties.put("value.serializer", KafkaMessageSerializer.class);
    producer = new KafkaProducer<>(kafkaProperties);
}
 
Example #21
Source File: PosSimulator.java    From Kafka-Streams-Real-time-Stream-Processing with The Unlicense
public static void main(String[] args) {

    if (args.length < 3) {
        System.out.println("Please provide command line arguments: topicName noOfProducers produceSpeed");
        System.exit(-1);
    }

    String topicName = args[0];
    int noOfProducers = Integer.parseInt(args[1]);
    int produceSpeed = Integer.parseInt(args[2]);
    Properties properties = new Properties();
    properties.put(ProducerConfig.CLIENT_ID_CONFIG, "StockSimulator");
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092,localhost:9093");
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class.getName());

    KafkaProducer<String, PosInvoice> kafkaProducer = new KafkaProducer<>(properties);
    // One thread per long-running producer so none of them starve in the pool.
    ExecutorService executor = Executors.newFixedThreadPool(noOfProducers);
    final List<RunnableProducer> runnableProducers = new ArrayList<>();
    for (int i = 0; i < noOfProducers; i++) {
        RunnableProducer runnableProducer = new RunnableProducer(i, kafkaProducer, topicName, produceSpeed);
        runnableProducers.add(runnableProducer);
        executor.submit(runnableProducer);
    }

    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
        for (RunnableProducer p : runnableProducers)
            p.shutdown();
        executor.shutdown();
        logger.info("Closing Executor Service");
        try {
            executor.awaitTermination(produceSpeed * 2, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
    }));
}
 
Example #22
Source File: BasicExternalKafkaClient.java    From strimzi-kafka-operator with Apache License 2.0
/**
 * Send messages to external entrypoint of the cluster with SSL security protocol setting
 * @return sent message count
 */
public int sendMessagesTls(long timeoutMs) {

    String clientName = "sender-ssl" + this.clusterName;
    CompletableFuture<Integer> resultPromise = new CompletableFuture<>();
    IntPredicate msgCntPredicate = x -> x == messageCount;

    String caCertName = this.caCertName == null ?
            KafkaResource.getKafkaExternalListenerCaCertName(this.namespaceName, clusterName) : this.caCertName;
    LOGGER.info("Going to use the following CA certificate: {}", caCertName);

    KafkaClientProperties properties = this.clientProperties;

    if (properties == null || properties.getProperties().isEmpty()) {
        properties = new KafkaClientProperties.KafkaClientPropertiesBuilder()
            .withNamespaceName(namespaceName)
            .withClusterName(clusterName)
            .withBootstrapServerConfig(getExternalBootstrapConnect(namespaceName, clusterName))
            .withKeySerializerConfig(StringSerializer.class)
            .withValueSerializerConfig(StringSerializer.class)
            .withClientIdConfig("producer-tls-" + new Random().nextInt(Integer.MAX_VALUE))
            .withCaSecretName(caCertName)
            .withKafkaUsername(kafkaUsername)
            .withSecurityProtocol(securityProtocol)
            .withSaslMechanism("")
            .withSharedProperties()
            .build();
    }

    try (Producer tlsProducer = new Producer(properties, resultPromise, msgCntPredicate, this.topicName, clientName, partition)) {

        tlsProducer.getVertx().deployVerticle(tlsProducer);

        return tlsProducer.getResultPromise().get(timeoutMs, TimeUnit.MILLISECONDS);
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        e.printStackTrace();
        throw new WaitException(e);
    }
}
 
Example #23
Source File: KafkaMessageLogReceiverEndpointIntegrationTest.java    From synapse with Apache License 2.0
@Bean
public ProducerFactory<String, String> producerFactory(final EmbeddedKafkaBroker embeddedKafkaBroker) {
    final Map<String, Object> configs = producerProps(embeddedKafkaBroker);
    configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    return new DefaultKafkaProducerFactory<>(configs);
}
 
Example #24
Source File: SenderConfig.java    From spring-kafka with MIT License
@Bean
public Map<String, Object> producerConfigs() {
  Map<String, Object> props = new HashMap<>();
  props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
  props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
  props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);

  return props;
}
 
Example #25
Source File: KafKaConfig.java    From gpmall with Apache License 2.0
@Bean
public KafKaRegisterSuccProducerFactory kafKaRegisterSuccProducerFactory() {
    Map<String, Object> producerProperties = kafkaProperties.buildProducerProperties();
    producerProperties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaProperties.getBootstrapServers());
    producerProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    producerProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
    producerProperties.put(ProducerConfig.ACKS_CONFIG, "-1");
    return new KafKaRegisterSuccProducerFactory(producerProperties);
}
 
Example #26
Source File: KafkaConfig.java    From enode with MIT License
/**
 * Create the producer factory from the senderProps parameters.
 */
@Bean
public ProducerFactory producerFactory() {
    Map<String, Object> props = new HashMap<>();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, QueueProperties.KAFKA_SERVER);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    return new DefaultKafkaProducerFactory<>(props);
}
 
Example #27
Source File: KafkaProducerPoolTest.java    From common-kafka with Apache License 2.0
@Test (timeout = 10000)
public void messageProductionWithProducerConfig() throws InterruptedException, KafkaExecutionException, ExecutionException {
    Properties props = KafkaTests.getProps();
    props.setProperty(KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    props.setProperty(VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    props.setProperty(LINGER_MS_CONFIG, String.valueOf(100));

    messageProduction(props);
}
 
Example #28
Source File: KafkaCommandConfig.java    From enode with MIT License
/**
 * Create the producer factory from the parameters configured in senderProps.
 */
@Bean
public ProducerFactory producerFactory() {
    Map<String, Object> props = new HashMap<>();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_SERVER);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    return new DefaultKafkaProducerFactory<>(props);
}
 
Example #29
Source File: KafkaLoadStatementsIT.java    From rya with Apache License 2.0
@Test(expected = UnsupportedRDFormatException.class)
public void test_invalidFile() throws Exception {
    try(final Producer<?, VisibilityStatement> producer =
            KafkaTestUtil.makeProducer(rule, StringSerializer.class, VisibilityStatementSerializer.class)) {
        final KafkaLoadStatements command = new KafkaLoadStatements(rule.getKafkaTopicName(), producer);
        command.fromFile(INVALID, "a|b|c");
    }
}
 
Example #30
Source File: KafkaProducerConfiguration.java    From ZTuoExchange_framework with MIT License
public Map<String, Object> producerConfigs() {
    Map<String, Object> props = new HashMap<>();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
    props.put(ProducerConfig.RETRIES_CONFIG, retries);
    props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
    props.put(ProducerConfig.LINGER_MS_CONFIG, linger);
    props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
//  props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, "cn.ztuo.bitrade.kafka.kafkaPartitioner");
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    return props;
}