Java Code Examples for org.apache.kafka.clients.producer.ProducerConfig#BOOTSTRAP_SERVERS_CONFIG

The following examples show how to use org.apache.kafka.clients.producer.ProducerConfig#BOOTSTRAP_SERVERS_CONFIG. Each example comes from an open-source project; the source file and originating project are noted above each snippet.
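
For orientation, here is a minimal, self-contained sketch of how BOOTSTRAP_SERVERS_CONFIG is supplied when building a plain KafkaProducer. The broker address and topic name are placeholders, not values taken from any project below.

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class BootstrapServersSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // bootstrap.servers is the one setting every producer must supply;
        // "localhost:9092" stands in for your own comma-separated broker list.
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        // try-with-resources closes the producer, flushing pending records.
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("demo-topic", "key", "value"));
        }
    }
}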
Example 1
Source File: ParaflowKafkaProducer.java    From paraflow with Apache License 2.0
public ParaflowKafkaProducer(CollectorConfig conf, long statsInterval)
{
    Properties config = conf.getProperties();
    // set the producer configuration properties for kafka record key and value serializers
    if (!config.containsKey(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)) {
        config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    }
    if (!config.containsKey(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)) {
        config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    }
    if (!config.containsKey(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
        throw new IllegalArgumentException(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG + " must be specified in the config");
    }
    kafkaProducer = new KafkaProducer<>(config);
    this.throughputStats = new ThroughputStats(statsInterval, conf.isMetricEnabled(), conf.getPushGateWayUrl(),
            conf.getCollectorId());
}
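
As an aside, the default-then-validate pattern above can be written more compactly with Properties.putIfAbsent (Java 8+). The helper below is an editorial sketch, not part of the paraflow source:

static Properties withSerializerDefaults(Properties config)
{
    // putIfAbsent replaces the containsKey/put pairs above.
    config.putIfAbsent(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    config.putIfAbsent(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    // bootstrap.servers has no sensible default, so fail fast if it is missing.
    if (!config.containsKey(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
        throw new IllegalArgumentException(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG + " must be specified in the config");
    }
    return config;
}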
 
Example 2
Source File: FlinkKafkaProducerBase.java    From Flink-CEPplus with Apache License 2.0
/**
 * The main constructor for creating a FlinkKafkaProducer.
 *
 * @param defaultTopicId The default topic to write data to
 * @param serializationSchema A serializable serialization schema for turning user objects into a kafka-consumable byte[] supporting key/value messages
 * @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers' is the only required argument.
 * @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions. Passing null will use Kafka's partitioner.
 */
public FlinkKafkaProducerBase(String defaultTopicId, KeyedSerializationSchema<IN> serializationSchema, Properties producerConfig, FlinkKafkaPartitioner<IN> customPartitioner) {
	requireNonNull(defaultTopicId, "TopicID not set");
	requireNonNull(serializationSchema, "serializationSchema not set");
	requireNonNull(producerConfig, "producerConfig not set");
	ClosureCleaner.clean(customPartitioner, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
	ClosureCleaner.ensureSerializable(serializationSchema);

	this.defaultTopicId = defaultTopicId;
	this.schema = serializationSchema;
	this.producerConfig = producerConfig;
	this.flinkKafkaPartitioner = customPartitioner;

	// set the producer configuration properties for kafka record key and value serializers.
	if (!producerConfig.containsKey(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)) {
		this.producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
	} else {
		LOG.warn("Overwriting the '{}' is not recommended", ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG);
	}

	if (!producerConfig.containsKey(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)) {
		this.producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
	} else {
		LOG.warn("Overwriting the '{}' is not recommended", ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG);
	}

	// eagerly ensure that bootstrap servers are set.
	if (!this.producerConfig.containsKey(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
		throw new IllegalArgumentException(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG + " must be supplied in the producer config properties.");
	}

	this.topicPartitionsMap = new HashMap<>();
}
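
The ByteArraySerializer defaults are deliberate: the KeyedSerializationSchema turns each record into byte[] on the Flink side, so Kafka itself only ever sees raw bytes. A minimal illustrative schema (the class name is ours, not Flink's) might look like this:

import java.nio.charset.StandardCharsets;
import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema;

public class Utf8StringSchema implements KeyedSerializationSchema<String> {

	@Override
	public byte[] serializeKey(String element) {
		return null; // no key: records are spread round-robin across partitions
	}

	@Override
	public byte[] serializeValue(String element) {
		return element.getBytes(StandardCharsets.UTF_8);
	}

	@Override
	public String getTargetTopic(String element) {
		return null; // null means "write to the configured default topic"
	}
}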
 
Example 3
Source File: FHIRNotificationKafkaPublisher.java    From FHIR with Apache License 2.0
/**
 * Performs any required initialization to allow us to publish events to the topic.
 */
private void init(String topicName, Properties kafkaProps) {
    log.entering(this.getClass().getName(), "init");
    try {
        this.topicName = topicName;
        this.kafkaProps = kafkaProps;
        if (log.isLoggable(Level.FINER)) {
            log.finer("Kafka publisher is configured with the following properties:\n" + this.kafkaProps.toString());
            log.finer("Topic name: " + this.topicName);
        }

        // We'll hard-code some properties to ensure they are set correctly.
        this.kafkaProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        this.kafkaProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        this.kafkaProps.put(ProducerConfig.CLIENT_ID_CONFIG, "fhir-server");

        // Make sure that the properties file contains the bootstrap.servers property at a minimum.
        String bootstrapServers = this.kafkaProps.getProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG);
        if (bootstrapServers == null) {
            throw new IllegalStateException("The " + ProducerConfig.BOOTSTRAP_SERVERS_CONFIG + " property was missing from the Kafka connection properties.");
        }

        // Create our producer object to be used for publishing.
        producer = new KafkaProducer<String, String>(this.kafkaProps);

        // Register this Kafka implementation as a "subscriber" with our Notification Service.
        // This means that our "notify" method will be called when the server publishes an event.
        service.subscribe(this);
        log.info("Initialized Kafka publisher for topic '" + topicName + "' using bootstrap servers: " + bootstrapServers + ".");
    } catch (Throwable t) {
        String msg = "Caught exception while initializing Kafka publisher.";
        log.log(Level.SEVERE, msg, t);
        throw new IllegalStateException(msg, t);
    } finally {
        log.exiting(this.getClass().getName(), "init");
    }
}
 
Example 4
Source File: FlinkKafkaProducerBase.java    From flink with Apache License 2.0
/**
 * The main constructor for creating a FlinkKafkaProducer.
 *
 * @param defaultTopicId The default topic to write data to
 * @param serializationSchema A serializable serialization schema for turning user objects into a kafka-consumable byte[] supporting key/value messages
 * @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers' is the only required argument.
 * @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions. Passing null will use Kafka's partitioner.
 */
public FlinkKafkaProducerBase(String defaultTopicId, KeyedSerializationSchema<IN> serializationSchema, Properties producerConfig, FlinkKafkaPartitioner<IN> customPartitioner) {
	requireNonNull(defaultTopicId, "TopicID not set");
	requireNonNull(serializationSchema, "serializationSchema not set");
	requireNonNull(producerConfig, "producerConfig not set");
	ClosureCleaner.clean(customPartitioner, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
	ClosureCleaner.ensureSerializable(serializationSchema);

	this.defaultTopicId = defaultTopicId;
	this.schema = serializationSchema;
	this.producerConfig = producerConfig;
	this.flinkKafkaPartitioner = customPartitioner;

	// set the producer configuration properties for kafka record key and value serializers.
	if (!producerConfig.containsKey(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)) {
		this.producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
	} else {
		LOG.warn("Overwriting the '{}' is not recommended", ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG);
	}

	if (!producerConfig.containsKey(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)) {
		this.producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
	} else {
		LOG.warn("Overwriting the '{}' is not recommended", ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG);
	}

	// eagerly ensure that bootstrap servers are set.
	if (!this.producerConfig.containsKey(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
		throw new IllegalArgumentException(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG + " must be supplied in the producer config properties.");
	}

	this.topicPartitionsMap = new HashMap<>();
}
 
Example 5
Source File: ConfigUtil.java    From adaptive-alerting with Apache License 2.0
public static Properties toProducerConfig(Config config) {
    val keys = new String[]{
        ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
        ProducerConfig.CLIENT_ID_CONFIG,
        ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
        ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG
    };
    val props = new Properties();
    copyProps(config, props, keys);
    return props;
}
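
A hypothetical call site, assuming Config here is com.typesafe.config.Config and that copyProps copies each listed key from the Config into the Properties when it is present:

// Values are placeholders; "localhost:9092" must stay quoted in HOCON
// because unquoted strings may not contain ':'.
Config config = ConfigFactory.parseString(String.join("\n",
    "bootstrap.servers = \"localhost:9092\"",
    "client.id = demo-producer",
    "key.serializer = org.apache.kafka.common.serialization.StringSerializer",
    "value.serializer = org.apache.kafka.common.serialization.StringSerializer"));
Properties props = ConfigUtil.toProducerConfig(config);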
 
Example 6
Source File: FlinkKafkaProducer.java    From Flink-CEPplus with Apache License 2.0
/**
 * Creates a FlinkKafkaProducer for a given topic. The sink produces its input to
 * the topic. It accepts a keyed {@link KeyedSerializationSchema} and possibly a custom {@link FlinkKafkaPartitioner}.
 *
 * <p>If a partitioner is not provided, written records will be partitioned by the attached key of each
 * record (as determined by {@link KeyedSerializationSchema#serializeKey(Object)}). If written records do not
 * have a key (i.e., {@link KeyedSerializationSchema#serializeKey(Object)} returns {@code null}), they
 * will be distributed to Kafka partitions in a round-robin fashion.
 *
 * @param defaultTopicId The default topic to write data to
 * @param serializationSchema A serializable serialization schema for turning user objects into a kafka-consumable byte[] supporting key/value messages
 * @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers' is the only required argument.
 * @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions.
 *                          If a partitioner is not provided, records will be partitioned by the key of each record
 *                          (determined by {@link KeyedSerializationSchema#serializeKey(Object)}). If the keys
 *                          are {@code null}, then records will be distributed to Kafka partitions in a
 *                          round-robin fashion.
 * @param semantic Defines semantic that will be used by this producer (see {@link FlinkKafkaProducer.Semantic}).
 * @param kafkaProducersPoolSize Overwrite default KafkaProducers pool size (see {@link FlinkKafkaProducer.Semantic#EXACTLY_ONCE}).
 */
public FlinkKafkaProducer(
	String defaultTopicId,
	KeyedSerializationSchema<IN> serializationSchema,
	Properties producerConfig,
	Optional<FlinkKafkaPartitioner<IN>> customPartitioner,
	FlinkKafkaProducer.Semantic semantic,
	int kafkaProducersPoolSize) {
	super(new FlinkKafkaProducer.TransactionStateSerializer(), new FlinkKafkaProducer.ContextStateSerializer());

	this.defaultTopicId = checkNotNull(defaultTopicId, "defaultTopicId is null");
	this.schema = checkNotNull(serializationSchema, "serializationSchema is null");
	this.producerConfig = checkNotNull(producerConfig, "producerConfig is null");
	this.flinkKafkaPartitioner = checkNotNull(customPartitioner, "customPartitioner is null").orElse(null);
	this.semantic = checkNotNull(semantic, "semantic is null");
	this.kafkaProducersPoolSize = kafkaProducersPoolSize;
	checkState(kafkaProducersPoolSize > 0, "kafkaProducersPoolSize must be non empty");

	ClosureCleaner.clean(this.flinkKafkaPartitioner, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
	ClosureCleaner.ensureSerializable(serializationSchema);

	// set the producer configuration properties for kafka record key and value serializers.
	if (!producerConfig.containsKey(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)) {
		this.producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
	} else {
		LOG.warn("Overwriting the '{}' is not recommended", ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG);
	}

	if (!producerConfig.containsKey(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)) {
		this.producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
	} else {
		LOG.warn("Overwriting the '{}' is not recommended", ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG);
	}

	// eagerly ensure that bootstrap servers are set.
	if (!this.producerConfig.containsKey(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
		throw new IllegalArgumentException(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG + " must be supplied in the producer config properties.");
	}

	if (!producerConfig.containsKey(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG)) {
		long timeout = DEFAULT_KAFKA_TRANSACTION_TIMEOUT.toMilliseconds();
		checkState(timeout < Integer.MAX_VALUE && timeout > 0, "timeout does not fit into 32 bit integer");
		this.producerConfig.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, (int) timeout);
		LOG.warn("Property [{}] not specified. Setting it to {}", ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, DEFAULT_KAFKA_TRANSACTION_TIMEOUT);
	}

	// Enable transactionTimeoutWarnings to avoid silent data loss
	// See KAFKA-6119 (affects versions 0.11.0.0 and 0.11.0.1):
	// The KafkaProducer may not throw an exception if the transaction failed to commit
	if (semantic == FlinkKafkaProducer.Semantic.EXACTLY_ONCE) {
		final Object object = this.producerConfig.get(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG);
		final long transactionTimeout;
		if (object instanceof String && StringUtils.isNumeric((String) object)) {
			transactionTimeout = Long.parseLong((String) object);
		} else if (object instanceof Number) {
			transactionTimeout = ((Number) object).longValue();
		} else {
			throw new IllegalArgumentException(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG
				+ " must be numeric, was " + object);
		}
		super.setTransactionTimeout(transactionTimeout);
		super.enableTransactionTimeoutWarnings(0.8);
	}

	this.topicPartitionsMap = new HashMap<>();
}
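
With Semantic.EXACTLY_ONCE the transaction timeout matters, because the broker caps it via transaction.max.timeout.ms (900000 ms by default) and rejects producers that ask for more. Below is a sketch of a call site that sets the timeout explicitly; the broker list, topic, and schema are placeholders:

Properties producerConfig = new Properties();
producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:9092,broker2:9092");
// Keep this at or below the broker-side transaction.max.timeout.ms.
producerConfig.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 900_000);

FlinkKafkaProducer<String> producer = new FlinkKafkaProducer<>(
	"demo-topic",
	new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()),
	producerConfig,
	Optional.empty(),
	FlinkKafkaProducer.Semantic.EXACTLY_ONCE,
	FlinkKafkaProducer.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE);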
 
Example 7
Source File: FlinkKafkaProducer011.java    From Flink-CEPplus with Apache License 2.0
/**
 * Creates a FlinkKafkaProducer for a given topic. The sink produces its input to
 * the topic. It accepts a keyed {@link KeyedSerializationSchema} and possibly a custom {@link FlinkKafkaPartitioner}.
 *
 * <p>If a partitioner is not provided, written records will be partitioned by the attached key of each
 * record (as determined by {@link KeyedSerializationSchema#serializeKey(Object)}). If written records do not
 * have a key (i.e., {@link KeyedSerializationSchema#serializeKey(Object)} returns {@code null}), they
 * will be distributed to Kafka partitions in a round-robin fashion.
 *
 * @param defaultTopicId The default topic to write data to
 * @param serializationSchema A serializable serialization schema for turning user objects into a kafka-consumable byte[] supporting key/value messages
 * @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers' is the only required argument.
 * @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions.
 *                          If a partitioner is not provided, records will be partitioned by the key of each record
 *                          (determined by {@link KeyedSerializationSchema#serializeKey(Object)}). If the keys
 *                          are {@code null}, then records will be distributed to Kafka partitions in a
 *                          round-robin fashion.
 * @param semantic Defines semantic that will be used by this producer (see {@link Semantic}).
 * @param kafkaProducersPoolSize Overwrite default KafkaProducers pool size (see {@link Semantic#EXACTLY_ONCE}).
 */
public FlinkKafkaProducer011(
		String defaultTopicId,
		KeyedSerializationSchema<IN> serializationSchema,
		Properties producerConfig,
		Optional<FlinkKafkaPartitioner<IN>> customPartitioner,
		Semantic semantic,
		int kafkaProducersPoolSize) {
	super(new TransactionStateSerializer(), new ContextStateSerializer());

	this.defaultTopicId = checkNotNull(defaultTopicId, "defaultTopicId is null");
	this.schema = checkNotNull(serializationSchema, "serializationSchema is null");
	this.producerConfig = checkNotNull(producerConfig, "producerConfig is null");
	this.flinkKafkaPartitioner = checkNotNull(customPartitioner, "customPartitioner is null").orElse(null);
	this.semantic = checkNotNull(semantic, "semantic is null");
	this.kafkaProducersPoolSize = kafkaProducersPoolSize;
	checkState(kafkaProducersPoolSize > 0, "kafkaProducersPoolSize must be non empty");

	ClosureCleaner.clean(this.flinkKafkaPartitioner, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
	ClosureCleaner.ensureSerializable(serializationSchema);

	// set the producer configuration properties for kafka record key and value serializers.
	if (!producerConfig.containsKey(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)) {
		this.producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
	} else {
		LOG.warn("Overwriting the '{}' is not recommended", ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG);
	}

	if (!producerConfig.containsKey(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)) {
		this.producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
	} else {
		LOG.warn("Overwriting the '{}' is not recommended", ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG);
	}

	// eagerly ensure that bootstrap servers are set.
	if (!this.producerConfig.containsKey(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
		throw new IllegalArgumentException(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG + " must be supplied in the producer config properties.");
	}

	if (!producerConfig.containsKey(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG)) {
		long timeout = DEFAULT_KAFKA_TRANSACTION_TIMEOUT.toMilliseconds();
		checkState(timeout < Integer.MAX_VALUE && timeout > 0, "timeout does not fit into 32 bit integer");
		this.producerConfig.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, (int) timeout);
		LOG.warn("Property [{}] not specified. Setting it to {}", ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, DEFAULT_KAFKA_TRANSACTION_TIMEOUT);
	}

	// Enable transactionTimeoutWarnings to avoid silent data loss
	// See KAFKA-6119 (affects versions 0.11.0.0 and 0.11.0.1):
	// The KafkaProducer may not throw an exception if the transaction failed to commit
	if (semantic == Semantic.EXACTLY_ONCE) {
		final Object object = this.producerConfig.get(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG);
		final long transactionTimeout;
		if (object instanceof String && StringUtils.isNumeric((String) object)) {
			transactionTimeout = Long.parseLong((String) object);
		} else if (object instanceof Number) {
			transactionTimeout = ((Number) object).longValue();
		} else {
			throw new IllegalArgumentException(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG
				+ " must be numeric, was " + object);
		}
		super.setTransactionTimeout(transactionTimeout);
		super.enableTransactionTimeoutWarnings(0.8);
	}

	this.topicPartitionsMap = new HashMap<>();
}
 
Example 8
Source File: FlinkKafkaProducer011.java    From flink with Apache License 2.0
/**
 * Creates a FlinkKafkaProducer for a given topic. The sink produces its input to
 * the topic. It accepts a keyed {@link KeyedSerializationSchema} and possibly a custom {@link FlinkKafkaPartitioner}.
 *
 * <p>If a partitioner is not provided, written records will be partitioned by the attached key of each
 * record (as determined by {@link KeyedSerializationSchema#serializeKey(Object)}). If written records do not
 * have a key (i.e., {@link KeyedSerializationSchema#serializeKey(Object)} returns {@code null}), they
 * will be distributed to Kafka partitions in a round-robin fashion.
 *
 * @param defaultTopicId The default topic to write data to
 * @param serializationSchema A serializable serialization schema for turning user objects into a kafka-consumable byte[] supporting key/value messages
 * @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers' is the only required argument.
 * @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions.
 *                          If a partitioner is not provided, records will be partitioned by the key of each record
 *                          (determined by {@link KeyedSerializationSchema#serializeKey(Object)}). If the keys
 *                          are {@code null}, then records will be distributed to Kafka partitions in a
 *                          round-robin fashion.
 * @param semantic Defines semantic that will be used by this producer (see {@link Semantic}).
 * @param kafkaProducersPoolSize Overwrite default KafkaProducers pool size (see {@link Semantic#EXACTLY_ONCE}).
 */
public FlinkKafkaProducer011(
		String defaultTopicId,
		KeyedSerializationSchema<IN> serializationSchema,
		Properties producerConfig,
		Optional<FlinkKafkaPartitioner<IN>> customPartitioner,
		Semantic semantic,
		int kafkaProducersPoolSize) {
	super(new TransactionStateSerializer(), new ContextStateSerializer());

	this.defaultTopicId = checkNotNull(defaultTopicId, "defaultTopicId is null");
	this.schema = checkNotNull(serializationSchema, "serializationSchema is null");
	this.producerConfig = checkNotNull(producerConfig, "producerConfig is null");
	this.flinkKafkaPartitioner = checkNotNull(customPartitioner, "customPartitioner is null").orElse(null);
	this.semantic = checkNotNull(semantic, "semantic is null");
	this.kafkaProducersPoolSize = kafkaProducersPoolSize;
	checkState(kafkaProducersPoolSize > 0, "kafkaProducersPoolSize must be non empty");

	ClosureCleaner.clean(this.flinkKafkaPartitioner, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
	ClosureCleaner.ensureSerializable(serializationSchema);

	// set the producer configuration properties for kafka record key and value serializers.
	if (!producerConfig.containsKey(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)) {
		this.producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
	} else {
		LOG.warn("Overwriting the '{}' is not recommended", ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG);
	}

	if (!producerConfig.containsKey(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)) {
		this.producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
	} else {
		LOG.warn("Overwriting the '{}' is not recommended", ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG);
	}

	// eagerly ensure that bootstrap servers are set.
	if (!this.producerConfig.containsKey(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
		throw new IllegalArgumentException(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG + " must be supplied in the producer config properties.");
	}

	if (!producerConfig.containsKey(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG)) {
		long timeout = DEFAULT_KAFKA_TRANSACTION_TIMEOUT.toMilliseconds();
		checkState(timeout < Integer.MAX_VALUE && timeout > 0, "timeout does not fit into 32 bit integer");
		this.producerConfig.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, (int) timeout);
		LOG.warn("Property [{}] not specified. Setting it to {}", ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, DEFAULT_KAFKA_TRANSACTION_TIMEOUT);
	}

	// Enable transactionTimeoutWarnings to avoid silent data loss
	// See KAFKA-6119 (affects versions 0.11.0.0 and 0.11.0.1):
	// The KafkaProducer may not throw an exception if the transaction failed to commit
	if (semantic == Semantic.EXACTLY_ONCE) {
		final Object object = this.producerConfig.get(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG);
		final long transactionTimeout;
		if (object instanceof String && StringUtils.isNumeric((String) object)) {
			transactionTimeout = Long.parseLong((String) object);
		} else if (object instanceof Number) {
			transactionTimeout = ((Number) object).longValue();
		} else {
			throw new IllegalArgumentException(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG
				+ " must be numeric, was " + object);
		}
		super.setTransactionTimeout(transactionTimeout);
		super.enableTransactionTimeoutWarnings(0.8);
	}

	this.topicPartitionsMap = new HashMap<>();
}
 