org.apache.kafka.common.serialization.ByteArraySerializer Java Examples
The following examples show how to use org.apache.kafka.common.serialization.ByteArraySerializer. Each example is taken from an open-source project; the source file, project, and license are noted above the code.
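Before the project examples, here is a minimal, self-contained sketch of the pattern most of them share: register ByteArraySerializer for keys and values, then hand the producer records that are already byte arrays. ByteArraySerializer is a pass-through serializer (it returns the byte array it is given unchanged), so it is the standard choice when records are already serialized. The broker address, topic name, and class name below are illustrative placeholders, not taken from any of the projects.

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class ByteArrayProducerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        // Register the serializer by class name; the producer instantiates and configures it
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());

        // try-with-resources closes the producer, flushing any buffered records
        try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("demo-topic", "key".getBytes(), "value".getBytes()));
        }
    }
}

Several of the examples below (the core-ng MessageProducer and the haystack-agent KafkaDispatcher, for instance) instead pass serializer instances directly to the KafkaProducer constructor, as in new KafkaProducer<>(config, serializer, serializer); for a stateless serializer like ByteArraySerializer the two styles are interchangeable.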
Example #1
Source File: TestPublisherPool.java From nifi with Apache License 2.0
@Test
public void testLeaseCloseReturnsToPool() {
    final Map<String, Object> kafkaProperties = new HashMap<>();
    kafkaProperties.put("bootstrap.servers", "localhost:1111");
    kafkaProperties.put("key.serializer", ByteArraySerializer.class.getName());
    kafkaProperties.put("value.serializer", ByteArraySerializer.class.getName());

    final PublisherPool pool = new PublisherPool(kafkaProperties, Mockito.mock(ComponentLog.class), 1024 * 1024, 1000L, false, null, null, StandardCharsets.UTF_8);
    assertEquals(0, pool.available());

    final PublisherLease lease = pool.obtainPublisher();
    assertEquals(0, pool.available());

    lease.close();
    assertEquals(1, pool.available());
}
Example #2
Source File: PublishKafka_0_11.java From nifi with Apache License 2.0
protected PublisherPool createPublisherPool(final ProcessContext context) {
    final int maxMessageSize = context.getProperty(MAX_REQUEST_SIZE).asDataSize(DataUnit.B).intValue();
    final long maxAckWaitMillis = context.getProperty(ACK_WAIT_TIME).asTimePeriod(TimeUnit.MILLISECONDS).longValue();
    final String attributeNameRegex = context.getProperty(ATTRIBUTE_NAME_REGEX).getValue();
    final Pattern attributeNamePattern = attributeNameRegex == null ? null : Pattern.compile(attributeNameRegex);
    final boolean useTransactions = context.getProperty(USE_TRANSACTIONS).asBoolean();
    final String transactionalIdPrefix = context.getProperty(TRANSACTIONAL_ID_PREFIX).evaluateAttributeExpressions().getValue();
    Supplier<String> transactionalIdSupplier = KafkaProcessorUtils.getTransactionalIdSupplier(transactionalIdPrefix);
    final String charsetName = context.getProperty(MESSAGE_HEADER_ENCODING).evaluateAttributeExpressions().getValue();
    final Charset charset = Charset.forName(charsetName);

    final Map<String, Object> kafkaProperties = new HashMap<>();
    KafkaProcessorUtils.buildCommonKafkaProperties(context, ProducerConfig.class, kafkaProperties);
    kafkaProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    kafkaProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    kafkaProperties.put("max.request.size", String.valueOf(maxMessageSize));

    return new PublisherPool(kafkaProperties, getLogger(), maxMessageSize, maxAckWaitMillis, useTransactions, transactionalIdSupplier, attributeNamePattern, charset);
}
Example #3
Source File: TestPublisherPool.java From nifi with Apache License 2.0
@Test
public void testPoisonedLeaseNotReturnedToPool() {
    final Map<String, Object> kafkaProperties = new HashMap<>();
    kafkaProperties.put("bootstrap.servers", "localhost:1111");
    kafkaProperties.put("key.serializer", ByteArraySerializer.class.getName());
    kafkaProperties.put("value.serializer", ByteArraySerializer.class.getName());

    final PublisherPool pool = new PublisherPool(kafkaProperties, Mockito.mock(ComponentLog.class), 1024 * 1024, 1000L);
    assertEquals(0, pool.available());

    final PublisherLease lease = pool.obtainPublisher();
    assertEquals(0, pool.available());

    lease.poison();
    lease.close();
    assertEquals(0, pool.available());
}
Example #4
Source File: LiKafkaProducerFactory.java From brooklin with BSD 2-Clause "Simplified" License
@Override
public Producer<byte[], byte[]> createProducer(Properties transportProps) {
    VerifiableProperties transportProviderProperties = new VerifiableProperties(transportProps);
    String clientId = transportProviderProperties.getString(ProducerConfig.CLIENT_ID_CONFIG);
    String bootstrapServers = transportProviderProperties.getString(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG);
    Properties producerConfig = transportProviderProperties.getDomainProperties(DOMAIN_PRODUCER);

    Validate.notEmpty(clientId, "clientId cannot be empty.");
    Validate.notEmpty(bootstrapServers, "bootstrapServers cannot be empty.");

    producerConfig = buildProducerProperties(producerConfig, clientId, bootstrapServers, DEFAULT_ENABLE_LARGE_MESSAGE);

    // Default serializer for key and payload
    producerConfig.putIfAbsent(LiKafkaProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getCanonicalName());
    producerConfig.putIfAbsent(LiKafkaProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getCanonicalName());

    return new LiKafkaProducerImpl<>(producerConfig);
}
Example #5
Source File: MessageProducer.java From core-ng-project with Apache License 2.0
public void tryCreateProducer() {
    if (uri.resolveURI()) {
        var watch = new StopWatch();
        try {
            Map<String, Object> config = Map.of(
                ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, uri.bootstrapURIs,
                ProducerConfig.COMPRESSION_TYPE_CONFIG, CompressionType.SNAPPY.name,
                ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, 60 * 1000,        // 60s, DELIVERY_TIMEOUT_MS_CONFIG is INT type
                ProducerConfig.LINGER_MS_CONFIG, 5L,                         // use small linger time within acceptable range to improve batching
                ProducerConfig.RECONNECT_BACKOFF_MS_CONFIG, 500L,            // longer backoff to reduce cpu usage when kafka is not available
                ProducerConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG, 5L * 1000,   // 5s
                ProducerConfig.MAX_BLOCK_MS_CONFIG, 30L * 1000);             // 30s, metadata update timeout, shorter than default, to get exception sooner if kafka is not available
            var serializer = new ByteArraySerializer();
            producer = new KafkaProducer<>(config, serializer, serializer);
            producerMetrics.set(producer.metrics());
        } finally {
            logger.info("create kafka producer, uri={}, name={}, elapsed={}", uri, name, watch.elapsed());
        }
    }
}
Example #6
Source File: InfluxDBKafkaSender.java From dropwizard-metrics-influxdb with Apache License 2.0
public InfluxDBKafkaSender(String database, TimeUnit timePrecision, String measurementPrefix) {
    super(database, timePrecision, measurementPrefix);

    int idx = database.indexOf("@");
    String hosts;
    if (idx != -1) {
        topic = database.substring(0, idx);
        hosts = database.substring(idx + 1);
    } else {
        throw new IllegalArgumentException("invalid database format: " + database + ", expected: topic@host1,host2...");
    }

    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, hosts);
    props.put(ProducerConfig.CLIENT_ID_CONFIG, KAFKA_CLIENT_ID);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    kafkaProducer = new KafkaProducer<>(props);
}
Example #7
Source File: PublishKafkaRecord_1_0.java From nifi with Apache License 2.0
protected PublisherPool createPublisherPool(final ProcessContext context) {
    final int maxMessageSize = context.getProperty(MAX_REQUEST_SIZE).asDataSize(DataUnit.B).intValue();
    final long maxAckWaitMillis = context.getProperty(ACK_WAIT_TIME).asTimePeriod(TimeUnit.MILLISECONDS).longValue();
    final String attributeNameRegex = context.getProperty(ATTRIBUTE_NAME_REGEX).getValue();
    final Pattern attributeNamePattern = attributeNameRegex == null ? null : Pattern.compile(attributeNameRegex);
    final boolean useTransactions = context.getProperty(USE_TRANSACTIONS).asBoolean();
    final String transactionalIdPrefix = context.getProperty(TRANSACTIONAL_ID_PREFIX).evaluateAttributeExpressions().getValue();
    Supplier<String> transactionalIdSupplier = KafkaProcessorUtils.getTransactionalIdSupplier(transactionalIdPrefix);
    final String charsetName = context.getProperty(MESSAGE_HEADER_ENCODING).evaluateAttributeExpressions().getValue();
    final Charset charset = Charset.forName(charsetName);

    final Map<String, Object> kafkaProperties = new HashMap<>();
    KafkaProcessorUtils.buildCommonKafkaProperties(context, ProducerConfig.class, kafkaProperties);
    kafkaProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    kafkaProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    kafkaProperties.put("max.request.size", String.valueOf(maxMessageSize));

    return new PublisherPool(kafkaProperties, getLogger(), maxMessageSize, maxAckWaitMillis, useTransactions, transactionalIdSupplier, attributeNamePattern, charset);
}
Example #8
Source File: KafkaProducerPusher.java From incubator-gobblin with Apache License 2.0
public KafkaProducerPusher(String brokers, String topic, Optional<Config> kafkaConfig) {
    this.closer = Closer.create();
    this.topic = topic;

    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    props.put(ProducerConfig.ACKS_CONFIG, "all");
    props.put(ProducerConfig.RETRIES_CONFIG, 3);

    // add the kafka scoped config. if any of the above are specified then they are overridden
    if (kafkaConfig.isPresent()) {
        props.putAll(ConfigUtils.configToProperties(kafkaConfig.get()));
        this.numFuturesToBuffer = ConfigUtils.getLong(kafkaConfig.get(), MAX_NUM_FUTURES_TO_BUFFER_KEY, DEFAULT_MAX_NUM_FUTURES_TO_BUFFER);
    }

    this.producer = createProducer(props);
}
Example #9
Source File: AdvancedKafkaAppender.java From summerframework with Apache License 2.0
@Override
public void start() {
    Objects.requireNonNull(topic, "topic must not be null");
    Objects.requireNonNull(bootstrapServers, "bootstrapServers must not be null");
    Objects.requireNonNull(layout, "layout must not be null");

    config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    config.put(ProducerConfig.ACKS_CONFIG, "all");
    config.put(ProducerConfig.RETRIES_CONFIG, 0);
    config.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
    config.put(ProducerConfig.LINGER_MS_CONFIG, 1);
    config.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
    config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);

    try {
        if (producer == null) {
            producer = producerFactory.newKafkaProducer(config);
        }
        super.start();
    } catch (Exception e) {
        if (failOnStartup) {
            addError("Unable to start Kafka Producer", e);
        } else {
            addWarn("Unable to start Kafka Producer", e);
        }
    }
}
Example #10
Source File: TestPublisherPool.java From localization_nifi with Apache License 2.0
@Test
public void testPoisonedLeaseNotReturnedToPool() {
    final Map<String, Object> kafkaProperties = new HashMap<>();
    kafkaProperties.put("bootstrap.servers", "localhost:1111");
    kafkaProperties.put("key.serializer", ByteArraySerializer.class.getName());
    kafkaProperties.put("value.serializer", ByteArraySerializer.class.getName());

    final PublisherPool pool = new PublisherPool(kafkaProperties, Mockito.mock(ComponentLog.class), 1024 * 1024, 1000L);
    assertEquals(0, pool.available());

    final PublisherLease lease = pool.obtainPublisher();
    assertEquals(0, pool.available());

    lease.poison();
    lease.close();
    assertEquals(0, pool.available());
}
Example #11
Source File: KafkaProducerPusher.java From incubator-gobblin with Apache License 2.0
public KafkaProducerPusher(String brokers, String topic, Optional<Config> kafkaConfig) {
    this.closer = Closer.create();
    this.topic = topic;

    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    props.put(ProducerConfig.ACKS_CONFIG, "all");
    props.put(ProducerConfig.RETRIES_CONFIG, 3);

    // add the kafka scoped config. if any of the above are specified then they are overridden
    if (kafkaConfig.isPresent()) {
        props.putAll(ConfigUtils.configToProperties(kafkaConfig.get()));
    }

    this.producer = createProducer(props);
}
Example #12
Source File: PublishKafka_1_0.java From nifi with Apache License 2.0
protected PublisherPool createPublisherPool(final ProcessContext context) {
    final int maxMessageSize = context.getProperty(MAX_REQUEST_SIZE).asDataSize(DataUnit.B).intValue();
    final long maxAckWaitMillis = context.getProperty(ACK_WAIT_TIME).asTimePeriod(TimeUnit.MILLISECONDS).longValue();
    final String attributeNameRegex = context.getProperty(ATTRIBUTE_NAME_REGEX).getValue();
    final Pattern attributeNamePattern = attributeNameRegex == null ? null : Pattern.compile(attributeNameRegex);
    final boolean useTransactions = context.getProperty(USE_TRANSACTIONS).asBoolean();
    final String transactionalIdPrefix = context.getProperty(TRANSACTIONAL_ID_PREFIX).evaluateAttributeExpressions().getValue();
    Supplier<String> transactionalIdSupplier = KafkaProcessorUtils.getTransactionalIdSupplier(transactionalIdPrefix);
    final String charsetName = context.getProperty(MESSAGE_HEADER_ENCODING).evaluateAttributeExpressions().getValue();
    final Charset charset = Charset.forName(charsetName);

    final Map<String, Object> kafkaProperties = new HashMap<>();
    KafkaProcessorUtils.buildCommonKafkaProperties(context, ProducerConfig.class, kafkaProperties);
    kafkaProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    kafkaProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    kafkaProperties.put("max.request.size", String.valueOf(maxMessageSize));

    return new PublisherPool(kafkaProperties, getLogger(), maxMessageSize, maxAckWaitMillis, useTransactions, transactionalIdSupplier, attributeNamePattern, charset);
}
Example #13
Source File: KafkaProducer09IT.java From datacollector with Apache License 2.0
@Test
public void testKafkaProducer09Write() throws IOException, StageException {
    final String message = "Hello StreamSets";

    HashMap<String, Object> kafkaProducerConfigs = new HashMap<>();
    kafkaProducerConfigs.put("retries", 0);
    kafkaProducerConfigs.put("batch.size", 100);
    kafkaProducerConfigs.put("linger.ms", 0);
    kafkaProducerConfigs.put(KafkaConstants.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    kafkaProducerConfigs.put(KafkaConstants.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);

    String topic = getNextTopic();
    SdcKafkaProducer sdcKafkaProducer = createSdcKafkaProducer(port, kafkaProducerConfigs);
    sdcKafkaProducer.init();
    sdcKafkaProducer.enqueueMessage(topic, message.getBytes(), "0");
    sdcKafkaProducer.write(null);

    verify(topic, 1, "localhost:" + port, message);
}
Example #14
Source File: TestPublisherPool.java From nifi with Apache License 2.0
@Test
public void testLeaseCloseReturnsToPool() {
    final Map<String, Object> kafkaProperties = new HashMap<>();
    kafkaProperties.put("bootstrap.servers", "localhost:1111");
    kafkaProperties.put("key.serializer", ByteArraySerializer.class.getName());
    kafkaProperties.put("value.serializer", ByteArraySerializer.class.getName());

    final PublisherPool pool = new PublisherPool(kafkaProperties, Mockito.mock(ComponentLog.class), 1024 * 1024, 1000L);
    assertEquals(0, pool.available());

    final PublisherLease lease = pool.obtainPublisher();
    assertEquals(0, pool.available());

    lease.close();
    assertEquals(1, pool.available());
}
Example #15
Source File: KafkaManager.java From SkyEye with GNU General Public License v3.0
public KafkaManager(final LoggerContext loggerContext, final String name, final String topic, final String zkServers,
                    final String mail, final String rpc, final String app, final String host, final Property[] properties) {
    super(loggerContext, name);
    this.topic = topic;
    this.zkServers = zkServers;
    this.mail = mail;
    this.rpc = rpc;
    this.app = app;
    this.orginApp = app;
    this.host = host;

    this.checkAndSetConfig(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    this.checkAndSetConfig(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    // Set the partitioner class: the custom KeyModPartitioner routes records with the same key to the same partition
    this.checkAndSetConfig(ProducerConfig.PARTITIONER_CLASS_CONFIG, KeyModPartitioner.class.getName());

    // Parameters from the XML configuration
    for (final Property property : properties) {
        this.config.put(property.getName(), property.getValue());
    }

    // For containerized deployments the host has to be obtained externally
    this.config.put(ProducerConfig.CLIENT_ID_CONFIG, this.app + Constants.MIDDLE_LINE + this.host + Constants.MIDDLE_LINE + "log4j2");
}
Example #16
Source File: FeatureSetSpecReadAndWriteTest.java From feast with Apache License 2.0
private void publishSpecToKafka(String project, String name, int version, SourceProto.Source source) {
    FeatureSetProto.FeatureSetSpec spec =
        FeatureSetProto.FeatureSetSpec.newBuilder()
            .setProject(project)
            .setName(name)
            .setVersion(version)
            .setSource(source)
            .build();

    TestUtil.publishToKafka(
        KAFKA_BOOTSTRAP_SERVERS,
        KAFKA_SPECS_TOPIC,
        ImmutableList.of(Pair.of(getFeatureSetStringRef(spec), spec)),
        ByteArraySerializer.class,
        KAFKA_PUBLISH_TIMEOUT_SEC);
}
Example #17
Source File: KafkaSinkTask.java From common-kafka with Apache License 2.0
@Override
public void start(Map<String, String> taskConfig) {
    Properties properties = new Properties();

    // Ensures all data is written successfully and received by all in-sync replicas. This gives us strong consistency
    properties.setProperty(ProducerConfig.ACKS_CONFIG, "all");

    // Ensures messages are effectively written in order and we maintain strong consistency
    properties.setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1");

    // Tell producer to effectively try forever to avoid connect task stopping due to transient issues
    properties.setProperty(ProducerConfig.RETRIES_CONFIG, Integer.toString(Integer.MAX_VALUE));

    // We avoid any serialization by just leaving it in the format we read it as
    properties.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    properties.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());

    // Apply all connector configuration (includes bootstrap config and any other overrides)
    properties.putAll(taskConfig);

    kafkaProducer = buildProducer(properties);
    producer = new KafkaProducerWrapper<>(kafkaProducer);
}
Example #18
Source File: TestStreamProcessor.java From samza with Apache License 2.0
private void initProducer(String bootstrapServer) {
    producer = TestUtils.createProducer(
        bootstrapServer,
        1,
        60 * 1000L,
        1024L * 1024L,
        0,
        0L,
        5 * 1000L,
        SecurityProtocol.PLAINTEXT,
        null,
        Option$.MODULE$.<Properties>apply(new Properties()),
        new StringSerializer(),
        new ByteArraySerializer(),
        Option$.MODULE$.<Properties>apply(new Properties()));
}
Example #19
Source File: KafkaPublisherTest.java From extension-kafka with Apache License 2.0
@Test
public void testPublishMessagesWithAckModeNoUnitOfWorkShouldBePublishedAndReadSuccessfully() {
    String testTopic = "testPublishMessagesWithAckModeNoUnitOfWorkShouldBePublishedAndReadSuccessfully";
    testProducerFactory = ackProducerFactory(kafkaBroker, ByteArraySerializer.class);
    testConsumer = buildConsumer(testTopic);
    testSubject = buildPublisher(testTopic);
    List<GenericDomainEventMessage<String>> testMessages = domainMessages("1234", 10);

    eventBus.publish(testMessages);

    assertThat(KafkaTestUtils.getRecords(testConsumer).count()).isEqualTo(testMessages.size());
    assertThat(testMessages).isEqualTo(monitor.getReceived());
    assertThat(monitor.failureCount()).isZero();
    assertThat(monitor.ignoreCount()).isZero();
    assertThat(monitor.successCount()).isEqualTo(testMessages.size());
}
Example #20
Source File: KafkaPublisherTest.java From extension-kafka with Apache License 2.0
@Test
public void testPublishMessagesWithTransactionalModeNoUnitOfWorkShouldBePublishedAndReadSuccessfully() {
    assumeFalse(
        "Transactional producers not supported on Windows",
        System.getProperty("os.name").contains("Windows")
    );

    String testTopic = "testPublishMessagesWithTransactionalModeNoUnitOfWorkShouldBePublishedAndReadSuccessfully";
    testProducerFactory = transactionalProducerFactory(kafkaBroker, "foo", ByteArraySerializer.class);
    testConsumer = buildConsumer(testTopic);
    testSubject = buildPublisher(testTopic);
    List<GenericDomainEventMessage<String>> testMessages = domainMessages("62457", 5);

    eventBus.publish(testMessages);

    assertThat(monitor.successCount()).isEqualTo(testMessages.size());
    assertThat(monitor.failureCount()).isZero();
    assertThat(monitor.ignoreCount()).isZero();
    assertThat(KafkaTestUtils.getRecords(testConsumer).count()).isEqualTo(testMessages.size());
}
Example #21
Source File: KafkaKeyValueProducerPusher.java From incubator-gobblin with Apache License 2.0
public KafkaKeyValueProducerPusher(String brokers, String topic, Optional<Config> kafkaConfig) {
    this.closer = Closer.create();
    this.topic = topic;

    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    props.put(ProducerConfig.ACKS_CONFIG, "all");
    props.put(ProducerConfig.RETRIES_CONFIG, 3);
    // To guarantee ordered delivery, the maximum in flight requests must be set to 1.
    props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 1);
    props.put(ProducerConfig.BLOCK_ON_BUFFER_FULL_CONFIG, true);

    // add the kafka scoped config. if any of the above are specified then they are overridden
    if (kafkaConfig.isPresent()) {
        props.putAll(ConfigUtils.configToProperties(kafkaConfig.get()));
        this.numFuturesToBuffer = ConfigUtils.getLong(kafkaConfig.get(), MAX_NUM_FUTURES_TO_BUFFER_KEY, DEFAULT_MAX_NUM_FUTURES_TO_BUFFER);
    }

    this.producer = createProducer(props);
}
Example #22
Source File: KafkaIntegrationTest.java From extension-kafka with Apache License 2.0
@BeforeEach
void setUp() {
    producerFactory = ProducerConfigUtil.ackProducerFactory(kafkaBroker, ByteArraySerializer.class);
    publisher = KafkaPublisher.<String, byte[]>builder()
                              .producerFactory(producerFactory)
                              .topic("integration")
                              .build();
    KafkaEventPublisher<String, byte[]> sender =
        KafkaEventPublisher.<String, byte[]>builder().kafkaPublisher(publisher).build();
    configurer.eventProcessing(
        eventProcessingConfigurer -> eventProcessingConfigurer.registerEventHandler(c -> sender)
    );

    consumerFactory = new DefaultConsumerFactory<>(minimal(kafkaBroker, ByteArrayDeserializer.class));

    fetcher = AsyncFetcher.<String, byte[], KafkaEventMessage>builder()
                          .pollTimeout(300)
                          .build();

    eventBus = SimpleEventBus.builder().build();
    configurer.configureEventBus(configuration -> eventBus);

    configurer.start();
}
Example #23
Source File: TestPublisherPool.java From nifi with Apache License 2.0
@Test
public void testPoisonedLeaseNotReturnedToPool() {
    final Map<String, Object> kafkaProperties = new HashMap<>();
    kafkaProperties.put("bootstrap.servers", "localhost:1111");
    kafkaProperties.put("key.serializer", ByteArraySerializer.class.getName());
    kafkaProperties.put("value.serializer", ByteArraySerializer.class.getName());

    final PublisherPool pool = new PublisherPool(kafkaProperties, Mockito.mock(ComponentLog.class), 1024 * 1024, 1000L, false, null, null, StandardCharsets.UTF_8);
    assertEquals(0, pool.available());

    final PublisherLease lease = pool.obtainPublisher();
    assertEquals(0, pool.available());

    lease.poison();
    lease.close();
    assertEquals(0, pool.available());
}
Example #24
Source File: KafkaDispatcher.java From haystack-agent with Apache License 2.0
@Override
public void initialize(final Config config) {
    final String agentName = config.hasPath("agentName") ? config.getString("agentName") : "";
    Validate.notNull(config.getString(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG));

    // remove the producer topic from the configuration and use it during send() call
    topic = config.getString(PRODUCER_TOPIC);

    producer = new KafkaProducer<>(
        ConfigurationHelpers.generatePropertiesFromMap(ConfigurationHelpers.convertToPropertyMap(config)),
        new ByteArraySerializer(),
        new ByteArraySerializer());

    dispatchTimer = newTimer(buildMetricName(agentName, "kafka.dispatch.timer"));
    dispatchFailure = newMeter(buildMetricName(agentName, "kafka.dispatch.failure"));

    LOGGER.info("Successfully initialized the kafka dispatcher with config={}", config);
}
Example #25
Source File: ITKafkaSender.java From zipkin-reporter-java with Apache License 2.0
@Test
public void checkFilterPropertiesProducerToAdminClient() {
    Properties producerProperties = new Properties();
    producerProperties.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "100");
    producerProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    producerProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    producerProperties.put(ProducerConfig.BATCH_SIZE_CONFIG, "0");
    producerProperties.put(ProducerConfig.ACKS_CONFIG, "0");
    producerProperties.put(ProducerConfig.LINGER_MS_CONFIG, "500");
    producerProperties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, "33554432");
    producerProperties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092");
    producerProperties.put(ProducerConfig.SECURITY_PROVIDERS_CONFIG, "sun.security.provider.Sun");

    Map<String, Object> filteredProperties = sender.filterPropertiesForAdminClient(producerProperties);

    assertThat(filteredProperties.size()).isEqualTo(2);
    assertThat(filteredProperties.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)).isNotNull();
    assertThat(filteredProperties.get(ProducerConfig.SECURITY_PROVIDERS_CONFIG)).isNotNull();
}
Example #26
Source File: KafkaStandaloneGenerator.java From flink with Apache License 2.0
KafkaCollector(String brokerAddress, String topic, int partition) {
    this.topic = checkNotNull(topic);
    this.partition = partition;
    this.serializer = new EventDeSerializer();

    // create Kafka producer
    Properties properties = new Properties();
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerAddress);
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getCanonicalName());
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getCanonicalName());
    this.producer = new KafkaProducer<>(properties);
}
Example #27
Source File: FakeStandardProducerConfig.java From Flink-CEPplus with Apache License 2.0
public static Properties get() {
    Properties p = new Properties();
    p.setProperty("bootstrap.servers", "localhost:12345");
    p.setProperty("key.serializer", ByteArraySerializer.class.getName());
    p.setProperty("value.serializer", ByteArraySerializer.class.getName());
    return p;
}
Example #28
Source File: KafkaProducer09IT.java From datacollector with Apache License 2.0
@Test
public void testKafkaProducer09WriteException() throws IOException, StageException {
    final String message = "Hello StreamSets";

    HashMap<String, Object> kafkaProducerConfigs = new HashMap<>();
    kafkaProducerConfigs.put("retries", 0);
    kafkaProducerConfigs.put("batch.size", 100);
    kafkaProducerConfigs.put("linger.ms", 0);
    kafkaProducerConfigs.put(KafkaConstants.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    kafkaProducerConfigs.put(KafkaConstants.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);

    SdcKafkaProducer sdcKafkaProducer = createSdcKafkaProducer(port, kafkaProducerConfigs);
    sdcKafkaProducer.init();
    String topic = getNextTopic();
    sdcKafkaProducer.enqueueMessage(topic, message.getBytes(), "0");
    sdcKafkaProducer.write(null);

    kafkaServer.shutdown();

    // attempt writing when kafka server is down
    sdcKafkaProducer.enqueueMessage(topic, "Hello".getBytes(), "0");
    try {
        sdcKafkaProducer.write(null);
        Assert.fail("Expected KafkaConnectionException");
    } catch (StageException e) {
        Assert.assertEquals(KafkaErrors.KAFKA_50, e.getErrorCode());
    }

    kafkaServer = TestUtil09.createKafkaServer(port, zkConnect);
}