org.apache.kafka.common.serialization.Serializer Java Examples

The following examples show how to use org.apache.kafka.common.serialization.Serializer. They are drawn from open-source projects; each example names the original project and source file it was taken from.
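Before diving into the examples, it helps to see how small the interface is: a Serializer<T> turns a value of type T into a byte[] for a given topic and can optionally react to configuration. The following minimal sketch (the class name Utf8StringSerializer is hypothetical; Kafka already ships an equivalent StringSerializer) shows the three methods most of the examples below override:

import java.nio.charset.StandardCharsets;
import java.util.Map;

import org.apache.kafka.common.serialization.Serializer;

public class Utf8StringSerializer implements Serializer<String> {

    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
        // No configuration needed for this sketch.
    }

    @Override
    public byte[] serialize(String topic, String data) {
        // Kafka treats a null return value as a null record payload.
        return data == null ? null : data.getBytes(StandardCharsets.UTF_8);
    }

    @Override
    public void close() {
        // Nothing to release.
    }
}
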
Example #1
Source File: GraphUtils.java    From kafka-graphs with Apache License 2.0
public static <K, V> void edgesToTopic(
    InputStream inputStream,
    Parser<EdgeWithValue<K, V>> edgeParser,
    Serializer<V> valueSerializer,
    Properties props,
    String topic,
    int numPartitions,
    short replicationFactor
) throws IOException {
    ClientUtils.createTopic(topic, numPartitions, replicationFactor, props);
    try (BufferedReader reader =
             new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
         Producer<Edge<K>, V> producer = new KafkaProducer<>(props, new KryoSerializer<>(), valueSerializer)) {
        String line;
        while ((line = reader.readLine()) != null) {
            EdgeWithValue<K, V> edge = edgeParser.parse(line);
            log.trace("read edge: ({}, {})", edge.source(), edge.target());
            ProducerRecord<Edge<K>, V> producerRecord =
                new ProducerRecord<>(topic, new Edge<>(edge.source(), edge.target()), edge.value());
            producer.send(producerRecord);
        }
        producer.flush();
    }
}
 
Example #2
Source File: KafkaRyaStreamsClientFactory.java    From rya with Apache License 2.0
/**
 * Create a {@link Producer} that is able to write to a topic in Kafka.
 *
 * @param kafkaHostname - The Kafka broker hostname. (not null)
 * @param kafkaPort - The Kafka broker port.
 * @param keySerializerClass - Serializes the keys. (not null)
 * @param valueSerializerClass - Serializes the values. (not null)
 * @return A {@link Producer} that can be used to write records to a topic.
 */
private static <K, V> Producer<K, V> makeProducer(
        final String kafkaHostname,
        final int kafkaPort,
        final Class<? extends Serializer<K>> keySerializerClass,
        final Class<? extends Serializer<V>> valueSerializerClass) {
    requireNonNull(kafkaHostname);
    requireNonNull(keySerializerClass);
    requireNonNull(valueSerializerClass);

    final Properties producerProps = new Properties();
    producerProps.setProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, kafkaHostname + ":" + kafkaPort);
    producerProps.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializerClass.getName());
    producerProps.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializerClass.getName());
    return new KafkaProducer<>(producerProps);
}
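
A hedged usage sketch of this factory (the hostname, port, and topic are placeholders, and StringSerializer merely stands in for whatever serializer classes the caller actually supplies):

Producer<String, String> producer =
        makeProducer("localhost", 9092, StringSerializer.class, StringSerializer.class);
producer.send(new ProducerRecord<>("placeholder-topic", "key", "value"));
producer.close();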
 
Example #3
Source File: GraphUtils.java    From kafka-graphs with Apache License 2.0
public static <K, V> void verticesToTopic(
    InputStream inputStream,
    Parser<VertexWithValue<K, V>> vertexParser,
    Serializer<K> keySerializer,
    Serializer<V> valueSerializer,
    Properties props,
    String topic,
    int numPartitions,
    short replicationFactor
) throws IOException {
    ClientUtils.createTopic(topic, numPartitions, replicationFactor, props);
    try (BufferedReader reader =
             new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
         Producer<K, V> producer = new KafkaProducer<>(props, keySerializer, valueSerializer)) {
        String line;
        while ((line = reader.readLine()) != null) {
            VertexWithValue<K, V> vertex = vertexParser.parse(line);
            log.trace("read vertex: {}", vertex.id());
            ProducerRecord<K, V> producerRecord =
                new ProducerRecord<>(topic, vertex.id(), vertex.value());
            producer.send(producerRecord);
        }
        producer.flush();
    }
}
 
Example #4
Source File: SerializerDeserializerTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Test
public void testSerde() {
  Serializer<String> stringSerializer = new StringSerializer();
  Deserializer<String> stringDeserializer = new StringDeserializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  Deserializer<LargeMessageSegment> segmentDeserializer = new DefaultSegmentDeserializer();

  String s = LiKafkaClientsTestUtils.getRandomString(100);
  assertEquals(s.length(), 100);
  byte[] stringBytes = stringSerializer.serialize("topic", s);
  assertEquals(stringBytes.length, 100);
  LargeMessageSegment segment =
      new LargeMessageSegment(LiKafkaClientsUtils.randomUUID(), 0, 2, stringBytes.length, ByteBuffer.wrap(stringBytes));
  // String bytes + segment header
  byte[] serializedSegment = segmentSerializer.serialize("topic", segment);
  assertEquals(serializedSegment.length, 1 + stringBytes.length + LargeMessageSegment.SEGMENT_INFO_OVERHEAD + 4);

  LargeMessageSegment deserializedSegment = segmentDeserializer.deserialize("topic", serializedSegment);
  assertEquals(deserializedSegment.messageId, segment.messageId);
  assertEquals(deserializedSegment.messageSizeInBytes, segment.messageSizeInBytes);
  assertEquals(deserializedSegment.numberOfSegments, segment.numberOfSegments);
  assertEquals(deserializedSegment.sequenceNumber, segment.sequenceNumber);
  assertEquals(deserializedSegment.payload.limit(), 100);
  String deserializedString = stringDeserializer.deserialize("topic", deserializedSegment.payloadArray());
  assertEquals(deserializedString.length(), s.length());
}
 
Example #5
Source File: KafkaProducerInterceptorWrapper.java    From pulsar with Apache License 2.0
static Deserializer getDeserializer(Serializer serializer) {
    if (serializer instanceof StringSerializer) {
        return new StringDeserializer();
    } else if (serializer instanceof LongSerializer) {
        return new LongDeserializer();
    } else if (serializer instanceof IntegerSerializer) {
        return new IntegerDeserializer();
    } else if (serializer instanceof DoubleSerializer) {
        return new DoubleDeserializer();
    } else if (serializer instanceof BytesSerializer) {
        return new BytesDeserializer();
    } else if (serializer instanceof ByteBufferSerializer) {
        return new ByteBufferDeserializer();
    } else if (serializer instanceof ByteArraySerializer) {
        return new ByteArrayDeserializer();
    } else {
        throw new IllegalArgumentException(serializer.getClass().getName() + " is not a valid or supported subclass of org.apache.kafka.common.serialization.Serializer.");
    }
}
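
Assuming a caller in the same package (the method is package-private), a round trip through one of the supported pairs might look like this sketch:

Serializer<String> serializer = new StringSerializer();
Deserializer<?> deserializer = KafkaProducerInterceptorWrapper.getDeserializer(serializer);
byte[] bytes = serializer.serialize("demo-topic", "hello");
Object roundTripped = deserializer.deserialize("demo-topic", bytes);   // "hello"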
 
Example #6
Source File: JsonPOJOSerde.java    From hello-kafka-streams with Apache License 2.0
@Override
public Serializer<T> serializer() {
    return new Serializer<T>() {

        @Override
        public void configure(Map<String, ?> configs, boolean isKey) {

        }

        @Override
        public byte[] serialize(String topic, T data) {
            try {
                return mapper.writeValueAsBytes(data);
            } catch (Exception e) {
                throw new SerializationException("Error serializing JSON message", e);
            }
        }

        @Override
        public void close() {

        }
    };

}
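
A hedged usage sketch, reusing the constructor shown in Example #20 below (the topic name is a placeholder, and `message` is assumed to be an existing WikipediaMessage instance from the same project):

JsonPOJOSerde<WikipediaMessage> serde = new JsonPOJOSerde<>(WikipediaMessage.class);
byte[] bytes = serde.serializer().serialize("wikipedia-parsed", message);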
 
Example #7
Source File: TestDelimitedSerializer.java    From envelope with Apache License 2.0
@Test
public void testDelimitedSerialization() {
  List<StructField> fields = Lists.newArrayList(
      DataTypes.createStructField("field1", DataTypes.StringType, true),
      DataTypes.createStructField("field2", DataTypes.IntegerType, true),
      DataTypes.createStructField("field3", DataTypes.BooleanType, true)
  );
  Row row = new RowWithSchema(DataTypes.createStructType(fields), "hello", 1, false);
  
  Map<String, String> configs = Maps.newHashMap();
  configs.put(DelimitedSerializer.FIELD_DELIMITER_CONFIG_NAME, "||");
  Serializer<Row> serializer = new DelimitedSerializer();
  serializer.configure(configs, false);
  
  byte[] serialized = serializer.serialize("test", row);
  serializer.close();
  
  assertEquals(new String(serialized), "hello||1||false");
}
 
Example #8
Source File: MockInMemorySerde.java    From simplesource with Apache License 2.0
@Override
public Serializer<T> serializer() {
    return new Serializer<T>() {
        @Override
        public void configure(Map<String, ?> configs, boolean isKey) {}

        @Override
        public byte[] serialize(String topic, T data) {
            serialisedObjectCache.putIfAbsent(Tuple2.of(topic, data.hashCode()), data);
            return String.valueOf(data.hashCode()).getBytes(Charset.defaultCharset());
        }

        @Override
        public void close() { }
    };
}
 
Example #9
Source File: CodecsTest.java    From vertx-kafka-client with Apache License 2.0
private <T> void testSerializer(Class<T> type, T val) {
  final Serde<T> serde = VertxSerdes.serdeFrom(type);
  final Deserializer<T> deserializer = serde.deserializer();
  final Serializer<T> serializer = serde.serializer();

  assertEquals("Should get the original value after serialization and deserialization",
    val, deserializer.deserialize(topic, serializer.serialize(topic, val)));

  assertEquals("Should support null in serialization and deserialization",
    null, deserializer.deserialize(topic, serializer.serialize(topic, null)));
}
 
Example #10
Source File: KafkaStreamsInteractiveQuerySample.java    From spring-cloud-stream-samples with Apache License 2.0
@Override
public Serializer<TopFiveSongs> serializer() {

	return new Serializer<TopFiveSongs>() {
		@Override
		public void configure(final Map<String, ?> map, final boolean b) {
		}

		@Override
		public byte[] serialize(final String s, final TopFiveSongs topFiveSongs) {

			final ByteArrayOutputStream out = new ByteArrayOutputStream();
			final DataOutputStream
					dataOutputStream =
					new DataOutputStream(out);
			try {
				for (SongPlayCount songPlayCount : topFiveSongs) {
					dataOutputStream.writeLong(songPlayCount.getSongId());
					dataOutputStream.writeLong(songPlayCount.getPlays());
				}
				dataOutputStream.flush();
			} catch (IOException e) {
				throw new RuntimeException(e);
			}
			return out.toByteArray();
		}
	};
}
 
Example #11
Source File: DelimitedProducer.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
@Override
protected Serializer<GenericRow> getSerializer(
    Schema avroSchema,
    org.apache.kafka.connect.data.Schema kafkaSchema,
    String topicName
) {
  return new KsqlDelimitedSerializer(kafkaSchema);
}
 
Example #12
Source File: JsonProducer.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
@Override
protected Serializer<GenericRow> getSerializer(
    Schema avroSchema,
    org.apache.kafka.connect.data.Schema kafkaSchema,
    String topicName
) {
  return new KsqlJsonSerializer(kafkaSchema);
}
 
Example #13
Source File: EphemeralKafkaBroker.java    From kafka-junit with Apache License 2.0
/**
 * Create a producer that can write to this broker
 *
 * @param keySerializer   Key serializer class
 * @param valueSerializer Value serializer class
 * @param overrideConfig  Producer config to override. Pass null if there aren't any.
 * @param <K>             Type of Key
 * @param <V>             Type of Value
 * @return KafkaProducer
 */
public <K, V> KafkaProducer<K, V> createProducer(Serializer<K> keySerializer, Serializer<V> valueSerializer,
                                                 Properties overrideConfig) {
    Properties conf = producerConfig();
    if (overrideConfig != null) {
        conf.putAll(overrideConfig);
    }
    keySerializer.configure(Maps.fromProperties(conf), true);
    valueSerializer.configure(Maps.fromProperties(conf), false);
    return new KafkaProducer<>(conf, keySerializer, valueSerializer);
}
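
A hedged usage sketch against an already-started broker instance (the topic name is a placeholder; passing null for overrideConfig is allowed per the Javadoc above):

// `broker` is assumed to be a running EphemeralKafkaBroker.
KafkaProducer<String, String> producer =
        broker.createProducer(new StringSerializer(), new StringSerializer(), null);
producer.send(new ProducerRecord<>("test-topic", "key", "value"));
producer.close();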
 
Example #14
Source File: CryptoSerializerPairFactory.java    From kafka-encryption with Apache License 2.0
/**
 * {@inheritDoc}
 */
@Override
public <K, V> SerializerPair<K, V> build(Serializer<K> keySerializer, Serializer<V> valueSerializer) {
    Serializer<K> newKeySerializer = new CryptoAwareSerializerWrapper<K>(keySerializer, keyReferenceExtractor, null);
    Serializer<V> newValueSerializer = new CryptoSerializer<>(valueSerializer, encryptor, null);
    return new SerializerPair<>(newKeySerializer, newValueSerializer);
}
 
Example #15
Source File: KafkaDeserializerExtractorTest.java    From incubator-gobblin with Apache License 2.0
@Test
public void testConfluentAvroDeserializer() throws IOException, RestClientException {
  WorkUnitState mockWorkUnitState = getMockWorkUnitState(0L,10L);

  mockWorkUnitState.setProp("schema.registry.url", TEST_URL);

  Schema schema = SchemaBuilder.record(TEST_RECORD_NAME)
      .namespace(TEST_NAMESPACE).fields()
      .name(TEST_FIELD_NAME).type().stringType().noDefault()
      .endRecord();

  GenericRecord testGenericRecord = new GenericRecordBuilder(schema).set(TEST_FIELD_NAME, "testValue").build();

  SchemaRegistryClient mockSchemaRegistryClient = mock(SchemaRegistryClient.class);
  when(mockSchemaRegistryClient.getByID(any(Integer.class))).thenReturn(schema);

  Serializer<Object> kafkaEncoder = new KafkaAvroSerializer(mockSchemaRegistryClient);
  Deserializer<Object> kafkaDecoder = new KafkaAvroDeserializer(mockSchemaRegistryClient);

  ByteBuffer testGenericRecordByteBuffer =
      ByteBuffer.wrap(kafkaEncoder.serialize(TEST_TOPIC_NAME, testGenericRecord));

  KafkaSchemaRegistry<Integer, Schema> mockKafkaSchemaRegistry = mock(KafkaSchemaRegistry.class);
  KafkaDeserializerExtractor kafkaDecoderExtractor =
      new KafkaDeserializerExtractor(mockWorkUnitState,
          Optional.fromNullable(Deserializers.CONFLUENT_AVRO), kafkaDecoder, mockKafkaSchemaRegistry);

  ByteArrayBasedKafkaRecord mockMessageAndOffset = getMockMessageAndOffset(testGenericRecordByteBuffer);

  Assert.assertEquals(kafkaDecoderExtractor.decodeRecord(mockMessageAndOffset), testGenericRecord);
}
 
Example #16
Source File: KafkaUsage.java    From smallrye-reactive-messaging with Apache License 2.0
public void produceStrings(int messageCount, Runnable completionCallback,
        Supplier<ProducerRecord<String, String>> messageSupplier) {
    Serializer<String> keySer = new StringSerializer();
    Serializer<String> valSer = new StringSerializer();
    String randomId = UUID.randomUUID().toString();
    this.produce(randomId, messageCount, keySer, valSer, completionCallback, messageSupplier);
}
 
Example #17
Source File: KafkaAvroSerdesTest.java    From registry with Apache License 2.0
private void testSchemaHeaderNames(String customKeySchemaHeaderName,
                                   String customValueSchemaHeaderName) {
    TestRecord record = new TestRecord();
    record.setField1("Hello");
    record.setField2("World");

    Map<String, Object> configs = new HashMap<>();
    configs.put(KafkaAvroSerde.KEY_SCHEMA_VERSION_ID_HEADER_NAME, customKeySchemaHeaderName);
    configs.put(KafkaAvroSerde.VALUE_SCHEMA_VERSION_ID_HEADER_NAME, customValueSchemaHeaderName);
    configs.put(KafkaAvroSerializer.STORE_SCHEMA_VERSION_ID_IN_HEADER, "true");
    configs.put(AbstractAvroSnapshotDeserializer.SPECIFIC_AVRO_READER, true);

    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    AvroSerDesHandler handler = new DefaultAvroSerDesHandler();
    handler.handlePayloadSerialization(outputStream, record);

    for (Boolean isKey : Arrays.asList(true, false)) {
        KafkaAvroSerde serde = new KafkaAvroSerde(schemaRegistryClient);
        final Serializer<Object> serializer = serde.serializer();
        serializer.configure(configs, isKey);

        Headers headers = new RecordHeaders();
        final byte[] bytes = serializer.serialize(topic, headers, record);
        Assert.assertArrayEquals(outputStream.toByteArray(), bytes);
        Assert.assertEquals(isKey, headers.lastHeader(customKeySchemaHeaderName) != null);
        Assert.assertEquals(!isKey, headers.lastHeader(customValueSchemaHeaderName) != null);

        final Deserializer<Object> deserializer = serde.deserializer();
        deserializer.configure(configs, isKey);
        final TestRecord actual = (TestRecord) deserializer.deserialize(topic, headers, bytes);
        Assert.assertEquals(record, actual);
    }
}
 
Example #18
Source File: ConsumerRecordsProcessorTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Test(expectedExceptions = OffsetNotTrackedException.class)
public void testStartingOffsetWithNormalMessages() throws IOException {
  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  ConsumerRecordsProcessor<String, String> consumerRecordsProcessor = createConsumerRecordsProcessor();

  // Let consumer record 0 be a normal record.
  byte[] message0Bytes = stringSerializer.serialize("topic", "message0");
  byte[] message0WrappedBytes = wrapMessageBytes(segmentSerializer, message0Bytes);
  ConsumerRecord<byte[], byte[]> consumerRecord0 =
      new ConsumerRecord<>("topic", 0, 100L, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), message0WrappedBytes);

  // Construct the consumer records.
  List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<>();
  recordList.add(consumerRecord0);
  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsMap = new HashMap<>();
  recordsMap.put(new TopicPartition("topic", 0), recordList);
  ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(recordsMap);

  consumerRecordsProcessor.process(records).consumerRecords();

  TopicPartition tp = new TopicPartition("topic", 0);
  assertEquals(consumerRecordsProcessor.startingOffset(tp, 100L), 100, "Should return 100 because there are no " +
      "large messages in the partition.");

  // Should throw exception when an offset cannot be found by the offset tracker.
  consumerRecordsProcessor.startingOffset(tp, 0L);
}
 
Example #19
Source File: MessageSplitterImpl.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
public MessageSplitterImpl(int maxSegmentSize,
                           Serializer<LargeMessageSegment> segmentSerializer,
                           UUIDFactory uuidFactory) {
  _maxSegmentSize = maxSegmentSize;
  _segmentSerializer = segmentSerializer;
  _uuidFactory = uuidFactory;
}
 
Example #20
Source File: WikipediaStreamDemo.java    From hello-kafka-streams with Apache License 2.0
private static KafkaStreams createWikipediaStreamsInstance(String bootstrapServers) {
    final Serializer<JsonNode> jsonSerializer = new JsonSerializer();
    final Deserializer<JsonNode> jsonDeserializer = new JsonDeserializer();
    final Serde<JsonNode> jsonSerde = Serdes.serdeFrom(jsonSerializer, jsonDeserializer);

    KStreamBuilder builder = new KStreamBuilder();
    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "wikipedia-streams");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);


    KStream<JsonNode, JsonNode> wikipediaRaw = builder.stream(jsonSerde, jsonSerde, "wikipedia-raw");

    KStream<String, WikipediaMessage> wikipediaParsed =
            wikipediaRaw.map(WikipediaMessage::parceIRC)
                    .filter(WikipediaMessage::filterNonNull)
                    .through(Serdes.String(), new JsonPOJOSerde<>(WikipediaMessage.class), "wikipedia-parsed");

    KTable<String, Long> totalEditsByUser = wikipediaParsed
            .filter((key, value) -> value.type == WikipediaMessage.Type.EDIT)
            .countByKey(Serdes.String(), "wikipedia-edits-by-user");

    // Print each user's running edit count as it updates
    totalEditsByUser.toStream().process(() -> new AbstractProcessor<String, Long>() {
        @Override
        public void process(String user, Long numEdits) {
            System.out.println("USER: " + user + " num.edits: " + numEdits);
        }
    });

    return new KafkaStreams(builder, props);

}
 
Example #21
Source File: ClientBuilder.java    From devicehive-java-server with Apache License 2.0
public ClientBuilder withProducerValueSerializer(Serializer<Request> serializer) {
    this.producerValueSerializer = serializer;
    return this;
}
 
Example #22
Source File: ConsumerRecordsProcessorTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Test
public void testEviction() {
  Serializer<String> stringSerializer = new StringSerializer();
  Serializer<LargeMessageSegment> segmentSerializer = new DefaultSegmentSerializer();
  // Create two large messages.
  MessageSplitter splitter = new MessageSplitterImpl(500, segmentSerializer, new UUIDFactory.DefaultUUIDFactory<>());

  ConsumerRecordsProcessor<String, String> consumerRecordsProcessor = createConsumerRecordsProcessor();
  consumerRecordsProcessor.process(getConsumerRecords()).consumerRecords();
  // The offset tracker now has 2, 4, 5 in it.
  TopicPartition tp = new TopicPartition("topic", 0);

  UUID largeMessageId = LiKafkaClientsUtils.randomUUID();
  byte[] largeMessage1Bytes = stringSerializer.serialize("topic", LiKafkaClientsTestUtils.getRandomString(600));
  List<ProducerRecord<byte[], byte[]>> splitLargeMessage =
      splitter.split("topic", largeMessageId, largeMessage1Bytes);

  // Test evict
  List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<ConsumerRecord<byte[], byte[]>>();
  // Let consumer record 6 be a large message segment.
  ConsumerRecord<byte[], byte[]> consumerRecord6 =
      new ConsumerRecord<>("topic", 0, 6, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), splitLargeMessage.get(0).value());
  // Let consumer record 7 be a normal record.
  ConsumerRecord<byte[], byte[]> consumerRecord7 =
      new ConsumerRecord<>("topic", 0, 7, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(),
                           stringSerializer.serialize("topic", "message7"));
  // Let consumer record 8 complete consumer record 6.
  ConsumerRecord<byte[], byte[]> consumerRecord8 =
      new ConsumerRecord<>("topic", 0, 8, 0L, TimestampType.CREATE_TIME, 0, 0, 0, "key".getBytes(), splitLargeMessage.get(1).value());

  recordList.add(consumerRecord6);
  recordList.add(consumerRecord7);
  recordList.add(consumerRecord8);

  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsMap = new HashMap<>();
  recordsMap.put(new TopicPartition("topic", 0), recordList);
  ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(recordsMap);
  consumerRecordsProcessor.process(records).consumerRecords();
  // Now the offset tracker should have 4, 5, 6, 8 inside it.
  assertEquals(consumerRecordsProcessor.safeOffset(tp, 7L).longValue(), 6, "safe offset should be 6");

  try {
    consumerRecordsProcessor.safeOffset(tp, 2L);
    fail("Should throw exception because offset for message 2 should have been evicted.");
  } catch (OffsetNotTrackedException onte) {
    assertTrue(onte.getMessage().startsWith("Offset 2 for partition"));
  }
}
 
Example #23
Source File: StringMessageProducerFactory.java    From alcor with Apache License 2.0
@Override
public Serializer getSerializer() {
    return new StringSerializer();
}
 
Example #24
Source File: PregelComputation.java    From kafka-graphs with Apache License 2.0
private static <K> int vertexToPartition(K vertex, Serializer<K> serializer, int numPartitions) {
    // TODO make configurable, currently this is tied to DefaultStreamPartitioner
    byte[] keyBytes = serializer.serialize(null, vertex);
    int partition = Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions;
    return partition;
}
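
Conceptually (the method is private, so the call below is illustrative only), the mapping reproduces Kafka's default behaviour of hashing the serialized key with murmur2 and taking it modulo the partition count:

// Illustrative only: route a String vertex id to one of 4 partitions.
int partition = vertexToPartition("vertex-42", new StringSerializer(), 4);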
 
Example #25
Source File: KsqlResourceTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
private static <T> Serializer<T> getJsonSerializer(boolean isKey) {
  Serializer<T> result = new KafkaJsonSerializer<>();
  result.configure(Collections.emptyMap(), isKey);
  return result;
}
 
Example #26
Source File: KafkaProducerImpl.java    From vertx-kafka-client with Apache License 2.0
public static <K, V> KafkaProducer<K, V> createShared(Vertx vertx, String name, Properties config, Serializer<K> keySerializer, Serializer<V> valueSerializer) {
  return createShared(vertx, name, () -> KafkaWriteStream.create(vertx, config, keySerializer, valueSerializer));
}
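
A hedged usage sketch (the shared-producer name and bootstrap address are placeholders; `vertx` is assumed to be an existing Vertx instance):

Properties config = new Properties();
config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
KafkaProducer<String, String> producer = KafkaProducerImpl.createShared(
        vertx, "shared-producer", config, new StringSerializer(), new StringSerializer());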
 
Example #27
Source File: KafkaIO.java    From beam with Apache License 2.0
/**
 * Wrapper method over {@link WriteRecords#withValueSerializer(Class)}, used to keep the
 * compatibility with old API based on KV type of element.
 */
public Write<K, V> withValueSerializer(Class<? extends Serializer<V>> valueSerializer) {
  return withWriteRecordsTransform(
      getWriteRecordsTransform().withValueSerializer(valueSerializer));
}
 
Example #28
Source File: SerdesProvidedAsBeansTests.java    From spring-cloud-stream-binder-kafka with Apache License 2.0
@Override
public Serializer<T> serializer() {
	return null;
}
 
Example #29
Source File: KafkaWriteStreamImpl.java    From vertx-kafka-client with Apache License 2.0
public static <K, V> KafkaWriteStreamImpl<K, V> create(Vertx vertx, Properties config, Serializer<K> keySerializer, Serializer<V> valueSerializer) {
  return new KafkaWriteStreamImpl<>(vertx.getOrCreateContext(), new org.apache.kafka.clients.producer.KafkaProducer<>(config, keySerializer, valueSerializer));
}
 
Example #30
Source File: KafkaSerializers.java    From arcusplatform with Apache License 2.0
@SuppressWarnings("unchecked")
public static <T> Serializer<T> nullableJsonSerializer() {
   return NullableJsonSerializer.INSTANCE;
}