Java Code Examples for com.fasterxml.jackson.databind.ObjectMapper#setInjectableValues()

The following examples show how to use com.fasterxml.jackson.databind.ObjectMapper#setInjectableValues(). Each example is drawn from an open-source project; the source file and license are noted above the code.
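Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: register an InjectableValues.Std on the mapper, then let Jackson fill an @JacksonInject property from it during readValue(). The InjectableValuesDemo and AppConfig names and the "environment" key are illustrative assumptions, not taken from any of the projects below.

import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.InjectableValues;
import com.fasterxml.jackson.databind.ObjectMapper;

public class InjectableValuesDemo {

    // Hypothetical bean: "name" comes from the JSON document,
    // "environment" is supplied by the mapper's InjectableValues.
    static class AppConfig {
        @JsonProperty("name")
        public String name;

        @JacksonInject("environment")
        public String environment;
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();

        // Values registered here are looked up by key during deserialization.
        InjectableValues.Std values = new InjectableValues.Std();
        values.addValue("environment", "production");
        mapper.setInjectableValues(values);

        AppConfig config = mapper.readValue("{\"name\":\"demo\"}", AppConfig.class);
        System.out.println(config.name + " / " + config.environment); // demo / production
    }
}
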
Example 1
Source File: ObjectMapperResolver.java    From clouditor with Apache License 2.0
public static void configureObjectMapper(ObjectMapper mapper) {
  mapper.registerModule(new JavaTimeModule());
  mapper.disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS);
  mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);
  mapper.disable(SerializationFeature.FAIL_ON_EMPTY_BEANS);
  mapper.enable(SerializationFeature.INDENT_OUTPUT);
  mapper.setSerializationInclusion(Include.NON_NULL);
  mapper.setConfig(mapper.getSerializationConfig().withView(ApiOnly.class));

  // register all subtypes of CloudAccount
  for (var type : REFLECTIONS_SUBTYPE_SCANNER.getSubTypesOf(CloudAccount.class)) {
    mapper.registerSubtypes(type);
  }

  // set injectable value to null
  var values = new InjectableValues.Std();
  values.addValue("hash", null);

  mapper.setInjectableValues(values);
}
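
Note that registering the "hash" key with an explicit null presumably lets any @JacksonInject("hash") property on the registered subtypes deserialize to null; with no entry registered at all, Jackson would instead fail on such a property.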
 
Example 2
Source File: IdolConfiguration.java    From find with MIT License
@SuppressWarnings("SpringJavaAutowiringInspection")
@Bean
@Autowired
@Primary
public ObjectMapper jacksonObjectMapper(
        final Jackson2ObjectMapperBuilder builder,
        final AuthenticationInformationRetriever<?, ?> authenticationInformationRetriever
) {
    final ObjectMapper mapper = builder
            .createXmlMapper(false)
            .mixIn(Authentication.class, IdolAuthenticationMixins.class)
            .mixIn(Widget.class, WidgetMixins.class)
            .mixIn(WidgetDatasource.class, WidgetDatasourceMixins.class)
            .mixIn(QueryRestrictions.class, IdolQueryRestrictionsMixin.class)
            .mixIn(IdolQueryRestrictions.class, IdolQueryRestrictionsMixin.class)
            .featuresToEnable(SerializationFeature.INDENT_OUTPUT)
            .build();

    mapper.setInjectableValues(new InjectableValues.Std().addValue(AuthenticationInformationRetriever.class, authenticationInformationRetriever));

    return mapper;
}
 
Example 3
Source File: GuiceBundle.java    From soabase with Apache License 2.0
@Override
public void initialize(Bootstrap<?> bootstrap)
{
    final InjectableValues injectableValues = new InjectableValues()
    {
        @Override
        public Object findInjectableValue(Object valueId, DeserializationContext ctxt, BeanProperty forProperty, Object beanInstance)
        {
            return null;
        }
    };
    final ConfigurationFactoryFactory<? extends Configuration> configurationFactoryFactory = bootstrap.getConfigurationFactoryFactory();
    ConfigurationFactoryFactory factoryFactory = new ConfigurationFactoryFactory()
    {
        @Override
        public ConfigurationFactory create(Class klass, Validator validator, ObjectMapper objectMapper, String propertyPrefix)
        {
            objectMapper.setInjectableValues(injectableValues);
            //noinspection unchecked
            return configurationFactoryFactory.create(klass, validator, objectMapper, propertyPrefix);
        }
    };
    //noinspection unchecked
    bootstrap.setConfigurationFactoryFactory(factoryFactory);
}
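
Returning null for every lookup, as this InjectableValues does, presumably keeps configuration deserialization from failing on @JacksonInject properties before the Guice injector is available to supply real values.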
 
Example 4
Source File: DAXParser5.java    From pegasus with Apache License 2.0
/**
 * Start the parser. This begins parsing the given file.
 *
 * @param file the path to the YAML file you want to parse.
 */
public void parse(String file) {
    Reader reader;
    try {
        reader = new VariableExpansionReader(new FileReader(file));
    } catch (IOException ioe) {
        throw new RuntimeException("Exception while reading file " + file, ioe);
    }
    ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
    mapper.configure(MapperFeature.ALLOW_COERCION_OF_SCALARS, false);
    mapper.setInjectableValues(injectCallback());
    try {
        mapper.readValue(reader, DAXParser5.class);
    } catch (IOException ex) {
        throw new RuntimeException("Exception while parsing yaml file " + file, ex);
    }
}
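
The injectCallback() helper is not shown in this snippet. A plausible sketch, assuming it exposes the parser instance under a string key so deserializers can reach it via @JacksonInject (the "callback" key is an assumption, not the actual Pegasus code):

private InjectableValues injectCallback() {
    // Assumed key: a deserializer would declare @JacksonInject("callback")
    // to receive this parser instance during readValue().
    InjectableValues.Std values = new InjectableValues.Std();
    values.addValue("callback", this);
    return values;
}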
 
Example 5
Source File: HodConfiguration.java    From find with MIT License
@SuppressWarnings("SpringJavaAutowiringInspection")
@Bean
@Primary
@Autowired
public ObjectMapper jacksonObjectMapper(final Jackson2ObjectMapperBuilder builder, final AuthenticationInformationRetriever<?, ?> authenticationInformationRetriever) {
    final ObjectMapper mapper = builder.createXmlMapper(false)
            .mixIn(Authentication.class, HodAuthenticationMixins.class)
            .mixIn(HodQueryRestrictions.class, HodQueryRestrictionsMixin.class)
            .build();

    mapper.setInjectableValues(new InjectableValues.Std().addValue(AuthenticationInformationRetriever.class, authenticationInformationRetriever));

    return mapper;
}
 
Example 6
Source File: DigdagClient.java    From digdag with Apache License 2.0
public static ObjectMapper objectMapper()
{
    ObjectMapper mapper = new ObjectMapper();
    mapper.registerModule(new GuavaModule());
    mapper.registerModule(new JacksonTimeModule());
    mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);

    // InjectableValues makes @JacksonInject work; it is used by io.digdag.client.config.Config.<init>
    InjectableValues.Std injects = new InjectableValues.Std();
    injects.addValue(ObjectMapper.class, mapper);
    mapper.setInjectableValues(injects);

    return mapper;
}
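
The comment above points at io.digdag.client.config.Config. A sketch of the shape such a consuming constructor could take (not the actual digdag source; the "name" property is a hypothetical extra that makes the creator properties-based):

static class Config {
    private final ObjectMapper mapper;
    private final String name;

    // With no explicit id, @JacksonInject falls back to the parameter's
    // declared type, matching the ObjectMapper.class key registered above.
    @JsonCreator
    Config(@JacksonInject ObjectMapper mapper,
           @JsonProperty("name") String name) {
        this.mapper = mapper;
        this.name = name;
    }
}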
 
Example 7
Source File: IndexV1Updater.java    From fdroidclient with GNU General Public License v3.0
/**
 * Get the standard {@link ObjectMapper} instance used for parsing {@code index-v1.json}.
 * This ignores unknown properties so that old releases won't crash when new things are
 * added to {@code index-v1.json}. This is needed both for forward compatibility
 * and because ignoring unknown properties coming from a potentially malicious
 * server seems reasonable anyway.
 */
public static ObjectMapper getObjectMapperInstance(long repoId) {
    ObjectMapper mapper = new ObjectMapper();
    mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);
    mapper.setInjectableValues(new InjectableValues.Std().addValue("repoId", repoId));
    mapper.setVisibility(PropertyAccessor.ALL, JsonAutoDetect.Visibility.NONE);
    mapper.setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.PUBLIC_ONLY);
    return mapper;
}
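
On the consuming side, a property annotated with the matching string key receives the value during readValue(). A hypothetical sketch (App is illustrative, not the actual fdroidclient class):

static class App {
    // Filled from the InjectableValues entry registered under "repoId",
    // not from the parsed JSON document.
    @JacksonInject("repoId")
    public long repoId;
}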
 
Example 8
Source File: Meta.java    From pegasus with Apache License 2.0
/**
 * Reads the on-disk map file into memory.
 *
 * @param filename is the name of the file to read.
 * @return true, if the in-memory data structures appear sound.
 */
public boolean connect(String filename) {
    // sanity check
    if (filename == null) {
        return false;
    }
    mFilename = filename;
    mLFN = new LinkedHashMap<String, ReplicaLocation>();

    File replicaFile = new File(filename);
    // first attempt to validate only if it exists
    if (replicaFile.exists()) {
        Reader reader = null;
        try {
            reader = new VariableExpansionReader(new FileReader(filename));
            ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
            mapper.configure(MapperFeature.ALLOW_COERCION_OF_SCALARS, false);
            // inject instance of this class to be used for deserialization
            mapper.setInjectableValues(injectCallback());
            mapper.readValue(reader, Meta.class);
        } catch (IOException ioe) {
            mLFN = null;
            mFilename = null;
            throw new CatalogException(ioe); // re-throw
        } finally {
            if (reader != null) {
                try {
                    reader.close();
                } catch (IOException ex) {
                    // ignore failures while closing the reader
                }
            }
        }
    } else {
        return false;
    }

    return true;
}
 
Example 9
Source File: YAML.java    From pegasus with Apache License 2.0
/**
 * Reads the on-disk map file into memory.
 *
 * @param filename is the name of the file to read.
 * @return true, if the in-memory data structures appear sound.
 */
public boolean connect(String filename) {
    // sanity check
    if (filename == null) {
        return false;
    }
    mFilename = filename;
    mLFN = new LinkedHashMap<String, ReplicaLocation>();
    mLFNRegex = new LinkedHashMap<String, ReplicaLocation>();
    mLFNPattern = new LinkedHashMap<String, Pattern>();

    File replicaFile = new File(filename);
    // first attempt to validate only if it exists
    if (replicaFile.exists() && validate(replicaFile, SCHEMA_FILE)) {
        Reader reader = null;
        try {
            reader = new VariableExpansionReader(new FileReader(filename));
            ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
            mapper.configure(MapperFeature.ALLOW_COERCION_OF_SCALARS, false);
            // inject instance of this class to be used for deserialization
            mapper.setInjectableValues(injectCallback());
            mapper.readValue(reader, YAML.class);
        } catch (IOException ioe) {
            mLFN = null;
            mLFNRegex = null;
            mLFNPattern = null;
            mFilename = null;
            throw new CatalogException(ioe); // re-throw
        } finally {
            if (reader != null) {
                try {
                    reader.close();
                } catch (IOException ex) {
                    // ignore failures while closing the reader
                }
            }
        }
    }
    return true;
}
 
Example 10
Source File: TestElasticSearchSink.java    From suro with Apache License 2.0
@Test
public void testCreate() throws IOException {
    String desc = "    {\n" +
        "        \"type\": \"elasticsearch\",\n" +
        "        \"queue4Sink\":{\"type\": \"memory\", \"capacity\": 0 },\n" +
        "        \"batchSize\": 100,\n" +
        "        \"batchTimeout\": 1000,\n" +
        "        \"clientName\": \"es_test\",\n" +
        "        \"cluster.name\": \"es_test\",\n" +
        "        \"addressList\": [\"http://host1:8080\", \"http://host2:8080\"],\n" +
        "        \"indexInfo\":{\n" +
        "            \"type\": \"default\",\n" +
        "            \"indexTypeMap\":{\"routingkey1\":\"index1:type1\", \"routingkey2\":\"index2:type2\"},\n" +
        "            \"idFields\":{\"index\":[\"f1\", \"f2\"]},\n" +
        "            \"timestamp\": {\"field\":\"ts\"},\n" +
        "            \"indexSuffixFormatter\":{\"type\": \"date\", \"properties\":{\"dateFormat\":\"YYYYMMdd\"}}\n" +
        "        }\n" +
        "    }";
    final ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(ElasticSearchSink.class, "elasticsearch"));
    jsonMapper.setInjectableValues(new InjectableValues() {
        @Override
        public Object findInjectableValue(
            Object valueId,
            DeserializationContext ctxt,
            BeanProperty forProperty,
            Object beanInstance
        ) {
            if (valueId.equals(ObjectMapper.class.getCanonicalName())) {
                return jsonMapper;
            } else {
                return null;
            }
        }
    });

    Sink sink = jsonMapper.readValue(desc, new TypeReference<Sink>(){});
    assertTrue(sink instanceof ElasticSearchSink);
    ElasticSearchSink esSink = (ElasticSearchSink) sink;
    esSink.createClient();
    RestClient client = esSink.getClient();
    IClientConfig config = ((BaseLoadBalancer) client.getLoadBalancer()).getClientConfig();
    assertTrue(config.get(CommonClientConfigKey.OkToRetryOnAllOperations));
    assertEquals(2, config.get(CommonClientConfigKey.MaxAutoRetriesNextServer).intValue());
    assertEquals(0, esSink.getSleepOverClientException());
    assertFalse(esSink.getReenqueueOnException());
}
 
Example 11
Source File: TestKafkaSink.java    From suro with Apache License 2.0
@Test
public void testConfigBackwardCompatible() throws IOException {
    int numPartitions = 9;

    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_BACKWARD_COMPAT,
                    "--replication-factor", "2", "--partitions", Integer.toString(numPartitions)}));
    String keyTopicMap = String.format("   \"keyTopicMap\": {\n" +
            "        \"%s\": \"key\"\n" +
            "    }", TOPIC_NAME_BACKWARD_COMPAT);

    String description1 = "{\n" +
            "    \"type\": \"Kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"ack\": 1,\n" +
            "     \"compression.type\": \"snappy\",\n" +
            keyTopicMap + "\n" +
            "}";
    String description2 = "{\n" +
            "    \"type\": \"Kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"request.required.acks\": 1,\n" +
            "     \"compression.codec\": \"snappy\",\n" +
            keyTopicMap + "\n" +
            "}";

    // setup sinks, both old and new versions
    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSink.class, "Kafka"));
    jsonMapper.setInjectableValues(new InjectableValues() {
        @Override
        public Object findInjectableValue(Object valueId, DeserializationContext ctxt, BeanProperty forProperty, Object beanInstance) {
            if (valueId.equals(KafkaRetentionPartitioner.class.getName())) {
                return new KafkaRetentionPartitioner();
            } else {
                return null;
            }
        }
    });
    KafkaSink sink1 = jsonMapper.readValue(description1, new TypeReference<Sink>(){});
    KafkaSink sink2 = jsonMapper.readValue(description2, new TypeReference<Sink>(){});
    sink1.open();
    sink2.open();
    List<Sink> sinks = new ArrayList<Sink>();
    sinks.add(sink1);
    sinks.add(sink2);

    // setup Kafka consumer (to read back messages)
    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
            createConsumerConfig("localhost:" + zk.getServerPort(), "groupid"));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC_NAME_BACKWARD_COMPAT, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
            consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_BACKWARD_COMPAT).get(0);

    // Send 20 test messages using the old and new Kafka sinks.
    // Retrieve the messages and ensure that they are identical and sent to the same partition.
    Random rand = new Random();
    int messageCount = 20;
    for (int i = 0; i < messageCount; ++i) {
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
                .put("key", new Long( rand.nextLong() ) )
                .put("value", "message:" + i).build();

        // send message to both sinks
        for (Sink sink : sinks) {
            sink.writeTo(new DefaultMessageContainer(
                    new Message(TOPIC_NAME_BACKWARD_COMPAT, jsonMapper.writeValueAsBytes(msgMap)),
                    jsonMapper));
        }

        // read two copies of message back from Kafka and check that partitions and data match
        MessageAndMetadata<byte[], byte[]> msgAndMeta1 = stream.iterator().next();
        MessageAndMetadata<byte[], byte[]> msgAndMeta2 = stream.iterator().next();
        System.out.println( "iteration: "+i+" partition1: "+msgAndMeta1.partition() );
        System.out.println( "iteration: "+i+" partition2: "+msgAndMeta2.partition() );
        assertEquals(msgAndMeta1.partition(), msgAndMeta2.partition());
        String msg1Str = new String( msgAndMeta1.message() );
        String msg2Str = new String( msgAndMeta2.message() );
        System.out.println( "iteration: "+i+" message1: "+msg1Str );
        System.out.println( "iteration: "+i+" message2: "+msg2Str );
        assertEquals(msg1Str, msg2Str);
    }

    // close sinks
    sink1.close();
    sink2.close();
    // close consumer
    try {
        stream.iterator().next();
        fail(); // there should be no data left to consume
    } catch (ConsumerTimeoutException e) {
        // this is expected
        consumer.shutdown();
    }
}
 
Example 12
Source File: TestKafkaSinkV2.java    From suro with Apache License 2.0
/** Tests backward compatibility with the old Kafka sink. */
@Test
public void testBackwardCompatibility() throws Exception {
    int numPartitions = 9;

    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_BACKWARD_COMPAT,
                    "--replication-factor", "2", "--partitions", Integer.toString(numPartitions)}));
    String keyTopicMap = String.format("   \"keyTopicMap\": {\n" +
            "        \"%s\": \"key\"\n" +
            "    }", TOPIC_NAME_BACKWARD_COMPAT);

    String description1 = "{\n" +
        "    \"type\": \"kafkaV1\",\n" +
        "    \"client.id\": \"kafkasink\",\n" +
        "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
        "    \"ack\": 1,\n" +
        keyTopicMap + "\n" +
        "}";
    String description2 = "{\n" +
        "    \"type\": \"kafkaV2\",\n" +
        "    \"client.id\": \"kafkasink\",\n" +
        "    \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
        "    \"request.required.acks\": 1,\n" +
        keyTopicMap + "\n" +
        "}";

    // setup sinks, both old and new versions
    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSink.class, "kafkaV1"));
    jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafkaV2"));
    jsonMapper.setInjectableValues(new InjectableValues() {
        @Override
        public Object findInjectableValue(Object valueId, DeserializationContext ctxt, BeanProperty forProperty, Object beanInstance) {
            if (valueId.equals(KafkaRetentionPartitioner.class.getName())) {
                return new KafkaRetentionPartitioner();
            } else {
                return null;
            }
        }
    });
    KafkaSink sinkV1 = jsonMapper.readValue(description1, new TypeReference<Sink>(){});
    KafkaSinkV2 sinkV2 = jsonMapper.readValue(description2, new TypeReference<Sink>(){});
    sinkV1.open();
    sinkV2.open();
    List<Sink> sinks = new ArrayList<Sink>();
    sinks.add(sinkV1);
    sinks.add(sinkV2);

    // setup Kafka consumer (to read back messages)
    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
        createConsumerConfig("localhost:" + zk.getServerPort(), "groupid"));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC_NAME_BACKWARD_COMPAT, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
            consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_BACKWARD_COMPAT).get(0);

    // Send 20 test messages using the old and new Kafka sinks.
    // Retrieve the messages and ensure that they are identical and sent to the same partition.
    Random rand = new Random();
    int messageCount = 20;
    for (int i = 0; i < messageCount; ++i) {
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
                .put("key", new Long( rand.nextLong() ) )
                .put("value", "message:" + i).build();

        // send message to both sinks
        for (Sink sink : sinks) {
            sink.writeTo(new DefaultMessageContainer(
                    new Message(TOPIC_NAME_BACKWARD_COMPAT, jsonMapper.writeValueAsBytes(msgMap)),
                    jsonMapper));
        }

        // read two copies of message back from Kafka and check that partitions and data match
        MessageAndMetadata<byte[], byte[]> msgAndMeta1 = stream.iterator().next();
        MessageAndMetadata<byte[], byte[]> msgAndMeta2 = stream.iterator().next();
        System.out.println( "iteration: "+i+" partition1: "+msgAndMeta1.partition() );
        System.out.println( "iteration: "+i+" partition2: "+msgAndMeta2.partition() );
        assertEquals(msgAndMeta1.partition(), msgAndMeta2.partition());
        String msg1Str = new String( msgAndMeta1.message() );
        String msg2Str = new String( msgAndMeta2.message() );
        System.out.println( "iteration: "+i+" message1: "+msg1Str );
        System.out.println( "iteration: "+i+" message2: "+msg2Str );
        assertEquals(msg1Str, msg2Str);
    }

    // close sinks
    sinkV1.close();
    sinkV2.close();
    // close consumer
    try {
        stream.iterator().next();
        fail(); // there should be no data left to consume
    } catch (ConsumerTimeoutException e) {
        // this is expected
        consumer.shutdown();
    }
}