Java Code Examples for com.fasterxml.jackson.databind.node.ObjectNode#withArray()

The following examples show how to use com.fasterxml.jackson.databind.node.ObjectNode#withArray(). You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example 1
Source File: SchemaGenerationContextImpl.java    From jsonschema-generator with Apache License 2.0 5 votes vote down vote up
/**
 * Preparation Step: create a node for a schema representing the given field's associated value type.
 *
 * @param field field/property to populate the schema node for
 * @return schema node representing the given field/property
 */
private ObjectNode populateFieldSchema(FieldScope field) {
    List<ResolvedType> overriddenTypes = this.generatorConfig.resolveTargetTypeOverrides(field);
    if (overriddenTypes == null) {
        overriddenTypes = this.generatorConfig.resolveSubtypes(field.getType(), this);
    }
    final List<FieldScope> candidates;
    if (overriddenTypes != null && !overriddenTypes.isEmpty()) {
        candidates = overriddenTypes.stream()
                .map(field::withOverriddenType)
                .collect(Collectors.toList());
    } else {
        candidates = Collections.singletonList(field);
    }
    // null-ability is determined from the declared type (not from an overridden one)
    boolean isNullable = !field.getRawMember().isEnumConstant()
            && !field.isFakeContainerItemScope()
            && this.generatorConfig.isNullable(field);
    if (candidates.size() == 1) {
        return this.createFieldSchema(candidates.get(0), isNullable, false, null);
    }
    // multiple candidate types: combine their schemas in an "anyOf" wrapper
    ObjectNode wrapperSchema = this.generatorConfig.createObjectNode();
    ArrayNode anyOf = wrapperSchema.withArray(this.getKeyword(SchemaKeyword.TAG_ANYOF));
    if (isNullable) {
        anyOf.addObject()
                .put(this.getKeyword(SchemaKeyword.TAG_TYPE), this.getKeyword(SchemaKeyword.TAG_TYPE_NULL));
    }
    for (FieldScope candidate : candidates) {
        anyOf.add(this.createFieldSchema(candidate, false, false, null));
    }
    return wrapperSchema;
}
 
Example 2
Source File: SchemaGenerationContextImpl.java    From jsonschema-generator with Apache License 2.0 5 votes vote down vote up
/**
 * Preparation Step: create a node for a schema representing the given method's associated return type.
 *
 * @param method method to populate the schema node for
 * @return schema node representing the given method's return type
 */
private JsonNode populateMethodSchema(MethodScope method) {
    List<ResolvedType> overriddenTypes = this.generatorConfig.resolveTargetTypeOverrides(method);
    if (overriddenTypes == null && !method.isVoid()) {
        overriddenTypes = this.generatorConfig.resolveSubtypes(method.getType(), this);
    }
    final List<MethodScope> candidates;
    if (overriddenTypes != null && !overriddenTypes.isEmpty()) {
        candidates = overriddenTypes.stream()
                .map(method::withOverriddenType)
                .collect(Collectors.toList());
    } else {
        candidates = Collections.singletonList(method);
    }
    // null-ability is determined from the declared type (not from an overridden one)
    boolean isNullable = method.isVoid()
            || !method.isFakeContainerItemScope() && this.generatorConfig.isNullable(method);
    if (candidates.size() == 1) {
        return this.createMethodSchema(candidates.get(0), isNullable, false, null);
    }
    // multiple candidate types: combine their schemas in an "anyOf" wrapper
    ObjectNode wrapperSchema = this.generatorConfig.createObjectNode();
    ArrayNode anyOf = wrapperSchema.withArray(this.getKeyword(SchemaKeyword.TAG_ANYOF));
    if (isNullable) {
        anyOf.add(this.generatorConfig.createObjectNode()
                .put(this.getKeyword(SchemaKeyword.TAG_TYPE), this.getKeyword(SchemaKeyword.TAG_TYPE_NULL)));
    }
    for (MethodScope candidate : candidates) {
        anyOf.add(this.createMethodSchema(candidate, false, false, null));
    }
    return wrapperSchema;
}
 
Example 3
Source File: Swagger2Module.java    From jsonschema-generator with Apache License 2.0 5 votes vote down vote up
/**
 * Consider various remaining aspects.
 * <ul>
 * <li>{@code @Schema(not = ...)}</li>
 * <li>{@code @Schema(allOf = ...)}</li>
 * <li>{@code @Schema(minProperties = ...)}</li>
 * <li>{@code @Schema(maxProperties = ...)}</li>
 * <li>{@code @Schema(requiredProperties = ...)}</li>
 * </ul>
 *
 * @param memberAttributes already collected schema for the field/method
 * @param member targeted field/method
 * @param context generation context
 */
protected void overrideInstanceAttributes(ObjectNode memberAttributes, MemberScope<?, ?> member, SchemaGenerationContext context) {
    Schema annotation = this.getSchemaAnnotationValue(member, Function.identity(), x -> true)
            .orElse(null);
    if (annotation == null) {
        // nothing to override without an applicable @Schema annotation
        return;
    }
    if (annotation.not() != Void.class) {
        memberAttributes.set("not", context.createDefinitionReference(context.getTypeContext().resolve(annotation.not())));
    }
    if (annotation.allOf().length > 0) {
        ArrayNode allOfArray = memberAttributes.withArray(context.getKeyword(SchemaKeyword.TAG_ALLOF));
        for (Class<?> rawType : annotation.allOf()) {
            allOfArray.add(context.createDefinitionReference(context.getTypeContext().resolve(rawType)));
        }
    }
    if (annotation.minProperties() > 0) {
        memberAttributes.put("minProperties", annotation.minProperties());
    }
    if (annotation.maxProperties() > 0) {
        memberAttributes.put("maxProperties", annotation.maxProperties());
    }
    if (annotation.requiredProperties().length > 0) {
        // avoid duplicating entries that are already present in the "required" array
        ArrayNode requiredArray = memberAttributes
                .withArray(context.getKeyword(SchemaKeyword.TAG_REQUIRED));
        Set<String> existingEntries = new HashSet<>();
        requiredArray.forEach(arrayItem -> existingEntries.add(arrayItem.asText()));
        for (String fieldName : annotation.requiredProperties()) {
            if (!existingEntries.contains(fieldName)) {
                requiredArray.add(fieldName);
            }
        }
    }
}
 
Example 4
Source File: ElasticsearchRel.java    From Quicksql with MIT License 5 votes vote down vote up
/**
 * Builds the Elasticsearch query JSON for the given relational input.
 *
 * @param input relational expression to implement
 * @param fields projected fields and their types
 * @return query as a JSON string
 * @throws IOException if one of the previously collected JSON fragments cannot be parsed
 */
public String convert(RelNode input, List<Pair<String, Class>> fields) throws IOException {
    ((ElasticsearchRel) input).implement(this);

    ObjectMapper mapper = new ObjectMapper();
    if (!aggregations.isEmpty()) {
        // aggregation queries are assembled separately
        return aggregate(fields, mapper);
    }

    final ObjectNode query = mapper.createObjectNode();
    // merge the previously concatenated JSON fragments into a single query object
    for (String fragment : list) {
        query.setAll((ObjectNode) mapper.readTree(fragment));
    }

    if (!sort.isEmpty()) {
        ArrayNode sortArray = query.withArray("sort");
        sort.forEach(entry -> sortArray.add(
            mapper.createObjectNode().put(entry.getKey(), entry.getValue().isDescending() ? "desc" : "asc")));
    }

    if (offset != null) {
        query.put("from", offset);
    }
    if (fetch != null) {
        query.put("size", fetch);
    }

    return query.toString();
}
 
Example 5
Source File: ElasticsearchTransport.java    From calcite with Apache License 2.0 5 votes vote down vote up
void closeScroll(Iterable<String> scrollIds) {
  Objects.requireNonNull(scrollIds, "scrollIds");

  // the scroll API requires an HTTP DELETE with a body, which HttpDelete does not support
  final HttpEntityEnclosingRequestBase request = new HttpEntityEnclosingRequestBase() {
    @Override public String getMethod() {
      return HttpDelete.METHOD_NAME;
    }
  };
  request.setURI(URI.create("/_search/scroll"));

  final ObjectNode payload = mapper().createObjectNode();
  // ES2 expects json array for DELETE scroll API
  final ArrayNode ids = payload.withArray("scroll_id");
  for (String scrollId : scrollIds) {
    ids.add(new TextNode(scrollId));
  }

  try {
    final String json = mapper().writeValueAsString(payload);
    request.setEntity(new StringEntity(json, ContentType.APPLICATION_JSON));
    rawHttp().apply(request);
  } catch (IOException | UncheckedIOException e) {
    // best-effort cleanup: log instead of propagating
    LOGGER.warn("Failed to close scroll(s): {}", scrollIds, e);
  }
}
 
Example 6
Source File: ElasticsearchOps.java    From immutables with Apache License 2.0 5 votes vote down vote up
// Issues a DELETE against the scroll API to release the given scroll contexts.
Completable closeScroll(Iterable<String> scrollIds) {
  final ObjectNode payload = mapper.createObjectNode();
  final ArrayNode ids = payload.withArray("scroll_id");
  for (String scrollId : scrollIds) {
    ids.add(scrollId);
  }
  final Request request = new Request("DELETE", "/_search/scroll");
  request.setJsonEntity(payload.toString());
  // only completion/failure matters to callers, not the response body
  return transport.execute(request).ignoreElement();
}
 
Example 7
Source File: QueryBuilders.java    From immutables with Apache License 2.0 5 votes vote down vote up
// Attaches the given clauses to the node under the given field name:
// a single clause is set directly, multiple clauses are wrapped in a JSON array.
private static void writeJsonArray(String field, List<QueryBuilder> clauses, ObjectNode node, ObjectMapper mapper) {
  if (clauses.isEmpty()) {
    return;
  }
  if (clauses.size() == 1) {
    // no need for an array wrapper around a single clause
    node.set(field, clauses.get(0).toJson(mapper));
    return;
  }
  final ArrayNode array = node.withArray(field);
  clauses.forEach(clause -> array.add(clause.toJson(mapper)));
}
 
Example 8
Source File: JsonSubTypesResolver.java    From jsonschema-generator with Apache License 2.0 4 votes vote down vote up
/**
 * Create the custom schema definition for the given subtype, considering the {@link JsonTypeInfo#include()} setting.
 *
 * @param javaType targeted subtype
 * @param typeInfoAnnotation annotation for looking up the type identifier and determining the kind of inclusion/serialization
 * @param attributesToInclude optional: additional attributes to include on the actual/contained schema definition
 * @param context generation context
 * @return created custom definition (or {@code null} if no supported subtype resolution scenario could be detected)
 */
private ObjectNode createSubtypeDefinition(ResolvedType javaType, JsonTypeInfo typeInfoAnnotation, ObjectNode attributesToInclude,
        SchemaGenerationContext context) {
    final String typeIdentifier = this.getTypeIdentifier(javaType, typeInfoAnnotation);
    if (typeIdentifier == null) {
        // without a resolvable type identifier, no subtype schema can be built
        return null;
    }
    final ObjectNode definition = context.getGeneratorConfig().createObjectNode();
    switch (typeInfoAnnotation.include()) {
    case WRAPPER_ARRAY:
        // serialized form is a two-element array: [type identifier, actual payload]
        definition.put(context.getKeyword(SchemaKeyword.TAG_TYPE), context.getKeyword(SchemaKeyword.TAG_TYPE_ARRAY));
        ArrayNode itemsArray = definition.withArray(context.getKeyword(SchemaKeyword.TAG_ITEMS));
        // first item: the constant type identifier string
        itemsArray.addObject()
                .put(context.getKeyword(SchemaKeyword.TAG_TYPE), context.getKeyword(SchemaKeyword.TAG_TYPE_STRING))
                .put(context.getKeyword(SchemaKeyword.TAG_CONST), typeIdentifier);
        // second item: the subtype's schema, combined with the extra attributes via "allOf" if present
        if (attributesToInclude == null || attributesToInclude.isEmpty()) {
            itemsArray.add(context.createStandardDefinitionReference(javaType, this));
        } else {
            itemsArray.addObject()
                    .withArray(context.getKeyword(SchemaKeyword.TAG_ALLOF))
                    .add(context.createStandardDefinitionReference(javaType, this))
                    .add(attributesToInclude);
        }
        break;
    case WRAPPER_OBJECT:
        // serialized form is an object with a single property named after the type identifier
        definition.put(context.getKeyword(SchemaKeyword.TAG_TYPE), context.getKeyword(SchemaKeyword.TAG_TYPE_OBJECT));
        ObjectNode propertiesNode = definition.with(context.getKeyword(SchemaKeyword.TAG_PROPERTIES));
        if (attributesToInclude == null || attributesToInclude.isEmpty()) {
            propertiesNode.set(typeIdentifier, context.createStandardDefinitionReference(javaType, this));
        } else {
            // combine the subtype's schema with the extra attributes via "allOf" under the identifier property
            propertiesNode.with(typeIdentifier)
                    .withArray(context.getKeyword(SchemaKeyword.TAG_ALLOF))
                    .add(context.createStandardDefinitionReference(javaType, this))
                    .add(attributesToInclude);
        }
        break;
    case PROPERTY:
    case EXISTING_PROPERTY:
        // the type identifier appears as an additional property alongside the subtype's own properties;
        // fall back on the default property name if the annotation does not declare one
        final String propertyName = Optional.ofNullable(typeInfoAnnotation.property())
                .filter(name -> !name.isEmpty())
                .orElseGet(() -> typeInfoAnnotation.use().getDefaultPropertyName());
        ObjectNode additionalPart = definition.withArray(context.getKeyword(SchemaKeyword.TAG_ALLOF))
                .add(context.createStandardDefinitionReference(javaType, this))
                .addObject();
        if (attributesToInclude != null && !attributesToInclude.isEmpty()) {
            additionalPart.setAll(attributesToInclude);
        }
        additionalPart.put(context.getKeyword(SchemaKeyword.TAG_TYPE), context.getKeyword(SchemaKeyword.TAG_TYPE_OBJECT))
                .with(context.getKeyword(SchemaKeyword.TAG_PROPERTIES))
                .with(propertyName)
                .put(context.getKeyword(SchemaKeyword.TAG_CONST), typeIdentifier);
        break;
    default:
        // any other inclusion mode (presumably e.g. EXTERNAL_PROPERTY) is not supported here
        return null;
    }
    return definition;
}
 
Example 9
Source File: ElasticsearchTable.java    From calcite with Apache License 2.0 4 votes vote down vote up
/**
 * Executes a "find" operation on the underlying index.
 *
 * @param ops List of operations represented as Json strings.
 * @param fields List of fields to project; or null to return map
 * @param sort list of fields to sort and their direction (asc/desc)
 * @param groupBy list of fields to group by
 * @param aggregations aggregation functions
 * @param mappings field name to type mappings
 * @param offset number of leading results to skip (or null)
 * @param fetch maximum number of results to return (or null)
 * @return Enumerator of results
 * @throws IOException if a previously built JSON fragment cannot be parsed
 */
private Enumerable<Object> find(List<String> ops,
    List<Map.Entry<String, Class>> fields,
    List<Map.Entry<String, RelFieldCollation.Direction>> sort,
    List<String> groupBy,
    List<Map.Entry<String, String>> aggregations,
    Map<String, String> mappings,
    Long offset, Long fetch) throws IOException {

  if (!aggregations.isEmpty() || !groupBy.isEmpty()) {
    // aggregation requests use a different request/response structure
    return aggregate(ops, fields, sort, groupBy, aggregations, mappings, offset, fetch);
  }

  final ObjectNode query = mapper.createObjectNode();
  // merge the previously concatenated JSON fragments into a single query object
  for (String fragment : ops) {
    query.setAll((ObjectNode) mapper.readTree(fragment));
  }

  if (!sort.isEmpty()) {
    ArrayNode sortArray = query.withArray("sort");
    for (Map.Entry<String, RelFieldCollation.Direction> entry : sort) {
      String direction = entry.getValue().isDescending() ? "desc" : "asc";
      sortArray.add(mapper.createObjectNode().put(entry.getKey(), direction));
    }
  }

  if (offset != null) {
    query.put("from", offset);
  }
  if (fetch != null) {
    query.put("size", fetch);
  }

  final Function1<ElasticsearchJson.SearchHit, Object> getter =
      ElasticsearchEnumerators.getter(fields, ImmutableMap.copyOf(mappings));

  final Iterable<ElasticsearchJson.SearchHit> hits;
  if (offset != null) {
    // the scroll API cannot honor "from": issue a plain search instead
    final ElasticsearchJson.Result result = transport.search().apply(query);
    hits = () -> result.searchHits().hits().iterator();
  } else {
    hits = () -> new Scrolling(transport).query(query);
  }

  return Linq4j.asEnumerable(hits).select(getter);
}
 
Example 10
Source File: ElasticsearchBackend.java    From immutables with Apache License 2.0 4 votes vote down vote up
// Translates the given Select operation into an Elasticsearch search request
// and returns the (possibly scrolled) result stream.
private Flowable<?> select(StandardOperations.Select op) {
  final Query query = op.query();

  if (query.distinct()) {
    return Flowable.error(new UnsupportedOperationException("DISTINCT not yet supported by " + ElasticsearchBackend.class.getSimpleName()));
  }
  if (query.count()) {
    // counts are resolved via a dedicated API call
    return new CountCall(op, this).call().toFlowable();
  }
  if (query.hasAggregations()) {
    return aggregate(op);
  }

  final ObjectNode json = objectMapper.createObjectNode();
  query.filter().ifPresent(filter -> json.set("query",
      Elasticsearch.constantScoreQuery(objectMapper, pathNaming, idPredicate).convert(filter)));
  query.limit().ifPresent(limit -> json.put("size", limit));
  query.offset().ifPresent(from -> json.put("from", from));

  if (!query.collations().isEmpty()) {
    final ArrayNode sortArray = json.withArray("sort");
    query.collations().forEach(collation -> sortArray.add(
        objectMapper.createObjectNode()
            .put(collation.path().toStringPath(), collation.direction().isAscending() ? "asc" : "desc")));
  }

  JsonConverter converter = this.converter;
  if (query.hasProjections()) {
    // restrict "_source" to the projected paths and convert hits to tuples
    final ArrayNode projection = objectMapper.createArrayNode();
    query.projections().forEach(p -> projection.add(((Path) p).toStringPath()));
    json.set("_source", projection);
    converter = new ToTupleConverter(query, objectMapper);
  }

  if (query.offset().isPresent()) {
    // scroll doesn't work with offset
    return ops.search(json, (JsonConverter<?>) converter);
  }
  return ops.scrolledSearch(json, (JsonConverter<?>) converter);
}