org.elasticsearch.index.mapper.FieldMapper Java Examples

The following examples show how to use org.elasticsearch.index.mapper.FieldMapper. Each example is taken from an open-source project; the originating source file and its license are noted above each snippet.
Example #1
Source File: SearchIntoParser.java    From elasticsearch-inout-plugin with Apache License 2.0
@Override
protected void validate(SearchIntoContext context) {
    if (!context.hasFieldNames()) {
        throw new SearchParseException(context, "No fields defined");
    }

    for (String field : context.fieldNames()) {
        FieldMapper<?> mapper = context.mapperService().smartNameFieldMapper(
                field);
        if (mapper == null && !field.equals(
                "_version") && !field.startsWith(
                FieldsParseElement.SCRIPT_FIELD_PREFIX)) {
            throw new SearchParseException(context,
                    "SearchInto field [" + field + "] does not exist in " +
                            "the mapping");
        }
    }
    super.validate(context);
}
 
Example #2
Source File: FragmentBuilderHelper.java    From Elasticsearch with Apache License 2.0
/**
 * Fixes problems with broken analysis chains if positions and offsets are messed up that can lead to
 * {@link StringIndexOutOfBoundsException} in the {@link FastVectorHighlighter}
 */
public static WeightedFragInfo fixWeightedFragInfo(FieldMapper mapper, Field[] values, WeightedFragInfo fragInfo) {
    assert fragInfo != null : "FragInfo must not be null";
    assert mapper.fieldType().names().indexName().equals(values[0].name()) : "Expected FieldMapper for field " + values[0].name();
    if (!fragInfo.getSubInfos().isEmpty() && (containsBrokenAnalysis(mapper.fieldType().indexAnalyzer()))) {
        /* This is a special case where broken analysis like WDF is used for term-vector creation at index-time
         * which can potentially mess up the offsets. To prevent a StringIndexOutOfBoundsException we need to re-sort
         * the fragments based on their offsets rather than using solely the positions as it is done in
         * the FastVectorHighlighter. Yet, this is really a Lucene problem and should be fixed in Lucene rather
         * than in this hack... aka. "we are working on it!" */
        final List<SubInfo> subInfos = fragInfo.getSubInfos();
        CollectionUtil.introSort(subInfos, new Comparator<SubInfo>() {
            @Override
            public int compare(SubInfo o1, SubInfo o2) {
                int startOffset = o1.getTermsOffsets().get(0).getStartOffset();
                int startOffset2 = o2.getTermsOffsets().get(0).getStartOffset();
                return FragmentBuilderHelper.compare(startOffset, startOffset2);
            }
        });
        return new WeightedFragInfo(Math.min(fragInfo.getSubInfos().get(0).getTermsOffsets().get(0).getStartOffset(),
                fragInfo.getStartOffset()), fragInfo.getEndOffset(), subInfos, fragInfo.getTotalBoost());
    } else {
        return fragInfo;
    }
}
 
Example #3
Source File: ReferenceMapper.java    From elasticsearch-plugin-bundle with GNU Affero General Public License v3.0
@Override
public ReferenceMapper build(BuilderContext context) {
    FieldMapper contentMapper = (FieldMapper) contentBuilder.build(context);
    setupFieldType(context);
    return new ReferenceMapper(name,
            fieldType,
            defaultFieldType,
            client,
            refIndex,
            refType,
            refFields,
            contentMapper,
            context.indexSettings(),
            multiFieldsBuilder.build(this, context),
            copyTo);
}
 
Example #4
Source File: TypeParsers.java    From Elasticsearch with Apache License 2.0
public static void parseTermVector(String fieldName, String termVector, FieldMapper.Builder builder) throws MapperParsingException {
    termVector = Strings.toUnderscoreCase(termVector);
    if ("no".equals(termVector)) {
        builder.storeTermVectors(false);
    } else if ("yes".equals(termVector)) {
        builder.storeTermVectors(true);
    } else if ("with_offsets".equals(termVector)) {
        builder.storeTermVectorOffsets(true);
    } else if ("with_positions".equals(termVector)) {
        builder.storeTermVectorPositions(true);
    } else if ("with_positions_offsets".equals(termVector)) {
        builder.storeTermVectorPositions(true);
        builder.storeTermVectorOffsets(true);
    } else if ("with_positions_payloads".equals(termVector)) {
        builder.storeTermVectorPositions(true);
        builder.storeTermVectorPayloads(true);
    } else if ("with_positions_offsets_payloads".equals(termVector)) {
        builder.storeTermVectorPositions(true);
        builder.storeTermVectorOffsets(true);
        builder.storeTermVectorPayloads(true);
    } else {
        throw new MapperParsingException("wrong value for termVector [" + termVector + "] for field [" + fieldName + "]");
    }
}
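
For context, here is a minimal usage sketch of the parser above. The call site, the StringFieldMapper.Builder instance and the field name "title" are illustrative assumptions rather than code from the original project, and class/import scaffolding is omitted as in the other examples.

// Hedged sketch: a "term_vector" value from a mapping fragment such as
//   "title": { "type": "string", "term_vector": "with_positions_offsets" }
// is forwarded to the shared parser, which enables the matching term-vector
// flags on the field builder.
StringFieldMapper.Builder builder = new StringFieldMapper.Builder("title");
TypeParsers.parseTermVector("title", "with_positions_offsets", builder);
// the builder is now configured to store term vectors with positions and offsets;
// any unrecognized value would raise a MapperParsingException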
 
Example #5
Source File: ReferenceMapper.java    From elasticsearch-plugin-bundle with GNU Affero General Public License v3.0
public ReferenceMapper(String simpleName,
                       MappedFieldType fieldType,
                       MappedFieldType defaultFieldType,
                       Client client,
                       String refindex,
                       String reftype,
                       List<String> reffields,
                       FieldMapper contentMapper,
                       Settings indexSettings,
                       MultiFields multiFields,
                       CopyTo copyTo) {
    super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, COPYTO_EMPTY);
    this.copyTo = copyTo;
    this.client = client;
    this.index = refindex;
    this.type = reftype;
    this.fields = reffields;
    this.contentMapper = contentMapper;
}
 
Example #6
Source File: FieldsVisitor.java    From Elasticsearch with Apache License 2.0
public void postProcess(DocumentMapper documentMapper) {
    for (Map.Entry<String, List<Object>> entry : fields().entrySet()) {
        String indexName = entry.getKey();
        FieldMapper fieldMapper = documentMapper.mappers().getMapper(indexName);
        if (fieldMapper == null) {
            // it's possible index name doesn't match field name (legacy feature)
            for (FieldMapper mapper : documentMapper.mappers()) {
                if (mapper.fieldType().names().indexName().equals(indexName)) {
                    fieldMapper = mapper;
                    break;
                }
            }
            if (fieldMapper == null) {
                // no index name or full name found, so skip
                continue;
            }
        }
        List<Object> fieldValues = entry.getValue();
        for (int i = 0; i < fieldValues.size(); i++) {
            fieldValues.set(i, fieldMapper.fieldType().valueForSearch(fieldValues.get(i)));
        }
    }
}
 
Example #7
Source File: HighlightUtils.java    From Elasticsearch with Apache License 2.0
static List<Object> loadFieldValues(SearchContextHighlight.Field field, FieldMapper mapper, SearchContext searchContext, FetchSubPhase.HitContext hitContext) throws IOException {
    //percolator needs to always load from source, thus it sets the global force source to true
    boolean forceSource = searchContext.highlight().forceSource(field);
    List<Object> textsToHighlight;
    if (!forceSource && mapper.fieldType().stored()) {
        CustomFieldsVisitor fieldVisitor = new CustomFieldsVisitor(ImmutableSet.of(mapper.fieldType().names().indexName()), false);
        hitContext.reader().document(hitContext.docId(), fieldVisitor);
        textsToHighlight = fieldVisitor.fields().get(mapper.fieldType().names().indexName());
        if (textsToHighlight == null) {
            // Can happen if the document doesn't have the field to highlight
            textsToHighlight = Collections.emptyList();
        }
    } else {
        SourceLookup sourceLookup = searchContext.lookup().source();
        sourceLookup.setSegmentAndDocument(hitContext.readerContext(), hitContext.docId());
        textsToHighlight = sourceLookup.extractRawValues(hitContext.getSourcePath(mapper.fieldType().names().fullName()));
    }
    assert textsToHighlight != null;
    return textsToHighlight;
}
 
Example #8
Source File: BaseGeoPointFieldMapper.java    From Elasticsearch with Apache License 2.0
@Override
public FieldMapper updateFieldType(Map<String, MappedFieldType> fullNameToFieldType) {
    BaseGeoPointFieldMapper updated = (BaseGeoPointFieldMapper) super.updateFieldType(fullNameToFieldType);
    StringFieldMapper geoUpdated = geoHashMapper == null ? null : (StringFieldMapper) geoHashMapper.updateFieldType(fullNameToFieldType);
    DoubleFieldMapper latUpdated = latMapper == null ? null : (DoubleFieldMapper) latMapper.updateFieldType(fullNameToFieldType);
    DoubleFieldMapper lonUpdated = lonMapper == null ? null : (DoubleFieldMapper) lonMapper.updateFieldType(fullNameToFieldType);
    if (updated == this
            && geoUpdated == geoHashMapper
            && latUpdated == latMapper
            && lonUpdated == lonMapper) {
        return this;
    }
    if (updated == this) {
        updated = (BaseGeoPointFieldMapper) updated.clone();
    }
    updated.geoHashMapper = geoUpdated;
    updated.latMapper = latUpdated;
    updated.lonMapper = lonUpdated;
    return updated;
}
 
Example #9
Source File: TypeParsers.java    From Elasticsearch with Apache License 2.0
@SuppressWarnings("unchecked")
public static void parseCopyFields(Object propNode, FieldMapper.Builder builder) {
    FieldMapper.CopyTo.Builder copyToBuilder = new FieldMapper.CopyTo.Builder();
    if (isArray(propNode)) {
        for(Object node : (List<Object>) propNode) {
            copyToBuilder.add(nodeStringValue(node, null));
        }
    } else {
        copyToBuilder.add(nodeStringValue(propNode, null));
    }
    builder.copyTo(copyToBuilder.build());
}
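
A minimal sketch of how this method is typically reached: the value of a "copy_to" mapping entry may be a single field name or a list of names, and both forms are collected into one FieldMapper.CopyTo on the builder. The builder instance and field names below are illustrative assumptions, not code from the original project; imports and the surrounding class are omitted as in the other examples.

// Hedged sketch: the single-string and the list form of "copy_to" are equivalent.
StringFieldMapper.Builder builder = new StringFieldMapper.Builder("first_name");
TypeParsers.parseCopyFields("full_name", builder);                              // copy_to: "full_name"
TypeParsers.parseCopyFields(Arrays.asList("full_name", "suggest"), builder);    // copy_to: ["full_name", "suggest"]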
 
Example #10
Source File: TypeParsers.java    From Elasticsearch with Apache License 2.0
public static void parseIndex(String fieldName, String index, FieldMapper.Builder builder) throws MapperParsingException {
    index = Strings.toUnderscoreCase(index);
    if ("no".equals(index)) {
        builder.index(false);
    } else if ("not_analyzed".equals(index)) {
        builder.index(true);
        builder.tokenized(false);
    } else if ("analyzed".equals(index)) {
        builder.index(true);
        builder.tokenized(true);
    } else {
        throw new MapperParsingException("wrong value for index [" + index + "] for field [" + fieldName + "]");
    }
}
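
To make the legacy "index" values concrete, a small hedged sketch follows; the StringFieldMapper.Builder call site and the field name are assumptions for illustration only.

// Hedged sketch: pre-5.x "index" values and the builder flags they imply.
//   "no"           -> index(false)                     field is not searchable
//   "not_analyzed" -> index(true), tokenized(false)    indexed as a single term
//   "analyzed"     -> index(true), tokenized(true)     run through the analyzer chain
StringFieldMapper.Builder statusField = new StringFieldMapper.Builder("status");
TypeParsers.parseIndex("status", "not_analyzed", statusField);
// any other value, e.g. "maybe", raises a MapperParsingException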
 
Example #11
Source File: SourceScoreOrderFragmentsBuilder.java    From Elasticsearch with Apache License 2.0
public SourceScoreOrderFragmentsBuilder(FieldMapper mapper, SearchContext searchContext,
                                        FetchSubPhase.HitContext hitContext, String[] preTags, String[] postTags, BoundaryScanner boundaryScanner) {
    super(preTags, postTags, boundaryScanner);
    this.mapper = mapper;
    this.searchContext = searchContext;
    this.hitContext = hitContext;
}
 
Example #12
Source File: TransportGetFieldMappingsIndexAction.java    From Elasticsearch with Apache License 2.0
private void addFieldMapper(String field, FieldMapper fieldMapper, MapBuilder<String, FieldMappingMetaData> fieldMappings, boolean includeDefaults) {
    if (fieldMappings.containsKey(field)) {
        return;
    }
    try {
        XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
        builder.startObject();
        fieldMapper.toXContent(builder, includeDefaults ? includeDefaultsParams : ToXContent.EMPTY_PARAMS);
        builder.endObject();
        fieldMappings.put(field, new FieldMappingMetaData(fieldMapper.fieldType().names().fullName(), builder.bytes()));
    } catch (IOException e) {
        throw new ElasticsearchException("failed to serialize XContent of field [" + field + "]", e);
    }
}
 
Example #13
Source File: MinHashFieldMapper.java    From elasticsearch-minhash with Apache License 2.0
/** Creates a copy of the current field with the given field name and boost. */
private static void parseCopy(final String field, final ParseContext context)
        throws IOException {
    Mapper mapper = context.docMapper().mappers().getMapper(field);
    if (mapper != null) {
        if (mapper instanceof FieldMapper) {
            ((FieldMapper) mapper).parse(context);
        } else if (mapper instanceof FieldAliasMapper) {
            throw new IllegalArgumentException("Cannot copy to a field alias [" + mapper.name() + "].");
        } else {
            throw new IllegalStateException("The provided mapper [" + mapper.name() +
                "] has an unrecognized type [" + mapper.getClass().getSimpleName() + "].");
        }
    }
}
 
Example #14
Source File: LangdetectMapper.java    From elasticsearch-plugin-bundle with GNU Affero General Public License v3.0
@SuppressWarnings("unchecked")
private static void parseLanguageToFields(ParseContext originalContext, Object languageToFields) throws IOException {
    List<Object> fieldList = languageToFields instanceof List ?
            (List<Object>)languageToFields : Collections.singletonList(languageToFields);
    ParseContext context = originalContext.createCopyToContext();
    for (Object field : fieldList) {
        ParseContext.Document targetDoc = null;
        for (ParseContext.Document doc = context.doc(); doc != null; doc = doc.getParent()) {
            if (field.toString().startsWith(doc.getPrefix())) {
                targetDoc = doc;
                break;
            }
        }
        if (targetDoc == null) {
            throw new IllegalArgumentException("target doc is null");
        }
        final ParseContext copyToContext;
        if (targetDoc == context.doc()) {
            copyToContext = context;
        } else {
            copyToContext = context.switchDoc(targetDoc);
        }
        FieldMapper fieldMapper = copyToContext.docMapper().mappers().getMapper(field.toString());
        if (fieldMapper != null) {
            fieldMapper.parse(copyToContext);
        } else {
            throw new MapperParsingException("attempt to copy value to non-existing field [" + field + "]");
        }
    }
}
 
Example #15
Source File: HighlighterContext.java    From Elasticsearch with Apache License 2.0
public HighlighterContext(String fieldName, SearchContextHighlight.Field field, FieldMapper mapper, SearchContext context,
        FetchSubPhase.HitContext hitContext, Query query) {
    this.fieldName = fieldName;
    this.field = field;
    this.mapper = mapper;
    this.context = context;
    this.hitContext = hitContext;
    this.query = query;
}
 
Example #16
Source File: ReferenceMapper.java    From elasticsearch-plugin-bundle with GNU Affero General Public License v3.0
private static void parseCopyFields(ParseContext originalContext, List<String> copyToFields) throws IOException {
    if (!originalContext.isWithinCopyTo() && !copyToFields.isEmpty()) {
        ParseContext context = originalContext.createCopyToContext();
        for (String field : copyToFields) {
            // In case of a hierarchy of nested documents, we need to figure out
            // which document the field should go to
            ParseContext.Document targetDoc = null;
            for (ParseContext.Document doc = context.doc(); doc != null; doc = doc.getParent()) {
                if (field.startsWith(doc.getPrefix())) {
                    targetDoc = doc;
                    break;
                }
            }
            if (targetDoc == null) {
                throw new IllegalArgumentException("target doc is null");
            }
            final ParseContext copyToContext;
            if (targetDoc == context.doc()) {
                copyToContext = context;
            } else {
                copyToContext = context.switchDoc(targetDoc);
            }
            // simplified - no dynamic field creation
            FieldMapper fieldMapper = copyToContext.docMapper().mappers().getMapper(field);
            if (fieldMapper != null) {
                fieldMapper.parse(copyToContext);
            } else {
                throw new MapperParsingException("attempt to copy value to non-existing field [" + field + "]");
            }
        }
    }
}
 
Example #17
Source File: TopKParser.java    From elasticsearch-topk-plugin with Apache License 2.0
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {

    ValuesSourceConfig<ValuesSource.Bytes> config = new ValuesSourceConfig<>(ValuesSource.Bytes.class);
    
    String field = null;
    Number size = null;
    Number capacity = 1000;

    XContentParser.Token token;
    String currentFieldName = null;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (token == XContentParser.Token.VALUE_STRING) {
            if ("field".equals(currentFieldName)) {
                field = parser.text();
            } else {
                throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
            }
        } else if (token == XContentParser.Token.VALUE_NUMBER) {
            if ("size".equals(currentFieldName)) {
                size = parser.numberValue();
            } else if ("capacity".equals(currentFieldName)) {
                capacity = parser.numberValue();
            } else {
                throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
            }
        } else {
            throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
        }
    }

    if (field == null) {
        throw new SearchParseException(context, "Key 'field' cannot be null.");
    }
    if (size == null) {
        throw new SearchParseException(context, "Key 'size' cannot be null.");
    }

    FieldMapper<?> mapper = context.smartNameFieldMapper(field);
    if (mapper == null) {
        config.unmapped(true);
        return new TopKAggregator.Factory(aggregationName, config, size, capacity);
    }
    config.fieldContext(new FieldContext(field, context.fieldData().getForField(mapper), mapper));
    return new TopKAggregator.Factory(aggregationName, config, size, capacity);
}
 
Example #18
Source File: TypeParsers.java    From Elasticsearch with Apache License 2.0
/**
 * Parse common field attributes such as {@code doc_values} or {@code store}.
 */
public static void parseField(FieldMapper.Builder builder, String name, Map<String, Object> fieldNode, Mapper.TypeParser.ParserContext parserContext) {
    Version indexVersionCreated = parserContext.indexVersionCreated();
    for (Iterator<Map.Entry<String, Object>> iterator = fieldNode.entrySet().iterator(); iterator.hasNext();) {
        Map.Entry<String, Object> entry = iterator.next();
        final String propName = Strings.toUnderscoreCase(entry.getKey());
        final Object propNode = entry.getValue();
        if (propName.equals("index_name") && indexVersionCreated.before(Version.V_2_0_0_beta1)) {
            builder.indexName(propNode.toString());
            iterator.remove();
        } else if (propName.equals("store")) {
            builder.store(parseStore(name, propNode.toString()));
            iterator.remove();
        } else if (propName.equals("index")) {
            parseIndex(name, propNode.toString(), builder);
            iterator.remove();
        } else if (propName.equals(DOC_VALUES)) {
            builder.docValues(nodeBooleanValue(propNode));
            iterator.remove();
        } else if (propName.equals("boost")) {
            builder.boost(nodeFloatValue(propNode));
            iterator.remove();
        } else if (propName.equals("omit_norms")) {
            builder.omitNorms(nodeBooleanValue(propNode));
            iterator.remove();
        } else if (propName.equals("norms")) {
            final Map<String, Object> properties = nodeMapValue(propNode, "norms");
            for (Iterator<Entry<String, Object>> propsIterator = properties.entrySet().iterator(); propsIterator.hasNext();) {
                Entry<String, Object> entry2 = propsIterator.next();
                final String propName2 = Strings.toUnderscoreCase(entry2.getKey());
                final Object propNode2 = entry2.getValue();
                if (propName2.equals("enabled")) {
                    builder.omitNorms(!nodeBooleanValue(propNode2));
                    propsIterator.remove();
                } else if (propName2.equals(Loading.KEY)) {
                    builder.normsLoading(Loading.parse(nodeStringValue(propNode2, null), null));
                    propsIterator.remove();
                }
            }
            DocumentMapperParser.checkNoRemainingFields(propName, properties, parserContext.indexVersionCreated());
            iterator.remove();
        } else if (propName.equals("omit_term_freq_and_positions")) {
            final IndexOptions op = nodeBooleanValue(propNode) ? IndexOptions.DOCS : IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
            if (indexVersionCreated.onOrAfter(Version.V_1_0_0_RC2)) {
                throw new ElasticsearchParseException("'omit_term_freq_and_positions' is not supported anymore - use ['index_options' : 'docs']  instead");
            }
            // deprecated option for BW compat
            builder.indexOptions(op);
            iterator.remove();
        } else if (propName.equals("index_options")) {
            builder.indexOptions(nodeIndexOptionValue(propNode));
            iterator.remove();
        } else if (propName.equals("include_in_all")) {
            builder.includeInAll(nodeBooleanValue(propNode));
            iterator.remove();
        } else if (propName.equals("postings_format") && indexVersionCreated.before(Version.V_2_0_0_beta1)) {
            // ignore for old indexes
            iterator.remove();
        } else if (propName.equals("doc_values_format") && indexVersionCreated.before(Version.V_2_0_0_beta1)) {
            // ignore for old indexes
            iterator.remove();
        } else if (propName.equals("similarity")) {
            builder.similarity(parserContext.similarityLookupService().similarity(propNode.toString()));
            iterator.remove();
        } else if (propName.equals("fielddata")) {
            final Settings settings = Settings.builder().put(SettingsLoader.Helper.loadNestedFromMap(nodeMapValue(propNode, "fielddata"))).build();
            builder.fieldDataSettings(settings);
            iterator.remove();
        } else if (propName.equals("copy_to")) {
            if (parserContext.isWithinMultiField()) {
                if (indexVersionCreated.after(Version.V_2_1_0) ||
                    (indexVersionCreated.after(Version.V_2_0_1) && indexVersionCreated.before(Version.V_2_1_0))) {
                    throw new MapperParsingException("copy_to in multi fields is not allowed. Found the copy_to in field [" + name + "] which is within a multi field.");
                } else {
                    ESLoggerFactory.getLogger("mapping [" + parserContext.type() + "]").warn("Found a copy_to in field [" + name + "] which is within a multi field. This feature has been removed and the copy_to will be ignored.");
                    // we still parse this, otherwise the message will only appear once and the copy_to removed. After that it will appear again. Better to have it always.
                }
            }
            parseCopyFields(propNode, builder);
            iterator.remove();
        }
    }
    if (indexVersionCreated.before(Version.V_2_2_0)) {
        // analyzer, search_analyzer, term_vectors were accepted on all fields
        // before 2.2, even though it made little sense
        parseAnalyzersAndTermVectors(builder, name, fieldNode, parserContext);
    }
}
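
A hedged sketch of how this parser is driven: the fieldNode map mirrors a mapping fragment such as { "store": true, "doc_values": false, "boost": 2.0 }. The builder and parserContext variables are assumed to be supplied by the enclosing Mapper.TypeParser, and imports are omitted as in the other examples.

// Hedged sketch: parseField consumes every attribute it recognizes and removes
// it from the map, so that leftover keys can later be rejected as unknown settings.
Map<String, Object> fieldNode = new HashMap<>();
fieldNode.put("store", true);
fieldNode.put("doc_values", false);
fieldNode.put("boost", 2.0);
TypeParsers.parseField(builder, "title", fieldNode, parserContext);
// fieldNode is now empty; the builder carries store=true, docValues=false, boost=2.0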
 
Example #19
Source File: TypeParsers.java    From Elasticsearch with Apache License 2.0
public static boolean parseMultiField(FieldMapper.Builder builder, String name, Mapper.TypeParser.ParserContext parserContext, String propName, Object propNode) {
    parserContext = parserContext.createMultiFieldContext(parserContext);
    if (propName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
        builder.multiFieldPathType(parsePathType(name, propNode.toString()));
        return true;
    } else if (propName.equals("fields")) {

        final Map<String, Object> multiFieldsPropNodes;

        if (propNode instanceof List && ((List<?>) propNode).isEmpty()) {
            multiFieldsPropNodes = Collections.emptyMap();
        } else if (propNode instanceof Map) {
            multiFieldsPropNodes = (Map<String, Object>) propNode;
        } else {
            throw new MapperParsingException("expected map for property [fields] on field [" + propNode + "] or " +
                    "[" + propName + "] but got a " + propNode.getClass());
        }

        for (Map.Entry<String, Object> multiFieldEntry : multiFieldsPropNodes.entrySet()) {
            String multiFieldName = multiFieldEntry.getKey();
            if (multiFieldName.contains(".")) {
                throw new MapperParsingException("Field name [" + multiFieldName + "] which is a multi field of [" + name + "] cannot contain '.'");
            }
            if (!(multiFieldEntry.getValue() instanceof Map)) {
                throw new MapperParsingException("illegal field [" + multiFieldName + "], only fields can be specified inside fields");
            }
            @SuppressWarnings("unchecked")
            Map<String, Object> multiFieldNodes = (Map<String, Object>) multiFieldEntry.getValue();

            String type;
            Object typeNode = multiFieldNodes.get("type");
            if (typeNode != null) {
                type = typeNode.toString();
            } else {
                throw new MapperParsingException("no type specified for property [" + multiFieldName + "]");
            }
            if (type.equals(ObjectMapper.CONTENT_TYPE) || type.equals(ObjectMapper.NESTED_CONTENT_TYPE)) {
                throw new MapperParsingException("Type [" + type + "] cannot be used in multi field");
            }

            Mapper.TypeParser typeParser = parserContext.typeParser(type);
            if (typeParser == null) {
                throw new MapperParsingException("no handler for type [" + type + "] declared on field [" + multiFieldName + "]");
            }
            builder.addMultiField(typeParser.parse(multiFieldName, multiFieldNodes, parserContext));
            multiFieldNodes.remove("type");
            DocumentMapperParser.checkNoRemainingFields(propName, multiFieldNodes, parserContext.indexVersionCreated());
        }
        return true;
    }
    return false;
}
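
A hedged sketch of the input this method handles; the mapping fragment, the field names and the surrounding builder/parserContext are illustrative assumptions rather than code from the original project.

// Hedged sketch: a multi-field declaration such as
//   "title": { "type": "string", "fields": { "raw": { "type": "string", "index": "not_analyzed" } } }
// reaches this method with propName = "fields" and propNode = the inner map.
Map<String, Object> raw = new HashMap<>();
raw.put("type", "string");
raw.put("index", "not_analyzed");
Map<String, Object> fields = new HashMap<>();
fields.put("raw", raw);
boolean handled = TypeParsers.parseMultiField(builder, "title", parserContext, "fields", fields);
// handled == true; "title.raw" is registered as an un-analyzed sub-field of "title"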
 
Example #20
Source File: TypeParsers.java    From Elasticsearch with Apache License 2.0
/**
 * Parse text field attributes. In addition to {@link #parseField common attributes}
 * this will parse analysis and term-vectors related settings.
 */
public static void parseTextField(FieldMapper.Builder builder, String name, Map<String, Object> fieldNode, Mapper.TypeParser.ParserContext parserContext) {
    parseField(builder, name, fieldNode, parserContext);
    parseAnalyzersAndTermVectors(builder, name, fieldNode, parserContext);
}
 
Example #21
Source File: FastVectorHighlighter.java    From Elasticsearch with Apache License 2.0
@Override
public boolean canHighlight(FieldMapper fieldMapper) {
    return fieldMapper.fieldType().storeTermVectors() && fieldMapper.fieldType().storeTermVectorOffsets() && fieldMapper.fieldType().storeTermVectorPositions();
}
 
Example #22
Source File: HighlightPhase.java    From Elasticsearch with Apache License 2.0
private FieldMapper getMapperForField(String fieldName, SearchContext searchContext, HitContext hitContext) {
    DocumentMapper documentMapper = searchContext.mapperService().documentMapper(hitContext.hit().type());
    // TODO: no need to lookup the doc mapper with unambiguous field names? just look at the mapper service
    return documentMapper.mappers().smartNameFieldMapper(fieldName);
}
 
Example #23
Source File: PlainHighlighter.java    From Elasticsearch with Apache License 2.0
@Override
public boolean canHighlight(FieldMapper fieldMapper) {
    return true;
}
 
Example #24
Source File: PostingsHighlighter.java    From Elasticsearch with Apache License 2.0
@Override
public boolean canHighlight(FieldMapper fieldMapper) {
    return fieldMapper.fieldType().indexOptions() == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
}
 
Example #25
Source File: SourceSimpleFragmentsBuilder.java    From Elasticsearch with Apache License 2.0
public SourceSimpleFragmentsBuilder(FieldMapper mapper, SearchContext searchContext,
                                    FetchSubPhase.HitContext hitContext, String[] preTags, String[] postTags, BoundaryScanner boundaryScanner) {
    super(mapper, preTags, postTags, boundaryScanner);
    this.searchContext = searchContext;
    this.hitContext = hitContext;
}
 
Example #26
Source File: SimpleFragmentsBuilder.java    From Elasticsearch with Apache License 2.0
public SimpleFragmentsBuilder(FieldMapper mapper,
                                    String[] preTags, String[] postTags, BoundaryScanner boundaryScanner) {
    super(preTags, postTags, boundaryScanner);
    this.mapper = mapper;
}
 
Example #27
Source File: Highlighter.java    From Elasticsearch with Apache License 2.0
boolean canHighlight(FieldMapper fieldMapper);
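
Taken together, Examples #21, #23 and #24 show how each highlighter advertises its requirements through canHighlight. The helper below is a hypothetical sketch of how a caller could use that contract to pick a working implementation; it is not code from Elasticsearch itself, and the no-argument PlainHighlighter constructor is assumed.

// Hypothetical helper: return the first highlighter whose canHighlight(...)
// accepts the field's mapper; PlainHighlighter (Example #23) accepts any field.
static Highlighter pickHighlighter(FieldMapper fieldMapper, List<Highlighter> candidates) {
    for (Highlighter candidate : candidates) {
        // e.g. FastVectorHighlighter needs stored term vectors with positions and
        // offsets (Example #21); PostingsHighlighter needs offsets in the postings
        // (Example #24)
        if (candidate.canHighlight(fieldMapper)) {
            return candidate;
        }
    }
    return new PlainHighlighter();
}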