org.elasticsearch.index.mapper.ParsedDocument Java Examples

The following examples show how to use org.elasticsearch.index.mapper.ParsedDocument. Each example is taken from an open source project; the source file and license are noted above each snippet.
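Most of the examples below follow the same basic flow: build a JSON source, wrap it in a SourceToParse, hand it to a DocumentMapper, and inspect the resulting ParsedDocument. Below is a minimal sketch of that flow, modeled on the CrateDB examples on this page; the index name, field name, and helper name are placeholders, and the exact SourceToParse constructor varies between Elasticsearch versions.

// Sketch only: parse a single-field JSON document and inspect the result.
ParsedDocument parseOneField(DocumentMapper mapper, Object value) throws IOException {
    BytesReference source = BytesReference.bytes(XContentFactory.jsonBuilder()
        .startObject()
        .field("my_field", value)
        .endObject());
    // Constructor arguments as used in the CrateDB examples below; other versions differ.
    SourceToParse sourceToParse = new SourceToParse("my_index", "1", source, XContentType.JSON);
    ParsedDocument doc = mapper.parse(sourceToParse);
    // The last entry of docs() is always the root document (see Example #12).
    assert doc.rootDoc() == doc.docs().get(doc.docs().size() - 1);
    return doc;
}
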
Example #1
Source File: PercolateContext.java    From Elasticsearch with Apache License 2.0
public void initialize(Engine.Searcher docSearcher, ParsedDocument parsedDocument) {
    this.docSearcher = docSearcher;

    IndexReader indexReader = docSearcher.reader();
    LeafReaderContext atomicReaderContext = indexReader.leaves().get(0);
    LeafSearchLookup leafLookup = lookup().getLeafSearchLookup(atomicReaderContext);
    leafLookup.setDocument(0);
    leafLookup.source().setSource(parsedDocument.source());

    Map<String, SearchHitField> fields = new HashMap<>();
    for (IndexableField field : parsedDocument.rootDoc().getFields()) {
        fields.put(field.name(), new InternalSearchHitField(field.name(), Collections.emptyList()));
    }
    hitContext().reset(
            new InternalSearchHit(0, "unknown", new Text(parsedDocument.type()), fields),
            atomicReaderContext, 0, docSearcher.searcher()
    );
}
 
Example #2
Source File: QueryTester.java    From crate with Apache License 2.0
void indexValue(String column, Object value) throws IOException {
    DocumentMapper mapper = indexEnv.mapperService().documentMapperSafe();
    InsertSourceGen sourceGen = InsertSourceGen.of(
        CoordinatorTxnCtx.systemTransactionContext(),
        sqlExecutor.functions(),
        table,
        table.concreteIndices()[0],
        GeneratedColumns.Validation.NONE,
        Collections.singletonList(table.getReference(ColumnIdent.fromPath(column)))
    );
    BytesReference source = sourceGen.generateSourceAndCheckConstraintsAsBytesReference(new Object[]{value});
    SourceToParse sourceToParse = new SourceToParse(
        table.concreteIndices()[0],
        UUIDs.randomBase64UUID(),
        source,
        XContentType.JSON
    );
    ParsedDocument parsedDocument = mapper.parse(sourceToParse);
    indexEnv.writer().addDocuments(parsedDocument.docs());
}
 
Example #3
Source File: ArrayMapperTest.java    From crate with Apache License 2.0
@Test
public void testParseDynamicNullArray() throws Exception {
    String mapping = Strings.toString(XContentFactory.jsonBuilder()
        .startObject().startObject(TYPE).startObject("properties")
        .endObject().endObject().endObject());
    DocumentMapper mapper = mapper(INDEX, mapping);

    // parse source with null array
    BytesReference bytesReference = BytesReference.bytes(XContentFactory.jsonBuilder()
        .startObject()
        .startArray("new_array_field").nullValue().endArray()
        .endObject());
    SourceToParse sourceToParse = new SourceToParse(INDEX, "abc", bytesReference, XContentType.JSON);
    ParsedDocument doc = mapper.parse(sourceToParse);
    assertThat(doc.docs().get(0).getField("new_array_field"), is(nullValue()));
    assertThat(mapper.mappers().getMapper("new_array_field"), is(nullValue()));
}
 
Example #4
Source File: ArrayMapperTest.java    From crate with Apache License 2.0
@Test
public void testParseDynamicEmptyArray() throws Exception {
    String mapping = Strings.toString(XContentFactory.jsonBuilder()
        .startObject().startObject(TYPE).startObject("properties")
        .endObject().endObject().endObject());
    DocumentMapper mapper = mapper(INDEX, mapping);

    // parse source with empty array
    BytesReference bytesReference = BytesReference.bytes(XContentFactory.jsonBuilder()
        .startObject()
        .array("new_array_field")
        .endObject());
    SourceToParse sourceToParse = new SourceToParse(INDEX, "abc", bytesReference, XContentType.JSON);
    ParsedDocument doc = mapper.parse(sourceToParse);
    assertThat(doc.docs().get(0).getField("new_array_field"), is(nullValue()));
    assertThat(mapper.mappers().getMapper("new_array_field"), is(nullValue()));
}
 
Example #5
Source File: EngineTestCase.java    From crate with Apache License 2.0
protected static ParsedDocument testParsedDocument(
    String id, String routing, ParseContext.Document document, BytesReference source, Mapping mappingUpdate,
    boolean recoverySource) {
    Field uidField = new Field("_id", Uid.encodeId(id), IdFieldMapper.Defaults.FIELD_TYPE);
    Field versionField = new NumericDocValuesField("_version", 0);
    SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
    document.add(uidField);
    document.add(versionField);
    document.add(seqID.seqNo);
    document.add(seqID.seqNoDocValue);
    document.add(seqID.primaryTerm);
    BytesRef ref = source.toBytesRef();
    if (recoverySource) {
        document.add(new StoredField(SourceFieldMapper.RECOVERY_SOURCE_NAME, ref.bytes, ref.offset, ref.length));
        document.add(new NumericDocValuesField(SourceFieldMapper.RECOVERY_SOURCE_NAME, 1));
    } else {
        document.add(new StoredField(SourceFieldMapper.NAME, ref.bytes, ref.offset, ref.length));
    }
    return new ParsedDocument(versionField, seqID, id, routing, Arrays.asList(document), source, mappingUpdate);
}
 
Example #6
Source File: RecoverySourceHandlerTests.java    From crate with Apache License 2.0
private Engine.Index getIndex(final String id) {
    final String type = "test";
    final ParseContext.Document document = new ParseContext.Document();
    document.add(new TextField("test", "test", Field.Store.YES));
    final Field idField = new Field("_id", Uid.encodeId(id), IdFieldMapper.Defaults.FIELD_TYPE);
    final Field versionField = new NumericDocValuesField("_version", Versions.MATCH_ANY);
    final SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
    document.add(idField);
    document.add(versionField);
    document.add(seqID.seqNo);
    document.add(seqID.seqNoDocValue);
    document.add(seqID.primaryTerm);
    final BytesReference source = new BytesArray(new byte[] { 1 });
    final ParsedDocument doc =
        new ParsedDocument(versionField, seqID, id, type, List.of(document), source, null);
    return new Engine.Index(
        new Term("_id", Uid.encodeId(doc.id())), doc, UNASSIGNED_SEQ_NO, 0,
        Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false, UNASSIGNED_SEQ_NO, 0);
}
 
Example #7
Source File: IndexShard.java    From crate with Apache License 2.0
private EngineConfig.TombstoneDocSupplier tombstoneDocSupplier() {
    final RootObjectMapper.Builder noopRootMapper = new RootObjectMapper.Builder("__noop");
    final DocumentMapper noopDocumentMapper = new DocumentMapper.Builder(noopRootMapper, mapperService).build(mapperService);
    return new EngineConfig.TombstoneDocSupplier() {

        @Override
        public ParsedDocument newDeleteTombstoneDoc(String id) {
            return defaultDocMapper().createDeleteTombstoneDoc(shardId.getIndexName(), id);
        }

        @Override
        public ParsedDocument newNoopTombstoneDoc(String reason) {
            return noopDocumentMapper.createNoopTombstoneDoc(shardId.getIndexName(), reason);
        }
    };
}
 
Example #8
Source File: IndexShard.java    From crate with Apache License 2.0
public static Engine.Index prepareIndex(DocumentMapper docMapper,
                                        SourceToParse source,
                                        long seqNo,
                                        long primaryTerm,
                                        long version,
                                        VersionType versionType,
                                        Engine.Operation.Origin origin,
                                        long autoGeneratedIdTimestamp,
                                        boolean isRetry,
                                        long ifSeqNo,
                                        long ifPrimaryTerm) {
    long startTime = System.nanoTime();
    ParsedDocument doc = docMapper.parse(source);
    Term uid = new Term(IdFieldMapper.NAME, Uid.encodeId(doc.id()));
    return new Engine.Index(uid, doc, seqNo, primaryTerm, version, versionType, origin, startTime, autoGeneratedIdTimestamp, isRetry,
                            ifSeqNo, ifPrimaryTerm);
}
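A hedged sketch of calling this helper for a new document on a primary shard follows; the constants mirror Example #6 and Example #16 and are assumptions rather than values from the original call sites, and docMapper, sourceToParse, and primaryTerm are placeholders.

// Sketch only: index a freshly parsed document on the primary, letting the
// engine assign the sequence number and using internal versioning.
Engine.Index op = IndexShard.prepareIndex(
    docMapper,                                // DocumentMapper for the target index
    sourceToParse,                            // JSON source wrapped in SourceToParse
    SequenceNumbers.UNASSIGNED_SEQ_NO,        // seqNo is assigned by the engine
    primaryTerm,                              // current primary term
    Versions.MATCH_ANY,                       // no explicit version check
    VersionType.INTERNAL,
    Engine.Operation.Origin.PRIMARY,
    Translog.UNSET_AUTO_GENERATED_TIMESTAMP,  // autoGeneratedIdTimestamp
    false,                                    // isRetry
    SequenceNumbers.UNASSIGNED_SEQ_NO,        // ifSeqNo: no compare-and-set
    0);                                       // ifPrimaryTerm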
 
Example #9
Source File: SingleDocumentPercolatorIndex.java    From Elasticsearch with Apache License 2.0
@Override
public void prepare(PercolateContext context, ParsedDocument parsedDocument) {
    MemoryIndex memoryIndex = cache.get();
    for (IndexableField field : parsedDocument.rootDoc().getFields()) {
        if (field.fieldType().indexOptions() == IndexOptions.NONE && field.name().equals(UidFieldMapper.NAME)) {
            continue;
        }
        try {
            Analyzer analyzer = context.mapperService().documentMapper(parsedDocument.type()).mappers().indexAnalyzer();
            // TODO: instead of passing null here, we can have a CTL<Map<String,TokenStream>> and pass previous,
            // like the indexer does
            try (TokenStream tokenStream = field.tokenStream(analyzer, null)) {
                if (tokenStream != null) {
                    memoryIndex.addField(field.name(), tokenStream, field.boost());
                }
            }
        } catch (Exception e) {
            throw new ElasticsearchException("Failed to create token stream for [" + field.name() + "]", e);
        }
    }
    context.initialize(new DocEngineSearcher(memoryIndex), parsedDocument);
}
 
Example #10
Source File: PercolatorService.java    From Elasticsearch with Apache License 2.0
private ParsedDocument parseFetchedDoc(PercolateContext context, BytesReference fetchedDoc, IndexService documentIndexService, String index, String type) {
    ParsedDocument doc = null;
    XContentParser parser = null;
    try {
        parser = XContentFactory.xContent(fetchedDoc).createParser(fetchedDoc);
        MapperService mapperService = documentIndexService.mapperService();
        DocumentMapperForType docMapper = mapperService.documentMapperWithAutoCreate(type);
        doc = docMapper.getDocumentMapper().parse(source(parser).index(index).type(type).flyweight(true));

        if (context.highlight() != null) {
            doc.setSource(fetchedDoc);
        }
    } catch (Throwable e) {
        throw new ElasticsearchParseException("failed to parse request", e);
    } finally {
        if (parser != null) {
            parser.close();
        }
    }

    if (doc == null) {
        throw new ElasticsearchParseException("No doc to percolate in the request");
    }

    return doc;
}
 
Example #11
Source File: Engine.java    From Elasticsearch with Apache License 2.0
public IndexingOperation(Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime, boolean canHaveDuplicates) {
    this.uid = uid;
    this.doc = doc;
    this.version = version;
    this.versionType = versionType;
    this.origin = origin;
    this.startTime = startTime;
    this.canHaveDuplicates = canHaveDuplicates;
    this.reindex = false;
}
 
Example #12
Source File: MultiDocumentPercolatorIndex.java    From Elasticsearch with Apache License 2.0
@Override
public void prepare(PercolateContext context, ParsedDocument parsedDocument) {
    IndexReader[] memoryIndices = new IndexReader[parsedDocument.docs().size()];
    List<ParseContext.Document> docs = parsedDocument.docs();
    int rootDocIndex = docs.size() - 1;
    assert rootDocIndex > 0;
    MemoryIndex rootDocMemoryIndex = null;
    for (int i = 0; i < docs.size(); i++) {
        ParseContext.Document d = docs.get(i);
        MemoryIndex memoryIndex;
        if (rootDocIndex == i) {
            // the last doc is always the rootDoc; since that is usually the biggest document it makes sense
            // to reuse the cached MemoryIndex for it
            memoryIndex = rootDocMemoryIndex = cache.get();
        } else {
            memoryIndex = new MemoryIndex(true);
        }
        Analyzer analyzer = context.mapperService().documentMapper(parsedDocument.type()).mappers().indexAnalyzer();
        memoryIndices[i] = indexDoc(d, analyzer, memoryIndex).createSearcher().getIndexReader();
    }
    try {
        MultiReader mReader = new MultiReader(memoryIndices, true);
        LeafReader slowReader = SlowCompositeReaderWrapper.wrap(mReader);
        final IndexSearcher slowSearcher = new IndexSearcher(slowReader);
        slowSearcher.setQueryCache(null);
        DocSearcher docSearcher = new DocSearcher(slowSearcher, rootDocMemoryIndex);
        context.initialize(docSearcher, parsedDocument);
    } catch (IOException e) {
        throw new ElasticsearchException("Failed to create index for percolator with nested document ", e);
    }
}
 
Example #13
Source File: ArrayMapperTest.java    From crate with Apache License 2.0
@Test
public void testParseNullOnObjectArray() throws Exception {
    // @formatter:off
    String mapping = Strings.toString(XContentFactory.jsonBuilder()
        .startObject()
            .startObject(TYPE)
                .startObject("properties")
                    .startObject("array_field")
                        .field("type", ArrayMapper.CONTENT_TYPE)
                        .startObject(ArrayMapper.INNER_TYPE)
                            .field("type", "object")
                            .startObject("properties")
                            .endObject()
                        .endObject()
                    .endObject()
                .endObject()
            .endObject()
        .endObject());
    // @formatter:on
    DocumentMapper mapper = mapper(INDEX, mapping);
    BytesReference bytesReference = BytesReference.bytes(XContentFactory.jsonBuilder()
        .startObject()
        .nullField("array_field")
        .endObject());
    SourceToParse sourceToParse = new SourceToParse(INDEX, "abc", bytesReference, XContentType.JSON);
    ParsedDocument parsedDoc = mapper.parse(sourceToParse);
    assertThat(parsedDoc.docs().size(), is(1));
    assertThat(parsedDoc.docs().get(0).getField("array_field"), is(nullValue()));
}
 
Example #14
Source File: ArrayMapperTest.java    From crate with Apache License 2.0
@Test
public void testParseNull() throws Exception {
    // @formatter:off
    String mapping = Strings.toString(XContentFactory.jsonBuilder()
        .startObject()
            .startObject(TYPE)
                .startObject("properties")
                    .startObject("array_field")
                        .field("type", ArrayMapper.CONTENT_TYPE)
                        .startObject(ArrayMapper.INNER_TYPE)
                            .field("type", "double")
                        .endObject()
                    .endObject()
                .endObject()
            .endObject()
        .endObject());
    // @formatter:on
    DocumentMapper mapper = mapper(INDEX, mapping);
    BytesReference bytesReference = BytesReference.bytes(XContentFactory.jsonBuilder()
        .startObject()
        .nullField("array_field")
        .endObject());
    SourceToParse sourceToParse = new SourceToParse(INDEX, "abc", bytesReference, XContentType.JSON);
    ParsedDocument parsedDoc = mapper.parse(sourceToParse);
    assertThat(parsedDoc.docs().size(), is(1));
    assertThat(parsedDoc.docs().get(0).getField("array_field"), is(nullValue()));
}
 
Example #15
Source File: EngineTestCase.java    From crate with Apache License 2.0
public List<Engine.Operation> generateHistoryOnReplica(int numOps, boolean allowGapInSeqNo, boolean allowDuplicate) throws Exception {
    long seqNo = 0;
    final int maxIdValue = randomInt(numOps * 2);
    final List<Engine.Operation> operations = new ArrayList<>(numOps);
    for (int i = 0; i < numOps; i++) {
        final String id = Integer.toString(randomInt(maxIdValue));
        final Engine.Operation.TYPE opType = randomFrom(Engine.Operation.TYPE.values());
        final long startTime = threadPool.relativeTimeInMillis();
        final int copies = allowDuplicate && rarely() ? between(2, 4) : 1;
        for (int copy = 0; copy < copies; copy++) {
            final ParsedDocument doc = createParsedDoc(id, null);
            switch (opType) {
                case INDEX:
                    operations.add(new Engine.Index(EngineTestCase.newUid(doc), doc, seqNo, primaryTerm.get(),
                                                    i, null, randomFrom(REPLICA, PEER_RECOVERY), startTime, -1, true, SequenceNumbers.UNASSIGNED_SEQ_NO, 0));
                    break;
                case DELETE:
                    operations.add(new Engine.Delete("default", doc.id(), EngineTestCase.newUid(doc), seqNo, primaryTerm.get(),
                                                     i, null, randomFrom(REPLICA, PEER_RECOVERY), startTime, SequenceNumbers.UNASSIGNED_SEQ_NO, 0));
                    break;
                case NO_OP:
                    operations.add(new Engine.NoOp(seqNo, primaryTerm.get(),
                                                   randomFrom(REPLICA, PEER_RECOVERY), startTime, "test-" + i));
                    break;
                default:
                    throw new IllegalStateException("Unknown operation type [" + opType + "]");
            }
        }
        seqNo++;
        if (allowGapInSeqNo && rarely()) {
            seqNo++;
        }
    }
    Randomness.shuffle(operations);
    return operations;
}
 
Example #16
Source File: EngineTestCase.java    From crate with Apache License 2.0
protected Engine.Index replicaIndexForDoc(ParsedDocument doc,
                                          long version,
                                          long seqNo,
                                          boolean isRetry) {
    return new Engine.Index(
        newUid(doc), doc, seqNo, primaryTerm.get(), version, null,
        Engine.Operation.Origin.REPLICA, System.nanoTime(), Translog.UNSET_AUTO_GENERATED_TIMESTAMP,
        isRetry, SequenceNumbers.UNASSIGNED_SEQ_NO, 0);
}
 
Example #17
Source File: Engine.java    From crate with Apache License 2.0
public Index(Term uid, ParsedDocument doc, long seqNo, long primaryTerm, long version, VersionType versionType, Origin origin,
             long startTime, long autoGeneratedIdTimestamp, boolean isRetry, long ifSeqNo, long ifPrimaryTerm) {
    super(uid, seqNo, primaryTerm, version, versionType, origin, startTime);
    assert (origin == Origin.PRIMARY) == (versionType != null) : "invalid version_type=" + versionType + " for origin=" + origin;
    assert ifPrimaryTerm >= 0 : "ifPrimaryTerm [" + ifPrimaryTerm + "] must be non negative";
    assert ifSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO || ifSeqNo >= 0 :
        "ifSeqNo [" + ifSeqNo + "] must be non negative or unset";
    assert (origin == Origin.PRIMARY) || (ifSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO && ifPrimaryTerm == 0) :
        "cas operations are only allowed if origin is primary. get [" + origin + "]";
    this.doc = doc;
    this.isRetry = isRetry;
    this.autoGeneratedIdTimestamp = autoGeneratedIdTimestamp;
    this.ifSeqNo = ifSeqNo;
    this.ifPrimaryTerm = ifPrimaryTerm;
}
 
Example #18
Source File: LangdetectMappingTests.java    From elasticsearch-plugin-bundle with GNU Affero General Public License v3.0
public void testToFields() throws Exception {
    IndexService indexService = createIndex("some_index", Settings.EMPTY,
            "someType", getMapping("mapping-to-fields.json"));
    DocumentMapper docMapper = indexService.mapperService().documentMapper("someType");
    String sampleText = copyToStringFromClasspath("english.txt");
    BytesReference json = BytesReference.bytes(XContentFactory.jsonBuilder()
            .startObject().field("someField", sampleText).endObject());
    SourceToParse sourceToParse = SourceToParse.source("some_index", "someType", "1", json, XContentType.JSON);
    ParsedDocument doc = docMapper.parse(sourceToParse);
    assertEquals(1, doc.rootDoc().getFields("someField").length);
    assertEquals("en", doc.rootDoc().getFields("someField")[0].stringValue());
    assertEquals(1, doc.rootDoc().getFields("english_field").length);
    assertEquals("This is a very small example of a text", doc.rootDoc().getFields("english_field")[0].stringValue());
}
 
Example #19
Source File: IndexShard.java    From Elasticsearch with Apache License 2.0
static Engine.Index prepareIndex(DocumentMapperForType docMapper, SourceToParse source, long version, VersionType versionType,
        Engine.Operation.Origin origin, boolean canHaveDuplicates) {
    long startTime = System.nanoTime();
    ParsedDocument doc = docMapper.getDocumentMapper().parse(source);
    if (docMapper.getMapping() != null) {
        doc.addDynamicMappingsUpdate(docMapper.getMapping());
    }
    return new Engine.Index(docMapper.getDocumentMapper().uidMapper().term(doc.uid().stringValue()), doc, version, versionType,
            origin, startTime, canHaveDuplicates);
}
 
Example #20
Source File: LangdetectMappingTests.java    From elasticsearch-plugin-bundle with GNU Affero General Public License v3.0
public void testSimpleMapping() throws Exception {
    IndexService indexService = createIndex("some_index", Settings.EMPTY,
            "someType", getMapping("simple-mapping.json"));
    DocumentMapper docMapper = indexService.mapperService().documentMapper("someType");
    String sampleText = copyToStringFromClasspath("english.txt");
    BytesReference json = BytesReference.bytes(XContentFactory.jsonBuilder()
            .startObject().field("someField", sampleText).endObject());
    SourceToParse sourceToParse = SourceToParse.source("some_index", "someType", "1", json, XContentType.JSON);
    ParsedDocument doc = docMapper.parse(sourceToParse);
    assertEquals(1, doc.rootDoc().getFields("someField").length);
    assertEquals("en", doc.rootDoc().getFields("someField")[0].stringValue());
}
 
Example #21
Source File: IndexShard.java    From Elasticsearch with Apache License 2.0
static Engine.Create prepareCreate(DocumentMapperForType docMapper, SourceToParse source, long version, VersionType versionType,
                                   Engine.Operation.Origin origin, boolean canHaveDuplicates, boolean autoGeneratedId) {
    long startTime = System.nanoTime();
    ParsedDocument doc = docMapper.getDocumentMapper().parse(source);
    if (docMapper.getMapping() != null) {
        doc.addDynamicMappingsUpdate(docMapper.getMapping());
    }
    return new Engine.Create(docMapper.getDocumentMapper().uidMapper().term(doc.uid().stringValue()), doc, version, versionType,
            origin, startTime, canHaveDuplicates, autoGeneratedId);
}
 
Example #22
Source File: IndexingSlowLog.java    From Elasticsearch with Apache License 2.0
private void postIndexing(ParsedDocument doc, long tookInNanos) {
    if (indexWarnThreshold >= 0 && tookInNanos > indexWarnThreshold) {
        indexLogger.warn("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat, maxSourceCharsToLog));
    } else if (indexInfoThreshold >= 0 && tookInNanos > indexInfoThreshold) {
        indexLogger.info("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat, maxSourceCharsToLog));
    } else if (indexDebugThreshold >= 0 && tookInNanos > indexDebugThreshold) {
        indexLogger.debug("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat, maxSourceCharsToLog));
    } else if (indexTraceThreshold >= 0 && tookInNanos > indexTraceThreshold) {
        indexLogger.trace("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat, maxSourceCharsToLog));
    }
}
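The thresholds, reformat flag, and maxSourceCharsToLog consulted here correspond to the index.indexing.slowlog.* index settings in upstream Elasticsearch; the sketch below shows one way to set them when building index settings. The setting keys are taken from stock Elasticsearch and are an assumption here, since this example does not show how the class is wired to them.

// Sketch only: configure the indexing slow log thresholds that drive the
// warn/info/debug/trace cascade above (keys from upstream Elasticsearch).
Settings slowlogSettings = Settings.builder()
    .put("index.indexing.slowlog.threshold.index.warn", "10s")
    .put("index.indexing.slowlog.threshold.index.info", "5s")
    .put("index.indexing.slowlog.threshold.index.debug", "2s")
    .put("index.indexing.slowlog.threshold.index.trace", "500ms")
    .put("index.indexing.slowlog.reformat", true)   // corresponds to the reformat flag above
    .put("index.indexing.slowlog.source", 1000)     // corresponds to maxSourceCharsToLog
    .build();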
 
Example #23
Source File: Engine.java    From Elasticsearch with Apache License 2.0
public Create(Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime) {
    this(uid, doc, version, versionType, origin, startTime, true, false);
}
 
Example #24
Source File: Engine.java    From Elasticsearch with Apache License 2.0
public Create(Term uid, ParsedDocument doc) {
    super(uid, doc);
    autoGeneratedId = false;
}
 
Example #25
Source File: Engine.java    From Elasticsearch with Apache License 2.0
public Index(Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime, boolean canHaveDuplicates) {
    super(uid, doc, version, versionType, origin, startTime, canHaveDuplicates);
}
 
Example #26
Source File: Engine.java    From Elasticsearch with Apache License 2.0
public Index(Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime) {
    super(uid, doc, version, versionType, origin, startTime, true);
}
 
Example #27
Source File: Engine.java    From Elasticsearch with Apache License 2.0
public Index(Term uid, ParsedDocument doc) {
    super(uid, doc);
}
 
Example #28
Source File: ArrayMapperTest.java    From crate with Apache License 2.0
@Test
public void testObjectArrayMappingNewColumn() throws Exception {
    // @formatter:off
    String mapping = Strings.toString(XContentFactory.jsonBuilder()
        .startObject()
            .startObject(TYPE)
                .startObject("properties")
                    .startObject("array_field")
                        .field("type", ArrayMapper.CONTENT_TYPE)
                        .startObject(ArrayMapper.INNER_TYPE)
                            .field("type", "object")
                            .field("dynamic", true)
                            .startObject("properties")
                                .startObject("s")
                                    .field("type", "keyword")
                                .endObject()
                            .endObject()
                        .endObject()
                    .endObject()
                .endObject()
            .endObject()
        .endObject());
    DocumentMapper mapper = mapper(INDEX, mapping);
    // child object mapper
    assertThat(mapper.objectMappers().get("array_field"), is(instanceOf(ObjectArrayMapper.class)));
    BytesReference bytesReference = BytesReference.bytes(XContentFactory.jsonBuilder()
        .startObject()
        .startArray("array_field")
        .startObject()
        .field("s", "a")
        .field("new", true)
        .endObject()
        .endArray()
        .endObject());
    SourceToParse sourceToParse = new SourceToParse(INDEX, "abc", bytesReference, XContentType.JSON);
    ParsedDocument doc = mapper.parse(sourceToParse);

    Mapping mappingUpdate = doc.dynamicMappingsUpdate();
    assertThat(mappingUpdate, notNullValue());
    mapper = mapper.merge(mappingUpdate, true);
    assertThat(doc.docs().size(), is(1));
    String[] values = doc.docs().get(0).getValues("array_field.new");
    assertThat(values, arrayContainingInAnyOrder(is("T"), is("1")));
    String mappingSourceString = new CompressedXContent(mapper, XContentType.JSON, ToXContent.EMPTY_PARAMS).string();
    assertThat(
        mappingSourceString,
        is("{\"default\":{" +
           "\"properties\":{" +
           "\"array_field\":{" +
           "\"type\":\"array\"," +
           "\"inner\":{" +
           "\"dynamic\":\"true\"," +
           "\"properties\":{" +
           "\"new\":{\"type\":\"boolean\"}," +
           "\"s\":{" +
           "\"type\":\"keyword\"" +
           "}" +
           "}" +
           "}" +
           "}" +
           "}}}"));
}
 
Example #29
Source File: ArrayMapperTest.java    From crate with Apache License 2.0
@Test
public void testObjectArrayMapping() throws Exception {
    // @formatter:off
    String mapping = Strings.toString(XContentFactory.jsonBuilder()
        .startObject()
            .startObject(TYPE)
                .startObject("properties")
                    .startObject("array_field")
                        .field("type", ArrayMapper.CONTENT_TYPE)
                        .startObject(ArrayMapper.INNER_TYPE)
                            .field("type", "object")
                            .field("dynamic", true)
                            .startObject("properties")
                                .startObject("s")
                                    .field("type", "keyword")
                                .endObject()
                            .endObject()
                        .endObject()
                    .endObject()
                .endObject()
            .endObject()
        .endObject());
    DocumentMapper mapper = mapper(INDEX, mapping);
    // child object mapper
    assertThat(mapper.objectMappers().get("array_field"), is(instanceOf(ObjectArrayMapper.class)));
    BytesReference bytesReference = BytesReference.bytes(XContentFactory.jsonBuilder()
        .startObject()
        .startArray("array_field")
        .startObject()
        .field("s", "a")
        .endObject()
        .startObject()
        .field("s", "b")
        .endObject()
        .startObject()
        .field("s", "c")
        .endObject()
        .endArray()
        .endObject());
    SourceToParse sourceToParse = new SourceToParse(INDEX, "abc", bytesReference, XContentType.JSON);
    ParsedDocument doc = mapper.parse(sourceToParse);
    // @formatter:on
    assertThat(doc.dynamicMappingsUpdate(), nullValue());
    assertThat(doc.docs().size(), is(1));
    assertThat(
        uniqueValuesFromFields(doc.docs().get(0), "array_field.s"),
        containsInAnyOrder("a", "b", "c"));
    assertThat(mapper.mappers().getMapper("array_field.s"), instanceOf(KeywordFieldMapper.class));
    assertThat(
        mapper.mappingSource().string(),
        is("{\"default\":{" +
           "\"properties\":{" +
           "\"array_field\":{" +
           "\"type\":\"array\"," +
           "\"inner\":{" +
           "\"dynamic\":\"true\"," +
           "\"properties\":{" +
           "\"s\":{" +
           "\"type\":\"keyword\"" +
           "}" +
           "}" +
           "}" +
           "}" +
           "}}}"));
}
 
Example #30
Source File: ArrayMapperTest.java    From crate with Apache License 2.0
@Test
public void testSimpleArrayMapping() throws Exception {
    // @formatter:off
    String mapping = Strings.toString(XContentFactory.jsonBuilder()
        .startObject()
            .startObject(TYPE)
                .startObject("properties")
                    .startObject("array_field")
                        .field("type", ArrayMapper.CONTENT_TYPE)
                        .startObject(ArrayMapper.INNER_TYPE)
                            .field("type", "keyword")
                        .endObject()
                    .endObject()
                .endObject()
            .endObject()
        .endObject());
    // @formatter:on
    DocumentMapper mapper = mapper(INDEX, mapping);

    assertThat(mapper.mappers().getMapper("array_field"), is(instanceOf(ArrayMapper.class)));

    BytesReference bytesReference = BytesReference.bytes(JsonXContent.contentBuilder()
        .startObject()
        .array("array_field", "a", "b", "c")
        .endObject());
    SourceToParse sourceToParse = new SourceToParse(INDEX, "abc", bytesReference, XContentType.JSON);
    ParsedDocument doc = mapper.parse(sourceToParse);
    assertThat(doc.dynamicMappingsUpdate() == null, is(true));
    assertThat(doc.docs().size(), is(1));

    ParseContext.Document fields = doc.docs().get(0);
    Set<String> values = uniqueValuesFromFields(fields, "array_field");
    assertThat(values, Matchers.containsInAnyOrder("a", "b", "c"));
    assertThat(
        mapper.mappingSource().string(),
        is("{\"default\":{" +
           "\"properties\":{" +
           "\"array_field\":{" +
           "\"type\":\"array\"," +
           "\"inner\":{" +
           "\"type\":\"keyword\"" +
           "}" +
           "}" +
           "}" +
           "}}"));
}