
Merge pull request #17613 from jimferenczi/all_field

Simplify AllEntries, AllField and AllFieldMapper
Jim Ferenczi · 9 years ago
commit 2713a08fb3

+ 0 - 3
buildSrc/src/main/resources/checkstyle_suppressions.xml

@@ -343,7 +343,6 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]io[/\\]Channels.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]joda[/\\]Joda.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]Lucene.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]all[/\\]AllTermQuery.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]index[/\\]ElasticsearchDirectoryReader.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]index[/\\]FilterableTermsEnum.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]index[/\\]FreqTermsEnum.java" checks="LineLength" />
@@ -485,7 +484,6 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]geo[/\\]GeoPointFieldMapper.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]geo[/\\]GeoPointFieldMapperLegacy.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]geo[/\\]GeoShapeFieldMapper.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]internal[/\\]AllFieldMapper.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]internal[/\\]FieldNamesFieldMapper.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]internal[/\\]IdFieldMapper.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]internal[/\\]IndexFieldMapper.java" checks="LineLength" />
@@ -1068,7 +1066,6 @@
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]DynamicMappingTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]FieldTypeTestCase.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]MapperServiceTests.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]all[/\\]SimpleAllMapperTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]binary[/\\]BinaryMappingTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]boost[/\\]CustomBoostMappingTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]boost[/\\]FieldLevelBoostTests.java" checks="LineLength" />

+ 7 - 178
core/src/main/java/org/elasticsearch/common/lucene/all/AllEntries.java

@@ -19,39 +19,24 @@
 
 package org.elasticsearch.common.lucene.all;
 
-import org.elasticsearch.common.io.FastCharArrayWriter;
-import org.elasticsearch.common.io.FastStringReader;
-
-import java.io.IOException;
-import java.io.Reader;
 import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
-import java.util.Set;
 
 /**
  *
  */
-public class AllEntries extends Reader {
-
+public class AllEntries {
     public static class Entry {
         private final String name;
-        private final FastStringReader reader;
-        private final int startOffset;
+        private final String value;
         private final float boost;
 
-        public Entry(String name, FastStringReader reader, int startOffset, float boost) {
+        public Entry(String name, String value, float boost) {
             this.name = name;
-            this.reader = reader;
-            this.startOffset = startOffset;
+            this.value = value;
             this.boost = boost;
         }
 
-        public int startOffset() {
-            return startOffset;
-        }
-
         public String name() {
             return this.name;
         }
@@ -60,179 +45,23 @@ public class AllEntries extends Reader {
             return this.boost;
         }
 
-        public FastStringReader reader() {
-            return this.reader;
+        public String value() {
+            return this.value;
         }
     }
 
     private final List<Entry> entries = new ArrayList<>();
 
-    private Entry current;
-
-    private Iterator<Entry> it;
-
-    private boolean itsSeparatorTime = false;
-
-    private boolean customBoost = false;
-
     public void addText(String name, String text, float boost) {
-        if (boost != 1.0f) {
-            customBoost = true;
-        }
-        final int lastStartOffset;
-        if (entries.isEmpty()) {
-            lastStartOffset = -1;
-        } else {
-            final Entry last = entries.get(entries.size() - 1);
-            lastStartOffset = last.startOffset() + last.reader().length();
-        }
-        final int startOffset = lastStartOffset + 1; // +1 because we insert a space between tokens
-        Entry entry = new Entry(name, new FastStringReader(text), startOffset, boost);
+        Entry entry = new Entry(name, text, boost);
         entries.add(entry);
     }
 
-    public boolean customBoost() {
-        return customBoost;
-    }
-
     public void clear() {
         this.entries.clear();
-        this.current = null;
-        this.it = null;
-        itsSeparatorTime = false;
-    }
-
-    @Override
-    public void reset() {
-        try {
-            for (Entry entry : entries) {
-                entry.reader().reset();
-            }
-        } catch (IOException e) {
-            throw new IllegalStateException("should not happen");
-        }
-        it = entries.iterator();
-        if (it.hasNext()) {
-            current = it.next();
-            itsSeparatorTime = true;
-        }
-    }
-
-
-    public String buildText() {
-        reset();
-        FastCharArrayWriter writer = new FastCharArrayWriter();
-        for (Entry entry : entries) {
-            writer.append(entry.reader());
-            writer.append(' ');
-        }
-        reset();
-        return writer.toString();
     }
 
     public List<Entry> entries() {
         return this.entries;
     }
-
-    public Set<String> fields() {
-        Set<String> fields = new HashSet<>();
-        for (Entry entry : entries) {
-            fields.add(entry.name());
-        }
-        return fields;
-    }
-
-    // compute the boost for a token with the given startOffset
-    public float boost(int startOffset) {
-        if (!entries.isEmpty()) {
-            int lo = 0, hi = entries.size() - 1;
-            while (lo <= hi) {
-                final int mid = (lo + hi) >>> 1;
-                final int midOffset = entries.get(mid).startOffset();
-                if (startOffset < midOffset) {
-                    hi = mid - 1;
-                } else {
-                    lo = mid + 1;
-                }
-            }
-            final int index = Math.max(0, hi); // protection against broken token streams
-            assert entries.get(index).startOffset() <= startOffset;
-            assert index == entries.size() - 1 || entries.get(index + 1).startOffset() > startOffset;
-            return entries.get(index).boost();
-        }
-        return 1.0f;
-    }
-
-    @Override
-    public int read(char[] cbuf, int off, int len) throws IOException {
-        if (current == null) {
-            return -1;
-        }
-        if (customBoost) {
-            int result = current.reader().read(cbuf, off, len);
-            if (result == -1) {
-                if (itsSeparatorTime) {
-                    itsSeparatorTime = false;
-                    cbuf[off] = ' ';
-                    return 1;
-                }
-                itsSeparatorTime = true;
-                // close(); No need to close, we work on in mem readers
-                if (it.hasNext()) {
-                    current = it.next();
-                } else {
-                    current = null;
-                }
-                return read(cbuf, off, len);
-            }
-            return result;
-        } else {
-            int read = 0;
-            while (len > 0) {
-                int result = current.reader().read(cbuf, off, len);
-                if (result == -1) {
-                    if (it.hasNext()) {
-                        current = it.next();
-                    } else {
-                        current = null;
-                        if (read == 0) {
-                            return -1;
-                        }
-                        return read;
-                    }
-                    cbuf[off++] = ' ';
-                    read++;
-                    len--;
-                } else {
-                    read += result;
-                    off += result;
-                    len -= result;
-                }
-            }
-            return read;
-        }
-    }
-
-    @Override
-    public void close() {
-        if (current != null) {
-            // no need to close, these are readers on strings
-            current = null;
-        }
-    }
-
-
-    @Override
-    public boolean ready() throws IOException {
-        return (current != null) && current.reader().ready();
-    }
-
-    @Override
-    public String toString() {
-        StringBuilder sb = new StringBuilder();
-        for (Entry entry : entries) {
-            sb.append(entry.name()).append(',');
-        }
-        return sb.toString();
-    }
 }
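
After this change AllEntries is just a list of (field name, value, boost) entries; the Reader plumbing, offset bookkeeping, and per-offset boost lookup are gone. A minimal sketch (not part of the commit; the field names and values are illustrative) of the surviving API:

    import org.elasticsearch.common.lucene.all.AllEntries;

    AllEntries entries = new AllEntries();
    entries.addText("name.last", "banon", 2.0f);
    entries.addText("simple1", "1", 1.0f);
    for (AllEntries.Entry entry : entries.entries()) {
        // each entry later becomes its own AllField instance in
        // AllFieldMapper instead of being concatenated into one stream
        System.out.println(entry.name() + " -> " + entry.value() + " ^" + entry.boost());
    }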

+ 13 - 47
core/src/main/java/org/elasticsearch/common/lucene/all/AllField.java

@@ -24,60 +24,26 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.IndexOptions;
-import org.elasticsearch.ElasticsearchException;
 
-import java.io.IOException;
-import java.io.Reader;
-
-/**
- *
- */
 public class AllField extends Field {
+    private final float boost;
 
-    private final AllEntries allEntries;
-
-    private final Analyzer analyzer;
-
-    public AllField(String name, AllEntries allEntries, Analyzer analyzer, FieldType fieldType) {
-        super(name, fieldType);
-        this.allEntries = allEntries;
-        this.analyzer = analyzer;
-    }
-
-    @Override
-    public String stringValue() {
-        if (fieldType().stored()) {
-            return allEntries.buildText();
-        }
-        return null;
-    }
-
-    @Override
-    public Reader readerValue() {
-        return null;
-    }
-
-    /** Returns the {@link AllEntries} containing the original text fields for the document. */
-    public AllEntries getAllEntries() {
-        return allEntries;
+    public AllField(String name, String value, float boost, FieldType fieldType) {
+        super(name, value, fieldType);
+        this.boost = boost;
     }
 
     @Override
     public TokenStream tokenStream(Analyzer analyzer, TokenStream previous) {
-        try {
-            allEntries.reset(); // reset the all entries, just in case it was read already
-            if (allEntries.customBoost() && fieldType().indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0) {
-                // TODO: we should be able to reuse "previous" if its instanceof AllTokenStream?
-                // but we need to be careful this optimization is safe (and tested)...
-                
-                // AllTokenStream maps boost to 4-byte payloads, so we only need to use it any field had non-default (!= 1.0f) boost and if
-                // positions are indexed:
-                return AllTokenStream.allTokenStream(name, allEntries, analyzer);
-            } else {
-                return analyzer.tokenStream(name, allEntries);
-            }
-        } catch (IOException e) {
-            throw new ElasticsearchException("Failed to create token stream");
+        TokenStream ts = analyzer.tokenStream(name(), stringValue());
+        if (boost != 1.0f && fieldType().indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0) {
+            // TODO: we should be able to reuse "previous" if it's an instanceof AllTokenStream?
+            // but we need to be careful this optimization is safe (and tested)...
+
+            // AllTokenStream maps boost to 4-byte payloads, so we only need to use it if any field had a non-default (!= 1.0f) boost and if
+            // positions are indexed:
+            return new AllTokenStream(ts, boost);
         }
+        return ts;
     }
 }
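
AllField now extends Field with a plain string value, so stringValue() works out of the box and stored fields need no special casing. The boost only matters at tokenization time. A sketch (not part of the commit; the FieldType mirrors the one built by getAllFieldType() in SimpleAllTests below) of the two paths taken by tokenStream():

    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.FieldType;
    import org.apache.lucene.index.IndexOptions;
    import org.elasticsearch.common.lucene.all.AllField;

    FieldType ft = new FieldType();
    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
    ft.setTokenized(true);
    ft.freeze();

    // boost != 1.0f and positions are indexed: tokenStream() wraps the
    // analyzer's stream in an AllTokenStream that encodes the boost as a payload
    Field boosted = new AllField("_all", "something", 2.0f, ft);

    // boost == 1.0f: the analyzer's token stream is returned unchanged
    Field plain = new AllField("_all", "else", 1.0f, ft);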

+ 4 - 3
core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java

@@ -43,7 +43,6 @@ import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.search.similarities.Similarity.SimWeight;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.SmallFloat;
-import org.apache.lucene.util.ToStringUtils;
 
 import java.io.IOException;
 import java.util.Set;
@@ -129,7 +128,8 @@ public final class AllTermQuery extends Query {
                         SimScorer docScorer = similarity.simScorer(stats, context);
                         Explanation freqExplanation = Explanation.match(freq, "termFreq=" + freq);
                         Explanation termScoreExplanation = docScorer.explain(doc, freqExplanation);
-                        Explanation payloadBoostExplanation = Explanation.match(scorer.payloadBoost(), "payloadBoost=" + scorer.payloadBoost());
+                        Explanation payloadBoostExplanation =
+                            Explanation.match(scorer.payloadBoost(), "payloadBoost=" + scorer.payloadBoost());
                         return Explanation.match(
                                 score,
                                 "weight(" + getQuery() + " in " + doc + ") ["
@@ -193,7 +193,8 @@ public final class AllTermQuery extends Query {
                         // TODO: for bw compat only, remove this in 6.0
                         boost = PayloadHelper.decodeFloat(payload.bytes, payload.offset);
                     } else {
-                        throw new IllegalStateException("Payloads are expected to have a length of 1 or 4 but got: " + payload);
+                        throw new IllegalStateException("Payloads are expected to have a length of 1 or 4 but got: "
+                            + payload);
                     }
                     payloadBoost += boost;
                 }

+ 6 - 25
core/src/main/java/org/elasticsearch/common/lucene/all/AllTokenStream.java

@@ -33,27 +33,19 @@ import java.io.IOException;
  *
  */
 public final class AllTokenStream extends TokenFilter {
-
-    public static TokenStream allTokenStream(String allFieldName, AllEntries allEntries, Analyzer analyzer) throws IOException {
-        return new AllTokenStream(analyzer.tokenStream(allFieldName, allEntries), allEntries);
+    public static TokenStream allTokenStream(String allFieldName, String value, float boost, Analyzer analyzer) throws IOException {
+        return new AllTokenStream(analyzer.tokenStream(allFieldName, value), boost);
     }
-    
-    private final BytesRef payloadSpare = new BytesRef(new byte[1]);
-
-    private final AllEntries allEntries;
 
+    private final BytesRef payloadSpare = new BytesRef(new byte[1]);
     private final OffsetAttribute offsetAttribute;
     private final PayloadAttribute payloadAttribute;
 
-    AllTokenStream(TokenStream input, AllEntries allEntries) {
+    AllTokenStream(TokenStream input, float boost) {
         super(input);
-        this.allEntries = allEntries;
         offsetAttribute = addAttribute(OffsetAttribute.class);
         payloadAttribute = addAttribute(PayloadAttribute.class);
-    }
-
-    public AllEntries allEntries() {
-        return allEntries;
+        payloadSpare.bytes[0] = SmallFloat.floatToByte315(boost);
     }
 
     @Override
@@ -61,18 +53,7 @@ public final class AllTokenStream extends TokenFilter {
         if (!input.incrementToken()) {
             return false;
         }
-        final float boost = allEntries.boost(offsetAttribute.startOffset());
-        if (boost != 1.0f) {
-            payloadSpare.bytes[0] = SmallFloat.floatToByte315(boost);
-            payloadAttribute.setPayload(payloadSpare);
-        } else {
-            payloadAttribute.setPayload(null);
-        }
+        payloadAttribute.setPayload(payloadSpare);
         return true;
     }
-
-    @Override
-    public String toString() {
-        return allEntries.toString();
-    }
 }
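
Since each AllField now carries a single boost, AllTokenStream no longer looks boosts up by start offset; it stamps the same SmallFloat-encoded one-byte payload on every token. A sketch of reading that payload back (not part of the commit; the analyzer and input text are illustrative):

    import java.io.IOException;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
    import org.apache.lucene.util.BytesRef;
    import org.apache.lucene.util.SmallFloat;
    import org.elasticsearch.common.lucene.all.AllTokenStream;

    static void checkBoostPayloads(Analyzer analyzer) throws IOException {
        TokenStream ts = AllTokenStream.allTokenStream("_all", "quick fox", 2.0f, analyzer);
        PayloadAttribute payloadAtt = ts.addAttribute(PayloadAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
            BytesRef payload = payloadAtt.getPayload();
            float boost = SmallFloat.byte315ToFloat(payload.bytes[payload.offset]);
            // every token carries the same per-field boost; 2.0f is a power
            // of two, so it round-trips through SmallFloat exactly
            assert boost == 2.0f;
        }
        ts.end();
        ts.close();
    }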

+ 1 - 0
core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java

@@ -32,6 +32,7 @@ import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.lucene.all.AllEntries;
+import org.elasticsearch.common.lucene.all.AllField;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.analysis.AnalysisService;

+ 34 - 28
core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java

@@ -19,18 +19,18 @@
 
 package org.elasticsearch.index.mapper.internal;
 
-import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Query;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
-import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.all.AllEntries;
 import org.elasticsearch.common.lucene.all.AllField;
 import org.elasticsearch.common.lucene.all.AllTermQuery;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
@@ -83,6 +83,7 @@ public class AllFieldMapper extends MetadataFieldMapper {
         public static final String NAME = AllFieldMapper.NAME;
         public static final String INDEX_NAME = AllFieldMapper.NAME;
         public static final EnabledAttributeMapper ENABLED = EnabledAttributeMapper.UNSET_ENABLED;
+        public static final int POSITION_INCREMENT_GAP = 100;
 
         public static final MappedFieldType FIELD_TYPE = new AllFieldType();
 
@@ -115,6 +116,13 @@ public class AllFieldMapper extends MetadataFieldMapper {
             // TODO: this should be an exception! it doesnt make sense to not index this field
             if (fieldType.indexOptions() == IndexOptions.NONE) {
                 fieldType.setIndexOptions(Defaults.FIELD_TYPE.indexOptions());
+            } else {
+                fieldType.setIndexAnalyzer(new NamedAnalyzer(fieldType.indexAnalyzer(),
+                    Defaults.POSITION_INCREMENT_GAP));
+                fieldType.setSearchAnalyzer(new NamedAnalyzer(fieldType.searchAnalyzer(),
+                    Defaults.POSITION_INCREMENT_GAP));
+                fieldType.setSearchQuoteAnalyzer(new NamedAnalyzer(fieldType.searchQuoteAnalyzer(),
+                    Defaults.POSITION_INCREMENT_GAP));
             }
             fieldType.setTokenized(true);
 
@@ -124,15 +132,20 @@ public class AllFieldMapper extends MetadataFieldMapper {
 
     public static class TypeParser implements MetadataFieldMapper.TypeParser {
         @Override
-        public MetadataFieldMapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+        public MetadataFieldMapper.Builder parse(String name, Map<String, Object> node,
+                                                 ParserContext parserContext) throws MapperParsingException {
             Builder builder = new Builder(parserContext.mapperService().fullName(NAME));
-            
+            builder.fieldType().setIndexAnalyzer(parserContext.analysisService().defaultIndexAnalyzer());
+            builder.fieldType().setSearchAnalyzer(parserContext.analysisService().defaultSearchAnalyzer());
+            builder.fieldType().setSearchQuoteAnalyzer(parserContext.analysisService().defaultSearchQuoteAnalyzer());
+
             // parseField below will happily parse the doc_values setting, but it is then never passed to
             // the AllFieldMapper ctor in the builder since it is not valid. Here we validate
             // the doc values settings (old and new) are rejected
             Object docValues = node.get("doc_values");
             if (docValues != null && lenientNodeBooleanValue(docValues)) {
-                throw new MapperParsingException("Field [" + name + "] is always tokenized and cannot have doc values");
+                throw new MapperParsingException("Field [" + name +
+                    "] is always tokenized and cannot have doc values");
             }
             // convoluted way of specifying doc values
             Object fielddata = node.get("fielddata");
@@ -140,17 +153,19 @@ public class AllFieldMapper extends MetadataFieldMapper {
                 Map<String, Object> fielddataMap = nodeMapValue(fielddata, "fielddata");
                 Object format = fielddataMap.get("format");
                 if ("doc_values".equals(format)) {
-                    throw new MapperParsingException("Field [" + name + "] is always tokenized and cannot have doc values");
+                    throw new MapperParsingException("Field [" + name +
+                        "] is always tokenized and cannot have doc values");
                 }
             }
-            
+
             parseTextField(builder, builder.name, node, parserContext);
             for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
                 Map.Entry<String, Object> entry = iterator.next();
                 String fieldName = Strings.toUnderscoreCase(entry.getKey());
                 Object fieldNode = entry.getValue();
                 if (fieldName.equals("enabled")) {
-                    builder.enabled(lenientNodeBooleanValue(fieldNode) ? EnabledAttributeMapper.ENABLED : EnabledAttributeMapper.DISABLED);
+                    builder.enabled(lenientNodeBooleanValue(fieldNode) ? EnabledAttributeMapper.ENABLED :
+                        EnabledAttributeMapper.DISABLED);
                     iterator.remove();
                 }
             }
@@ -237,22 +252,9 @@ public class AllFieldMapper extends MetadataFieldMapper {
         if (!enabledState.enabled) {
             return;
         }
-        // reset the entries
-        context.allEntries().reset();
-        Analyzer analyzer = findAnalyzer(context);
-        fields.add(new AllField(fieldType().name(), context.allEntries(), analyzer, fieldType()));
-    }
-
-    private Analyzer findAnalyzer(ParseContext context) {
-        Analyzer analyzer = fieldType().indexAnalyzer();
-        if (analyzer == null) {
-            analyzer = context.docMapper().mappers().indexAnalyzer();
-            if (analyzer == null) {
-                // This should not happen, should we log warn it?
-                analyzer = Lucene.STANDARD_ANALYZER;
-            }
+        for (AllEntries.Entry entry : context.allEntries().entries()) {
+            fields.add(new AllField(fieldType().name(), entry.value(), entry.boost(), fieldType()));
         }
-        return analyzer;
     }
 
     @Override
@@ -295,16 +297,18 @@ public class AllFieldMapper extends MetadataFieldMapper {
         if (includeDefaults || fieldType().storeTermVectorOffsets() != Defaults.FIELD_TYPE.storeTermVectorOffsets()) {
             builder.field("store_term_vector_offsets", fieldType().storeTermVectorOffsets());
         }
-        if (includeDefaults || fieldType().storeTermVectorPositions() != Defaults.FIELD_TYPE.storeTermVectorPositions()) {
+        if (includeDefaults ||
+            fieldType().storeTermVectorPositions() != Defaults.FIELD_TYPE.storeTermVectorPositions()) {
             builder.field("store_term_vector_positions", fieldType().storeTermVectorPositions());
         }
-        if (includeDefaults || fieldType().storeTermVectorPayloads() != Defaults.FIELD_TYPE.storeTermVectorPayloads()) {
+        if (includeDefaults ||
+            fieldType().storeTermVectorPayloads() != Defaults.FIELD_TYPE.storeTermVectorPayloads()) {
             builder.field("store_term_vector_payloads", fieldType().storeTermVectorPayloads());
         }
         if (includeDefaults || fieldType().omitNorms() != Defaults.FIELD_TYPE.omitNorms()) {
             builder.field("norms", !fieldType().omitNorms());
         }
-        
+
         doXContentAnalyzers(builder, includeDefaults);
 
         if (fieldType().similarity() != null) {
@@ -316,8 +320,10 @@ public class AllFieldMapper extends MetadataFieldMapper {
 
     @Override
     protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
-        if (((AllFieldMapper)mergeWith).enabled() != this.enabled() && ((AllFieldMapper)mergeWith).enabledState != Defaults.ENABLED) {
-            throw new IllegalArgumentException("mapper [" + fieldType().name() + "] enabled is " + this.enabled() + " now encountering "+ ((AllFieldMapper)mergeWith).enabled());
+        if (((AllFieldMapper)mergeWith).enabled() != this.enabled() &&
+            ((AllFieldMapper)mergeWith).enabledState != Defaults.ENABLED) {
+            throw new IllegalArgumentException("mapper [" + fieldType().name() +
+                "] enabled is " + this.enabled() + " now encountering "+ ((AllFieldMapper)mergeWith).enabled());
         }
         super.doMerge(mergeWith, updateAllTypes);
     }
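
Because each source field now becomes a separate value of the "_all" field, the mapper wraps its index, search, and search-quote analyzers in NamedAnalyzer with a position increment gap of 100, so phrases cannot accidentally match across values. A plain-Lucene sketch of the same mechanism (not the commit's code; StandardAnalyzer and the method shape are illustrative):

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.AnalyzerWrapper;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;

    Analyzer base = new StandardAnalyzer();
    Analyzer gapped = new AnalyzerWrapper(base.getReuseStrategy()) {
        @Override
        protected Analyzer getWrappedAnalyzer(String fieldName) {
            return base;
        }
        @Override
        public int getPositionIncrementGap(String fieldName) {
            return 100; // same value as Defaults.POSITION_INCREMENT_GAP above
        }
    };
    // indexing two values for "_all" with this analyzer places the first
    // token of the second value 101 positions after the last token of the
    // first, so a phrase query across values needs slop >= 100 (this is
    // exactly what AllFieldMapperPositionIncrementGapTests below verifies)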

+ 37 - 133
core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java

@@ -19,17 +19,13 @@
 
 package org.elasticsearch.common.lucene.all;
 
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
-import org.apache.lucene.analysis.payloads.PayloadHelper;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.Term;
@@ -41,8 +37,6 @@ import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.SmallFloat;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.test.ESTestCase;
 
@@ -54,73 +48,12 @@ import static org.hamcrest.Matchers.equalTo;
  *
  */
 public class SimpleAllTests extends ESTestCase {
-    public void testBoostOnEagerTokenizer() throws Exception {
-        AllEntries allEntries = new AllEntries();
-        allEntries.addText("field1", "all", 2.0f);
-        allEntries.addText("field2", "your", 1.0f);
-        allEntries.addText("field1", "boosts", 0.5f);
-        allEntries.reset();
-        // whitespace analyzer's tokenizer reads characters eagerly on the contrary to the standard tokenizer
-        final TokenStream ts = AllTokenStream.allTokenStream("any", allEntries, new WhitespaceAnalyzer());
-        final CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
-        final PayloadAttribute payloadAtt = ts.addAttribute(PayloadAttribute.class);
-        ts.reset();
-        for (int i = 0; i < 3; ++i) {
-            assertTrue(ts.incrementToken());
-            final String term;
-            final float boost;
-            switch (i) {
-            case 0:
-                term = "all";
-                boost = 2;
-                break;
-            case 1:
-                term = "your";
-                boost = 1;
-                break;
-            case 2:
-                term = "boosts";
-                boost = 0.5f;
-                break;
-            default:
-                throw new AssertionError();
-            }
-            assertEquals(term, termAtt.toString());
-            final BytesRef payload = payloadAtt.getPayload();
-            if (payload == null || payload.length == 0) {
-                assertEquals(boost, 1f, 0.001f);
-            } else {
-                assertEquals(1, payload.length);
-                final float b = SmallFloat.byte315ToFloat(payload.bytes[payload.offset]);
-                assertEquals(boost, b, 0.001f);
-            }
-        }
-        assertFalse(ts.incrementToken());
-    }
-
-    public void testAllEntriesRead() throws Exception {
-        AllEntries allEntries = new AllEntries();
-        allEntries.addText("field1", "something", 1.0f);
-        allEntries.addText("field2", "else", 1.0f);
-
-        for (int i = 1; i < 30; i++) {
-            allEntries.reset();
-            char[] data = new char[i];
-            String value = slurpToString(allEntries, data);
-            assertThat("failed for " + i, value, equalTo("something else"));
-        }
-    }
-
-    private String slurpToString(AllEntries allEntries, char[] data) throws IOException {
-        StringBuilder sb = new StringBuilder();
-        while (true) {
-            int read = allEntries.read(data, 0, data.length);
-            if (read == -1) {
-                break;
-            }
-            sb.append(data, 0, read);
-        }
-        return sb.toString();
+    private FieldType getAllFieldType() {
+        FieldType ft = new FieldType();
+        ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+        ft.setTokenized(true);
+        ft.freeze();
+        return ft;
     }
 
     private void assertExplanationScore(IndexSearcher searcher, Query query, ScoreDoc scoreDoc) throws IOException {
@@ -129,27 +62,21 @@ public class SimpleAllTests extends ESTestCase {
     }
 
     public void testSimpleAllNoBoost() throws Exception {
+        FieldType allFt = getAllFieldType();
         Directory dir = new RAMDirectory();
         IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
 
         Document doc = new Document();
         doc.add(new Field("_id", "1", StoredField.TYPE));
-        AllEntries allEntries = new AllEntries();
-        allEntries.addText("field1", "something", 1.0f);
-        allEntries.addText("field2", "else", 1.0f);
-        allEntries.reset();
-        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+        doc.add(new AllField("_all", "something", 1.0f, allFt));
+        doc.add(new AllField("_all", "else", 1.0f, allFt));
 
         indexWriter.addDocument(doc);
 
         doc = new Document();
         doc.add(new Field("_id", "2", StoredField.TYPE));
-        allEntries = new AllEntries();
-        allEntries.addText("field1", "else", 1.0f);
-        allEntries.addText("field2", "something", 1.0f);
-        allEntries.reset();
-        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
-
+        doc.add(new AllField("_all", "else", 1.0f, allFt));
+        doc.add(new AllField("_all", "something", 1.0f, allFt));
         indexWriter.addDocument(doc);
 
         IndexReader reader = DirectoryReader.open(indexWriter);
@@ -178,23 +105,18 @@ public class SimpleAllTests extends ESTestCase {
         Directory dir = new RAMDirectory();
         IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
 
+        FieldType allFt = getAllFieldType();
         Document doc = new Document();
         doc.add(new Field("_id", "1", StoredField.TYPE));
-        AllEntries allEntries = new AllEntries();
-        allEntries.addText("field1", "something", 1.0f);
-        allEntries.addText("field2", "else", 1.0f);
-        allEntries.reset();
-        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+        doc.add(new AllField("_all", "something", 1.0f, allFt));
+        doc.add(new AllField("_all", "else", 1.0f, allFt));
 
         indexWriter.addDocument(doc);
 
         doc = new Document();
         doc.add(new Field("_id", "2", StoredField.TYPE));
-        allEntries = new AllEntries();
-        allEntries.addText("field1", "else", 2.0f);
-        allEntries.addText("field2", "something", 1.0f);
-        allEntries.reset();
-        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+        doc.add(new AllField("_all", "else", 2.0f, allFt));
+        doc.add(new AllField("_all", "something", 1.0f, allFt));
 
         indexWriter.addDocument(doc);
 
@@ -221,27 +143,21 @@ public class SimpleAllTests extends ESTestCase {
         indexWriter.close();
     }
 
-    public void testTermMissingFromOneSegment() throws Exception {
+    public void testTermMissingFromOneSegment() throws Exception {
         Directory dir = new RAMDirectory();
         IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
 
+        FieldType allFt = getAllFieldType();
         Document doc = new Document();
         doc.add(new Field("_id", "1", StoredField.TYPE));
-        AllEntries allEntries = new AllEntries();
-        allEntries.addText("field", "something", 2.0f);
-        allEntries.reset();
-        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+        doc.add(new AllField("_all", "something", 2.0f, allFt));
 
         indexWriter.addDocument(doc);
         indexWriter.commit();
 
         doc = new Document();
         doc.add(new Field("_id", "2", StoredField.TYPE));
-        allEntries = new AllEntries();
-        allEntries.addText("field", "else", 1.0f);
-        allEntries.reset();
-        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
-
+        doc.add(new AllField("_all", "else", 1.0f, allFt));
         indexWriter.addDocument(doc);
 
         IndexReader reader = DirectoryReader.open(indexWriter);
@@ -260,23 +176,18 @@ public class SimpleAllTests extends ESTestCase {
         Directory dir = new RAMDirectory();
         IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
 
+        FieldType allFt = getAllFieldType();
         Document doc = new Document();
         doc.add(new Field("_id", "1", StoredField.TYPE));
-        AllEntries allEntries = new AllEntries();
-        allEntries.addText("field1", "something moo", 1.0f);
-        allEntries.addText("field2", "else koo", 1.0f);
-        allEntries.reset();
-        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+        doc.add(new AllField("_all", "something moo", 1.0f, allFt));
+        doc.add(new AllField("_all", "else koo", 1.0f, allFt));
 
         indexWriter.addDocument(doc);
 
         doc = new Document();
         doc.add(new Field("_id", "2", StoredField.TYPE));
-        allEntries = new AllEntries();
-        allEntries.addText("field1", "else koo", 1.0f);
-        allEntries.addText("field2", "something moo", 1.0f);
-        allEntries.reset();
-        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+        doc.add(new AllField("_all", "else koo", 1.0f, allFt));
+        doc.add(new AllField("_all", "something moo", 1.0f, allFt));
 
         indexWriter.addDocument(doc);
 
@@ -310,23 +221,18 @@ public class SimpleAllTests extends ESTestCase {
         Directory dir = new RAMDirectory();
         IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
 
+        FieldType allFt = getAllFieldType();
         Document doc = new Document();
         doc.add(new Field("_id", "1", StoredField.TYPE));
-        AllEntries allEntries = new AllEntries();
-        allEntries.addText("field1", "something moo", 1.0f);
-        allEntries.addText("field2", "else koo", 1.0f);
-        allEntries.reset();
-        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+        doc.add(new AllField("_all", "something moo", 1.0f, allFt));
+        doc.add(new AllField("_all", "else koo", 1.0f, allFt));
 
         indexWriter.addDocument(doc);
 
         doc = new Document();
         doc.add(new Field("_id", "2", StoredField.TYPE));
-        allEntries = new AllEntries();
-        allEntries.addText("field1", "else koo", 2.0f);
-        allEntries.addText("field2", "something moo", 1.0f);
-        allEntries.reset();
-        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+        doc.add(new AllField("_all", "else koo", 2.0f, allFt));
+        doc.add(new AllField("_all", "something moo", 1.0f, allFt));
 
         indexWriter.addDocument(doc);
 
@@ -356,16 +262,14 @@ public class SimpleAllTests extends ESTestCase {
         indexWriter.close();
     }
 
-    public void testNoTokensWithKeywordAnalyzer() throws Exception {
+    public void testNoTokens() throws Exception {
         Directory dir = new RAMDirectory();
         IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.KEYWORD_ANALYZER));
 
+        FieldType allFt = getAllFieldType();
         Document doc = new Document();
         doc.add(new Field("_id", "1", StoredField.TYPE));
-        AllEntries allEntries = new AllEntries();
-        allEntries.reset();
-        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.KEYWORD_ANALYZER)));
-
+        doc.add(new AllField("_all", "", 2.0f, allFt));
         indexWriter.addDocument(doc);
 
         IndexReader reader = DirectoryReader.open(indexWriter);

+ 1 - 1
core/src/test/java/org/elasticsearch/get/GetActionIT.java

@@ -729,7 +729,7 @@ public class GetActionIT extends ESIntegTestCase {
 
         GetResponse getResponse = client().prepareGet(indexOrAlias(), "my-type1", "1").setFields("_all").get();
         assertNotNull(getResponse.getField("_all").getValue());
-        assertThat(getResponse.getField("_all").getValue().toString(), equalTo("some text" + " "));
+        assertThat(getResponse.getField("_all").getValue().toString(), equalTo("some text"));
     }
 
     public void testUngeneratedFieldsThatAreNeverStored() throws IOException {

+ 1 - 2
core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java

@@ -75,9 +75,8 @@ public class CompoundAnalysisTests extends ESTestCase {
 
         AllEntries allEntries = new AllEntries();
         allEntries.addText("field1", text, 1.0f);
-        allEntries.reset();
 
-        TokenStream stream = AllTokenStream.allTokenStream("_all", allEntries, analyzer);
+        TokenStream stream = AllTokenStream.allTokenStream("_all", text, 1.0f, analyzer);
         stream.reset();
         CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
 

+ 1 - 6
core/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTests.java

@@ -75,18 +75,13 @@ public class SynonymsAnalysisTests extends ESTestCase {
         match("synonymAnalyzerWordnet", "abstain", "abstain refrain desist");
         match("synonymAnalyzerWordnet_file", "abstain", "abstain refrain desist");
         match("synonymAnalyzerWithsettings", "kimchy", "sha hay");
-
     }
 
     private void match(String analyzerName, String source, String target) throws IOException {
 
         Analyzer analyzer = analysisService.analyzer(analyzerName).analyzer();
 
-        AllEntries allEntries = new AllEntries();
-        allEntries.addText("field", source, 1.0f);
-        allEntries.reset();
-
-        TokenStream stream = AllTokenStream.allTokenStream("_all", allEntries, analyzer);
+        TokenStream stream = AllTokenStream.allTokenStream("_all", source, 1.0f, analyzer);
         stream.reset();
         CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
 

+ 108 - 0
core/src/test/java/org/elasticsearch/index/mapper/all/AllFieldMapperPositionIncrementGapTests.java

@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.all;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.index.query.MatchPhraseQueryBuilder;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.test.InternalSettingsPlugin;
+
+import java.io.IOException;
+import java.util.Collection;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+
+/**
+ * Tests that position_increment_gap is read from the mapper and applies as
+ * expected in queries.
+ */
+public class AllFieldMapperPositionIncrementGapTests extends ESSingleNodeTestCase {
+
+    @Override
+    protected Collection<Class<? extends Plugin>> getPlugins() {
+        return pluginList(InternalSettingsPlugin.class);
+    }
+
+    /**
+     * The default position_increment_gap should be large enough that most
+     * "sensible" queries phrase slops won't match across values.
+     */
+    public void testDefault() throws IOException {
+        assertGapIsOneHundred(client(), "test", "test");
+    }
+
+    /**
+     * Asserts that the post-2.0 default is being applied.
+     */
+    public static void assertGapIsOneHundred(Client client, String indexName, String type) throws IOException {
+        testGap(client, indexName, type, 100);
+
+        // No match across gap using default slop with default positionIncrementGap
+        assertHitCount(client.prepareSearch(indexName)
+            .setQuery(new MatchPhraseQueryBuilder("_all", "one two")).get(), 0);
+
+        // Nor with small-ish values
+        assertHitCount(client.prepareSearch(indexName)
+            .setQuery(new MatchPhraseQueryBuilder("_all", "one two").slop(5)).get(), 0);
+        assertHitCount(client.prepareSearch(indexName)
+            .setQuery(new MatchPhraseQueryBuilder("_all", "one two").slop(50)).get(), 0);
+
+        // But huge-ish values still match
+        assertHitCount(client.prepareSearch(indexName)
+            .setQuery(new MatchPhraseQueryBuilder("_all", "one two").slop(500)).get(), 1);
+    }
+
+    /**
+     * Asserts that the pre-2.0 default has been applied or explicitly
+     * configured.
+     */
+    public static void assertGapIsZero(Client client, String indexName, String type) throws IOException {
+        testGap(client, indexName, type, 0);
+        /*
+         * Phrases match across different values using default slop with pre-2.0 default
+         * position_increment_gap.
+         */
+        assertHitCount(client.prepareSearch(indexName)
+            .setQuery(new MatchPhraseQueryBuilder("_all", "one two")).get(), 1);
+    }
+
+    private static void testGap(Client client, String indexName,
+                                String type, int positionIncrementGap) throws IOException {
+        client.prepareIndex(indexName, type, "position_gap_test")
+            .setSource("string1", "one", "string2", "two three").setRefresh(true).get();
+
+        // Baseline - phrase query finds matches in the same field value
+        assertHitCount(client.prepareSearch(indexName)
+            .setQuery(new MatchPhraseQueryBuilder("_all", "two three")).get(), 1);
+
+        if (positionIncrementGap > 0) {
+            // No match across gaps when slop < position gap
+            assertHitCount(client.prepareSearch(indexName)
+                .setQuery(new MatchPhraseQueryBuilder("_all", "one two").slop(positionIncrementGap - 1)).get(), 0);
+        }
+
+        // Match across gaps when slop >= position gap
+        assertHitCount(client.prepareSearch(indexName)
+            .setQuery(new MatchPhraseQueryBuilder("_all", "one two").slop(positionIncrementGap)).get(), 1);
+        assertHitCount(client.prepareSearch(indexName)
+            .setQuery(new MatchPhraseQueryBuilder("_all", "one two").slop(positionIncrementGap+1)).get(), 1);
+    }
+}

+ 154 - 121
core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java

@@ -19,20 +19,16 @@
 
 package org.elasticsearch.index.mapper.all;
 
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
-import org.elasticsearch.Version;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
-import org.elasticsearch.common.lucene.all.AllEntries;
-import org.elasticsearch.common.lucene.all.AllField;
 import org.elasticsearch.common.lucene.all.AllTermQuery;
 import org.elasticsearch.common.lucene.all.AllTokenStream;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
@@ -62,10 +58,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;
 import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
 import static org.hamcrest.Matchers.containsString;
-import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.hasItem;
-import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.nullValue;
 
 public class SimpleAllMapperTests extends ESSingleNodeTestCase {
@@ -77,93 +70,119 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {
 
     public void testSimpleAllMappers() throws Exception {
         String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/mapping.json");
-        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("person", new CompressedXContent(mapping));
+        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("person", new CompressedXContent(mapping));
         byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
         Document doc = docMapper.parse("test", "person", "1", new BytesArray(json)).rootDoc();
-        AllField field = (AllField) doc.getField("_all");
-        // One field is boosted so we should see AllTokenStream used:
-        assertThat(field.tokenStream(docMapper.mappers().indexAnalyzer(), null), Matchers.instanceOf(AllTokenStream.class));
-        AllEntries allEntries = field.getAllEntries();
-        assertThat(allEntries.fields().size(), equalTo(3));
-        assertThat(allEntries.fields().contains("address.last.location"), equalTo(true));
-        assertThat(allEntries.fields().contains("name.last"), equalTo(true));
-        assertThat(allEntries.fields().contains("simple1"), equalTo(true));
+        IndexableField[] fields = doc.getFields("_all");
+        assertThat(fields.length, equalTo(3));
+        String[] expected = new String[] {"banon", "last location", "1"};
+        for (int i = 0; i < fields.length; i++) {
+            assertThat(fields[i].stringValue(), equalTo(expected[i]));
+            assertThat(fields[i].fieldType().omitNorms(), equalTo(true));
+            if (i == 0) {
+                // The field "name.last" is boosted so we should see AllTokenStream used:
+                assertThat(fields[i].tokenStream(docMapper.mappers().indexAnalyzer(), null),
+                    Matchers.instanceOf(AllTokenStream.class));
+            } else {
+                assertThat(fields[i].tokenStream(docMapper.mappers().indexAnalyzer(), null),
+                    Matchers.not(Matchers.instanceOf(AllTokenStream.class)));
+            }
+        }
         AllFieldMapper mapper = docMapper.allFieldMapper();
-        assertThat(field.fieldType().omitNorms(), equalTo(true));
-        assertThat(mapper.fieldType().queryStringTermQuery(new Term("_all", "foobar")), Matchers.instanceOf(AllTermQuery.class));
+        assertThat(mapper.fieldType().queryStringTermQuery(new Term("_all", "foobar")),
+            Matchers.instanceOf(AllTermQuery.class));
     }
 
     public void testAllMappersNoBoost() throws Exception {
         String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/noboost-mapping.json");
         IndexService index = createIndex("test");
-        DocumentMapper docMapper = index.mapperService().documentMapperParser().parse("person", new CompressedXContent(mapping));
+        DocumentMapper docMapper = index.mapperService().documentMapperParser().parse("person",
+            new CompressedXContent(mapping));
         byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
+
         Document doc = docMapper.parse("test", "person", "1", new BytesArray(json)).rootDoc();
-        AllField field = (AllField) doc.getField("_all");
-        AllEntries allEntries = field.getAllEntries();
-        assertThat(allEntries.fields().size(), equalTo(3));
-        assertThat(allEntries.fields().contains("address.last.location"), equalTo(true));
-        assertThat(allEntries.fields().contains("name.last"), equalTo(true));
-        assertThat(allEntries.fields().contains("simple1"), equalTo(true));
-        assertThat(field.fieldType().omitNorms(), equalTo(false));
+        IndexableField[] fields = doc.getFields("_all");
+        assertThat(fields.length, equalTo(3));
+        String[] expected = new String[] {"banon", "last location", "1"};
+        for (int i = 0; i < fields.length; i++) {
+            assertThat(fields[i].stringValue(), equalTo(expected[i]));
+            assertThat(fields[i].fieldType().omitNorms(), equalTo(false));
+        }
+        AllFieldMapper mapper = docMapper.allFieldMapper();
+        assertThat(mapper.fieldType().queryStringTermQuery(new Term("_all", "foobar")),
+            Matchers.instanceOf(AllTermQuery.class));
     }
 
     public void testAllMappersTermQuery() throws Exception {
-        String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/mapping_omit_positions_on_all.json");
-        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("person", new CompressedXContent(mapping));
+        String mapping =
+            copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/mapping_omit_positions_on_all.json");
+        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("person",
+            new CompressedXContent(mapping));
         byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
         Document doc = docMapper.parse("test", "person", "1", new BytesArray(json)).rootDoc();
-        AllField field = (AllField) doc.getField("_all");
-        AllEntries allEntries = field.getAllEntries();
-        assertThat(allEntries.fields().size(), equalTo(3));
-        assertThat(allEntries.fields().contains("address.last.location"), equalTo(true));
-        assertThat(allEntries.fields().contains("name.last"), equalTo(true));
-        assertThat(allEntries.fields().contains("simple1"), equalTo(true));
+        IndexableField[] fields = doc.getFields("_all");
+        assertThat(fields.length, equalTo(3));
+        String[] expected = new String[] {"banon", "last location", "1"};
+        for (int i = 0; i < fields.length; i++) {
+            assertThat(fields[i].stringValue(), equalTo(expected[i]));
+            assertThat(fields[i].fieldType().omitNorms(), equalTo(false));
+        }
         AllFieldMapper mapper = docMapper.allFieldMapper();
-        assertThat(field.fieldType().omitNorms(), equalTo(false));
-        assertThat(mapper.fieldType().queryStringTermQuery(new Term("_all", "foobar")), Matchers.instanceOf(AllTermQuery.class));
-
+        assertThat(mapper.fieldType().queryStringTermQuery(new Term("_all", "foobar")),
+            Matchers.instanceOf(AllTermQuery.class));
     }
 
     // #6187: make sure we see AllTermQuery even when offsets are indexed in the _all field:
     public void testAllMappersWithOffsetsTermQuery() throws Exception {
         String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/mapping_offsets_on_all.json");
-        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("person", new CompressedXContent(mapping));
+        DocumentMapper docMapper =
+            createIndex("test").mapperService().documentMapperParser().parse("person", new CompressedXContent(mapping));
         byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
         Document doc = docMapper.parse("test", "person", "1", new BytesArray(json)).rootDoc();
-        AllField field = (AllField) doc.getField("_all");
-        // _all field indexes positions, and mapping has boosts, so we should see AllTokenStream:
-        assertThat(field.tokenStream(docMapper.mappers().indexAnalyzer(), null), Matchers.instanceOf(AllTokenStream.class));
-        AllEntries allEntries = field.getAllEntries();
-        assertThat(allEntries.fields().size(), equalTo(3));
-        assertThat(allEntries.fields().contains("address.last.location"), equalTo(true));
-        assertThat(allEntries.fields().contains("name.last"), equalTo(true));
-        assertThat(allEntries.fields().contains("simple1"), equalTo(true));
+        IndexableField[] fields = doc.getFields("_all");
+        assertThat(fields.length, equalTo(3));
+        String[] expected = new String[] {"banon", "last location", "1"};
+        for (int i = 0; i < fields.length; i++) {
+            assertThat(fields[i].stringValue(), equalTo(expected[i]));
+            assertThat(fields[i].fieldType().omitNorms(), equalTo(false));
+        }
         AllFieldMapper mapper = docMapper.allFieldMapper();
-        assertThat(field.fieldType().omitNorms(), equalTo(false));
-        assertThat(mapper.fieldType().queryStringTermQuery(new Term("_all", "foobar")), Matchers.instanceOf(AllTermQuery.class));
+        assertThat(mapper.fieldType().queryStringTermQuery(new Term("_all", "foobar")),
+            Matchers.instanceOf(AllTermQuery.class));
     }
 
     // #6187: if _all doesn't index positions then we never use AllTokenStream, even if some fields have boost
     public void testBoostWithOmitPositions() throws Exception {
-        String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/mapping_boost_omit_positions_on_all.json");
-        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("person", new CompressedXContent(mapping));
+        String mapping =
+            copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/mapping_boost_omit_positions_on_all.json");
+        DocumentMapper docMapper =
+            createIndex("test").mapperService().documentMapperParser().parse("person", new CompressedXContent(mapping));
         byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
         Document doc = docMapper.parse("test", "person", "1", new BytesArray(json)).rootDoc();
-        AllField field = (AllField) doc.getField("_all");
-        // _all field omits positions, so we should not get AllTokenStream even though fields are boosted
-        assertThat(field.tokenStream(docMapper.mappers().indexAnalyzer(), null), Matchers.not(Matchers.instanceOf(AllTokenStream.class)));
+        IndexableField[] fields = doc.getFields("_all");
+        assertThat(fields.length, equalTo(3));
+        for (IndexableField field : fields) {
+            // _all field omits positions, so we should not get AllTokenStream even though fields are boosted
+            assertThat(field.tokenStream(docMapper.mappers().indexAnalyzer(), null),
+                Matchers.not(Matchers.instanceOf(AllTokenStream.class)));
+        }
     }
 
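A hedged reading of why positions gate the wrapping, inferred from these assertions rather than from the production code: per-value boosts are encoded as token payloads, and payloads can only be indexed alongside positions. Roughly:

    import org.apache.lucene.index.IndexOptions;

    // Illustrative gate only; the real AllField logic may differ in detail.
    static boolean useAllTokenStream(IndexOptions options, float boost) {
        boolean hasPositions = options.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
        // without positions there is nowhere to attach a payload-encoded boost
        return hasPositions && boost != 1.0f;
    }
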
     // #6187: if no fields were boosted, we shouldn't use AllTokenStream
     public void testNoBoost() throws Exception {
         String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/noboost-mapping.json");
-        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("person", new CompressedXContent(mapping));
+        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("person", new CompressedXContent(mapping));
         byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
         Document doc = docMapper.parse("test", "person", "1", new BytesArray(json)).rootDoc();
-        AllField field = (AllField) doc.getField("_all");
-        // no fields have boost, so we should not see AllTokenStream:
-        assertThat(field.tokenStream(docMapper.mappers().indexAnalyzer(), null), Matchers.not(Matchers.instanceOf(AllTokenStream.class)));
+        IndexableField[] fields = doc.getFields("_all");
+        assertThat(fields.length, equalTo(3));
+        for (IndexableField field : fields) {
+            // no fields have boosts, so we should not see AllTokenStream:
+            assertThat(field.tokenStream(docMapper.mappers().indexAnalyzer(), null),
+                Matchers.not(Matchers.instanceOf(AllTokenStream.class)));
+        }
     }
 
     public void testSimpleAllMappersWithReparse() throws Exception {
@@ -175,32 +194,39 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {
         DocumentMapper builtDocMapper = parser.parse("person", new CompressedXContent(builtMapping));
         byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
         Document doc = builtDocMapper.parse("test", "person", "1", new BytesArray(json)).rootDoc();
-
-        AllField field = (AllField) doc.getField("_all");
-        AllEntries allEntries = field.getAllEntries();
-        assertThat(allEntries.fields().toString(), allEntries.fields().size(), equalTo(3));
-        assertThat(allEntries.fields().contains("address.last.location"), equalTo(true));
-        assertThat(allEntries.fields().contains("name.last"), equalTo(true));
-        assertThat(allEntries.fields().contains("simple1"), equalTo(true));
-        assertThat(field.fieldType().omitNorms(), equalTo(true));
+        IndexableField[] fields = doc.getFields("_all");
+        assertThat(fields.length, equalTo(3));
+        String[] expected = new String[] {"banon", "last location", "1"};
+        for (int i = 0; i < fields.length; i++) {
+            assertThat(fields[i].stringValue(), equalTo(expected[i]));
+            assertThat(fields[i].fieldType().omitNorms(), equalTo(true));
+            if (i == 0) {
+                // The field "name.last" is boosted so we should see AllTokenStream used:
+                assertThat(fields[i].tokenStream(docMapper.mappers().indexAnalyzer(), null),
+                    Matchers.instanceOf(AllTokenStream.class));
+            } else {
+                assertThat(fields[i].tokenStream(docMapper.mappers().indexAnalyzer(), null),
+                    Matchers.not(Matchers.instanceOf(AllTokenStream.class)));
+            }
+        }
     }
 
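Because boosts now live on individual fields, the index-0 special case above (only the boosted name.last value, "banon", is wrapped) can be observed value by value. A sketch assuming the AllTokenStream class this test already references:

    import java.io.IOException;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.index.IndexableField;
    import org.elasticsearch.common.lucene.all.AllTokenStream;

    // Illustrative helper: reports which _all values were boosted at index
    // time, using "token stream wrapped in AllTokenStream" as the signal.
    static void printBoostedValues(IndexableField[] fields, Analyzer indexAnalyzer) throws IOException {
        for (IndexableField field : fields) {
            boolean boosted = field.tokenStream(indexAnalyzer, null) instanceof AllTokenStream;
            System.out.println(field.stringValue() + " boosted=" + boosted);
        }
    }
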
     public void testSimpleAllMappersWithStore() throws Exception {
         String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/store-mapping.json");
-        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("person", new CompressedXContent(mapping));
+        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("person", new CompressedXContent(mapping));
         byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
         Document doc = docMapper.parse("test", "person", "1", new BytesArray(json)).rootDoc();
-        AllField field = (AllField) doc.getField("_all");
-        AllEntries allEntries = field.getAllEntries();
-        assertThat(allEntries.fields().size(), equalTo(2));
-        assertThat(allEntries.fields().contains("name.last"), equalTo(true));
-        assertThat(allEntries.fields().contains("simple1"), equalTo(true));
-
-        String text = field.stringValue();
-        assertThat(text, equalTo(allEntries.buildText()));
-        assertThat(field.fieldType().omitNorms(), equalTo(false));
+        IndexableField[] fields = doc.getFields("_all");
+        assertThat(fields.length, equalTo(2));
+        String[] expected = new String[] {"banon", "1"};
+        for (int i = 0; i < fields.length; i++) {
+            assertThat(fields[i].stringValue(), equalTo(expected[i]));
+            assertThat(fields[i].fieldType().omitNorms(), equalTo(false));
+        }
     }
 
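The dropped buildText() round-trip deserves a note: AllEntries used to concatenate every value into one stored string, whereas each value is now stored on its own field. If a test ever needed the old concatenated form again, a sketch (the separator is an assumption; this diff does not specify one):

    import org.apache.lucene.index.IndexableField;

    // Hypothetical reconstruction of the old AllEntries.buildText() output.
    static String joinAllValues(IndexableField[] fields) {
        StringBuilder text = new StringBuilder();
        for (IndexableField field : fields) {
            if (text.length() > 0) {
                text.append(' '); // assumed separator
            }
            text.append(field.stringValue());
        }
        return text.toString();
    }
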
     public void testSimpleAllMappersWithReparseWithStore() throws Exception {
         DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
         String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/store-mapping.json");
@@ -211,15 +237,13 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {
         byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
         Document doc = builtDocMapper.parse("test", "person", "1", new BytesArray(json)).rootDoc();
 
-        AllField field = (AllField) doc.getField("_all");
-        AllEntries allEntries = field.getAllEntries();
-        assertThat(allEntries.fields().size(), equalTo(2));
-        assertThat(allEntries.fields().contains("name.last"), equalTo(true));
-        assertThat(allEntries.fields().contains("simple1"), equalTo(true));
-
-        String text = field.stringValue();
-        assertThat(text, equalTo(allEntries.buildText()));
-        assertThat(field.fieldType().omitNorms(), equalTo(false));
+        IndexableField[] fields = doc.getFields("_all");
+        assertThat(fields.length, equalTo(2));
+        String[] expected = new String[] {"banon", "1"};
+        for (int i = 0; i < fields.length; i++) {
+            assertThat(fields[i].stringValue(), equalTo(expected[i]));
+            assertThat(fields[i].fieldType().omitNorms(), equalTo(false));
+        }
     }
 
     public void testRandom() throws Exception {
@@ -283,25 +307,21 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {
                 .field("foobar", "foobar")
                 .endObject().bytes().toBytes();
         Document doc = builtDocMapper.parse("test", "test", "1", new BytesArray(json)).rootDoc();
-        AllField field = (AllField) doc.getField("_all");
+        IndexableField[] fields = doc.getFields("_all");
         if (enabled) {
-            assertThat(field.fieldType().omitNorms(), equalTo(!norms));
-            assertThat(field.fieldType().stored(), equalTo(stored));
-            assertThat(field.fieldType().storeTermVectorOffsets(), equalTo(tv_offsets));
-            assertThat(field.fieldType().storeTermVectorPayloads(), equalTo(tv_payloads));
-            assertThat(field.fieldType().storeTermVectorPositions(), equalTo(tv_positions));
-            assertThat(field.fieldType().storeTermVectors(), equalTo(tv_stored));
-            AllEntries allEntries = field.getAllEntries();
-            assertThat(allEntries.fields().size(), equalTo(2));
-            assertThat(allEntries.fields().contains("foobar"), equalTo(true));
-            assertThat(allEntries.fields().contains("foo"), equalTo(true));
-            if (!stored) {
-                assertThat(field.stringValue(), nullValue());
+            assertThat(fields.length, equalTo(2));
+            String[] expected = new String[] {"bar", "foobar"};
+            for (int i = 0; i < fields.length; i++) {
+                assertThat(fields[i].fieldType().omitNorms(), equalTo(!norms));
+                assertThat(fields[i].fieldType().stored(), equalTo(stored));
+                assertThat(fields[i].fieldType().storeTermVectorOffsets(), equalTo(tv_offsets));
+                assertThat(fields[i].fieldType().storeTermVectorPayloads(), equalTo(tv_payloads));
+                assertThat(fields[i].fieldType().storeTermVectorPositions(), equalTo(tv_positions));
+                assertThat(fields[i].fieldType().storeTermVectors(), equalTo(tv_stored));
+                assertThat(fields[i].stringValue(), equalTo(expected[i]));
             }
-            String text = stored ? field.stringValue() : "bar foobar";
-            assertThat(text.trim(), equalTo(allEntries.buildText().trim()));
         } else {
-            assertThat(field, nullValue());
+            assertThat(fields.length, equalTo(0));
         }
         if (similarity == null) {
             assertThat(builtDocMapper.allFieldMapper().fieldType().similarity(), nullValue());
@@ -319,8 +339,11 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {
     }
 
     public void testMultiField_includeInAllSetToFalse() throws IOException {
-        String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/multifield-mapping_include_in_all_set_to_false.json");
-        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("test", new CompressedXContent(mapping));
+        String mapping =
+            copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/" +
+                "multifield-mapping_include_in_all_set_to_false.json");
+        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("test", new CompressedXContent(mapping));
 
         XContentBuilder builder = XContentFactory.jsonBuilder();
         builder.startObject()
@@ -331,14 +354,15 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {
                 .endObject();
 
         Document doc = docMapper.parse("test", "test", "1", builder.bytes()).rootDoc();
-        AllField field = (AllField) doc.getField("_all");
-        AllEntries allEntries = field.getAllEntries();
-        assertThat(allEntries.fields(), empty());
+        IndexableField[] fields = doc.getFields("_all");
+        assertThat(fields.length, equalTo(0));
     }
 
     public void testMultiField_defaults() throws IOException {
-        String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/multifield-mapping_default.json");
-        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("test", new CompressedXContent(mapping));
+        String mapping =
+            copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/multifield-mapping_default.json");
+        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser()
+            .parse("test", new CompressedXContent(mapping));
 
         XContentBuilder builder = XContentFactory.jsonBuilder();
         builder.startObject()
@@ -349,10 +373,9 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {
                 .endObject();
 
         Document doc = docMapper.parse("test", "test", "1", builder.bytes()).rootDoc();
-        AllField field = (AllField) doc.getField("_all");
-        AllEntries allEntries = field.getAllEntries();
-        assertThat(allEntries.fields(), hasSize(1));
-        assertThat(allEntries.fields(), hasItem("foo.bar"));
+        IndexableField[] fields = doc.getFields("_all");
+        assertThat(fields.length, equalTo(1));
+        assertThat(fields[0].stringValue(), equalTo("Elasticsearch rules!"));
     }
 
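Taken together, the two multi-field tests pin down what reaches _all: nothing when include_in_all is false, and exactly the root value, once, under the defaults (multi-field sub-fields add no duplicates). With the hypothetical assertAllField helper sketched earlier (the document variables below are stand-ins for the two parsed docs):

    assertAllField(docWithIncludeInAllFalse, false);                  // no values copied
    assertAllField(docWithDefaults, false, "Elasticsearch rules!");   // root value only, once
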
     public void testMisplacedTypeInRoot() throws IOException {
@@ -380,7 +403,8 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {
 
     // issue https://github.com/elastic/elasticsearch/issues/5864
     public void testMisplacedMappingAsRoot() throws IOException {
-        String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/misplaced_mapping_key_in_root.json");
+        String mapping =
+            copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/misplaced_mapping_key_in_root.json");
         try {
             createIndex("test").mapperService().documentMapperParser().parse("test", new CompressedXContent(mapping));
             fail("Expected MapperParsingException");
@@ -394,9 +418,11 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {
     // test that RootObjectMapping still works
     public void testRootObjectMapperPropertiesDoNotCauseException() throws IOException {
         DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
-        String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/type_dynamic_template_mapping.json");
+        String mapping =
+            copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/type_dynamic_template_mapping.json");
         parser.parse("test", new CompressedXContent(mapping));
-        mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/type_dynamic_date_formats_mapping.json");
+        mapping =
+            copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/type_dynamic_date_formats_mapping.json");
         parser.parse("test", new CompressedXContent(mapping));
         mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/type_date_detection_mapping.json");
         parser.parse("test", new CompressedXContent(mapping));
@@ -430,7 +456,8 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {
             createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
             fail();
         } catch (MapperParsingException e) {
-            assertThat(e.getDetailedMessage(), containsString("[_all] is always tokenized and cannot have doc values"));
+            assertThat(e.getDetailedMessage(),
+                containsString("[_all] is always tokenized and cannot have doc values"));
         }
 
     }
@@ -438,10 +465,13 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {
     public void testAutoBoost() throws Exception {
         for (boolean boost : new boolean[] {false, true}) {
             String index = "test_" + boost;
-            IndexService indexService = createIndex(index, client().admin().indices().prepareCreate(index).addMapping("type", "foo", "type=text" + (boost ? ",boost=2" : "")));
+            IndexService indexService =
+                createIndex(index, client().admin().indices().prepareCreate(index)
+                    .addMapping("type", "foo", "type=text" + (boost ? ",boost=2" : "")));
             client().prepareIndex(index, "type").setSource("foo", "bar").get();
             client().admin().indices().prepareRefresh(index).get();
-            Query query = indexService.mapperService().documentMapper("type").allFieldMapper().fieldType().termQuery("bar", null);
+            Query query = indexService.mapperService()
+                .documentMapper("type").allFieldMapper().fieldType().termQuery("bar", null);
             try (Searcher searcher = indexService.getShardOrNull(0).acquireSearcher("tests")) {
                 query = searcher.searcher().rewrite(query);
                 final Class<?> expected = boost ? AllTermQuery.class : TermQuery.class;
@@ -451,15 +481,18 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {
     }
 
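A hedged sketch of what the rewrite loop above pins down: the payload-aware AllTermQuery only survives rewrite when some field was boosted at index time; with no boosts it degrades to a plain TermQuery, so unboosted indices pay no payload-scoring cost. The method name and assertion style below are illustrative:

    import java.io.IOException;

    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermQuery;
    import org.elasticsearch.common.lucene.all.AllTermQuery;

    // Illustrative check mirroring the try-with-resources block above.
    static void checkAllQueryRewrite(IndexSearcher searcher, Query allQuery, boolean anyFieldBoosted) throws IOException {
        Query rewritten = searcher.rewrite(allQuery);
        Class<?> expected = anyFieldBoosted ? AllTermQuery.class : TermQuery.class;
        assert expected.equals(rewritten.getClass()) : rewritten;
    }
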
     public void testIncludeInObjectNotAllowed() throws Exception {
-        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
-        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
+        String mapping = XContentFactory.jsonBuilder().startObject()
+            .startObject("type").endObject().endObject().string();
+        DocumentMapper docMapper = createIndex("test").mapperService()
+            .documentMapperParser().parse("type", new CompressedXContent(mapping));
 
         try {
             docMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
                 .startObject().field("_all", "foo").endObject().bytes());
             fail("Expected failure to parse metadata field");
         } catch (MapperParsingException e) {
-            assertTrue(e.getMessage(), e.getMessage().contains("Field [_all] is a metadata field and cannot be added inside a document"));
+            assertTrue(e.getMessage(),
+                e.getMessage().contains("Field [_all] is a metadata field and cannot be added inside a document"));
         }
     }
 }

+ 1 - 1
core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java

@@ -975,7 +975,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
                 .get();
         for (int i = 0; i < COUNT; i++) {
             SearchHit hit = searchResponse.getHits().getHits()[i];
-            assertHighlight(searchResponse, i, "_all", 0, 1, equalTo("<em>test</em> " + hit.id() + " "));
+            assertHighlight(searchResponse, i, "_all", 0, 1, equalTo("<em>test</em> " + hit.id()));
         }
     }
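The dropped trailing space is a direct consequence of the refactoring, a hedged inference from the two expectations: AllEntries appended a separator after each value while building the single _all string, so highlighted fragments used to end in a space; with one field per value the fragment is now exactly the analyzed value.

    String id = hit.id();
    String before = "<em>test</em> " + id + " "; // old: concatenated text kept the separator
    String after = "<em>test</em> " + id;        // new: per-value field, no trailing separator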