@@ -39,7 +39,6 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.analysis.AnalyzerScope;
-import org.elasticsearch.index.analysis.IndexAnalyzers;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
 import org.elasticsearch.index.fielddata.IndexFieldData;
 import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
@@ -54,7 +53,6 @@ import org.elasticsearch.index.mapper.MapperParsingException;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.mapper.ParseContext.Document;
-import org.elasticsearch.index.mapper.StringFieldType;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.similarity.SimilarityProvider;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
@@ -68,7 +66,6 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.Objects;
 
 import static org.elasticsearch.index.mapper.TypeParsers.parseField;
 
@@ -105,8 +102,6 @@ public class WildcardFieldMapper extends FieldMapper {
 
     public static class Builder extends FieldMapper.Builder<Builder, WildcardFieldMapper> {
         protected int ignoreAbove = Defaults.IGNORE_ABOVE;
-        private IndexAnalyzers indexAnalyzers;
-        private String normalizerName;
 
 
         public Builder(String name) {
@@ -173,22 +168,9 @@ public class WildcardFieldMapper extends FieldMapper {
             return (WildcardFieldType) super.fieldType();
         }
 
-        public Builder normalizer(IndexAnalyzers indexAnalyzers, String name) {
-            this.indexAnalyzers = indexAnalyzers;
-            this.normalizerName = name;
-            return builder;
-        }
-
         @Override
         public WildcardFieldMapper build(BuilderContext context) {
             setupFieldType(context);
-            if (normalizerName != null) {
-                NamedAnalyzer normalizer = indexAnalyzers.getNormalizer(normalizerName);
-                if (normalizer == null) {
-                    throw new MapperParsingException("normalizer [" + normalizerName + "] not found for field [" + name + "]");
-                }
-                fieldType().setNormalizer(normalizer);
-            }
             return new WildcardFieldMapper(
                 name, fieldType, defaultFieldType, ignoreAbove,
                 context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
@@ -209,11 +191,6 @@ public class WildcardFieldMapper extends FieldMapper {
                 if (propName.equals("ignore_above")) {
                     builder.ignoreAbove(XContentMapValues.nodeIntegerValue(propNode, -1));
                     iterator.remove();
-                } else if (propName.equals("normalizer")) {
-                    if (propNode != null) {
-                        builder.normalizer(parserContext.getIndexAnalyzers(), propNode.toString());
-                    }
-                    iterator.remove();
                 }
             }
 
@@ -225,8 +202,6 @@ public class WildcardFieldMapper extends FieldMapper {
 
     public static final class WildcardFieldType extends MappedFieldType {
 
-        private NamedAnalyzer normalizer = null;
-
         public WildcardFieldType() {
             setIndexAnalyzer(Lucene.KEYWORD_ANALYZER);
             setSearchAnalyzer(Lucene.KEYWORD_ANALYZER);
@@ -234,7 +209,6 @@ public class WildcardFieldMapper extends FieldMapper {
 
         protected WildcardFieldType(WildcardFieldType ref) {
             super(ref);
-            this.normalizer = ref.normalizer;
         }
 
         public WildcardFieldType clone() {
@@ -242,39 +216,6 @@ public class WildcardFieldMapper extends FieldMapper {
             return result;
         }
 
-
-        @Override
-        public boolean equals(Object o) {
-            if (super.equals(o) == false) {
-                return false;
-            }
-            WildcardFieldType other = (WildcardFieldType) o;
-            return Objects.equals(normalizer, other.normalizer);
-        }
-
-        @Override
-        public int hashCode() {
-            return 31 * super.hashCode() + Objects.hash(normalizer);
-        }
-
-        private NamedAnalyzer normalizer() {
-            return normalizer;
-        }
-
-        public void setNormalizer(NamedAnalyzer normalizer) {
-            checkIfFrozen();
-            this.normalizer = normalizer;
-        }
-
-        @Override
-        public void checkCompatibility(MappedFieldType otherFT, List<String> conflicts) {
-            super.checkCompatibility(otherFT, conflicts);
-            WildcardFieldType other = (WildcardFieldType) otherFT;
-            if (Objects.equals(normalizer, other.normalizer) == false) {
-                conflicts.add("mapper [" + name() + "] has different [normalizer]");
-            }
-        }
-
         // Holds parsed information about the wildcard pattern
         static class PatternStructure {
             boolean openStart, openEnd, hasSymbols;
@@ -388,9 +329,6 @@ public class WildcardFieldMapper extends FieldMapper {
 
         @Override
        public Query wildcardQuery(String wildcardPattern, RewriteMethod method, QueryShardContext context) {
-            if (normalizer != null) {
-                wildcardPattern = StringFieldType.normalizeWildcardPattern(name(), wildcardPattern, normalizer);
-            }
             PatternStructure patternStructure = new PatternStructure(wildcardPattern);
             ArrayList<String> tokens = new ArrayList<>();
 
@@ -538,29 +476,6 @@ public class WildcardFieldMapper extends FieldMapper {
             return CoreValuesSourceType.BYTES;
         }
 
-        String normalize(String value) throws IOException {
-            if (normalizer != null) {
-                try (TokenStream ts = normalizer.tokenStream(name(), value)) {
-                    final CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
-                    ts.reset();
-                    if (ts.incrementToken() == false) {
-                        throw new IllegalStateException("The normalization token stream is "
-                            + "expected to produce exactly 1 token, but got 0 for analyzer "
-                            + normalizer + " and input \"" + value + "\"");
-                    }
-                    final String newValue = termAtt.toString();
-                    if (ts.incrementToken()) {
-                        throw new IllegalStateException("The normalization token stream is "
-                            + "expected to produce exactly 1 token, but got 2+ for analyzer "
-                            + normalizer + " and input \"" + value + "\"");
-                    }
-                    ts.end();
-                    return newValue;
-                }
-            }
-            return value;
-        }
-
     }
 
     static class WildcardBytesBinaryDVIndexFieldData extends BytesBinaryDVIndexFieldData{
@@ -614,11 +529,6 @@ public class WildcardFieldMapper extends FieldMapper {
         if (includeDefaults || ignoreAbove != Defaults.IGNORE_ABOVE) {
             builder.field("ignore_above", ignoreAbove);
         }
-        if (fieldType().normalizer() != null) {
-            builder.field("normalizer", fieldType().normalizer().name());
-        } else if (includeDefaults) {
-            builder.nullField("normalizer");
-        }
     }
 
     @Override
@@ -646,7 +556,6 @@ public class WildcardFieldMapper extends FieldMapper {
         if (value == null || value.length() > ignoreAbove) {
             return;
         }
-        value = fieldType().normalize(value);
         String ngramValue = TOKEN_START_OR_END_CHAR + value + TOKEN_START_OR_END_CHAR + TOKEN_START_OR_END_CHAR;
         Field ngramField = new Field(fieldType().name(), ngramValue, ngramFieldType);
         fields.add(ngramField);
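For context, the deleted WildcardFieldType#normalize(String) helper leaned on the usual Lucene convention that a normalizer emits exactly one token per input value. The following is a minimal, self-contained sketch of that single-token contract, not part of this change; KeywordAnalyzer, the class name, and the field name are illustrative assumptions.

// Standalone sketch (assumed names) of the single-token normalization contract
// mirrored by the removed helper: run the value through an analyzer and insist
// on exactly one output token.
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

import java.io.IOException;

public class NormalizeContractSketch {

    static String normalize(Analyzer normalizer, String field, String value) throws IOException {
        try (TokenStream ts = normalizer.tokenStream(field, value)) {
            CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
            ts.reset();
            if (ts.incrementToken() == false) {
                throw new IllegalStateException("expected exactly 1 token but got 0 for input [" + value + "]");
            }
            String normalized = termAtt.toString();
            if (ts.incrementToken()) {
                throw new IllegalStateException("expected exactly 1 token but got 2+ for input [" + value + "]");
            }
            ts.end();
            return normalized;
        }
    }

    public static void main(String[] args) throws IOException {
        // KeywordAnalyzer keeps the whole input as a single token, so this prints the value unchanged.
        System.out.println(normalize(new KeywordAnalyzer(), "my_wildcard_field", "Some Value"));
    }
}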