@@ -35,6 +35,7 @@ import org.elasticsearch.index.mapper.LuceneDocument;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.MappingLookup;
+import org.elasticsearch.index.mapper.NestedPathFieldMapper;
 import org.elasticsearch.index.mapper.ParsedDocument;
 import org.elasticsearch.index.mapper.SourceFieldMapper;
 import org.elasticsearch.index.mapper.SourceToParse;
@@ -185,6 +186,11 @@ public class TermVectorsService {
         if (fieldType.isIndexed() == false) {
             return false;
         }
+        // and must not be the nested path field
+        if (fieldType.name().equals(NestedPathFieldMapper.NAME)) {
+            return false;
+        }
+
         return true;
     }
 
@@ -291,7 +297,13 @@ public class TermVectorsService {
         MemoryIndex index = new MemoryIndex(withOffsets);
         for (Map.Entry<String, Collection<Object>> entry : values.entrySet()) {
             String field = entry.getKey();
-            Analyzer analyzer = getAnalyzerAtField(indexShard, field, perFieldAnalyzer);
+            final Analyzer analyzer;
+            try {
+                analyzer = getAnalyzerAtField(indexShard, field, perFieldAnalyzer);
+            } catch (IllegalArgumentException e) {
+                // failed to get the analyzer for the given field, it could be a metadata field
+                continue;
+            }
             if (entry.getValue() instanceof List) {
                 for (Object text : entry.getValue()) {
                     index.addField(field, text.toString(), analyzer);
@@ -310,25 +322,26 @@ public class TermVectorsService {
         MappingLookup mappingLookup = indexShard.mapperService().mappingLookup();
         ParsedDocument parsedDocument = documentParser.parseDocument(source, mappingLookup);
         // select the right fields and generate term vectors
-        LuceneDocument doc = parsedDocument.rootDoc();
-        Set<String> seenFields = new HashSet<>();
-        Collection<DocumentField> documentFields = new HashSet<>();
-        for (IndexableField field : doc.getFields()) {
-            MappedFieldType fieldType = indexShard.mapperService().fieldType(field.name());
-            if (isValidField(fieldType) == false) {
-                continue;
-            }
-            if (request.selectedFields() != null && request.selectedFields().contains(field.name()) == false) {
-                continue;
-            }
-            if (seenFields.contains(field.name())) {
-                continue;
-            } else {
-                seenFields.add(field.name());
+        final Set<String> seenFields = new HashSet<>();
+        final Collection<DocumentField> documentFields = new HashSet<>();
+        for (LuceneDocument doc : parsedDocument.docs()) {
+            for (IndexableField field : doc.getFields()) {
+                MappedFieldType fieldType = indexShard.mapperService().fieldType(field.name());
+                if (isValidField(fieldType) == false) {
+                    continue;
+                }
+                if (request.selectedFields() != null && request.selectedFields().contains(field.name()) == false) {
+                    continue;
+                }
+                if (seenFields.contains(field.name())) {
+                    continue;
+                } else {
+                    seenFields.add(field.name());
+                }
+                @SuppressWarnings("unchecked")
+                List<Object> values = (List) getValues(doc.getFields(field.name()));
+                documentFields.add(new DocumentField(field.name(), values));
             }
-            @SuppressWarnings("unchecked")
-            List<Object> values = (List) getValues(doc.getFields(field.name()));
-            documentFields.add(new DocumentField(field.name(), values));
         }
         return generateTermVectors(
             indexShard,