@@ -64,8 +64,6 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -196,62 +194,59 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
     public boolean updateMapping(final IndexMetaData currentIndexMetaData, final IndexMetaData newIndexMetaData) throws IOException {
         assert newIndexMetaData.getIndex().equals(index()) : "index mismatch: expected " + index()
             + " but was " + newIndexMetaData.getIndex();
-        // go over and add the relevant mappings (or update them)
-        Set<String> existingMappers = new HashSet<>();
-        if (mapper != null) {
-            existingMappers.add(mapper.type());
-        }
-        final Map<String, DocumentMapper> updatedEntries;
+
+        final DocumentMapper updatedMapper;
         try {
             // only update entries if needed
-            updatedEntries = internalMerge(newIndexMetaData, MergeReason.MAPPING_RECOVERY, true);
+            updatedMapper = internalMerge(newIndexMetaData, MergeReason.MAPPING_RECOVERY, true);
         } catch (Exception e) {
             logger.warn(() -> new ParameterizedMessage("[{}] failed to apply mappings", index()), e);
             throw e;
         }
 
+        if (updatedMapper == null) {
+            return false;
+        }
+
         boolean requireRefresh = false;
 
-        assertMappingVersion(currentIndexMetaData, newIndexMetaData, updatedEntries);
+        assertMappingVersion(currentIndexMetaData, newIndexMetaData, updatedMapper);
 
-        for (DocumentMapper documentMapper : updatedEntries.values()) {
-            String mappingType = documentMapper.type();
-            MappingMetaData mappingMetaData = newIndexMetaData.mapping();
-            assert mappingType.equals(mappingMetaData.type());
-            CompressedXContent incomingMappingSource = mappingMetaData.source();
+        MappingMetaData mappingMetaData = newIndexMetaData.mapping();
+        CompressedXContent incomingMappingSource = mappingMetaData.source();
 
-            String op = existingMappers.contains(mappingType) ? "updated" : "added";
-            if (logger.isDebugEnabled() && incomingMappingSource.compressed().length < 512) {
-                logger.debug("[{}] {} mapping [{}], source [{}]", index(), op, mappingType, incomingMappingSource.string());
-            } else if (logger.isTraceEnabled()) {
-                logger.trace("[{}] {} mapping [{}], source [{}]", index(), op, mappingType, incomingMappingSource.string());
-            } else {
-                logger.debug("[{}] {} mapping [{}] (source suppressed due to length, use TRACE level if needed)",
-                    index(), op, mappingType);
-            }
+        String op = mapper != null ? "updated" : "added";
+        if (logger.isDebugEnabled() && incomingMappingSource.compressed().length < 512) {
+            logger.debug("[{}] {} mapping, source [{}]", index(), op, incomingMappingSource.string());
+        } else if (logger.isTraceEnabled()) {
+            logger.trace("[{}] {} mapping, source [{}]", index(), op, incomingMappingSource.string());
+        } else {
+            logger.debug("[{}] {} mapping (source suppressed due to length, use TRACE level if needed)",
+                index(), op);
+        }
 
-            // refresh mapping can happen when the parsing/merging of the mapping from the metadata doesn't result in the same
-            // mapping, in this case, we send to the master to refresh its own version of the mappings (to conform with the
-            // merge version of it, which it does when refreshing the mappings), and warn log it.
-            if (documentMapper().mappingSource().equals(incomingMappingSource) == false) {
-                logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}",
-                    index(), mappingType, incomingMappingSource, documentMapper().mappingSource());
+        // refresh mapping can happen when the parsing/merging of the mapping from the metadata doesn't result in the same
+        // mapping, in this case, we send to the master to refresh its own version of the mappings (to conform with the
+        // merge version of it, which it does when refreshing the mappings), and warn log it.
+        if (documentMapper().mappingSource().equals(incomingMappingSource) == false) {
+            logger.debug("[{}] parsed mapping, and got different sources\noriginal:\n{}\nparsed:\n{}",
+                index(), incomingMappingSource, documentMapper().mappingSource());
 
-                requireRefresh = true;
-            }
+            requireRefresh = true;
         }
 
+
         return requireRefresh;
     }
 
     private void assertMappingVersion(
             final IndexMetaData currentIndexMetaData,
             final IndexMetaData newIndexMetaData,
-            final Map<String, DocumentMapper> updatedEntries) {
+            final DocumentMapper updatedMapper) {
         if (Assertions.ENABLED && currentIndexMetaData != null) {
             if (currentIndexMetaData.getMappingVersion() == newIndexMetaData.getMappingVersion()) {
                 // if the mapping version is unchanged, then there should not be any updates and all mappings should be the same
-                assert updatedEntries.isEmpty() : updatedEntries;
+                assert updatedMapper == mapper;
 
                 MappingMetaData mapping = newIndexMetaData.mapping();
                 if (mapping != null) {
@@ -269,34 +264,21 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
                 assert currentMappingVersion < newMappingVersion :
                         "expected current mapping version [" + currentMappingVersion + "] "
                                 + "to be less than new mapping version [" + newMappingVersion + "]";
-                assert updatedEntries.isEmpty() == false;
-                for (final DocumentMapper documentMapper : updatedEntries.values()) {
-                    final MappingMetaData currentMapping = currentIndexMetaData.mapping();
-                    assert currentMapping == null || documentMapper.type().equals(currentMapping.type());
-                    if (currentMapping != null) {
-                        final CompressedXContent currentSource = currentMapping.source();
-                        final CompressedXContent newSource = documentMapper.mappingSource();
-                        assert currentSource.equals(newSource) == false :
-                            "expected current mapping [" + currentSource + "] for type [" + documentMapper.type() + "] " +
-                            "to be different than new mapping";
-                    }
+                assert updatedMapper != null;
+                final MappingMetaData currentMapping = currentIndexMetaData.mapping();
+                if (currentMapping != null) {
+                    final CompressedXContent currentSource = currentMapping.source();
+                    final CompressedXContent newSource = updatedMapper.mappingSource();
+                    assert currentSource.equals(newSource) == false :
+                        "expected current mapping [" + currentSource + "] to be different than new mapping";
                 }
             }
         }
     }
 
-    public void merge(Map<String, Map<String, Object>> mappings, MergeReason reason) {
-        Map<String, CompressedXContent> mappingSourcesCompressed = new LinkedHashMap<>(mappings.size());
-        for (Map.Entry<String, Map<String, Object>> entry : mappings.entrySet()) {
-            try {
-                mappingSourcesCompressed.put(entry.getKey(), new CompressedXContent(Strings.toString(
-                    XContentFactory.jsonBuilder().map(entry.getValue()))));
-            } catch (Exception e) {
-                throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, entry.getKey(), e.getMessage());
-            }
-        }
-
-        internalMerge(mappingSourcesCompressed, reason);
+    public void merge(String type, Map<String, Object> mappings, MergeReason reason) throws IOException {
+        CompressedXContent content = new CompressedXContent(Strings.toString(XContentFactory.jsonBuilder().map(mappings)));
+        internalMerge(type, content, reason);
     }
 
     public void merge(IndexMetaData indexMetaData, MergeReason reason) {
@@ -304,42 +286,34 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
     }
 
     public DocumentMapper merge(String type, CompressedXContent mappingSource, MergeReason reason) {
-        // TODO change internalMerge() to return a single DocumentMapper rather than a Map
-        return internalMerge(Collections.singletonMap(type, mappingSource), reason).values().iterator().next();
+        return internalMerge(type, mappingSource, reason);
     }
 
-    private synchronized Map<String, DocumentMapper> internalMerge(IndexMetaData indexMetaData,
+    private synchronized DocumentMapper internalMerge(IndexMetaData indexMetaData,
                                                                    MergeReason reason, boolean onlyUpdateIfNeeded) {
         assert reason != MergeReason.MAPPING_UPDATE_PREFLIGHT;
-        Map<String, CompressedXContent> map = new LinkedHashMap<>();
         MappingMetaData mappingMetaData = indexMetaData.mapping();
         if (mappingMetaData != null) {
             if (onlyUpdateIfNeeded) {
                 DocumentMapper existingMapper = documentMapper();
                 if (existingMapper == null || mappingMetaData.source().equals(existingMapper.mappingSource()) == false) {
-                    map.put(mappingMetaData.type(), mappingMetaData.source());
+                    return internalMerge(mappingMetaData.type(), mappingMetaData.source(), reason);
                 }
             } else {
-                map.put(mappingMetaData.type(), mappingMetaData.source());
+                return internalMerge(mappingMetaData.type(), mappingMetaData.source(), reason);
             }
         }
-        return internalMerge(map, reason);
+        return null;
     }
 
-    private synchronized Map<String, DocumentMapper> internalMerge(Map<String, CompressedXContent> mappings, MergeReason reason) {
+    private synchronized DocumentMapper internalMerge(String type, CompressedXContent mappings, MergeReason reason) {
 
-        DocumentMapper documentMapper = null;
-        for (Map.Entry<String, CompressedXContent> entry : mappings.entrySet()) {
-            String type = entry.getKey();
-            if (documentMapper != null) {
-                throw new IllegalArgumentException("Cannot put multiple mappings: " + mappings.keySet());
-            }
+        DocumentMapper documentMapper;
 
-            try {
-                documentMapper = documentParser.parse(type, entry.getValue());
-            } catch (Exception e) {
-                throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, entry.getKey(), e.getMessage());
-            }
+        try {
+            documentMapper = documentParser.parse(type, mappings);
+        } catch (Exception e) {
+            throw new MapperParsingException("Failed to parse mapping: {}", e, e.getMessage());
         }
 
         return internalMerge(documentMapper, reason);
@@ -368,71 +342,65 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
         }
     }
 
-    private synchronized Map<String, DocumentMapper> internalMerge(DocumentMapper mapper,
-                                                                   MergeReason reason) {
+    private synchronized DocumentMapper internalMerge(DocumentMapper mapper, MergeReason reason) {
         boolean hasNested = this.hasNested;
         Map<String, ObjectMapper> fullPathObjectMappers = this.fullPathObjectMappers;
         FieldTypeLookup fieldTypes = this.fieldTypes;
 
-        Map<String, DocumentMapper> results = new LinkedHashMap<>(2);
-
-        DocumentMapper newMapper = null;
-        if (mapper != null) {
-            // check naming
-            validateTypeName(mapper.type());
-
-            // compute the merged DocumentMapper
-            DocumentMapper oldMapper = this.mapper;
-            if (oldMapper != null) {
-                newMapper = oldMapper.merge(mapper.mapping());
-            } else {
-                newMapper = mapper;
+        assert mapper != null;
+        // check naming
+        validateTypeName(mapper.type());
+
+        // compute the merged DocumentMapper
+        DocumentMapper oldMapper = this.mapper;
+        DocumentMapper newMapper;
+        if (oldMapper != null) {
+            newMapper = oldMapper.merge(mapper.mapping());
+        } else {
+            newMapper = mapper;
+        }
+
+        // check basic sanity of the new mapping
+        List<ObjectMapper> objectMappers = new ArrayList<>();
+        List<FieldMapper> fieldMappers = new ArrayList<>();
+        List<FieldAliasMapper> fieldAliasMappers = new ArrayList<>();
+        MetadataFieldMapper[] metadataMappers = newMapper.mapping().metadataMappers;
+        Collections.addAll(fieldMappers, metadataMappers);
+        MapperUtils.collect(newMapper.mapping().root(), objectMappers, fieldMappers, fieldAliasMappers);
+
+        MapperMergeValidator.validateNewMappers(objectMappers, fieldMappers, fieldAliasMappers, fieldTypes);
+        checkPartitionedIndexConstraints(newMapper);
+
+        // update lookup data-structures
+        fieldTypes = fieldTypes.copyAndAddAll(newMapper.type(), fieldMappers, fieldAliasMappers);
+
+        for (ObjectMapper objectMapper : objectMappers) {
+            if (fullPathObjectMappers == this.fullPathObjectMappers) {
+                // first time through the loops
+                fullPathObjectMappers = new HashMap<>(this.fullPathObjectMappers);
             }
+            fullPathObjectMappers.put(objectMapper.fullPath(), objectMapper);
 
-            // check basic sanity of the new mapping
-            List<ObjectMapper> objectMappers = new ArrayList<>();
-            List<FieldMapper> fieldMappers = new ArrayList<>();
-            List<FieldAliasMapper> fieldAliasMappers = new ArrayList<>();
-            MetadataFieldMapper[] metadataMappers = newMapper.mapping().metadataMappers;
-            Collections.addAll(fieldMappers, metadataMappers);
-            MapperUtils.collect(newMapper.mapping().root(), objectMappers, fieldMappers, fieldAliasMappers);
-
-            MapperMergeValidator.validateNewMappers(objectMappers, fieldMappers, fieldAliasMappers, fieldTypes);
-            checkPartitionedIndexConstraints(newMapper);
-
-            // update lookup data-structures
-            fieldTypes = fieldTypes.copyAndAddAll(newMapper.type(), fieldMappers, fieldAliasMappers);
-
-            for (ObjectMapper objectMapper : objectMappers) {
-                if (fullPathObjectMappers == this.fullPathObjectMappers) {
-                    // first time through the loops
-                    fullPathObjectMappers = new HashMap<>(this.fullPathObjectMappers);
-                }
-                fullPathObjectMappers.put(objectMapper.fullPath(), objectMapper);
-
-                if (objectMapper.nested().isNested()) {
-                    hasNested = true;
-                }
+            if (objectMapper.nested().isNested()) {
+                hasNested = true;
             }
         }
 
-            MapperMergeValidator.validateFieldReferences(fieldMappers, fieldAliasMappers,
-                fullPathObjectMappers, fieldTypes);
-
-            ContextMapping.validateContextPaths(indexSettings.getIndexVersionCreated(), fieldMappers, fieldTypes::get);
-
-            if (reason == MergeReason.MAPPING_UPDATE || reason == MergeReason.MAPPING_UPDATE_PREFLIGHT) {
-                // this check will only be performed on the master node when there is
-                // a call to the update mapping API. For all other cases like
-                // the master node restoring mappings from disk or data nodes
-                // deserializing cluster state that was sent by the master node,
-                // this check will be skipped.
-                // Also, don't take metadata mappers into account for the field limit check
-                checkTotalFieldsLimit(objectMappers.size() + fieldMappers.size() - metadataMappers.length
-                    + fieldAliasMappers.size() );
-                checkFieldNameSoftLimit(objectMappers, fieldMappers, fieldAliasMappers);
-            }
+        MapperMergeValidator.validateFieldReferences(fieldMappers, fieldAliasMappers,
+            fullPathObjectMappers, fieldTypes);
+
+        ContextMapping.validateContextPaths(indexSettings.getIndexVersionCreated(), fieldMappers, fieldTypes::get);
 
-        results.put(newMapper.type(), newMapper);
+        if (reason == MergeReason.MAPPING_UPDATE || reason == MergeReason.MAPPING_UPDATE_PREFLIGHT) {
+            // this check will only be performed on the master node when there is
+            // a call to the update mapping API. For all other cases like
+            // the master node restoring mappings from disk or data nodes
+            // deserializing cluster state that was sent by the master node,
+            // this check will be skipped.
+            // Also, don't take metadata mappers into account for the field limit check
+            checkTotalFieldsLimit(objectMappers.size() + fieldMappers.size() - metadataMappers.length
+                + fieldAliasMappers.size() );
+            checkFieldNameSoftLimit(objectMappers, fieldMappers, fieldAliasMappers);
        }
 
         if (reason == MergeReason.MAPPING_UPDATE || reason == MergeReason.MAPPING_UPDATE_PREFLIGHT) {
@@ -449,17 +417,12 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
         if (newMapper != null) {
             DocumentMapper updatedDocumentMapper = newMapper.updateFieldType(fieldTypes.fullNameToFieldType);
             if (updatedDocumentMapper != newMapper) {
-                // update both mappers and result
                 newMapper = updatedDocumentMapper;
-                results.put(updatedDocumentMapper.type(), updatedDocumentMapper);
             }
         }
 
-        // make structures immutable
-        results = Collections.unmodifiableMap(results);
-
         if (reason == MergeReason.MAPPING_UPDATE_PREFLIGHT) {
-            return results;
+            return newMapper;
         }
 
         // only need to immutably rewrap these if the previous reference was changed.
@@ -477,9 +440,9 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
         this.fullPathObjectMappers = fullPathObjectMappers;
 
         assert assertMappersShareSameFieldType();
-        assert results.values().stream().allMatch(this::assertSerialization);
+        assert newMapper == null || assertSerialization(newMapper);
 
-        return results;
+        return newMapper;
     }
 
     private boolean assertMappersShareSameFieldType() {