@@ -22,17 +22,16 @@ package org.elasticsearch.cluster.metadata;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest;
-import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
-import org.elasticsearch.cluster.ClusterService;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.ClusterStateUpdateTask;
+import org.elasticsearch.cluster.*;
 import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Priority;
-import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.NodeServicesProvider;
@@ -44,6 +43,7 @@ import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.InvalidTypeNameException;
 import org.elasticsearch.percolator.PercolatorService;

+import java.io.IOException;
 import java.util.*;
 /**
  * Service responsible for submitting mapping changes
@@ -53,13 +53,11 @@ public class MetaDataMappingService extends AbstractComponent {
     private final ClusterService clusterService;
     private final IndicesService indicesService;

-    // the mutex protect all the refreshOrUpdate variables!
-    private final Object refreshOrUpdateMutex = new Object();
-    private final List<MappingTask> refreshOrUpdateQueue = new ArrayList<>();
-    private long refreshOrUpdateInsertOrder;
-    private long refreshOrUpdateProcessedInsertOrder;
+    final ClusterStateTaskExecutor<RefreshTask> refreshExecutor = new RefreshTaskExecutor();
+    final ClusterStateTaskExecutor<PutMappingClusterStateUpdateRequest> putMappingExecutor = new PutMappingExecutor();
     private final NodeServicesProvider nodeServicesProvider;

+
     @Inject
     public MetaDataMappingService(Settings settings, ClusterService clusterService, IndicesService indicesService, NodeServicesProvider nodeServicesProvider) {
         super(settings);
@@ -68,37 +66,23 @@ public class MetaDataMappingService extends AbstractComponent {
         this.nodeServicesProvider = nodeServicesProvider;
     }

-    static class MappingTask {
+    static class RefreshTask {
         final String index;
         final String indexUUID;
-
-        MappingTask(String index, final String indexUUID) {
-            this.index = index;
-            this.indexUUID = indexUUID;
-        }
-    }
-
-    static class RefreshTask extends MappingTask {
         final String[] types;

         RefreshTask(String index, final String indexUUID, String[] types) {
-            super(index, indexUUID);
+            this.index = index;
+            this.indexUUID = indexUUID;
             this.types = types;
         }
     }

-    static class UpdateTask extends MappingTask {
-        final String type;
-        final CompressedXContent mappingSource;
-        final String nodeId; // null fr unknown
-        final ActionListener<ClusterStateUpdateResponse> listener;
-
-        UpdateTask(String index, String indexUUID, String type, CompressedXContent mappingSource, String nodeId, ActionListener<ClusterStateUpdateResponse> listener) {
-            super(index, indexUUID);
-            this.type = type;
-            this.mappingSource = mappingSource;
-            this.nodeId = nodeId;
-            this.listener = listener;
+    class RefreshTaskExecutor implements ClusterStateTaskExecutor<RefreshTask> {
+        @Override
+        public BatchResult<RefreshTask> execute(ClusterState currentState, List<RefreshTask> tasks) throws Exception {
+            ClusterState newClusterState = executeRefresh(currentState, tasks);
+            return BatchResult.<RefreshTask>builder().successes(tasks).build(newClusterState);
         }
     }

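The RefreshTaskExecutor above is the simplest use of the new ClusterStateTaskExecutor contract: the cluster service hands an executor the whole list of pending tasks that were submitted against it, and the executor returns one new cluster state plus a per-task result. As a reading aid, here is a minimal sketch of that contract in isolation; CounterTask and its counting logic are hypothetical illustrations, and only the ClusterState and BatchResult calls mirror API actually used in this patch:

    // Hypothetical task type: each submission asks to bump a counter.
    class CounterTask {
        final int delta;

        CounterTask(int delta) {
            this.delta = delta;
        }
    }

    // Hypothetical executor: receives however many CounterTasks were queued
    // together, so N submissions cost a single cluster state publish.
    class CounterExecutor implements ClusterStateTaskExecutor<CounterTask> {
        @Override
        public BatchResult<CounterTask> execute(ClusterState currentState, List<CounterTask> tasks) throws Exception {
            int total = 0;
            for (CounterTask task : tasks) {
                total += task.delta; // fold the whole batch into one computation
            }
            // a real executor would derive the next state from the folded result;
            // ClusterState.builder(currentState) is the same builder used in this diff
            ClusterState newState = ClusterState.builder(currentState).build();
            // mark every task in the batch successful against the single new state
            return BatchResult.<CounterTask>builder().successes(tasks).build(newState);
        }
    }
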
@@ -107,50 +91,25 @@ public class MetaDataMappingService extends AbstractComponent {
      * as possible so we won't create the same index all the time for example for the updates on the same mapping
      * and generate a single cluster change event out of all of those.
      */
-    Tuple<ClusterState, List<MappingTask>> executeRefreshOrUpdate(final ClusterState currentState, final long insertionOrder) throws Exception {
-        final List<MappingTask> allTasks = new ArrayList<>();
-
-        synchronized (refreshOrUpdateMutex) {
-            if (refreshOrUpdateQueue.isEmpty()) {
-                return Tuple.tuple(currentState, allTasks);
-            }
-
-            // we already processed this task in a bulk manner in a previous cluster event, simply ignore
-            // it so we will let other tasks get in and processed ones, we will handle the queued ones
-            // later on in a subsequent cluster state event
-            if (insertionOrder < refreshOrUpdateProcessedInsertOrder) {
-                return Tuple.tuple(currentState, allTasks);
-            }
-
-            allTasks.addAll(refreshOrUpdateQueue);
-            refreshOrUpdateQueue.clear();
-
-            refreshOrUpdateProcessedInsertOrder = refreshOrUpdateInsertOrder;
-        }
-
+    ClusterState executeRefresh(final ClusterState currentState, final List<RefreshTask> allTasks) throws Exception {
         if (allTasks.isEmpty()) {
-            return Tuple.tuple(currentState, allTasks);
+            return currentState;
         }

         // break down to tasks per index, so we can optimize the on demand index service creation
         // to only happen for the duration of a single index processing of its respective events
-        Map<String, List<MappingTask>> tasksPerIndex = new HashMap<>();
-        for (MappingTask task : allTasks) {
+        Map<String, List<RefreshTask>> tasksPerIndex = new HashMap<>();
+        for (RefreshTask task : allTasks) {
             if (task.index == null) {
                 logger.debug("ignoring a mapping task of type [{}] with a null index.", task);
             }
-            List<MappingTask> indexTasks = tasksPerIndex.get(task.index);
-            if (indexTasks == null) {
-                indexTasks = new ArrayList<>();
-                tasksPerIndex.put(task.index, indexTasks);
-            }
-            indexTasks.add(task);
+            tasksPerIndex.computeIfAbsent(task.index, k -> new ArrayList<>()).add(task);
         }

         boolean dirty = false;
         MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());

-        for (Map.Entry<String, List<MappingTask>> entry : tasksPerIndex.entrySet()) {
+        for (Map.Entry<String, List<RefreshTask>> entry : tasksPerIndex.entrySet()) {
             String index = entry.getKey();
             IndexMetaData indexMetaData = mdBuilder.get(index);
             if (indexMetaData == null) {
@@ -160,9 +119,9 @@ public class MetaDataMappingService extends AbstractComponent {
             }
             // the tasks lists to iterate over, filled with the list of mapping tasks, trying to keep
             // the latest (based on order) update mapping one per node
-            List<MappingTask> allIndexTasks = entry.getValue();
-            List<MappingTask> tasks = new ArrayList<>();
-            for (MappingTask task : allIndexTasks) {
+            List<RefreshTask> allIndexTasks = entry.getValue();
+            List<RefreshTask> tasks = new ArrayList<>();
+            for (RefreshTask task : allIndexTasks) {
                 if (!indexMetaData.isSameUUID(task.indexUUID)) {
                     logger.debug("[{}] ignoring task [{}] - index meta data doesn't match task uuid", index, task);
                     continue;
@@ -178,12 +137,8 @@
                 indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
                 removeIndex = true;
                 Set<String> typesToIntroduce = new HashSet<>();
-                for (MappingTask task : tasks) {
-                    if (task instanceof UpdateTask) {
-                        typesToIntroduce.add(((UpdateTask) task).type);
-                    } else if (task instanceof RefreshTask) {
-                        Collections.addAll(typesToIntroduce, ((RefreshTask) task).types);
-                    }
+                for (RefreshTask task : tasks) {
+                    Collections.addAll(typesToIntroduce, task.types);
                 }
                 for (String type : typesToIntroduce) {
                     // only add the current relevant mapping (if exists)
@@ -209,80 +164,42 @@
         }

         if (!dirty) {
-            return Tuple.tuple(currentState, allTasks);
+            return currentState;
         }
-        return Tuple.tuple(ClusterState.builder(currentState).metaData(mdBuilder).build(), allTasks);
+        return ClusterState.builder(currentState).metaData(mdBuilder).build();
     }

-    private boolean processIndexMappingTasks(List<MappingTask> tasks, IndexService indexService, IndexMetaData.Builder builder) {
+    private boolean processIndexMappingTasks(List<RefreshTask> tasks, IndexService indexService, IndexMetaData.Builder builder) {
         boolean dirty = false;
         String index = indexService.index().name();
         // keep track of what we already refreshed, no need to refresh it again...
         Set<String> processedRefreshes = new HashSet<>();
-        for (MappingTask task : tasks) {
-            if (task instanceof RefreshTask) {
-                RefreshTask refreshTask = (RefreshTask) task;
-                try {
-                    List<String> updatedTypes = new ArrayList<>();
-                    for (String type : refreshTask.types) {
-                        if (processedRefreshes.contains(type)) {
-                            continue;
-                        }
-                        DocumentMapper mapper = indexService.mapperService().documentMapper(type);
-                        if (mapper == null) {
-                            continue;
-                        }
-                        if (!mapper.mappingSource().equals(builder.mapping(type).source())) {
-                            updatedTypes.add(type);
-                            builder.putMapping(new MappingMetaData(mapper));
-                        }
-                        processedRefreshes.add(type);
-                    }
-
-                    if (updatedTypes.isEmpty()) {
-                        continue;
-                    }
-
-                    logger.warn("[{}] re-syncing mappings with cluster state for types [{}]", index, updatedTypes);
-                    dirty = true;
-                } catch (Throwable t) {
-                    logger.warn("[{}] failed to refresh-mapping in cluster state, types [{}]", index, refreshTask.types);
-                }
-            } else if (task instanceof UpdateTask) {
-                UpdateTask updateTask = (UpdateTask) task;
-                try {
-                    String type = updateTask.type;
-                    CompressedXContent mappingSource = updateTask.mappingSource;
-
-                    MappingMetaData mappingMetaData = builder.mapping(type);
-                    if (mappingMetaData != null && mappingMetaData.source().equals(mappingSource)) {
-                        logger.debug("[{}] update_mapping [{}] ignoring mapping update task as its source is equal to ours", index, updateTask.type);
+        for (RefreshTask refreshTask : tasks) {
+            try {
+                List<String> updatedTypes = new ArrayList<>();
+                for (String type : refreshTask.types) {
+                    if (processedRefreshes.contains(type)) {
                         continue;
                     }
-
-                    DocumentMapper updatedMapper = indexService.mapperService().merge(type, mappingSource, false, true);
-                    processedRefreshes.add(type);
-
-                    // if we end up with the same mapping as the original once, ignore
-                    if (mappingMetaData != null && mappingMetaData.source().equals(updatedMapper.mappingSource())) {
-                        logger.debug("[{}] update_mapping [{}] ignoring mapping update task as it results in the same source as what we have", index, updateTask.type);
+                    DocumentMapper mapper = indexService.mapperService().documentMapper(type);
+                    if (mapper == null) {
                         continue;
                     }
-
-                    // build the updated mapping source
-                    if (logger.isDebugEnabled()) {
-                        logger.debug("[{}] update_mapping [{}] (dynamic) with source [{}]", index, type, updatedMapper.mappingSource());
-                    } else if (logger.isInfoEnabled()) {
-                        logger.info("[{}] update_mapping [{}] (dynamic)", index, type);
+                    if (!mapper.mappingSource().equals(builder.mapping(type).source())) {
+                        updatedTypes.add(type);
+                        builder.putMapping(new MappingMetaData(mapper));
                     }
+                    processedRefreshes.add(type);
+                }

-                    builder.putMapping(new MappingMetaData(updatedMapper));
-                    dirty = true;
-                } catch (Throwable t) {
-                    logger.warn("[{}] failed to update-mapping in cluster state, type [{}]", index, updateTask.type);
+                if (updatedTypes.isEmpty()) {
+                    continue;
                 }
-            } else {
-                logger.warn("illegal state, got wrong mapping task type [{}]", task);
+
+                logger.warn("[{}] re-syncing mappings with cluster state for types [{}]", index, updatedTypes);
+                dirty = true;
+            } catch (Throwable t) {
+                logger.warn("[{}] failed to refresh-mapping in cluster state, types [{}]", index, refreshTask.types);
             }
         }
         return dirty;
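The remaining hunks rewire the two public entry points onto those executors. Note that refreshExecutor and putMappingExecutor are created once and stored in fields; presumably tasks can only be coalesced into a single execute() call when they were submitted against the same executor instance (an inference from how this diff shares the fields, not something it states). Under that assumption, two nearby submissions like the following could be handed to executeRefresh together; uuidA and uuidB are hypothetical placeholders:

    RefreshTask task1 = new RefreshTask("index-a", uuidA, new String[]{"type1"});
    RefreshTask task2 = new RefreshTask("index-b", uuidB, new String[]{"type2"});
    // both submissions name the same executor instance, so the cluster service
    // is free to deliver them in one call: executeRefresh(currentState, [task1, task2])
    clusterService.submitStateUpdateTask("refresh-mapping [index-a]", task1,
            ClusterStateTaskConfig.build(Priority.HIGH), refreshExecutor,
            (source, t) -> logger.warn("failure during [{}]", t, source));
    clusterService.submitStateUpdateTask("refresh-mapping [index-b]", task2,
            ClusterStateTaskConfig.build(Priority.HIGH), refreshExecutor,
            (source, t) -> logger.warn("failure during [{}]", t, source));
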
@@ -292,197 +209,204 @@
      * Refreshes mappings if they are not the same between original and parsed version
      */
     public void refreshMapping(final String index, final String indexUUID, final String... types) {
-        final long insertOrder;
-        synchronized (refreshOrUpdateMutex) {
-            insertOrder = ++refreshOrUpdateInsertOrder;
-            refreshOrUpdateQueue.add(new RefreshTask(index, indexUUID, types));
-        }
-        clusterService.submitStateUpdateTask("refresh-mapping [" + index + "][" + Arrays.toString(types) + "]", Priority.HIGH, new ClusterStateUpdateTask() {
-            private volatile List<MappingTask> allTasks;
-
-            @Override
-            public void onFailure(String source, Throwable t) {
-                logger.warn("failure during [{}]", t, source);
-            }
-
-            @Override
-            public ClusterState execute(ClusterState currentState) throws Exception {
-                Tuple<ClusterState, List<MappingTask>> tuple = executeRefreshOrUpdate(currentState, insertOrder);
-                this.allTasks = tuple.v2();
-                return tuple.v1();
-            }
+        final RefreshTask refreshTask = new RefreshTask(index, indexUUID, types);
+        clusterService.submitStateUpdateTask("refresh-mapping [" + index + "][" + Arrays.toString(types) + "]",
+                refreshTask,
+                ClusterStateTaskConfig.build(Priority.HIGH),
+                refreshExecutor,
+                (source, t) -> logger.warn("failure during [{}]", t, source)
+        );
+    }

-            @Override
-            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
-                if (allTasks == null) {
-                    return;
+    class PutMappingExecutor implements ClusterStateTaskExecutor<PutMappingClusterStateUpdateRequest> {
+        @Override
+        public BatchResult<PutMappingClusterStateUpdateRequest> execute(ClusterState currentState, List<PutMappingClusterStateUpdateRequest> tasks) throws Exception {
+            List<String> indicesToClose = new ArrayList<>();
+            BatchResult.Builder<PutMappingClusterStateUpdateRequest> builder = BatchResult.builder();
+            Map<PutMappingClusterStateUpdateRequest, TaskResult> executionResults = new HashMap<>();
+            try {
+                // pre-create incoming indices
+                for (PutMappingClusterStateUpdateRequest request : tasks) {
+                    // failures here mean something is broken with our cluster state - fail all tasks by letting exceptions bubble up
+                    for (String index : request.indices()) {
+                        if (currentState.metaData().hasIndex(index)) {
+                            // if we don't have the index, we will throw exceptions later
+                            if (indicesService.hasIndex(index) == false) {
+                                final IndexMetaData indexMetaData = currentState.metaData().index(index);
+                                IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
+                                indicesToClose.add(indexMetaData.getIndex());
+                                // make sure to add custom default mapping if exists
+                                if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
+                                    indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, request.updateAllTypes());
+                                }
+                                // only add the current relevant mapping (if exists)
+                                if (indexMetaData.getMappings().containsKey(request.type())) {
+                                    indexService.mapperService().merge(request.type(), indexMetaData.getMappings().get(request.type()).source(), false, request.updateAllTypes());
+                                }
+                            }
+                        }
+                    }
                 }
-                for (Object task : allTasks) {
-                    if (task instanceof UpdateTask) {
-                        UpdateTask uTask = (UpdateTask) task;
-                        ClusterStateUpdateResponse response = new ClusterStateUpdateResponse(true);
-                        uTask.listener.onResponse(response);
+                for (PutMappingClusterStateUpdateRequest request : tasks) {
+                    try {
+                        currentState = applyRequest(currentState, request);
+                        builder.success(request);
+                    } catch (Throwable t) {
+                        builder.failure(request, t);
                     }
                 }
-            }
-        });
-    }
-
-    public void putMapping(final PutMappingClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {

-        clusterService.submitStateUpdateTask("put-mapping [" + request.type() + "]", Priority.HIGH, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) {
-
-            @Override
-            protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
-                return new ClusterStateUpdateResponse(acknowledged);
+                return builder.build(currentState);
+            } finally {
+                for (String index : indicesToClose) {
+                    indicesService.removeIndex(index, "created for mapping processing");
+                }
             }
+        }

-            @Override
-            public ClusterState execute(final ClusterState currentState) throws Exception {
-                List<String> indicesToClose = new ArrayList<>();
-                try {
-                    for (String index : request.indices()) {
-                        if (!currentState.metaData().hasIndex(index)) {
-                            throw new IndexNotFoundException(index);
-                        }
-                    }
-
-                    // pre create indices here and add mappings to them so we can merge the mappings here if needed
-                    for (String index : request.indices()) {
-                        if (indicesService.hasIndex(index)) {
-                            continue;
+        private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException {
+            Map<String, DocumentMapper> newMappers = new HashMap<>();
+            Map<String, DocumentMapper> existingMappers = new HashMap<>();
+            for (String index : request.indices()) {
+                IndexService indexService = indicesService.indexServiceSafe(index);
+                // try and parse it (no need to add it here) so we can bail early in case of parsing exception
+                DocumentMapper newMapper;
+                DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type());
+                if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
+                    // _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
+                    newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), false);
+                } else {
+                    newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null);
+                    if (existingMapper != null) {
+                        // first, simulate
+                        MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
+                        // if we have conflicts, throw an exception
+                        if (mergeResult.hasConflicts()) {
+                            throw new MergeMappingException(mergeResult.buildConflicts());
                         }
-                        final IndexMetaData indexMetaData = currentState.metaData().index(index);
-                        IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
-                        indicesToClose.add(indexMetaData.getIndex());
-                        // make sure to add custom default mapping if exists
-                        if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
-                            indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, request.updateAllTypes());
-                        }
-                        // only add the current relevant mapping (if exists)
-                        if (indexMetaData.getMappings().containsKey(request.type())) {
-                            indexService.mapperService().merge(request.type(), indexMetaData.getMappings().get(request.type()).source(), false, request.updateAllTypes());
-                        }
-                    }
-
-                    Map<String, DocumentMapper> newMappers = new HashMap<>();
-                    Map<String, DocumentMapper> existingMappers = new HashMap<>();
-                    for (String index : request.indices()) {
-                        IndexService indexService = indicesService.indexServiceSafe(index);
-                        // try and parse it (no need to add it here) so we can bail early in case of parsing exception
-                        DocumentMapper newMapper;
-                        DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type());
-                        if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
-                            // _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
-                            newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), false);
-                        } else {
-                            newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null);
-                            if (existingMapper != null) {
-                                // first, simulate
-                                MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
-                                // if we have conflicts, throw an exception
-                                if (mergeResult.hasConflicts()) {
-                                    throw new MergeMappingException(mergeResult.buildConflicts());
-                                }
-                            } else {
-                                // TODO: can we find a better place for this validation?
-                                // The reason this validation is here is that the mapper service doesn't learn about
-                                // new types all at once , which can create a false error.
-
-                                // For example in MapperService we can't distinguish between a create index api call
-                                // and a put mapping api call, so we don't which type did exist before.
-                                // Also the order of the mappings may be backwards.
-                                if (newMapper.parentFieldMapper().active()) {
-                                    IndexMetaData indexMetaData = currentState.metaData().index(index);
-                                    for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
-                                        if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) {
-                                            throw new IllegalArgumentException("can't add a _parent field that points to an already existing type");
-                                        }
-                                    }
+                    } else {
+                        // TODO: can we find a better place for this validation?
+                        // The reason this validation is here is that the mapper service doesn't learn about
+                        // new types all at once, which can create a false error.
+
+                        // For example in MapperService we can't distinguish between a create index api call
+                        // and a put mapping api call, so we don't know which type did exist before.
+                        // Also the order of the mappings may be backwards.
+                        if (newMapper.parentFieldMapper().active()) {
+                            IndexMetaData indexMetaData = currentState.metaData().index(index);
+                            for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
+                                if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) {
+                                    throw new IllegalArgumentException("can't add a _parent field that points to an already existing type");
                                 }
                             }
                         }
+                    }
+                }
+                newMappers.put(index, newMapper);
+                if (existingMapper != null) {
+                    existingMappers.put(index, existingMapper);
+                }
+            }

+            String mappingType = request.type();
+            if (mappingType == null) {
+                mappingType = newMappers.values().iterator().next().type();
+            } else if (!mappingType.equals(newMappers.values().iterator().next().type())) {
+                throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
+            }
+            if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
+                throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
+            }
+            final Map<String, MappingMetaData> mappings = new HashMap<>();
+            for (Map.Entry<String, DocumentMapper> entry : newMappers.entrySet()) {
+                String index = entry.getKey();
+                // do the actual merge here on the master, and update the mapping source
+                DocumentMapper newMapper = entry.getValue();
+                IndexService indexService = indicesService.indexService(index);
+                if (indexService == null) {
+                    continue;
+                }

-                        newMappers.put(index, newMapper);
-                        if (existingMapper != null) {
-                            existingMappers.put(index, existingMapper);
+                CompressedXContent existingSource = null;
+                if (existingMappers.containsKey(entry.getKey())) {
+                    existingSource = existingMappers.get(entry.getKey()).mappingSource();
+                }
+                DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false, request.updateAllTypes());
+                CompressedXContent updatedSource = mergedMapper.mappingSource();
+
+                if (existingSource != null) {
+                    if (existingSource.equals(updatedSource)) {
+                        // same source, no changes, ignore it
+                    } else {
+                        // use the merged mapping source
+                        mappings.put(index, new MappingMetaData(mergedMapper));
+                        if (logger.isDebugEnabled()) {
+                            logger.debug("[{}] update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource);
+                        } else if (logger.isInfoEnabled()) {
+                            logger.info("[{}] update_mapping [{}]", index, mergedMapper.type());
                         }
-                    }

-                    String mappingType = request.type();
-                    if (mappingType == null) {
-                        mappingType = newMappers.values().iterator().next().type();
-                    } else if (!mappingType.equals(newMappers.values().iterator().next().type())) {
-                        throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
                     }
-                    if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
-                        throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
+                } else {
+                    mappings.put(index, new MappingMetaData(mergedMapper));
+                    if (logger.isDebugEnabled()) {
+                        logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource);
+                    } else if (logger.isInfoEnabled()) {
+                        logger.info("[{}] create_mapping [{}]", index, newMapper.type());
                     }
+                }
+            }
+            if (mappings.isEmpty()) {
+                // no changes, return
+                return currentState;
+            }
+            MetaData.Builder builder = MetaData.builder(currentState.metaData());
+            for (String indexName : request.indices()) {
+                IndexMetaData indexMetaData = currentState.metaData().index(indexName);
+                if (indexMetaData == null) {
+                    throw new IndexNotFoundException(indexName);
+                }
+                MappingMetaData mappingMd = mappings.get(indexName);
+                if (mappingMd != null) {
+                    builder.put(IndexMetaData.builder(indexMetaData).putMapping(mappingMd));
+                }
+            }

-                    final Map<String, MappingMetaData> mappings = new HashMap<>();
-                    for (Map.Entry<String, DocumentMapper> entry : newMappers.entrySet()) {
-                        String index = entry.getKey();
-                        // do the actual merge here on the master, and update the mapping source
-                        DocumentMapper newMapper = entry.getValue();
-                        IndexService indexService = indicesService.indexService(index);
-                        if (indexService == null) {
-                            continue;
-                        }
+            return ClusterState.builder(currentState).metaData(builder).build();
+        }
+    }

-                        CompressedXContent existingSource = null;
-                        if (existingMappers.containsKey(entry.getKey())) {
-                            existingSource = existingMappers.get(entry.getKey()).mappingSource();
-                        }
-                        DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false, request.updateAllTypes());
-                        CompressedXContent updatedSource = mergedMapper.mappingSource();
-
-                        if (existingSource != null) {
-                            if (existingSource.equals(updatedSource)) {
-                                // same source, no changes, ignore it
-                            } else {
-                                // use the merged mapping source
-                                mappings.put(index, new MappingMetaData(mergedMapper));
-                                if (logger.isDebugEnabled()) {
-                                    logger.debug("[{}] update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource);
-                                } else if (logger.isInfoEnabled()) {
-                                    logger.info("[{}] update_mapping [{}]", index, mergedMapper.type());
-                                }
-                            }
-                        } else {
-                            mappings.put(index, new MappingMetaData(mergedMapper));
-                            if (logger.isDebugEnabled()) {
-                                logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource);
-                            } else if (logger.isInfoEnabled()) {
-                                logger.info("[{}] create_mapping [{}]", index, newMapper.type());
-                            }
-                        }
+    public void putMapping(final PutMappingClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
+        clusterService.submitStateUpdateTask("put-mapping [" + request.type() + "]",
+                request,
+                ClusterStateTaskConfig.build(Priority.HIGH, request.masterNodeTimeout()),
+                putMappingExecutor,
+                new AckedClusterStateTaskListener() {
+
+                    @Override
+                    public void onFailure(String source, Throwable t) {
+                        listener.onFailure(t);
                     }

-                    if (mappings.isEmpty()) {
-                        // no changes, return
-                        return currentState;
+                    @Override
+                    public boolean mustAck(DiscoveryNode discoveryNode) {
+                        return true;
                     }

-                    MetaData.Builder builder = MetaData.builder(currentState.metaData());
-                    for (String indexName : request.indices()) {
-                        IndexMetaData indexMetaData = currentState.metaData().index(indexName);
-                        if (indexMetaData == null) {
-                            throw new IndexNotFoundException(indexName);
-                        }
-                        MappingMetaData mappingMd = mappings.get(indexName);
-                        if (mappingMd != null) {
-                            builder.put(IndexMetaData.builder(indexMetaData).putMapping(mappingMd));
-                        }
+                    @Override
+                    public void onAllNodesAcked(@Nullable Throwable t) {
+                        listener.onResponse(new ClusterStateUpdateResponse(true));
                     }

-                    return ClusterState.builder(currentState).metaData(builder).build();
-                } finally {
-                    for (String index : indicesToClose) {
-                        indicesService.removeIndex(index, "created for mapping processing");
+                    @Override
+                    public void onAckTimeout() {
+                        listener.onResponse(new ClusterStateUpdateResponse(false));
                     }
-                }
-            }
-        });
+
+                    @Override
+                    public TimeValue ackTimeout() {
+                        return request.ackTimeout();
+                    }
+                });
     }
 }
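A closing note on the acknowledgement wiring in putMapping: mustAck returning true for every node means the update waits on all nodes, onAllNodesAcked resolves the caller with acknowledged=true, and onAckTimeout resolves it with acknowledged=false once request.ackTimeout() elapses. A hypothetical caller would observe that flag roughly as follows; the metaDataMappingService, request, and logger variables are assumed to be in scope:

    metaDataMappingService.putMapping(request, new ActionListener<ClusterStateUpdateResponse>() {
        @Override
        public void onResponse(ClusterStateUpdateResponse response) {
            // true if every node acked within the timeout, false on ack timeout
            logger.info("put-mapping acknowledged: [{}]", response.isAcknowledged());
        }

        @Override
        public void onFailure(Throwable t) {
            // surfaces per-request failures from PutMappingExecutor,
            // e.g. a mapping merge conflict or an unknown index
            logger.warn("put-mapping failed", t);
        }
    });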