Browse Source

Remove deprecated gateways

Closes #5422
Igor Motov 11 years ago
parent
commit
3ffd0a1dfa
51 changed files with 5 additions and 3791 deletions
  1. 0 3
      docs/reference/indices.asciidoc
  2. 0 29
      docs/reference/indices/gateway-snapshot.asciidoc
  3. 2 7
      docs/reference/modules/gateway.asciidoc
  4. 0 39
      docs/reference/modules/gateway/fs.asciidoc
  5. 0 36
      docs/reference/modules/gateway/hadoop.asciidoc
  6. 0 51
      docs/reference/modules/gateway/s3.asciidoc
  7. 0 33
      rest-api-spec/api/indices.snapshot_index.json
  8. 0 4
      rest-api-spec/test/indices.snapshot_index/10_basic.yaml
  9. 0 3
      src/main/java/org/elasticsearch/action/ActionModule.java
  10. 0 23
      src/main/java/org/elasticsearch/action/admin/indices/gateway/package-info.java
  11. 0 47
      src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotAction.java
  12. 0 48
      src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotRequest.java
  13. 0 42
      src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotRequestBuilder.java
  14. 0 55
      src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotResponse.java
  15. 0 49
      src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/ShardGatewaySnapshotRequest.java
  16. 0 49
      src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/ShardGatewaySnapshotResponse.java
  17. 0 137
      src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/TransportGatewaySnapshotAction.java
  18. 0 24
      src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/package-info.java
  19. 0 32
      src/main/java/org/elasticsearch/action/admin/indices/status/TransportIndicesStatusAction.java
  20. 0 33
      src/main/java/org/elasticsearch/client/IndicesAdminClient.java
  21. 0 14
      src/main/java/org/elasticsearch/client/Requests.java
  22. 0 19
      src/main/java/org/elasticsearch/client/support/AbstractIndicesAdminClient.java
  23. 0 315
      src/main/java/org/elasticsearch/gateway/blobstore/BlobReuseExistingGatewayAllocator.java
  24. 0 220
      src/main/java/org/elasticsearch/gateway/blobstore/BlobStoreGateway.java
  25. 0 38
      src/main/java/org/elasticsearch/gateway/blobstore/BlobStoreGatewayModule.java
  26. 0 82
      src/main/java/org/elasticsearch/gateway/fs/FsGateway.java
  27. 0 34
      src/main/java/org/elasticsearch/gateway/fs/FsGatewayModule.java
  28. 0 197
      src/main/java/org/elasticsearch/gateway/shared/SharedStorageGateway.java
  29. 0 7
      src/main/java/org/elasticsearch/index/engine/Engine.java
  30. 0 21
      src/main/java/org/elasticsearch/index/engine/internal/InternalEngine.java
  31. 0 111
      src/main/java/org/elasticsearch/index/gateway/IndexShardGateway.java
  32. 2 210
      src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayService.java
  33. 0 87
      src/main/java/org/elasticsearch/index/gateway/blobstore/BlobStoreIndexGateway.java
  34. 0 881
      src/main/java/org/elasticsearch/index/gateway/blobstore/BlobStoreIndexShardGateway.java
  35. 0 49
      src/main/java/org/elasticsearch/index/gateway/fs/FsIndexGateway.java
  36. 0 34
      src/main/java/org/elasticsearch/index/gateway/fs/FsIndexGatewayModule.java
  37. 0 89
      src/main/java/org/elasticsearch/index/gateway/fs/FsIndexShardGateway.java
  38. 0 30
      src/main/java/org/elasticsearch/index/gateway/local/LocalIndexShardGateway.java
  39. 0 31
      src/main/java/org/elasticsearch/index/gateway/none/NoneIndexShardGateway.java
  40. 0 6
      src/main/java/org/elasticsearch/index/service/InternalIndexService.java
  41. 0 2
      src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java
  42. 0 2
      src/main/java/org/elasticsearch/index/shard/service/IndexShard.java
  43. 0 11
      src/main/java/org/elasticsearch/index/shard/service/InternalIndexShard.java
  44. 0 3
      src/main/java/org/elasticsearch/rest/action/RestActionModule.java
  45. 0 84
      src/main/java/org/elasticsearch/rest/action/admin/indices/gateway/snapshot/RestGatewaySnapshotAction.java
  46. 0 390
      src/test/java/org/elasticsearch/gateway/fs/IndexGatewayTests.java
  47. 1 60
      src/test/java/org/elasticsearch/index/engine/internal/InternalEngineTests.java
  48. 0 14
      src/test/java/org/elasticsearch/indices/IndicesOptionsTests.java
  49. 0 2
      src/test/java/org/elasticsearch/stresstest/fullrestart/FullRestartStressTest.java
  50. 0 2
      src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesRemoteStressTest.java
  51. 0 2
      src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesStressTest.java

+ 0 - 3
docs/reference/indices.asciidoc

@@ -55,7 +55,6 @@ and warmers.
 * <<indices-refresh>>
 * <<indices-flush>>
 * <<indices-optimize>>
-* <<indices-gateway-snapshot>>
 
 --
 
@@ -105,5 +104,3 @@ include::indices/refresh.asciidoc[]
 
 include::indices/optimize.asciidoc[]
 
-include::indices/gateway-snapshot.asciidoc[]
-

+ 0 - 29
docs/reference/indices/gateway-snapshot.asciidoc

@@ -1,29 +0,0 @@
-[[indices-gateway-snapshot]]
-== Gateway Snapshot
-
-The gateway snapshot API allows to explicitly perform a snapshot through
-the gateway of one or more indices (backup them). By default, each index
-gateway periodically snapshot changes, though it can be disabled and be
-controlled completely through this API.
-
-Note, this API only applies when using shared storage gateway
-implementation, and does not apply when using the (default) local
-gateway.
-
-[source,js]
---------------------------------------------------
-$ curl -XPOST 'http://localhost:9200/twitter/_gateway/snapshot'
---------------------------------------------------
-
-[float]
-=== Multi Index
-
-The gateway snapshot API can be applied to more than one index with a
-single call, or even on `_all` the indices.
-
-[source,js]
---------------------------------------------------
-$ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_gateway/snapshot'
-
-$ curl -XPOST 'http://localhost:9200/_gateway/snapshot'
---------------------------------------------------

+ 2 - 7
docs/reference/modules/gateway.asciidoc

@@ -12,9 +12,9 @@ When the cluster first starts up, the state will be read from the
 gateway and applied.
 
 The gateway set on the node level will automatically control the index
-gateway that will be used. For example, if the `fs` gateway is used,
+gateway that will be used. For example, if the `local` gateway is used,
 then automatically, each index created on the node will also use its own
-respective index level `fs` gateway. In this case, if an index should
+respective index level `local` gateway. In this case, if an index should
 not persist its state, it should be explicitly set to `none` (which is
 the only other value it can be set to).
 
@@ -68,8 +68,3 @@ will be recovered once the settings has been reached.
 
 include::gateway/local.asciidoc[]
 
-include::gateway/fs.asciidoc[]
-
-include::gateway/hadoop.asciidoc[]
-
-include::gateway/s3.asciidoc[]

+ 0 - 39
docs/reference/modules/gateway/fs.asciidoc

@@ -1,39 +0,0 @@
-[[modules-gateway-fs]]
-=== Shared FS Gateway
-
-*The shared FS gateway is deprecated and will be removed in a future
-version. Please use the
-<<modules-gateway-local,local gateway>>
-instead.*
-
-The file system based gateway stores the cluster meta data and indices
-in a *shared* file system. Note, since it is a distributed system, the
-file system should be shared between all different nodes. Here is an
-example config to enable it:
-
-[source,js]
---------------------------------------------------
-gateway:
-    type: fs
---------------------------------------------------
-
-[float]
-==== location
-
-The location where the gateway stores the cluster state can be set using
-the `gateway.fs.location` setting. By default, it will be stored under
-the `work` directory. Note, the `work` directory is considered a
-temporal directory with Elasticsearch (meaning it is safe to `rm -rf`
-it), the default location of the persistent gateway in work intentional,
-*it should be changed*.
-
-When explicitly specifying the `gateway.fs.location`, each node will
-append its `cluster.name` to the provided location. It means that the
-location provided can safely support several clusters.
-
-[float]
-==== concurrent_streams
-
-The `gateway.fs.concurrent_streams` allow to throttle the number of
-streams (per node) opened against the shared gateway performing the
-snapshot operation. It defaults to `5`.

+ 0 - 36
docs/reference/modules/gateway/hadoop.asciidoc

@@ -1,36 +0,0 @@
-[[modules-gateway-hadoop]]
-=== Hadoop Gateway
-
-*The hadoop gateway is deprecated and will be removed in a future
-version. Please use the
-<<modules-gateway-local,local gateway>>
-instead.*
-
-The hadoop (HDFS) based gateway stores the cluster meta and indices data
-in hadoop. Hadoop support is provided as a plugin and installing is
-explained https://github.com/elasticsearch/elasticsearch-hadoop[here] or
-downloading the hadoop plugin and placing it under the `plugins`
-directory. Here is an example config to enable it:
-
-[source,js]
---------------------------------------------------
-gateway:
-    type: hdfs
-    hdfs:
-        uri: hdfs://myhost:8022
---------------------------------------------------
-
-[float]
-==== Settings
-
-The hadoop gateway requires two simple settings. The `gateway.hdfs.uri`
-controls the URI to connect to the hadoop cluster, for example:
-`hdfs://myhost:8022`. The `gateway.hdfs.path` controls the path under
-which the gateway will store the data.
-
-[float]
-==== concurrent_streams
-
-The `gateway.hdfs.concurrent_streams` allow to throttle the number of
-streams (per node) opened against the shared gateway performing the
-snapshot operation. It defaults to `5`.

+ 0 - 51
docs/reference/modules/gateway/s3.asciidoc

@@ -1,51 +0,0 @@
-[[modules-gateway-s3]]
-=== S3 Gateway
-
-*The S3 gateway is deprecated and will be removed in a future version.
-Please use the <<modules-gateway-local,local
-gateway>> instead.*
-
-S3 based gateway allows to do long term reliable async persistency of
-the cluster state and indices directly to Amazon S3. Here is how it can
-be configured:
-
-[source,js]
---------------------------------------------------
-cloud:
-    aws:
-        access_key: AKVAIQBF2RECL7FJWGJQ
-        secret_key: vExyMThREXeRMm/b/LRzEB8jWwvzQeXgjqMX+6br
-
-
-gateway:
-    type: s3
-    s3:
-        bucket: bucket_name
---------------------------------------------------
-
-You’ll need to install the `cloud-aws` plugin, by running
-`bin/plugin install cloud-aws` before (re)starting elasticsearch.
-
-The following are a list of settings (prefixed with `gateway.s3`) that
-can further control the S3 gateway:
-
-[cols="<,<",options="header",]
-|=======================================================================
-|Setting |Description
-|`chunk_size` |Big files are broken down into chunks (to overcome AWS 5g
-limit and use concurrent snapshotting). Default set to `100m`.
-|=======================================================================
-
-[float]
-==== concurrent_streams
-
-The `gateway.s3.concurrent_streams` allow to throttle the number of
-streams (per node) opened against the shared gateway performing the
-snapshot operation. It defaults to `5`.
-
-[float]
-==== Region
-
-The `cloud.aws.region` can be set to a region and will automatically use
-the relevant settings for both `ec2` and `s3`. The available values are:
-`us-east-1`, `us-west-1`, `ap-southeast-1`, `eu-west-1`.

+ 0 - 33
rest-api-spec/api/indices.snapshot_index.json

@@ -1,33 +0,0 @@
-{
-  "indices.snapshot_index": {
-    "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/indices-gateway-snapshot.html",
-    "methods": ["POST"],
-    "url": {
-      "path": "/_gateway/snapshot",
-      "paths": ["/_gateway/snapshot", "/{index}/_gateway/snapshot"],
-      "parts": {
-        "index": {
-          "type" : "list",
-          "description" : "A comma-separated list of index names; use `_all` or empty string for all indices"
-        }
-      },
-      "params": {
-        "ignore_unavailable": {
-            "type" : "boolean",
-            "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
-        },
-        "allow_no_indices": {
-            "type" : "boolean",
-            "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
-        },
-        "expand_wildcards": {
-            "type" : "enum",
-            "options" : ["open","closed"],
-            "default" : "open",
-            "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
-        }
-      }
-    },
-    "body": null
-  }
-}

+ 0 - 4
rest-api-spec/test/indices.snapshot_index/10_basic.yaml

@@ -1,4 +0,0 @@
----
-"snapshot_index test":
-  - do:
-      indices.snapshot_index: {}

+ 0 - 3
src/main/java/org/elasticsearch/action/ActionModule.java

@@ -82,8 +82,6 @@ import org.elasticsearch.action.admin.indices.exists.types.TransportTypesExistsA
 import org.elasticsearch.action.admin.indices.exists.types.TypesExistsAction;
 import org.elasticsearch.action.admin.indices.flush.FlushAction;
 import org.elasticsearch.action.admin.indices.flush.TransportFlushAction;
-import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotAction;
-import org.elasticsearch.action.admin.indices.gateway.snapshot.TransportGatewaySnapshotAction;
 import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingAction;
 import org.elasticsearch.action.admin.indices.mapping.delete.TransportDeleteMappingAction;
 import org.elasticsearch.action.admin.indices.mapping.get.*;
@@ -240,7 +238,6 @@ public class ActionModule extends AbstractModule {
         registerAction(GetIndexTemplatesAction.INSTANCE, TransportGetIndexTemplatesAction.class);
         registerAction(DeleteIndexTemplateAction.INSTANCE, TransportDeleteIndexTemplateAction.class);
         registerAction(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class);
-        registerAction(GatewaySnapshotAction.INSTANCE, TransportGatewaySnapshotAction.class);
         registerAction(RefreshAction.INSTANCE, TransportRefreshAction.class);
         registerAction(FlushAction.INSTANCE, TransportFlushAction.class);
         registerAction(OptimizeAction.INSTANCE, TransportOptimizeAction.class);

+ 0 - 23
src/main/java/org/elasticsearch/action/admin/indices/gateway/package-info.java

@@ -1,23 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/**
- * Indices Gateway Administrative Actions.
- */
-package org.elasticsearch.action.admin.indices.gateway;

+ 0 - 47
src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotAction.java

@@ -1,47 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.action.admin.indices.gateway.snapshot;
-
-import org.elasticsearch.action.admin.indices.IndicesAction;
-import org.elasticsearch.client.IndicesAdminClient;
-
-/**
- * @deprecated Use snapshot/restore API instead
- */
-@Deprecated
-public class GatewaySnapshotAction extends IndicesAction<GatewaySnapshotRequest, GatewaySnapshotResponse, GatewaySnapshotRequestBuilder> {
-
-    public static final GatewaySnapshotAction INSTANCE = new GatewaySnapshotAction();
-    public static final String NAME = "indices/gateway/snapshot";
-
-    private GatewaySnapshotAction() {
-        super(NAME);
-    }
-
-    @Override
-    public GatewaySnapshotResponse newResponse() {
-        return new GatewaySnapshotResponse();
-    }
-
-    @Override
-    public GatewaySnapshotRequestBuilder newRequestBuilder(IndicesAdminClient client) {
-        return new GatewaySnapshotRequestBuilder(client);
-    }
-}

+ 0 - 48
src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotRequest.java

@@ -1,48 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.action.admin.indices.gateway.snapshot;
-
-import org.elasticsearch.action.support.broadcast.BroadcastOperationRequest;
-
-/**
- * Gateway snapshot allows to explicitly perform a snapshot through the gateway of one or more indices (backup them).
- * By default, each index gateway periodically snapshot changes, though it can be disabled and be controlled completely
- * through this API. Best created using {@link org.elasticsearch.client.Requests#gatewaySnapshotRequest(String...)}.
- *
- * @see org.elasticsearch.client.Requests#gatewaySnapshotRequest(String...)
- * @see org.elasticsearch.client.IndicesAdminClient#gatewaySnapshot(GatewaySnapshotRequest)
- * @see GatewaySnapshotResponse
- * @deprecated Use snapshot/restore API instead
- */
-@Deprecated
-public class GatewaySnapshotRequest extends BroadcastOperationRequest<GatewaySnapshotRequest> {
-
-    GatewaySnapshotRequest() {
-
-    }
-
-    /**
-     * Constructs a new gateway snapshot against one or more indices. No indices means the gateway snapshot
-     * will be executed against all indices.
-     */
-    public GatewaySnapshotRequest(String... indices) {
-        this.indices = indices;
-    }
-}

+ 0 - 42
src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotRequestBuilder.java

@@ -1,42 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.action.admin.indices.gateway.snapshot;
-
-import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
-import org.elasticsearch.client.IndicesAdminClient;
-import org.elasticsearch.client.internal.InternalIndicesAdminClient;
-
-/**
- * @deprecated Use snapshot/restore API instead
- */
-@Deprecated
-
-public class GatewaySnapshotRequestBuilder extends BroadcastOperationRequestBuilder<GatewaySnapshotRequest, GatewaySnapshotResponse, GatewaySnapshotRequestBuilder> {
-
-    public GatewaySnapshotRequestBuilder(IndicesAdminClient indicesClient) {
-        super((InternalIndicesAdminClient) indicesClient, new GatewaySnapshotRequest());
-    }
-
-    @Override
-    protected void doExecute(ActionListener<GatewaySnapshotResponse> listener) {
-        ((IndicesAdminClient) client).gatewaySnapshot(request, listener);
-    }
-}

+ 0 - 55
src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/GatewaySnapshotResponse.java

@@ -1,55 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.action.admin.indices.gateway.snapshot;
-
-import org.elasticsearch.action.ShardOperationFailedException;
-import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * Response for the gateway snapshot action.
- *
- * @deprecated Use snapshot/restore API instead
- */
-@Deprecated
-public class GatewaySnapshotResponse extends BroadcastOperationResponse {
-
-    GatewaySnapshotResponse() {
-
-    }
-
-    GatewaySnapshotResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
-        super(totalShards, successfulShards, failedShards, shardFailures);
-    }
-
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        super.readFrom(in);
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        super.writeTo(out);
-    }
-}

+ 0 - 49
src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/ShardGatewaySnapshotRequest.java

@@ -1,49 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.action.admin.indices.gateway.snapshot;
-
-import org.elasticsearch.action.support.broadcast.BroadcastShardOperationRequest;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-
-import java.io.IOException;
-
-/**
- *
- */
-class ShardGatewaySnapshotRequest extends BroadcastShardOperationRequest {
-
-    ShardGatewaySnapshotRequest() {
-    }
-
-    public ShardGatewaySnapshotRequest(String index, int shardId, GatewaySnapshotRequest request) {
-        super(index, shardId, request);
-    }
-
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        super.readFrom(in);
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        super.writeTo(out);
-    }
-}

+ 0 - 49
src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/ShardGatewaySnapshotResponse.java

@@ -1,49 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.action.admin.indices.gateway.snapshot;
-
-import org.elasticsearch.action.support.broadcast.BroadcastShardOperationResponse;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-
-import java.io.IOException;
-
-/**
- *
- */
-class ShardGatewaySnapshotResponse extends BroadcastShardOperationResponse {
-
-    ShardGatewaySnapshotResponse() {
-    }
-
-    public ShardGatewaySnapshotResponse(String index, int shardId) {
-        super(index, shardId);
-    }
-
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        super.readFrom(in);
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        super.writeTo(out);
-    }
-}

+ 0 - 137
src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/TransportGatewaySnapshotAction.java

@@ -1,137 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.action.admin.indices.gateway.snapshot;
-
-import com.google.common.collect.Lists;
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.action.ShardOperationFailedException;
-import org.elasticsearch.action.support.DefaultShardOperationFailedException;
-import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
-import org.elasticsearch.action.support.broadcast.TransportBroadcastOperationAction;
-import org.elasticsearch.cluster.ClusterService;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.block.ClusterBlockException;
-import org.elasticsearch.cluster.block.ClusterBlockLevel;
-import org.elasticsearch.cluster.routing.GroupShardsIterator;
-import org.elasticsearch.cluster.routing.ShardRouting;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.gateway.IndexShardGatewayService;
-import org.elasticsearch.indices.IndicesService;
-import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.TransportService;
-
-import java.util.List;
-import java.util.concurrent.atomic.AtomicReferenceArray;
-
-/**
- * @deprecated use Snapshot/Restore API instead
- */
-@Deprecated
-public class TransportGatewaySnapshotAction extends TransportBroadcastOperationAction<GatewaySnapshotRequest, GatewaySnapshotResponse, ShardGatewaySnapshotRequest, ShardGatewaySnapshotResponse> {
-
-    private final IndicesService indicesService;
-
-    @Inject
-    public TransportGatewaySnapshotAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
-                                          TransportService transportService, IndicesService indicesService) {
-        super(settings, threadPool, clusterService, transportService);
-        this.indicesService = indicesService;
-    }
-
-    @Override
-    protected String executor() {
-        return ThreadPool.Names.SNAPSHOT;
-    }
-
-    @Override
-    protected String transportAction() {
-        return GatewaySnapshotAction.NAME;
-    }
-
-    @Override
-    protected GatewaySnapshotRequest newRequest() {
-        return new GatewaySnapshotRequest();
-    }
-
-    @Override
-    protected GatewaySnapshotResponse newResponse(GatewaySnapshotRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
-        int successfulShards = 0;
-        int failedShards = 0;
-        List<ShardOperationFailedException> shardFailures = null;
-        for (int i = 0; i < shardsResponses.length(); i++) {
-            Object shardResponse = shardsResponses.get(i);
-            if (shardResponse == null) {
-                // non active shard, ignore
-            } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
-                failedShards++;
-                if (shardFailures == null) {
-                    shardFailures = Lists.newArrayList();
-                }
-                shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
-            } else {
-                successfulShards++;
-            }
-        }
-        return new GatewaySnapshotResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures);
-    }
-
-    @Override
-    protected ShardGatewaySnapshotRequest newShardRequest() {
-        return new ShardGatewaySnapshotRequest();
-    }
-
-    @Override
-    protected ShardGatewaySnapshotRequest newShardRequest(ShardRouting shard, GatewaySnapshotRequest request) {
-        return new ShardGatewaySnapshotRequest(shard.index(), shard.id(), request);
-    }
-
-    @Override
-    protected ShardGatewaySnapshotResponse newShardResponse() {
-        return new ShardGatewaySnapshotResponse();
-    }
-
-    @Override
-    protected ShardGatewaySnapshotResponse shardOperation(ShardGatewaySnapshotRequest request) throws ElasticsearchException {
-        IndexShardGatewayService shardGatewayService = indicesService.indexServiceSafe(request.index())
-                .shardInjectorSafe(request.shardId()).getInstance(IndexShardGatewayService.class);
-        shardGatewayService.snapshot("api");
-        return new ShardGatewaySnapshotResponse(request.index(), request.shardId());
-    }
-
-    /**
-     * The snapshot request works against all primary shards.
-     */
-    @Override
-    protected GroupShardsIterator shards(ClusterState clusterState, GatewaySnapshotRequest request, String[] concreteIndices) {
-        return clusterState.routingTable().activePrimaryShardsGrouped(concreteIndices, true);
-    }
-
-    @Override
-    protected ClusterBlockException checkGlobalBlock(ClusterState state, GatewaySnapshotRequest request) {
-        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA);
-    }
-
-    @Override
-    protected ClusterBlockException checkRequestBlock(ClusterState state, GatewaySnapshotRequest request, String[] concreteIndices) {
-        return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, concreteIndices);
-    }
-
-}

+ 0 - 24
src/main/java/org/elasticsearch/action/admin/indices/gateway/snapshot/package-info.java

@@ -1,24 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/**
- * Gateway Snapshot Action.
- * @deprecated Use snapshot/restore API instead
- */
-package org.elasticsearch.action.admin.indices.gateway.snapshot;

+ 0 - 32
src/main/java/org/elasticsearch/action/admin/indices/status/TransportIndicesStatusAction.java

@@ -38,7 +38,6 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.gateway.IndexShardGatewayService;
 import org.elasticsearch.indices.recovery.RecoveryState;
-import org.elasticsearch.index.gateway.SnapshotStatus;
 import org.elasticsearch.index.service.InternalIndexService;
 import org.elasticsearch.index.shard.IndexShardState;
 import org.elasticsearch.index.shard.service.InternalIndexShard;
@@ -231,37 +230,6 @@ public class TransportIndicesStatusAction extends TransportBroadcastOperationAct
                         gatewayRecoveryState.getIndex().totalByteCount(), gatewayRecoveryState.getIndex().reusedByteCount(), gatewayRecoveryState.getIndex().recoveredByteCount(), gatewayRecoveryState.getTranslog().currentTranslogOperations());
             }
         }
-
-        if (request.snapshot) {
-            IndexShardGatewayService gatewayService = indexService.shardInjector(request.shardId()).getInstance(IndexShardGatewayService.class);
-            SnapshotStatus snapshotStatus = gatewayService.snapshotStatus();
-            if (snapshotStatus != null) {
-                GatewaySnapshotStatus.Stage stage;
-                switch (snapshotStatus.stage()) {
-                    case DONE:
-                        stage = GatewaySnapshotStatus.Stage.DONE;
-                        break;
-                    case FAILURE:
-                        stage = GatewaySnapshotStatus.Stage.FAILURE;
-                        break;
-                    case TRANSLOG:
-                        stage = GatewaySnapshotStatus.Stage.TRANSLOG;
-                        break;
-                    case FINALIZE:
-                        stage = GatewaySnapshotStatus.Stage.FINALIZE;
-                        break;
-                    case INDEX:
-                        stage = GatewaySnapshotStatus.Stage.INDEX;
-                        break;
-                    default:
-                        stage = GatewaySnapshotStatus.Stage.NONE;
-                        break;
-                }
-                shardStatus.gatewaySnapshotStatus = new GatewaySnapshotStatus(stage, snapshotStatus.startTime(), snapshotStatus.time(),
-                        snapshotStatus.index().totalSize(), snapshotStatus.translog().expectedNumberOfOperations());
-            }
-        }
-
         return shardStatus;
     }
 

+ 0 - 33
src/main/java/org/elasticsearch/client/IndicesAdminClient.java

@@ -53,9 +53,6 @@ import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse;
 import org.elasticsearch.action.admin.indices.flush.FlushRequest;
 import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder;
 import org.elasticsearch.action.admin.indices.flush.FlushResponse;
-import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequest;
-import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequestBuilder;
-import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotResponse;
 import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingRequest;
 import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingRequestBuilder;
 import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingResponse;
@@ -493,36 +490,6 @@ public interface IndicesAdminClient {
      */
     DeleteMappingRequestBuilder prepareDeleteMapping(String... indices);
 
-    /**
-     * Explicitly perform gateway snapshot for one or more indices.
-     *
-     * @param request The gateway snapshot request
-     * @return The result future
-     * @see org.elasticsearch.client.Requests#gatewaySnapshotRequest(String...)
-     * @deprecated Use snapshot/restore API instead
-     */
-    @Deprecated
-    ActionFuture<GatewaySnapshotResponse> gatewaySnapshot(GatewaySnapshotRequest request);
-
-    /**
-     * Explicitly perform gateway snapshot for one or more indices.
-     *
-     * @param request  The gateway snapshot request
-     * @param listener A listener to be notified with a result
-     * @see org.elasticsearch.client.Requests#gatewaySnapshotRequest(String...)
-     * @deprecated Use snapshot/restore API instead
-     */
-    @Deprecated
-    void gatewaySnapshot(GatewaySnapshotRequest request, ActionListener<GatewaySnapshotResponse> listener);
-
-    /**
-     * Explicitly perform gateway snapshot for one or more indices.
-     *
-     * @deprecated Use snapshot/restore API instead
-     */
-    @Deprecated
-    GatewaySnapshotRequestBuilder prepareGatewaySnapshot(String... indices);
-
     /**
      * Allows to add/remove aliases from indices.
      *

+ 0 - 14
src/main/java/org/elasticsearch/client/Requests.java

@@ -44,7 +44,6 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
 import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
 import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest;
 import org.elasticsearch.action.admin.indices.flush.FlushRequest;
-import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequest;
 import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingRequest;
 import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
 import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
@@ -319,19 +318,6 @@ public class Requests {
         return new OptimizeRequest(indices);
     }
 
-    /**
-     * Creates a gateway snapshot indices request.
-     *
-     * @param indices The indices the gateway snapshot will be performed on. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
-     * @return The gateway snapshot request
-     * @see org.elasticsearch.client.IndicesAdminClient#gatewaySnapshot(org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequest)
-     * @deprecated Use snapshot/restore API instead
-     */
-    @Deprecated
-    public static GatewaySnapshotRequest gatewaySnapshotRequest(String... indices) {
-        return new GatewaySnapshotRequest(indices);
-    }
-
     /**
      * Creates a clean indices cache request.
      *

+ 0 - 19
src/main/java/org/elasticsearch/client/support/AbstractIndicesAdminClient.java

@@ -64,10 +64,6 @@ import org.elasticsearch.action.admin.indices.flush.FlushAction;
 import org.elasticsearch.action.admin.indices.flush.FlushRequest;
 import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder;
 import org.elasticsearch.action.admin.indices.flush.FlushResponse;
-import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotAction;
-import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequest;
-import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequestBuilder;
-import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotResponse;
 import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingAction;
 import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingRequest;
 import org.elasticsearch.action.admin.indices.mapping.delete.DeleteMappingRequestBuilder;
@@ -319,21 +315,6 @@ public abstract class AbstractIndicesAdminClient implements InternalIndicesAdmin
         return new FlushRequestBuilder(this).setIndices(indices);
     }
 
-    @Override
-    public ActionFuture<GatewaySnapshotResponse> gatewaySnapshot(final GatewaySnapshotRequest request) {
-        return execute(GatewaySnapshotAction.INSTANCE, request);
-    }
-
-    @Override
-    public void gatewaySnapshot(final GatewaySnapshotRequest request, final ActionListener<GatewaySnapshotResponse> listener) {
-        execute(GatewaySnapshotAction.INSTANCE, request, listener);
-    }
-
-    @Override
-    public GatewaySnapshotRequestBuilder prepareGatewaySnapshot(String... indices) {
-        return new GatewaySnapshotRequestBuilder(this).setIndices(indices);
-    }
-
     @Override
     public void getMappings(GetMappingsRequest request, ActionListener<GetMappingsResponse> listener) {
         execute(GetMappingsAction.INSTANCE, request, listener);

+ 0 - 315
src/main/java/org/elasticsearch/gateway/blobstore/BlobReuseExistingGatewayAllocator.java

@@ -1,315 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.gateway.blobstore;
-
-import com.carrotsearch.hppc.ObjectOpenHashSet;
-import com.carrotsearch.hppc.cursors.ObjectCursor;
-import com.google.common.collect.Maps;
-import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.node.DiscoveryNodes;
-import org.elasticsearch.cluster.routing.MutableShardRouting;
-import org.elasticsearch.cluster.routing.RoutingNode;
-import org.elasticsearch.cluster.routing.RoutingNodes;
-import org.elasticsearch.cluster.routing.ShardRouting;
-import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
-import org.elasticsearch.cluster.routing.allocation.allocator.GatewayAllocator;
-import org.elasticsearch.cluster.routing.allocation.decider.Decision;
-import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
-import org.elasticsearch.gateway.Gateway;
-import org.elasticsearch.index.gateway.CommitPoint;
-import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.index.store.StoreFileMetaData;
-import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;
-import org.elasticsearch.node.Node;
-import org.elasticsearch.node.internal.InternalNode;
-import org.elasticsearch.transport.ConnectTransportException;
-
-import java.util.Iterator;
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-
-/**
- *
- */
-public class BlobReuseExistingGatewayAllocator extends AbstractComponent implements GatewayAllocator {
-
-    private final Node node;
-
-    private final TransportNodesListShardStoreMetaData listShardStoreMetaData;
-
-    private final TimeValue listTimeout;
-
-    private final ConcurrentMap<ShardId, CommitPoint> cachedCommitPoints = ConcurrentCollections.newConcurrentMap();
-
-    private final ConcurrentMap<ShardId, Map<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData>> cachedStores = ConcurrentCollections.newConcurrentMap();
-
-    @Inject
-    public BlobReuseExistingGatewayAllocator(Settings settings, Node node,
-                                             TransportNodesListShardStoreMetaData transportNodesListShardStoreMetaData) {
-        super(settings);
-        this.node = node; // YACK!, we need the Gateway, but it creates crazy circular dependency
-        this.listShardStoreMetaData = transportNodesListShardStoreMetaData;
-
-        this.listTimeout = componentSettings.getAsTime("list_timeout", TimeValue.timeValueSeconds(30));
-    }
-
-    @Override
-    public void applyStartedShards(StartedRerouteAllocation allocation) {
-        for (ShardRouting shardRouting : allocation.startedShards()) {
-            cachedCommitPoints.remove(shardRouting.shardId());
-            cachedStores.remove(shardRouting.shardId());
-        }
-    }
-
-    @Override
-    public void applyFailedShards(FailedRerouteAllocation allocation) {
-        for (ShardRouting failedShard : allocation.failedShards()) {
-            cachedCommitPoints.remove(failedShard.shardId());
-            cachedStores.remove(failedShard.shardId());
-        }
-    }
-
-    @Override
-    public boolean allocateUnassigned(RoutingAllocation allocation) {
-        boolean changed = false;
-
-        DiscoveryNodes nodes = allocation.nodes();
-        RoutingNodes routingNodes = allocation.routingNodes();
-
-        if (nodes.dataNodes().isEmpty()) {
-            return changed;
-        }
-
-        if (!routingNodes.hasUnassigned()) {
-            return changed;
-        }
-
-        Iterator<MutableShardRouting> unassignedIterator = routingNodes.unassigned().iterator();
-        while (unassignedIterator.hasNext()) {
-            MutableShardRouting shard = unassignedIterator.next();
-
-            // pre-check if it can be allocated to any node that currently exists, so we won't list the store for it for nothing
-            boolean canBeAllocatedToAtLeastOneNode = false;
-            for (ObjectCursor<DiscoveryNode> cursor : nodes.dataNodes().values()) {
-                RoutingNode node = routingNodes.node(cursor.value.id());
-                if (node == null) {
-                    continue;
-                }
-                // if its THROTTLING, we are not going to allocate it to this node, so ignore it as well
-                Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
-                if (decision.type() == Decision.Type.YES) {
-                    canBeAllocatedToAtLeastOneNode = true;
-                    break;
-                }
-            }
-
-            if (!canBeAllocatedToAtLeastOneNode) {
-                continue;
-            }
-
-            Map<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData> shardStores = buildShardStores(nodes, shard);
-
-            long lastSizeMatched = 0;
-            DiscoveryNode lastDiscoNodeMatched = null;
-            RoutingNode lastNodeMatched = null;
-
-            for (Map.Entry<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData> nodeStoreEntry : shardStores.entrySet()) {
-                DiscoveryNode discoNode = nodeStoreEntry.getKey();
-                TransportNodesListShardStoreMetaData.StoreFilesMetaData storeFilesMetaData = nodeStoreEntry.getValue();
-                logger.trace("{}: checking node [{}]", shard, discoNode);
-
-                if (storeFilesMetaData == null) {
-                    // already allocated on that node...
-                    continue;
-                }
-
-                RoutingNode node = routingNodes.node(discoNode.id());
-                if (node == null) {
-                    continue;
-                }
-
-                // check if we can allocate on that node...
-                // we only check for NO, since if this node is THROTTLING and it has enough "same data"
-                // then we will try and assign it next time
-                if (allocation.deciders().canAllocate(shard, node, allocation).type() == Decision.Type.NO) {
-                    continue;
-                }
-
-                // if it is already allocated, we can't assign to it...
-                if (storeFilesMetaData.allocated()) {
-                    continue;
-                }
-
-
-                // if it's a primary, it will be recovered from the gateway, find one that is closest to it
-                if (shard.primary()) {
-                    try {
-                        CommitPoint commitPoint = cachedCommitPoints.get(shard.shardId());
-                        if (commitPoint == null) {
-                            commitPoint = ((BlobStoreGateway) ((InternalNode) this.node).injector().getInstance(Gateway.class)).findCommitPoint(shard.index(), shard.id());
-                            if (commitPoint != null) {
-                                cachedCommitPoints.put(shard.shardId(), commitPoint);
-                            } else {
-                                cachedCommitPoints.put(shard.shardId(), CommitPoint.NULL);
-                            }
-                        } else if (commitPoint == CommitPoint.NULL) {
-                            commitPoint = null;
-                        }
-
-                        if (commitPoint == null) {
-                            break;
-                        }
-
-                        long sizeMatched = 0;
-                        for (StoreFileMetaData storeFileMetaData : storeFilesMetaData) {
-                            CommitPoint.FileInfo fileInfo = commitPoint.findPhysicalIndexFile(storeFileMetaData.name());
-                            if (fileInfo != null) {
-                                if (fileInfo.isSame(storeFileMetaData)) {
-                                    logger.trace("{}: [{}] reusing file since it exists on remote node and on gateway", shard, storeFileMetaData.name());
-                                    sizeMatched += storeFileMetaData.length();
-                                } else {
-                                    logger.trace("{}: [{}] ignore file since it exists on remote node and on gateway but is different", shard, storeFileMetaData.name());
-                                }
-                            } else {
-                                logger.trace("{}: [{}] exists on remote node, does not exists on gateway", shard, storeFileMetaData.name());
-                            }
-                        }
-                        if (sizeMatched > lastSizeMatched) {
-                            lastSizeMatched = sizeMatched;
-                            lastDiscoNodeMatched = discoNode;
-                            lastNodeMatched = node;
-                            logger.trace("{}: node elected for pre_allocation [{}], total_size_matched [{}]", shard, discoNode, new ByteSizeValue(sizeMatched));
-                        } else {
-                            logger.trace("{}: node ignored for pre_allocation [{}], total_size_matched [{}] smaller than last_size_matched [{}]", shard, discoNode, new ByteSizeValue(sizeMatched), new ByteSizeValue(lastSizeMatched));
-                        }
-                    } catch (Exception e) {
-                        // failed, log and try and allocate based on size
-                        logger.debug("Failed to guess allocation of primary based on gateway for " + shard, e);
-                    }
-                } else {
-                    // if it's a backup, see if there is a primary that *is* allocated, and try and assign a location that is closest to it
-                    // note, since we replicate operations, this might not be the same (different flush intervals)
-                    MutableShardRouting primaryShard = routingNodes.activePrimary(shard);
-                    if (primaryShard != null) {
-                        assert primaryShard.active();
-                        DiscoveryNode primaryNode = nodes.get(primaryShard.currentNodeId());
-                        if (primaryNode != null) {
-                            TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryNodeStore = shardStores.get(primaryNode);
-                            if (primaryNodeStore != null && primaryNodeStore.allocated()) {
-                                long sizeMatched = 0;
-
-                                for (StoreFileMetaData storeFileMetaData : storeFilesMetaData) {
-                                    if (primaryNodeStore.fileExists(storeFileMetaData.name()) && primaryNodeStore.file(storeFileMetaData.name()).isSame(storeFileMetaData)) {
-                                        sizeMatched += storeFileMetaData.length();
-                                    }
-                                }
-                                if (sizeMatched > lastSizeMatched) {
-                                    lastSizeMatched = sizeMatched;
-                                    lastDiscoNodeMatched = discoNode;
-                                    lastNodeMatched = node;
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-
-            if (lastNodeMatched != null) {
-                if (allocation.deciders().canAllocate(shard, lastNodeMatched, allocation).type() == Decision.Type.THROTTLE) {
-                    if (logger.isTraceEnabled()) {
-                        logger.debug("[{}][{}]: throttling allocation [{}] to [{}] in order to reuse its unallocated persistent store with total_size [{}]", shard.index(), shard.id(), shard, lastDiscoNodeMatched, new ByteSizeValue(lastSizeMatched));
-                    }
-                    // we are throttling this, but we have enough to allocate to this node, ignore it for now
-                    unassignedIterator.remove();
-                    routingNodes.ignoredUnassigned().add(shard);
-                } else {
-                    if (logger.isDebugEnabled()) {
-                        logger.debug("[{}][{}]: allocating [{}] to [{}] in order to reuse its unallocated persistent store with total_size [{}]", shard.index(), shard.id(), shard, lastDiscoNodeMatched, new ByteSizeValue(lastSizeMatched));
-                    }
-                    // we found a match
-                    changed = true;
-                    allocation.routingNodes().assign(shard, lastNodeMatched.nodeId());
-                    unassignedIterator.remove();
-                }
-            }
-        }
-        return changed;
-    }
-
-    private Map<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData> buildShardStores(DiscoveryNodes nodes, MutableShardRouting shard) {
-        Map<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData> shardStores = cachedStores.get(shard.shardId());
-        ObjectOpenHashSet<String> nodesIds;
-        if (shardStores == null) {
-            shardStores = Maps.newHashMap();
-            cachedStores.put(shard.shardId(), shardStores);
-            nodesIds = ObjectOpenHashSet.from(nodes.dataNodes().keys());
-        } else {
-            nodesIds = ObjectOpenHashSet.newInstance();
-            // clean nodes that have failed
-            for (Iterator<DiscoveryNode> it = shardStores.keySet().iterator(); it.hasNext(); ) {
-                DiscoveryNode node = it.next();
-                if (!nodes.nodeExists(node.id())) {
-                    it.remove();
-                }
-            }
-
-            for (ObjectCursor<DiscoveryNode> cursor : nodes.dataNodes().values()) {
-                DiscoveryNode node = cursor.value;
-                if (!shardStores.containsKey(node)) {
-                    nodesIds.add(node.id());
-                }
-            }
-        }
-
-        if (!nodesIds.isEmpty()) {
-            String[] nodesIdsArray = nodesIds.toArray(String.class);
-            TransportNodesListShardStoreMetaData.NodesStoreFilesMetaData nodesStoreFilesMetaData = listShardStoreMetaData.list(shard.shardId(), false, nodesIdsArray, listTimeout).actionGet();
-            if (logger.isTraceEnabled()) {
-                if (nodesStoreFilesMetaData.failures().length > 0) {
-                    StringBuilder sb = new StringBuilder(shard + ": failures when trying to list stores on nodes:");
-                    for (int i = 0; i < nodesStoreFilesMetaData.failures().length; i++) {
-                        Throwable cause = ExceptionsHelper.unwrapCause(nodesStoreFilesMetaData.failures()[i]);
-                        if (cause instanceof ConnectTransportException) {
-                            continue;
-                        }
-                        sb.append("\n    -> ").append(nodesStoreFilesMetaData.failures()[i].getDetailedMessage());
-                    }
-                    logger.trace(sb.toString());
-                }
-            }
-
-            for (TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData nodeStoreFilesMetaData : nodesStoreFilesMetaData) {
-                if (nodeStoreFilesMetaData.storeFilesMetaData() != null) {
-                    shardStores.put(nodeStoreFilesMetaData.getNode(), nodeStoreFilesMetaData.storeFilesMetaData());
-                }
-            }
-        }
-
-        return shardStores;
-    }
-}

+ 0 - 220
src/main/java/org/elasticsearch/gateway/blobstore/BlobStoreGateway.java

@@ -1,220 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.gateway.blobstore;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Lists;
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterService;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.blobstore.*;
-import org.elasticsearch.common.compress.CompressorFactory;
-import org.elasticsearch.common.io.stream.BytesStreamOutput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.common.xcontent.*;
-import org.elasticsearch.gateway.GatewayException;
-import org.elasticsearch.gateway.shared.SharedStorageGateway;
-import org.elasticsearch.index.gateway.CommitPoint;
-import org.elasticsearch.index.gateway.CommitPoints;
-import org.elasticsearch.index.gateway.blobstore.BlobStoreIndexGateway;
-import org.elasticsearch.threadpool.ThreadPool;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- *
- */
-public abstract class BlobStoreGateway extends SharedStorageGateway {
-
-    private BlobStore blobStore;
-
-    private ByteSizeValue chunkSize;
-
-    private BlobPath basePath;
-
-    private ImmutableBlobContainer metaDataBlobContainer;
-
-    private boolean compress;
-
-    private volatile int currentIndex;
-
-    protected BlobStoreGateway(Settings settings, ThreadPool threadPool, ClusterService clusterService) {
-        super(settings, threadPool, clusterService);
-    }
-
-    protected void initialize(BlobStore blobStore, ClusterName clusterName, @Nullable ByteSizeValue defaultChunkSize) throws IOException {
-        this.blobStore = blobStore;
-        this.chunkSize = componentSettings.getAsBytesSize("chunk_size", defaultChunkSize);
-        this.basePath = BlobPath.cleanPath().add(clusterName.value());
-        this.metaDataBlobContainer = blobStore.immutableBlobContainer(basePath.add("metadata"));
-        this.currentIndex = findLatestIndex();
-        this.compress = componentSettings.getAsBoolean("compress", true);
-        logger.debug("Latest metadata found at index [" + currentIndex + "]");
-    }
-
-    @Override
-    public String toString() {
-        return type() + "://" + blobStore + "/" + basePath;
-    }
-
-    public BlobStore blobStore() {
-        return blobStore;
-    }
-
-    public BlobPath basePath() {
-        return basePath;
-    }
-
-    public ByteSizeValue chunkSize() {
-        return this.chunkSize;
-    }
-
-    @Override
-    public void reset() throws Exception {
-        blobStore.delete(BlobPath.cleanPath());
-    }
-
-    @Override
-    public MetaData read() throws GatewayException {
-        try {
-            this.currentIndex = findLatestIndex();
-        } catch (IOException e) {
-            throw new GatewayException("Failed to find latest metadata to read from", e);
-        }
-        if (currentIndex == -1)
-            return null;
-        String metaData = "metadata-" + currentIndex;
-
-        try {
-            return readMetaData(metaDataBlobContainer.readBlobFully(metaData));
-        } catch (GatewayException e) {
-            throw e;
-        } catch (Exception e) {
-            throw new GatewayException("Failed to read metadata [" + metaData + "] from gateway", e);
-        }
-    }
-
-    public CommitPoint findCommitPoint(String index, int shardId) throws IOException {
-        BlobPath path = BlobStoreIndexGateway.shardPath(basePath, index, shardId);
-        ImmutableBlobContainer container = blobStore.immutableBlobContainer(path);
-        ImmutableMap<String, BlobMetaData> blobs = container.listBlobs();
-        List<CommitPoint> commitPointsList = Lists.newArrayList();
-        for (BlobMetaData md : blobs.values()) {
-            if (md.length() == 0) { // a commit point that was not flushed yet...
-                continue;
-            }
-            if (md.name().startsWith("commit-")) {
-                try {
-                    commitPointsList.add(CommitPoints.fromXContent(container.readBlobFully(md.name())));
-                } catch (Exception e) {
-                    logger.warn("failed to read commit point at path {} with name [{}]", e, path, md.name());
-                }
-            }
-        }
-        CommitPoints commitPoints = new CommitPoints(commitPointsList);
-        if (commitPoints.commits().isEmpty()) {
-            return null;
-        }
-        return commitPoints.commits().get(0);
-    }
-
-    @Override
-    protected void delete(IndexMetaData indexMetaData) throws ElasticsearchException {
-        BlobPath indexPath = basePath().add("indices").add(indexMetaData.index());
-        blobStore.delete(indexPath);
-    }
-
-    @Override
-    public void write(MetaData metaData) throws GatewayException {
-        final String newMetaData = "metadata-" + (currentIndex + 1);
-        try {
-            BytesStreamOutput bStream = new BytesStreamOutput();
-            StreamOutput stream = bStream;
-            if (compress) {
-                stream = CompressorFactory.defaultCompressor().streamOutput(stream);
-            }
-            XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream);
-            builder.startObject();
-            MetaData.Builder.toXContent(metaData, builder, ToXContent.EMPTY_PARAMS);
-            builder.endObject();
-            builder.close();
-            metaDataBlobContainer.writeBlob(newMetaData, bStream.bytes().streamInput(), bStream.bytes().length());
-        } catch (IOException e) {
-            throw new GatewayException("Failed to write metadata [" + newMetaData + "]", e);
-        }
-
-        currentIndex++;
-
-        try {
-            metaDataBlobContainer.deleteBlobsByFilter(new BlobContainer.BlobNameFilter() {
-                @Override
-                public boolean accept(String blobName) {
-                    return blobName.startsWith("metadata-") && !newMetaData.equals(blobName);
-                }
-            });
-        } catch (IOException e) {
-            logger.debug("Failed to delete old metadata, will do it next time", e);
-        }
-    }
-
-    private int findLatestIndex() throws IOException {
-        ImmutableMap<String, BlobMetaData> blobs = metaDataBlobContainer.listBlobsByPrefix("metadata-");
-
-        int index = -1;
-        for (BlobMetaData md : blobs.values()) {
-            if (logger.isTraceEnabled()) {
-                logger.trace("[findLatestMetadata]: Processing [" + md.name() + "]");
-            }
-            String name = md.name();
-            int fileIndex = Integer.parseInt(name.substring(name.indexOf('-') + 1));
-            if (fileIndex >= index) {
-                // try and read the meta data
-                byte[] data = null;
-                try {
-                    data = metaDataBlobContainer.readBlobFully(name);
-                    readMetaData(data);
-                    index = fileIndex;
-                } catch (IOException e) {
-                    logger.warn("[findLatestMetadata]: failed to read metadata from [{}], data_length [{}] ignoring...", e, name, data == null ? "na" : data.length);
-                }
-            }
-        }
-
-        return index;
-    }
-
-    private MetaData readMetaData(byte[] data) throws IOException {
-        XContentParser parser = null;
-        try {
-            parser = XContentHelper.createParser(data, 0, data.length);
-            return MetaData.Builder.fromXContent(parser);
-        } finally {
-            if (parser != null) {
-                parser.close();
-            }
-        }
-    }
-}

+ 0 - 38
src/main/java/org/elasticsearch/gateway/blobstore/BlobStoreGatewayModule.java

@@ -1,38 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.gateway.blobstore;
-
-import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocatorModule;
-import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.common.inject.Module;
-import org.elasticsearch.common.inject.PreProcessModule;
-
-/**
- *
- */
-public abstract class BlobStoreGatewayModule extends AbstractModule implements PreProcessModule {
-
-    @Override
-    public void processModule(Module module) {
-        if (module instanceof ShardsAllocatorModule) {
-            ((ShardsAllocatorModule) module).setGatewayAllocator(BlobReuseExistingGatewayAllocator.class);
-        }
-    }
-}

+ 0 - 82
src/main/java/org/elasticsearch/gateway/fs/FsGateway.java

@@ -1,82 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.gateway.fs;
-
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterService;
-import org.elasticsearch.common.blobstore.fs.FsBlobStore;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.inject.Module;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.util.concurrent.EsExecutors;
-import org.elasticsearch.env.Environment;
-import org.elasticsearch.gateway.blobstore.BlobStoreGateway;
-import org.elasticsearch.index.gateway.fs.FsIndexGatewayModule;
-import org.elasticsearch.threadpool.ThreadPool;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
-
-/**
- *
- */
-public class FsGateway extends BlobStoreGateway {
-
-    private final ExecutorService concurrentStreamPool;
-
-    @Inject
-    public FsGateway(Settings settings, ThreadPool threadPool, ClusterService clusterService,
-                     Environment environment, ClusterName clusterName) throws IOException {
-        super(settings, threadPool, clusterService);
-
-        File gatewayFile;
-        String location = componentSettings.get("location");
-        if (location == null) {
-            logger.warn("using local fs location for gateway, should be changed to be a shared location across nodes");
-            gatewayFile = new File(environment.dataFiles()[0], "gateway");
-        } else {
-            gatewayFile = new File(location);
-        }
-
-        int concurrentStreams = componentSettings.getAsInt("concurrent_streams", 5);
-        this.concurrentStreamPool = EsExecutors.newScaling(1, concurrentStreams, 60, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory(settings, "[fs_stream]"));
-
-        initialize(new FsBlobStore(componentSettings, concurrentStreamPool, gatewayFile), clusterName, null);
-    }
-
-    @Override
-    public String type() {
-        return "fs";
-    }
-
-    @Override
-    public Class<? extends Module> suggestIndexGateway() {
-        return FsIndexGatewayModule.class;
-    }
-
-    @Override
-    protected void doClose() throws ElasticsearchException {
-        super.doClose();
-        concurrentStreamPool.shutdown();
-    }
-}

+ 0 - 34
src/main/java/org/elasticsearch/gateway/fs/FsGatewayModule.java

@@ -1,34 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.gateway.fs;
-
-import org.elasticsearch.gateway.Gateway;
-import org.elasticsearch.gateway.blobstore.BlobStoreGatewayModule;
-
-/**
- *
- */
-public class FsGatewayModule extends BlobStoreGatewayModule {
-
-    @Override
-    protected void configure() {
-        bind(Gateway.class).to(FsGateway.class).asEagerSingleton();
-    }
-}

+ 0 - 197
src/main/java/org/elasticsearch/gateway/shared/SharedStorageGateway.java

@@ -1,197 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.gateway.shared;
-
-import com.google.common.collect.Sets;
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.cluster.ClusterChangedEvent;
-import org.elasticsearch.cluster.ClusterService;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.ClusterStateListener;
-import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.common.StopWatch;
-import org.elasticsearch.common.component.AbstractLifecycleComponent;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.io.FileSystemUtils;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.env.NodeEnvironment;
-import org.elasticsearch.gateway.Gateway;
-import org.elasticsearch.gateway.GatewayException;
-import org.elasticsearch.index.Index;
-import org.elasticsearch.threadpool.ThreadPool;
-
-import java.util.Set;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
-
-import static java.util.concurrent.Executors.newSingleThreadExecutor;
-import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
-
-/**
- *
- */
-public abstract class SharedStorageGateway extends AbstractLifecycleComponent<Gateway> implements Gateway, ClusterStateListener {
-
-    private final ClusterService clusterService;
-
-    private final ThreadPool threadPool;
-
-    private ExecutorService writeStateExecutor;
-
-    private volatile MetaData currentMetaData;
-
-    private NodeEnvironment nodeEnv;
-
-    private NodeIndexDeletedAction nodeIndexDeletedAction;
-
-    public SharedStorageGateway(Settings settings, ThreadPool threadPool, ClusterService clusterService) {
-        super(settings);
-        this.threadPool = threadPool;
-        this.clusterService = clusterService;
-        this.writeStateExecutor = newSingleThreadExecutor(daemonThreadFactory(settings, "gateway#writeMetaData"));
-        clusterService.addLast(this);
-        logger.warn("shared gateway has been deprecated, please use the (default) local gateway");
-    }
-
-    @Inject
-    public void setNodeEnv(NodeEnvironment nodeEnv) {
-        this.nodeEnv = nodeEnv;
-    }
-
-    // here as setter injection not to break backward comp. with extensions of this class..
-    @Inject
-    public void setNodeIndexDeletedAction(NodeIndexDeletedAction nodeIndexDeletedAction) {
-        this.nodeIndexDeletedAction = nodeIndexDeletedAction;
-    }
-
-    @Override
-    protected void doStart() throws ElasticsearchException {
-    }
-
-    @Override
-    protected void doStop() throws ElasticsearchException {
-    }
-
-    @Override
-    protected void doClose() throws ElasticsearchException {
-        clusterService.remove(this);
-        writeStateExecutor.shutdown();
-        try {
-            writeStateExecutor.awaitTermination(10, TimeUnit.SECONDS);
-        } catch (InterruptedException e) {
-            // ignore
-        }
-    }
-
-    @Override
-    public void performStateRecovery(final GatewayStateRecoveredListener listener) throws GatewayException {
-        threadPool.generic().execute(new Runnable() {
-            @Override
-            public void run() {
-                logger.debug("reading state from gateway {} ...", this);
-                StopWatch stopWatch = new StopWatch().start();
-                MetaData metaData;
-                try {
-                    metaData = read();
-                    logger.debug("read state from gateway {}, took {}", this, stopWatch.stop().totalTime());
-                    if (metaData == null) {
-                        logger.debug("no state read from gateway");
-                        listener.onSuccess(ClusterState.builder().build());
-                    } else {
-                        listener.onSuccess(ClusterState.builder().metaData(metaData).build());
-                    }
-                } catch (Exception e) {
-                    logger.error("failed to read from gateway", e);
-                    listener.onFailure(ExceptionsHelper.detailedMessage(e));
-                }
-            }
-        });
-    }
-
-    @Override
-    public void clusterChanged(final ClusterChangedEvent event) {
-        if (!lifecycle.started()) {
-            return;
-        }
-
-        // nothing to do until we actually recover from the gateway or any other block indicates we need to disable persistency
-        if (event.state().blocks().disableStatePersistence()) {
-            this.currentMetaData = null;
-            return;
-        }
-
-        if (!event.metaDataChanged()) {
-            return;
-        }
-        writeStateExecutor.execute(new Runnable() {
-            @Override
-            public void run() {
-                Set<String> indicesDeleted = Sets.newHashSet();
-                if (event.localNodeMaster()) {
-                    logger.debug("writing to gateway {} ...", this);
-                    StopWatch stopWatch = new StopWatch().start();
-                    try {
-                        write(event.state().metaData());
-                        logger.debug("wrote to gateway {}, took {}", this, stopWatch.stop().totalTime());
-                        // TODO, we need to remember that we failed, maybe add a retry scheduler?
-                    } catch (Exception e) {
-                        logger.error("failed to write to gateway", e);
-                    }
-                    if (currentMetaData != null) {
-                        for (IndexMetaData current : currentMetaData) {
-                            if (!event.state().metaData().hasIndex(current.index())) {
-                                delete(current);
-                                indicesDeleted.add(current.index());
-                            }
-                        }
-                    }
-                }
-                if (nodeEnv != null && nodeEnv.hasNodeFile()) {
-                    if (currentMetaData != null) {
-                        for (IndexMetaData current : currentMetaData) {
-                            if (!event.state().metaData().hasIndex(current.index())) {
-                                FileSystemUtils.deleteRecursively(nodeEnv.indexLocations(new Index(current.index())));
-                                indicesDeleted.add(current.index());
-                            }
-                        }
-                    }
-                }
-                currentMetaData = event.state().metaData();
-
-                for (String indexDeleted : indicesDeleted) {
-                    try {
-                        nodeIndexDeletedAction.nodeIndexStoreDeleted(event.state(), indexDeleted, event.state().nodes().localNodeId());
-                    } catch (Exception e) {
-                        logger.debug("[{}] failed to notify master on local index store deletion", e, indexDeleted);
-                    }
-                }
-            }
-        });
-    }
-
-    protected abstract MetaData read() throws ElasticsearchException;
-
-    protected abstract void write(MetaData metaData) throws ElasticsearchException;
-
-    protected abstract void delete(IndexMetaData indexMetaData) throws ElasticsearchException;
-}

+ 0 - 7
src/main/java/org/elasticsearch/index/engine/Engine.java

@@ -126,8 +126,6 @@ public interface Engine extends IndexShardComponent, CloseableComponent {
 
     void optimize(Optimize optimize) throws EngineException;
 
-    <T> T snapshot(SnapshotHandler<T> snapshotHandler) throws EngineException;
-
     /**
      * Snapshots the index and returns a handle to it. Will always try and "commit" the
      * lucene index to make sure we have a "fresh" copy of the files to snapshot.
@@ -161,11 +159,6 @@ public interface Engine extends IndexShardComponent, CloseableComponent {
         void phase3(Translog.Snapshot snapshot) throws ElasticsearchException;
     }
 
-    static interface SnapshotHandler<T> {
-
-        T snapshot(SnapshotIndexCommit snapshotIndexCommit, Translog.Snapshot translogSnapshot) throws EngineException;
-    }
-
     static interface Searcher extends Releasable {
 
         /**

+ 0 - 21
src/main/java/org/elasticsearch/index/engine/internal/InternalEngine.java

@@ -1005,27 +1005,6 @@ public class InternalEngine extends AbstractIndexShardComponent implements Engin
         }
     }
 
-    @Override
-    public <T> T snapshot(SnapshotHandler<T> snapshotHandler) throws EngineException {
-        SnapshotIndexCommit snapshotIndexCommit = null;
-        Translog.Snapshot translogSnapshot = null;
-        rwl.readLock().lock();
-        try {
-            snapshotIndexCommit = deletionPolicy.snapshot();
-            translogSnapshot = translog.snapshot();
-        } catch (Throwable e) {
-            Releasables.releaseWhileHandlingException(snapshotIndexCommit);
-            throw new SnapshotFailedEngineException(shardId, e);
-        } finally {
-            rwl.readLock().unlock();
-        }
-
-        try {
-            return snapshotHandler.snapshot(snapshotIndexCommit, translogSnapshot);
-        } finally {
-            Releasables.release(snapshotIndexCommit, translogSnapshot);
-        }
-    }
 
     @Override
     public SnapshotIndexCommit snapshotIndex() throws EngineException {

+ 0 - 111
src/main/java/org/elasticsearch/index/gateway/IndexShardGateway.java

@@ -19,11 +19,8 @@
 
 package org.elasticsearch.index.gateway;
 
-import org.elasticsearch.ElasticsearchIllegalStateException;
 import org.elasticsearch.index.CloseableIndexComponent;
-import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
 import org.elasticsearch.index.shard.IndexShardComponent;
-import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.indices.recovery.RecoveryState;
 
 /**
@@ -38,117 +35,9 @@ public interface IndexShardGateway extends IndexShardComponent, CloseableIndexCo
      */
     RecoveryState recoveryState();
 
-    /**
-     * The last snapshot status performed. Can be <tt>null</tt>.
-     */
-    SnapshotStatus lastSnapshotStatus();
-
-    /**
-     * The current snapshot status being performed. Can be <tt>null</tt> indicating that no snapshot
-     * is being executed currently.
-     */
-    SnapshotStatus currentSnapshotStatus();
-
     /**
      * Recovers the state of the shard from the gateway.
      */
     void recover(boolean indexShouldExists, RecoveryState recoveryState) throws IndexShardGatewayRecoveryException;
 
-    /**
-     * Snapshots the given shard into the gateway.
-     */
-    SnapshotStatus snapshot(Snapshot snapshot) throws IndexShardGatewaySnapshotFailedException;
-
-    /**
-     * Returns <tt>true</tt> if snapshot is even required on this gateway (i.e. mainly handles recovery).
-     */
-    boolean requiresSnapshot();
-
-    /**
-     * Returns <tt>true</tt> if this gateway requires scheduling management for snapshot
-     * operations.
-     */
-    boolean requiresSnapshotScheduling();
-
-    SnapshotLock obtainSnapshotLock() throws Exception;
-
-    public static interface SnapshotLock {
-        void release();
-    }
-
-    public static final SnapshotLock NO_SNAPSHOT_LOCK = new SnapshotLock() {
-        @Override
-        public void release() {
-        }
-    };
-
-    public static class Snapshot {
-        private final SnapshotIndexCommit indexCommit;
-        private final Translog.Snapshot translogSnapshot;
-
-        private final long lastIndexVersion;
-        private final long lastTranslogId;
-        private final long lastTranslogLength;
-        private final int lastTotalTranslogOperations;
-
-        public Snapshot(SnapshotIndexCommit indexCommit, Translog.Snapshot translogSnapshot, long lastIndexVersion, long lastTranslogId, long lastTranslogLength, int lastTotalTranslogOperations) {
-            this.indexCommit = indexCommit;
-            this.translogSnapshot = translogSnapshot;
-            this.lastIndexVersion = lastIndexVersion;
-            this.lastTranslogId = lastTranslogId;
-            this.lastTranslogLength = lastTranslogLength;
-            this.lastTotalTranslogOperations = lastTotalTranslogOperations;
-        }
-
-        /**
-         * Indicates that the index has changed from the latest snapshot.
-         */
-        public boolean indexChanged() {
-            return lastIndexVersion != indexCommit.getGeneration();
-        }
-
-        /**
-         * Indicates that a new transaction log has been created. Note check this <b>before</b> you
-         * check {@link #sameTranslogNewOperations()}.
-         */
-        public boolean newTranslogCreated() {
-            return translogSnapshot.translogId() != lastTranslogId;
-        }
-
-        /**
-         * Indicates that the same translog exists, but new operations have been appended to it. Throws
-         * {@link org.elasticsearch.ElasticsearchIllegalStateException} if {@link #newTranslogCreated()} is <tt>true</tt>, so
-         * always check that first.
-         */
-        public boolean sameTranslogNewOperations() {
-            if (newTranslogCreated()) {
-                throw new ElasticsearchIllegalStateException("Should not be called when there is a new translog");
-            }
-            return translogSnapshot.length() > lastTranslogLength;
-        }
-
-        public SnapshotIndexCommit indexCommit() {
-            return indexCommit;
-        }
-
-        public Translog.Snapshot translogSnapshot() {
-            return translogSnapshot;
-        }
-
-        public long lastIndexVersion() {
-            return lastIndexVersion;
-        }
-
-        public long lastTranslogId() {
-            return lastTranslogId;
-        }
-
-        public long lastTranslogLength() {
-            return lastTranslogLength;
-        }
-
-        public int lastTotalTranslogOperations() {
-            return this.lastTotalTranslogOperations;
-        }
-    }
 }

+ 2 - 210
src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayService.java

@@ -25,22 +25,15 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.CloseableIndexComponent;
-import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
 import org.elasticsearch.index.engine.Engine;
-import org.elasticsearch.index.engine.EngineException;
-import org.elasticsearch.index.engine.SnapshotFailedEngineException;
 import org.elasticsearch.index.settings.IndexSettings;
-import org.elasticsearch.index.settings.IndexSettingsService;
 import org.elasticsearch.index.shard.*;
 import org.elasticsearch.index.shard.service.IndexShard;
 import org.elasticsearch.index.shard.service.InternalIndexShard;
 import org.elasticsearch.index.snapshots.IndexShardSnapshotAndRestoreService;
-import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.indices.recovery.RecoveryState;
 import org.elasticsearch.threadpool.ThreadPool;
 
-import java.util.concurrent.ScheduledFuture;
-
 import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
 
 /**
@@ -48,12 +41,8 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
  */
 public class IndexShardGatewayService extends AbstractIndexShardComponent implements CloseableIndexComponent {
 
-    private final boolean snapshotOnClose;
-
     private final ThreadPool threadPool;
 
-    private final IndexSettingsService indexSettingsService;
-
     private final ClusterService clusterService;
 
     private final InternalIndexShard indexShard;
@@ -62,70 +51,24 @@ public class IndexShardGatewayService extends AbstractIndexShardComponent implem
 
     private final IndexShardSnapshotAndRestoreService snapshotService;
 
-
-    private volatile long lastIndexVersion;
-
-    private volatile long lastTranslogId = -1;
-
-    private volatile int lastTotalTranslogOperations;
-
-    private volatile long lastTranslogLength;
-
-    private volatile TimeValue snapshotInterval;
-
-    private volatile ScheduledFuture snapshotScheduleFuture;
-
     private RecoveryState recoveryState;
 
-    private IndexShardGateway.SnapshotLock snapshotLock;
-
-    private final SnapshotRunnable snapshotRunnable = new SnapshotRunnable();
-
-    private final ApplySettings applySettings = new ApplySettings();
-
-
     @Inject
-    public IndexShardGatewayService(ShardId shardId, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService,
-                                    ThreadPool threadPool, IndexShard indexShard, IndexShardGateway shardGateway, IndexShardSnapshotAndRestoreService snapshotService,
-                                    ClusterService clusterService) {
+    public IndexShardGatewayService(ShardId shardId, @IndexSettings Settings indexSettings, ThreadPool threadPool,
+                                    IndexShard indexShard, IndexShardGateway shardGateway, IndexShardSnapshotAndRestoreService snapshotService, ClusterService clusterService) {
         super(shardId, indexSettings);
         this.threadPool = threadPool;
-        this.indexSettingsService = indexSettingsService;
         this.indexShard = (InternalIndexShard) indexShard;
         this.shardGateway = shardGateway;
         this.snapshotService = snapshotService;
         this.recoveryState = new RecoveryState(shardId);
         this.clusterService = clusterService;
-
-        this.snapshotOnClose = componentSettings.getAsBoolean("snapshot_on_close", true);
-        this.snapshotInterval = componentSettings.getAsTime("snapshot_interval", TimeValue.timeValueSeconds(10));
-
-        indexSettingsService.addListener(applySettings);
-    }
-
-    public static final String INDEX_GATEWAY_SNAPSHOT_INTERVAL = "index.gateway.snapshot_interval";
-
-    class ApplySettings implements IndexSettingsService.Listener {
-        @Override
-        public void onRefreshSettings(Settings settings) {
-            TimeValue snapshotInterval = settings.getAsTime(INDEX_GATEWAY_SNAPSHOT_INTERVAL, IndexShardGatewayService.this.snapshotInterval);
-            if (!snapshotInterval.equals(IndexShardGatewayService.this.snapshotInterval)) {
-                logger.info("updating snapshot_interval from [{}] to [{}]", IndexShardGatewayService.this.snapshotInterval, snapshotInterval);
-                IndexShardGatewayService.this.snapshotInterval = snapshotInterval;
-                if (snapshotScheduleFuture != null) {
-                    snapshotScheduleFuture.cancel(false);
-                    snapshotScheduleFuture = null;
-                }
-                scheduleSnapshotIfNeeded();
-            }
-        }
     }
 
     /**
      * Should be called when the shard routing state has changed (note, after the state has been set on the shard).
      */
     public void routingStateChanged() {
-        scheduleSnapshotIfNeeded();
     }
 
     public static interface RecoveryListener {
@@ -143,14 +86,6 @@ public class IndexShardGatewayService extends AbstractIndexShardComponent implem
         return recoveryState;
     }
 
-    public SnapshotStatus snapshotStatus() {
-        SnapshotStatus snapshotStatus = shardGateway.currentSnapshotStatus();
-        if (snapshotStatus != null) {
-            return snapshotStatus;
-        }
-        return shardGateway.lastSnapshotStatus();
-    }
-
     /**
      * Recovers the state of the shard from the gateway.
      */
@@ -197,11 +132,6 @@ public class IndexShardGatewayService extends AbstractIndexShardComponent implem
                         shardGateway.recover(indexShouldExists, recoveryState);
                     }
 
-                    lastIndexVersion = recoveryState.getIndex().version();
-                    lastTranslogId = -1;
-                    lastTranslogLength = 0;
-                    lastTotalTranslogOperations = recoveryState.getTranslog().currentTranslogOperations();
-
                     // start the shard if the gateway has not started it already. Note that if the gateway
                     // moved shard to POST_RECOVERY, it may have been started as well if:
                     // 1) master sent a new cluster state indicating shard is initializing
@@ -230,7 +160,6 @@ public class IndexShardGatewayService extends AbstractIndexShardComponent implem
                         logger.debug("recovery completed from [{}], took [{}]", shardGateway, timeValueMillis(recoveryState.getTimer().time()));
                     }
                     listener.onRecoveryDone();
-                    scheduleSnapshotIfNeeded();
                 } catch (IndexShardGatewayRecoveryException e) {
                     if (indexShard.state() == IndexShardState.CLOSED) {
                         // got closed on us, just ignore this recovery
@@ -259,145 +188,8 @@ public class IndexShardGatewayService extends AbstractIndexShardComponent implem
         });
     }
 
-    /**
-     * Snapshots the given shard into the gateway.
-     */
-    public synchronized void snapshot(final String reason) throws IndexShardGatewaySnapshotFailedException {
-        if (!indexShard.routingEntry().primary()) {
-            return;
-//            throw new IndexShardGatewaySnapshotNotAllowedException(shardId, "Snapshot not allowed on non primary shard");
-        }
-        if (indexShard.routingEntry().relocating()) {
-            // do not snapshot when in the process of relocation of primaries so we won't get conflicts
-            return;
-        }
-        if (indexShard.state() == IndexShardState.CREATED) {
-            // shard has just been created, ignore it and return
-            return;
-        }
-        if (indexShard.state() == IndexShardState.RECOVERING) {
-            // shard is recovering, don't snapshot
-            return;
-        }
-
-        if (snapshotLock == null) {
-            try {
-                snapshotLock = shardGateway.obtainSnapshotLock();
-            } catch (Exception e) {
-                logger.warn("failed to obtain snapshot lock, ignoring snapshot", e);
-                return;
-            }
-        }
-
-        try {
-            SnapshotStatus snapshotStatus = indexShard.snapshot(new Engine.SnapshotHandler<SnapshotStatus>() {
-                @Override
-                public SnapshotStatus snapshot(SnapshotIndexCommit snapshotIndexCommit, Translog.Snapshot translogSnapshot) throws EngineException {
-                    if (lastIndexVersion != snapshotIndexCommit.getGeneration() || lastTranslogId != translogSnapshot.translogId() || lastTranslogLength < translogSnapshot.length()) {
-
-                        logger.debug("snapshot ({}) to {} ...", reason, shardGateway);
-                        SnapshotStatus snapshotStatus =
-                                shardGateway.snapshot(new IndexShardGateway.Snapshot(snapshotIndexCommit, translogSnapshot, lastIndexVersion, lastTranslogId, lastTranslogLength, lastTotalTranslogOperations));
-
-                        lastIndexVersion = snapshotIndexCommit.getGeneration();
-                        lastTranslogId = translogSnapshot.translogId();
-                        lastTranslogLength = translogSnapshot.length();
-                        lastTotalTranslogOperations = translogSnapshot.estimatedTotalOperations();
-                        return snapshotStatus;
-                    }
-                    return null;
-                }
-            });
-            if (snapshotStatus != null) {
-                if (logger.isDebugEnabled()) {
-                    StringBuilder sb = new StringBuilder();
-                    sb.append("snapshot (").append(reason).append(") completed to ").append(shardGateway).append(", took [").append(TimeValue.timeValueMillis(snapshotStatus.time())).append("]\n");
-                    sb.append("    index    : version [").append(lastIndexVersion).append("], number_of_files [").append(snapshotStatus.index().numberOfFiles()).append("] with total_size [").append(new ByteSizeValue(snapshotStatus.index().totalSize())).append("], took [").append(TimeValue.timeValueMillis(snapshotStatus.index().time())).append("]\n");
-                    sb.append("    translog : id      [").append(lastTranslogId).append("], number_of_operations [").append(snapshotStatus.translog().expectedNumberOfOperations()).append("], took [").append(TimeValue.timeValueMillis(snapshotStatus.translog().time())).append("]");
-                    logger.debug(sb.toString());
-                }
-            }
-        } catch (SnapshotFailedEngineException e) {
-            if (e.getCause() instanceof IllegalStateException) {
-                // ignore, that's fine, snapshot has not started yet
-            } else {
-                throw new IndexShardGatewaySnapshotFailedException(shardId, "Failed to snapshot", e);
-            }
-        } catch (IllegalIndexShardStateException e) {
-            // ignore, that's fine, snapshot has not started yet
-        } catch (IndexShardGatewaySnapshotFailedException e) {
-            throw e;
-        } catch (Exception e) {
-            throw new IndexShardGatewaySnapshotFailedException(shardId, "Failed to snapshot", e);
-        }
-    }
-
-    public void snapshotOnClose() {
-        if (shardGateway.requiresSnapshot() && snapshotOnClose) {
-            try {
-                snapshot("shutdown");
-            } catch (Exception e) {
-                logger.warn("failed to snapshot on close", e);
-            }
-        }
-    }
-
     @Override
     public synchronized void close() {
-        indexSettingsService.removeListener(applySettings);
-        if (snapshotScheduleFuture != null) {
-            snapshotScheduleFuture.cancel(true);
-            snapshotScheduleFuture = null;
-        }
         shardGateway.close();
-        if (snapshotLock != null) {
-            snapshotLock.release();
-        }
-    }
-
-    private synchronized void scheduleSnapshotIfNeeded() {
-        if (!shardGateway.requiresSnapshot()) {
-            return;
-        }
-        if (!shardGateway.requiresSnapshotScheduling()) {
-            return;
-        }
-        if (!indexShard.routingEntry().primary()) {
-            // we only do snapshotting on the primary shard
-            return;
-        }
-        if (!indexShard.routingEntry().started()) {
-            // we only schedule when the cluster assumes we have started
-            return;
-        }
-        if (snapshotScheduleFuture != null) {
-            // we are already scheduling this one, ignore
-            return;
-        }
-        if (snapshotInterval.millis() != -1) {
-            // we need to schedule snapshot
-            if (logger.isDebugEnabled()) {
-                logger.debug("scheduling snapshot every [{}]", snapshotInterval);
-            }
-            snapshotScheduleFuture = threadPool.schedule(snapshotInterval, ThreadPool.Names.SNAPSHOT, snapshotRunnable);
-        }
-    }
-
-    private class SnapshotRunnable implements Runnable {
-        @Override
-        public synchronized void run() {
-            try {
-                snapshot("scheduled");
-            } catch (Throwable e) {
-                if (indexShard.state() == IndexShardState.CLOSED) {
-                    return;
-                }
-                logger.warn("failed to snapshot (scheduled)", e);
-            }
-            // schedule it again
-            if (indexShard.state() != IndexShardState.CLOSED) {
-                snapshotScheduleFuture = threadPool.schedule(snapshotInterval, ThreadPool.Names.SNAPSHOT, this);
-            }
-        }
     }
 }

+ 0 - 87
src/main/java/org/elasticsearch/index/gateway/blobstore/BlobStoreIndexGateway.java

@@ -1,87 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.gateway.blobstore;
-
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.common.blobstore.BlobPath;
-import org.elasticsearch.common.blobstore.BlobStore;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.gateway.Gateway;
-import org.elasticsearch.gateway.blobstore.BlobStoreGateway;
-import org.elasticsearch.gateway.none.NoneGateway;
-import org.elasticsearch.index.AbstractIndexComponent;
-import org.elasticsearch.index.Index;
-import org.elasticsearch.index.gateway.IndexGateway;
-import org.elasticsearch.index.settings.IndexSettings;
-
-/**
- *
- */
-public abstract class BlobStoreIndexGateway extends AbstractIndexComponent implements IndexGateway {
-
-    private final BlobStoreGateway gateway;
-
-    private final BlobStore blobStore;
-
-    private final BlobPath indexPath;
-
-    protected ByteSizeValue chunkSize;
-
-    protected BlobStoreIndexGateway(Index index, @IndexSettings Settings indexSettings, Gateway gateway) {
-        super(index, indexSettings);
-
-        if (gateway.type().equals(NoneGateway.TYPE)) {
-            logger.warn("index gateway is configured, but no cluster level gateway configured, cluster level metadata will be lost on full shutdown");
-        }
-
-        this.gateway = (BlobStoreGateway) gateway;
-        this.blobStore = this.gateway.blobStore();
-
-        this.chunkSize = componentSettings.getAsBytesSize("chunk_size", this.gateway.chunkSize());
-
-        this.indexPath = this.gateway.basePath().add("indices").add(index.name());
-    }
-
-    @Override
-    public String toString() {
-        return type() + "://" + blobStore + "/" + indexPath;
-    }
-
-    public BlobStore blobStore() {
-        return blobStore;
-    }
-
-    public ByteSizeValue chunkSize() {
-        return this.chunkSize;
-    }
-
-    public BlobPath shardPath(int shardId) {
-        return indexPath.add(Integer.toString(shardId));
-    }
-
-    public static BlobPath shardPath(BlobPath basePath, String index, int shardId) {
-        return basePath.add("indices").add(index).add(Integer.toString(shardId));
-    }
-
-    @Override
-    public void close() throws ElasticsearchException {
-    }
-}

+ 0 - 881
src/main/java/org/elasticsearch/index/gateway/blobstore/BlobStoreIndexShardGateway.java

@@ -1,881 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.gateway.blobstore;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.store.IndexOutput;
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.common.blobstore.*;
-import org.elasticsearch.common.io.stream.BytesStreamInput;
-import org.elasticsearch.common.io.stream.BytesStreamOutput;
-import org.elasticsearch.common.lucene.Lucene;
-import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
-import org.elasticsearch.common.lucene.store.ThreadSafeInputStreamIndexInput;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
-import org.elasticsearch.index.gateway.*;
-import org.elasticsearch.index.settings.IndexSettings;
-import org.elasticsearch.index.shard.AbstractIndexShardComponent;
-import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.index.shard.service.IndexShard;
-import org.elasticsearch.index.shard.service.InternalIndexShard;
-import org.elasticsearch.index.store.Store;
-import org.elasticsearch.index.store.StoreFileMetaData;
-import org.elasticsearch.index.translog.Translog;
-import org.elasticsearch.index.translog.TranslogStreams;
-import org.elasticsearch.indices.recovery.RecoveryState;
-import org.elasticsearch.threadpool.ThreadPool;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.atomic.AtomicReference;
-
-/**
- *
- */
-public abstract class BlobStoreIndexShardGateway extends AbstractIndexShardComponent implements IndexShardGateway {
-
-    protected final ThreadPool threadPool;
-
-    protected final InternalIndexShard indexShard;
-
-    protected final Store store;
-
-    protected final ByteSizeValue chunkSize;
-
-    protected final BlobStore blobStore;
-
-    protected final BlobPath shardPath;
-
-    protected final ImmutableBlobContainer blobContainer;
-
-    private volatile RecoveryState recoveryState;
-
-    private volatile SnapshotStatus lastSnapshotStatus;
-
-    private volatile SnapshotStatus currentSnapshotStatus;
-
-    protected BlobStoreIndexShardGateway(ShardId shardId, @IndexSettings Settings indexSettings, ThreadPool threadPool, IndexGateway indexGateway,
-                                         IndexShard indexShard, Store store) {
-        super(shardId, indexSettings);
-
-        this.threadPool = threadPool;
-        this.indexShard = (InternalIndexShard) indexShard;
-        this.store = store;
-
-        BlobStoreIndexGateway blobStoreIndexGateway = (BlobStoreIndexGateway) indexGateway;
-
-        this.chunkSize = blobStoreIndexGateway.chunkSize(); // can be null -> no chunking
-        this.blobStore = blobStoreIndexGateway.blobStore();
-        this.shardPath = blobStoreIndexGateway.shardPath(shardId.id());
-
-        this.blobContainer = blobStore.immutableBlobContainer(shardPath);
-
-        this.recoveryState = new RecoveryState();
-    }
-
-    @Override
-    public RecoveryState recoveryState() {
-        return this.recoveryState;
-    }
-
-    @Override
-    public String toString() {
-        return type() + "://" + blobStore + "/" + shardPath;
-    }
-
-    @Override
-    public boolean requiresSnapshot() {
-        return true;
-    }
-
-    @Override
-    public boolean requiresSnapshotScheduling() {
-        return true;
-    }
-
-    @Override
-    public SnapshotLock obtainSnapshotLock() throws Exception {
-        return NO_SNAPSHOT_LOCK;
-    }
-
-    @Override
-    public void close() throws ElasticsearchException {
-    }
-
-    @Override
-    public SnapshotStatus lastSnapshotStatus() {
-        return this.lastSnapshotStatus;
-    }
-
-    @Override
-    public SnapshotStatus currentSnapshotStatus() {
-        SnapshotStatus snapshotStatus = this.currentSnapshotStatus;
-        if (snapshotStatus == null) {
-            return snapshotStatus;
-        }
-        if (snapshotStatus.stage() != SnapshotStatus.Stage.DONE || snapshotStatus.stage() != SnapshotStatus.Stage.FAILURE) {
-            snapshotStatus.time(System.currentTimeMillis() - snapshotStatus.startTime());
-        }
-        return snapshotStatus;
-    }
-
-    @Override
-    public SnapshotStatus snapshot(final Snapshot snapshot) throws IndexShardGatewaySnapshotFailedException {
-        currentSnapshotStatus = new SnapshotStatus();
-        currentSnapshotStatus.startTime(System.currentTimeMillis());
-
-        try {
-            doSnapshot(snapshot);
-            currentSnapshotStatus.time(System.currentTimeMillis() - currentSnapshotStatus.startTime());
-            currentSnapshotStatus.updateStage(SnapshotStatus.Stage.DONE);
-        } catch (Exception e) {
-            currentSnapshotStatus.time(System.currentTimeMillis() - currentSnapshotStatus.startTime());
-            currentSnapshotStatus.updateStage(SnapshotStatus.Stage.FAILURE);
-            currentSnapshotStatus.failed(e);
-            if (e instanceof IndexShardGatewaySnapshotFailedException) {
-                throw (IndexShardGatewaySnapshotFailedException) e;
-            } else {
-                throw new IndexShardGatewaySnapshotFailedException(shardId, e.getMessage(), e);
-            }
-        } finally {
-            this.lastSnapshotStatus = currentSnapshotStatus;
-            this.currentSnapshotStatus = null;
-        }
-        return this.lastSnapshotStatus;
-    }
-
-    private void doSnapshot(final Snapshot snapshot) throws IndexShardGatewaySnapshotFailedException {
-        ImmutableMap<String, BlobMetaData> blobs;
-        try {
-            blobs = blobContainer.listBlobs();
-        } catch (IOException e) {
-            throw new IndexShardGatewaySnapshotFailedException(shardId, "failed to list blobs", e);
-        }
-
-        long generation = findLatestFileNameGeneration(blobs);
-        CommitPoints commitPoints = buildCommitPoints(blobs);
-
-        currentSnapshotStatus.index().startTime(System.currentTimeMillis());
-        currentSnapshotStatus.updateStage(SnapshotStatus.Stage.INDEX);
-
-        final SnapshotIndexCommit snapshotIndexCommit = snapshot.indexCommit();
-        final Translog.Snapshot translogSnapshot = snapshot.translogSnapshot();
-
-        final CountDownLatch indexLatch = new CountDownLatch(snapshotIndexCommit.getFiles().length);
-        final CopyOnWriteArrayList<Throwable> failures = new CopyOnWriteArrayList<Throwable>();
-        final List<CommitPoint.FileInfo> indexCommitPointFiles = Lists.newArrayList();
-
-        int indexNumberOfFiles = 0;
-        long indexTotalFilesSize = 0;
-        for (final String fileName : snapshotIndexCommit.getFiles()) {
-            StoreFileMetaData md;
-            try {
-                md = store.metaData(fileName);
-            } catch (IOException e) {
-                throw new IndexShardGatewaySnapshotFailedException(shardId, "Failed to get store file metadata", e);
-            }
-
-            boolean snapshotRequired = false;
-            if (snapshot.indexChanged() && fileName.equals(snapshotIndexCommit.getSegmentsFileName())) {
-                snapshotRequired = true; // we want to always snapshot the segment file if the index changed
-            }
-
-            CommitPoint.FileInfo fileInfo = commitPoints.findPhysicalIndexFile(fileName);
-            if (fileInfo == null || !fileInfo.isSame(md) || !commitPointFileExistsInBlobs(fileInfo, blobs)) {
-                // commit point file does not exists in any commit point, or has different length, or does not fully exists in the listed blobs
-                snapshotRequired = true;
-            }
-
-            if (snapshotRequired) {
-                indexNumberOfFiles++;
-                indexTotalFilesSize += md.length();
-                // create a new FileInfo
-                try {
-                    CommitPoint.FileInfo snapshotFileInfo = new CommitPoint.FileInfo(fileNameFromGeneration(++generation), fileName, md.length(), md.checksum());
-                    indexCommitPointFiles.add(snapshotFileInfo);
-                    snapshotFile(snapshotIndexCommit.getDirectory(), snapshotFileInfo, indexLatch, failures);
-                } catch (IOException e) {
-                    failures.add(e);
-                    indexLatch.countDown();
-                }
-            } else {
-                indexCommitPointFiles.add(fileInfo);
-                indexLatch.countDown();
-            }
-        }
-        currentSnapshotStatus.index().files(indexNumberOfFiles, indexTotalFilesSize);
-
-        try {
-            indexLatch.await();
-        } catch (InterruptedException e) {
-            failures.add(e);
-        }
-        if (!failures.isEmpty()) {
-            throw new IndexShardGatewaySnapshotFailedException(shardId(), "Failed to perform snapshot (index files)", failures.get(failures.size() - 1));
-        }
-
-        currentSnapshotStatus.index().time(System.currentTimeMillis() - currentSnapshotStatus.index().startTime());
-
-        currentSnapshotStatus.updateStage(SnapshotStatus.Stage.TRANSLOG);
-        currentSnapshotStatus.translog().startTime(System.currentTimeMillis());
-
-        // Note, we assume the snapshot is always started from "base 0". We need to seek forward if we want to lastTranslogPosition if we want the delta
-        List<CommitPoint.FileInfo> translogCommitPointFiles = Lists.newArrayList();
-        int expectedNumberOfOperations = 0;
-        boolean snapshotRequired = false;
-        if (snapshot.newTranslogCreated()) {
-            if (translogSnapshot.lengthInBytes() > 0) {
-                snapshotRequired = true;
-                expectedNumberOfOperations = translogSnapshot.estimatedTotalOperations();
-            }
-        } else {
-            // if we have a commit point, check that we have all the files listed in it in the blob store
-            if (!commitPoints.commits().isEmpty()) {
-                CommitPoint commitPoint = commitPoints.commits().get(0);
-                boolean allTranslogFilesExists = true;
-                for (CommitPoint.FileInfo fileInfo : commitPoint.translogFiles()) {
-                    if (!commitPointFileExistsInBlobs(fileInfo, blobs)) {
-                        allTranslogFilesExists = false;
-                        break;
-                    }
-                }
-                // if everything exists, we can seek forward in case there are new operations, otherwise, we copy over all again...
-                if (allTranslogFilesExists) {
-                    translogCommitPointFiles.addAll(commitPoint.translogFiles());
-                    if (snapshot.sameTranslogNewOperations()) {
-                        translogSnapshot.seekForward(snapshot.lastTranslogLength());
-                        if (translogSnapshot.lengthInBytes() > 0) {
-                            snapshotRequired = true;
-                            expectedNumberOfOperations = translogSnapshot.estimatedTotalOperations() - snapshot.lastTotalTranslogOperations();
-                        }
-                    } // else (no operations, nothing to snapshot)
-                } else {
-                    // a full translog snapshot is required
-                    if (translogSnapshot.lengthInBytes() > 0) {
-                        expectedNumberOfOperations = translogSnapshot.estimatedTotalOperations();
-                        snapshotRequired = true;
-                    }
-                }
-            } else {
-                // no commit point, snapshot all the translog
-                if (translogSnapshot.lengthInBytes() > 0) {
-                    expectedNumberOfOperations = translogSnapshot.estimatedTotalOperations();
-                    snapshotRequired = true;
-                }
-            }
-        }
-        currentSnapshotStatus.translog().expectedNumberOfOperations(expectedNumberOfOperations);
-
-        if (snapshotRequired) {
-            CommitPoint.FileInfo addedTranslogFileInfo = new CommitPoint.FileInfo(fileNameFromGeneration(++generation), "translog-" + translogSnapshot.translogId(), translogSnapshot.lengthInBytes(), null /* no need for checksum in translog */);
-            translogCommitPointFiles.add(addedTranslogFileInfo);
-            try {
-                snapshotTranslog(translogSnapshot, addedTranslogFileInfo);
-            } catch (Exception e) {
-                throw new IndexShardGatewaySnapshotFailedException(shardId, "Failed to snapshot translog", e);
-            }
-        }
-        currentSnapshotStatus.translog().time(System.currentTimeMillis() - currentSnapshotStatus.translog().startTime());
-
-        // now create and write the commit point
-        currentSnapshotStatus.updateStage(SnapshotStatus.Stage.FINALIZE);
-        long version = 0;
-        if (!commitPoints.commits().isEmpty()) {
-            version = commitPoints.commits().iterator().next().version() + 1;
-        }
-        String commitPointName = "commit-" + Long.toString(version, Character.MAX_RADIX);
-        CommitPoint commitPoint = new CommitPoint(version, commitPointName, CommitPoint.Type.GENERATED, indexCommitPointFiles, translogCommitPointFiles);
-        try {
-            byte[] commitPointData = CommitPoints.toXContent(commitPoint);
-            blobContainer.writeBlob(commitPointName, new BytesStreamInput(commitPointData, false), commitPointData.length);
-        } catch (Exception e) {
-            throw new IndexShardGatewaySnapshotFailedException(shardId, "Failed to write commit point", e);
-        }
-
-        // delete all files that are not referenced by any commit point
-        // build a new CommitPoint, that includes this one and all the saved ones
-        List<CommitPoint> newCommitPointsList = Lists.newArrayList();
-        newCommitPointsList.add(commitPoint);
-        for (CommitPoint point : commitPoints) {
-            if (point.type() == CommitPoint.Type.SAVED) {
-                newCommitPointsList.add(point);
-            }
-        }
-        CommitPoints newCommitPoints = new CommitPoints(newCommitPointsList);
-        // first, go over and delete all the commit points
-        for (String blobName : blobs.keySet()) {
-            if (!blobName.startsWith("commit-")) {
-                continue;
-            }
-            long checkedVersion = Long.parseLong(blobName.substring("commit-".length()), Character.MAX_RADIX);
-            if (!newCommitPoints.hasVersion(checkedVersion)) {
-                try {
-                    blobContainer.deleteBlob(blobName);
-                } catch (IOException e) {
-                    // ignore
-                }
-            }
-        }
-        // now go over all the blobs, and if they don't exists in a commit point, delete them
-        for (String blobName : blobs.keySet()) {
-            String name = blobName;
-            if (!name.startsWith("__")) {
-                continue;
-            }
-            if (blobName.contains(".part")) {
-                name = blobName.substring(0, blobName.indexOf(".part"));
-            }
-            if (newCommitPoints.findNameFile(name) == null) {
-                try {
-                    blobContainer.deleteBlob(blobName);
-                } catch (IOException e) {
-                    // ignore, will delete it laters
-                }
-            }
-        }
-    }
-
-    @Override
-    public void recover(boolean indexShouldExists, RecoveryState recoveryState) throws IndexShardGatewayRecoveryException {
-        this.recoveryState = recoveryState;
-
-        final ImmutableMap<String, BlobMetaData> blobs;
-        try {
-            blobs = blobContainer.listBlobs();
-        } catch (IOException e) {
-            throw new IndexShardGatewayRecoveryException(shardId, "Failed to list content of gateway", e);
-        }
-
-        List<CommitPoint> commitPointsList = Lists.newArrayList();
-        boolean atLeastOneCommitPointExists = false;
-        for (String name : blobs.keySet()) {
-            if (name.startsWith("commit-")) {
-                atLeastOneCommitPointExists = true;
-                try {
-                    commitPointsList.add(CommitPoints.fromXContent(blobContainer.readBlobFully(name)));
-                } catch (Exception e) {
-                    logger.warn("failed to read commit point [{}]", e, name);
-                }
-            }
-        }
-        if (atLeastOneCommitPointExists && commitPointsList.isEmpty()) {
-            // no commit point managed to load, bail so we won't corrupt the index, will require manual intervention
-            throw new IndexShardGatewayRecoveryException(shardId, "Commit points exists but none could be loaded", null);
-        }
-        CommitPoints commitPoints = new CommitPoints(commitPointsList);
-
-        if (commitPoints.commits().isEmpty()) {
-            // no commit points, clean the store just so we won't recover wrong files
-            try {
-                indexShard.store().deleteContent();
-            } catch (IOException e) {
-                logger.warn("failed to clean store before starting shard", e);
-            }
-            recoveryState.getIndex().startTime(System.currentTimeMillis());
-            recoveryState.getIndex().time(System.currentTimeMillis() - recoveryState.getIndex().startTime());
-            return;
-        }
-
-        for (CommitPoint commitPoint : commitPoints) {
-            if (!commitPointExistsInBlobs(commitPoint, blobs)) {
-                logger.warn("listed commit_point [{}]/[{}], but not all files exists, ignoring", commitPoint.name(), commitPoint.version());
-                continue;
-            }
-            try {
-                recoveryState.getIndex().startTime(System.currentTimeMillis());
-                recoverIndex(commitPoint, blobs);
-                recoveryState.getIndex().time(System.currentTimeMillis() - recoveryState.getIndex().startTime());
-
-                recoverTranslog(commitPoint, blobs);
-                return;
-            } catch (Exception e) {
-                throw new IndexShardGatewayRecoveryException(shardId, "failed to recover commit_point [" + commitPoint.name() + "]/[" + commitPoint.version() + "]", e);
-            }
-        }
-        throw new IndexShardGatewayRecoveryException(shardId, "No commit point data is available in gateway", null);
-    }
-
-    private void recoverTranslog(CommitPoint commitPoint, ImmutableMap<String, BlobMetaData> blobs) throws IndexShardGatewayRecoveryException {
-        if (commitPoint.translogFiles().isEmpty()) {
-            // no translog files, bail
-            recoveryState.getStart().startTime(System.currentTimeMillis());
-            recoveryState.setStage(RecoveryState.Stage.START);
-            indexShard.postRecovery("post recovery from gateway, no translog");
-            recoveryState.getStart().time(System.currentTimeMillis() - recoveryState.getStart().startTime());
-            recoveryState.getStart().checkIndexTime(indexShard.checkIndexTook());
-            return;
-        }
-
-        try {
-            recoveryState.getStart().startTime(System.currentTimeMillis());
-            recoveryState.setStage(RecoveryState.Stage.START);
-            indexShard.performRecoveryPrepareForTranslog();
-            recoveryState.getStart().time(System.currentTimeMillis() - recoveryState.getStart().startTime());
-            recoveryState.getStart().checkIndexTime(indexShard.checkIndexTook());
-
-            recoveryState.setStage(RecoveryState.Stage.TRANSLOG);
-            recoveryState.getTranslog().startTime(System.currentTimeMillis());
-
-            final AtomicReference<Throwable> failure = new AtomicReference<Throwable>();
-            final CountDownLatch latch = new CountDownLatch(1);
-
-            final Iterator<CommitPoint.FileInfo> transIt = commitPoint.translogFiles().iterator();
-
-            blobContainer.readBlob(transIt.next().name(), new BlobContainer.ReadBlobListener() {
-                BytesStreamOutput bos = new BytesStreamOutput();
-                boolean ignore = false;
-
-                @Override
-                public synchronized void onPartial(byte[] data, int offset, int size) throws IOException {
-                    if (ignore) {
-                        return;
-                    }
-                    bos.write(data, offset, size);
-                    // if we don't have enough to read the header size of the first translog, bail and wait for the next one
-                    if (bos.size() < 4) {
-                        return;
-                    }
-                    BytesStreamInput si = new BytesStreamInput(bos.bytes());
-                    int position;
-                    while (true) {
-                        try {
-                            position = si.position();
-                            if (position + 4 > bos.size()) {
-                                break;
-                            }
-                            int opSize = si.readInt();
-                            int curPos = si.position();
-                            if ((si.position() + opSize) > bos.size()) {
-                                break;
-                            }
-                            Translog.Operation operation = TranslogStreams.readTranslogOperation(si);
-                            if ((si.position() - curPos) != opSize) {
-                                logger.warn("mismatch in size, expected [{}], got [{}]", opSize, si.position() - curPos);
-                            }
-                            recoveryState.getTranslog().addTranslogOperations(1);
-                            indexShard.performRecoveryOperation(operation);
-                            if (si.position() >= bos.size()) {
-                                position = si.position();
-                                break;
-                            }
-                        } catch (Throwable e) {
-                            logger.warn("failed to retrieve translog after [{}] operations, ignoring the rest, considered corrupted", e, recoveryState.getTranslog().currentTranslogOperations());
-                            ignore = true;
-                            latch.countDown();
-                            return;
-                        }
-                    }
-
-                    BytesStreamOutput newBos = new BytesStreamOutput();
-
-                    int leftOver = bos.size() - position;
-                    if (leftOver > 0) {
-                        newBos.write(bos.bytes().array(), position, leftOver);
-                    }
-
-                    bos = newBos;
-                }
-
-                @Override
-                public synchronized void onCompleted() {
-                    if (ignore) {
-                        return;
-                    }
-                    if (!transIt.hasNext()) {
-                        latch.countDown();
-                        return;
-                    }
-                    blobContainer.readBlob(transIt.next().name(), this);
-                }
-
-                @Override
-                public void onFailure(Throwable t) {
-                    failure.set(t);
-                    latch.countDown();
-                }
-            });
-
-
-            latch.await();
-            if (failure.get() != null) {
-                throw failure.get();
-            }
-
-            indexShard.performRecoveryFinalization(true);
-            recoveryState.getTranslog().time(System.currentTimeMillis() - recoveryState.getTranslog().startTime());
-        } catch (Throwable e) {
-            throw new IndexShardGatewayRecoveryException(shardId, "Failed to recover translog", e);
-        }
-    }
-
-    private void recoverIndex(CommitPoint commitPoint, ImmutableMap<String, BlobMetaData> blobs) throws Exception {
-        recoveryState.setStage(RecoveryState.Stage.INDEX);
-        int numberOfFiles = 0;
-        long totalSize = 0;
-        int numberOfReusedFiles = 0;
-        long reusedTotalSize = 0;
-
-        List<CommitPoint.FileInfo> filesToRecover = Lists.newArrayList();
-        for (CommitPoint.FileInfo fileInfo : commitPoint.indexFiles()) {
-            String fileName = fileInfo.physicalName();
-            StoreFileMetaData md = null;
-            try {
-                md = store.metaData(fileName);
-            } catch (Exception e) {
-                // no file
-            }
-            // we don't compute checksum for segments, so always recover them
-            if (!fileName.startsWith("segments") && md != null && fileInfo.isSame(md)) {
-                numberOfFiles++;
-                totalSize += md.length();
-                numberOfReusedFiles++;
-                reusedTotalSize += md.length();
-                if (logger.isTraceEnabled()) {
-                    logger.trace("not_recovering [{}], exists in local store and is same", fileInfo.physicalName());
-                }
-            } else {
-                if (logger.isTraceEnabled()) {
-                    if (md == null) {
-                        logger.trace("recovering [{}], does not exists in local store", fileInfo.physicalName());
-                    } else {
-                        logger.trace("recovering [{}], exists in local store but is different", fileInfo.physicalName());
-                    }
-                }
-                numberOfFiles++;
-                totalSize += fileInfo.length();
-                filesToRecover.add(fileInfo);
-            }
-        }
-
-        recoveryState.getIndex().files(numberOfFiles, totalSize, numberOfReusedFiles, reusedTotalSize);
-        if (filesToRecover.isEmpty()) {
-            logger.trace("no files to recover, all exists within the local store");
-        }
-
-        if (logger.isTraceEnabled()) {
-            logger.trace("recovering_files [{}] with total_size [{}], reusing_files [{}] with reused_size [{}]", numberOfFiles, new ByteSizeValue(totalSize), numberOfReusedFiles, new ByteSizeValue(reusedTotalSize));
-        }
-
-        final CountDownLatch latch = new CountDownLatch(filesToRecover.size());
-        final CopyOnWriteArrayList<Throwable> failures = new CopyOnWriteArrayList<Throwable>();
-
-        for (final CommitPoint.FileInfo fileToRecover : filesToRecover) {
-            recoverFile(fileToRecover, blobs, latch, failures);
-        }
-
-        try {
-            latch.await();
-        } catch (InterruptedException e) {
-            throw new IndexShardGatewayRecoveryException(shardId, "Interrupted while recovering index", e);
-        }
-
-        if (!failures.isEmpty()) {
-            throw new IndexShardGatewayRecoveryException(shardId, "Failed to recover index", failures.get(0));
-        }
-
-        // read the gateway data persisted
-        long version = -1;
-        try {
-            if (Lucene.indexExists(store.directory())) {
-                version = Lucene.readSegmentInfos(store.directory()).getVersion();
-            }
-        } catch (IOException e) {
-            throw new IndexShardGatewayRecoveryException(shardId(), "Failed to fetch index version after copying it over", e);
-        }
-        recoveryState.getIndex().updateVersion(version);
-
-        /// now, go over and clean files that are in the store, but were not in the gateway
-        try {
-            for (String storeFile : store.directory().listAll()) {
-                if (!commitPoint.containPhysicalIndexFile(storeFile)) {
-                    try {
-                        store.directory().deleteFile(storeFile);
-                    } catch (Exception e) {
-                        // ignore
-                    }
-                }
-            }
-        } catch (Exception e) {
-            // ignore
-        }
-    }
-
-    private void recoverFile(final CommitPoint.FileInfo fileInfo, final ImmutableMap<String, BlobMetaData> blobs, final CountDownLatch latch, final List<Throwable> failures) {
-        final IndexOutput indexOutput;
-        try {
-            // we create an output with no checksum, this is because the pure binary data of the file is not
-            // the checksum (because of seek). We will create the checksum file once copying is done
-            indexOutput = store.createOutputRaw(fileInfo.physicalName());
-        } catch (IOException e) {
-            failures.add(e);
-            latch.countDown();
-            return;
-        }
-
-        String firstFileToRecover = fileInfo.name();
-        if (!blobs.containsKey(fileInfo.name())) {
-            // chunking, append part0 to it
-            firstFileToRecover = fileInfo.name() + ".part0";
-        }
-        if (!blobs.containsKey(firstFileToRecover)) {
-            // no file, what to do, what to do?
-            logger.warn("no file [{}]/[{}] to recover, ignoring it", fileInfo.name(), fileInfo.physicalName());
-            latch.countDown();
-            return;
-        }
-        final AtomicInteger partIndex = new AtomicInteger();
-
-        blobContainer.readBlob(firstFileToRecover, new BlobContainer.ReadBlobListener() {
-            @Override
-            public synchronized void onPartial(byte[] data, int offset, int size) throws IOException {
-                recoveryState.getIndex().addRecoveredByteCount(size);
-                indexOutput.writeBytes(data, offset, size);
-            }
-
-            @Override
-            public synchronized void onCompleted() {
-                int part = partIndex.incrementAndGet();
-                String partName = fileInfo.name() + ".part" + part;
-                if (blobs.containsKey(partName)) {
-                    // continue with the new part
-                    blobContainer.readBlob(partName, this);
-                    return;
-                } else {
-                    // we are done...
-                    try {
-                        indexOutput.close();
-                        // write the checksum
-                        if (fileInfo.checksum() != null) {
-                            store.writeChecksum(fileInfo.physicalName(), fileInfo.checksum());
-                        }
-                        store.directory().sync(Collections.singleton(fileInfo.physicalName()));
-                        recoveryState.getIndex().addRecoveredFileCount(1);
-                    } catch (IOException e) {
-                        onFailure(e);
-                        return;
-                    }
-                }
-                latch.countDown();
-            }
-
-            @Override
-            public void onFailure(Throwable t) {
-                failures.add(t);
-                latch.countDown();
-            }
-        });
-    }
-
-    private void snapshotTranslog(Translog.Snapshot snapshot, CommitPoint.FileInfo fileInfo) throws IOException {
-        blobContainer.writeBlob(fileInfo.name(), snapshot.stream(), snapshot.lengthInBytes());
-//
-//        long chunkBytes = Long.MAX_VALUE;
-//        if (chunkSize != null) {
-//            chunkBytes = chunkSize.bytes();
-//        }
-//
-//        long totalLength = fileInfo.length();
-//        long numberOfChunks = totalLength / chunkBytes;
-//        if (totalLength % chunkBytes > 0) {
-//            numberOfChunks++;
-//        }
-//        if (numberOfChunks == 0) {
-//            numberOfChunks++;
-//        }
-//
-//        if (numberOfChunks == 1) {
-//            blobContainer.writeBlob(fileInfo.name(), snapshot.stream(), snapshot.lengthInBytes());
-//        } else {
-//            InputStream translogStream = snapshot.stream();
-//            long totalLengthLeftToWrite = totalLength;
-//            for (int i = 0; i < numberOfChunks; i++) {
-//                long lengthToWrite = chunkBytes;
-//                if (totalLengthLeftToWrite < chunkBytes) {
-//                    lengthToWrite = totalLengthLeftToWrite;
-//                }
-//                blobContainer.writeBlob(fileInfo.name() + ".part" + i, new LimitInputStream(translogStream, lengthToWrite), lengthToWrite);
-//                totalLengthLeftToWrite -= lengthToWrite;
-//            }
-//        }
-    }
-
-    private void snapshotFile(Directory dir, final CommitPoint.FileInfo fileInfo, final CountDownLatch latch, final List<Throwable> failures) throws IOException {
-        long chunkBytes = Long.MAX_VALUE;
-        if (chunkSize != null) {
-            chunkBytes = chunkSize.bytes();
-        }
-
-        long totalLength = fileInfo.length();
-        long numberOfChunks = totalLength / chunkBytes;
-        if (totalLength % chunkBytes > 0) {
-            numberOfChunks++;
-        }
-        if (numberOfChunks == 0) {
-            numberOfChunks++;
-        }
-
-        final long fNumberOfChunks = numberOfChunks;
-        final AtomicLong counter = new AtomicLong(numberOfChunks);
-        for (long i = 0; i < fNumberOfChunks; i++) {
-            final long partNumber = i;
-
-            IndexInput indexInput = null;
-            try {
-                // TODO: maybe use IOContext.READONCE?
-                indexInput = indexShard.store().openInputRaw(fileInfo.physicalName(), IOContext.READ);
-                indexInput.seek(partNumber * chunkBytes);
-                InputStreamIndexInput is = new ThreadSafeInputStreamIndexInput(indexInput, chunkBytes);
-
-                String blobName = fileInfo.name();
-                if (fNumberOfChunks > 1) {
-                    // if we do chunks, then all of them are in the form of "[xxx].part[N]".
-                    blobName += ".part" + partNumber;
-                }
-
-                final IndexInput fIndexInput = indexInput;
-                blobContainer.writeBlob(blobName, is, is.actualSizeToRead(), new ImmutableBlobContainer.WriterListener() {
-                    @Override
-                    public void onCompleted() {
-                        try {
-                            fIndexInput.close();
-                        } catch (IOException e) {
-                            // ignore
-                        }
-                        if (counter.decrementAndGet() == 0) {
-                            latch.countDown();
-                        }
-                    }
-
-                    @Override
-                    public void onFailure(Throwable t) {
-                        try {
-                            fIndexInput.close();
-                        } catch (IOException e) {
-                            // ignore
-                        }
-                        failures.add(t);
-                        if (counter.decrementAndGet() == 0) {
-                            latch.countDown();
-                        }
-                    }
-                });
-            } catch (Exception e) {
-                if (indexInput != null) {
-                    try {
-                        indexInput.close();
-                    } catch (IOException e1) {
-                        // ignore
-                    }
-                }
-                failures.add(e);
-                latch.countDown();
-            }
-        }
-    }
-
-    private boolean commitPointExistsInBlobs(CommitPoint commitPoint, ImmutableMap<String, BlobMetaData> blobs) {
-        for (CommitPoint.FileInfo fileInfo : Iterables.concat(commitPoint.indexFiles(), commitPoint.translogFiles())) {
-            if (!commitPointFileExistsInBlobs(fileInfo, blobs)) {
-                return false;
-            }
-        }
-        return true;
-    }
-
-    private boolean commitPointFileExistsInBlobs(CommitPoint.FileInfo fileInfo, ImmutableMap<String, BlobMetaData> blobs) {
-        BlobMetaData blobMetaData = blobs.get(fileInfo.name());
-        if (blobMetaData != null) {
-            if (blobMetaData.length() != fileInfo.length()) {
-                return false;
-            }
-        } else if (blobs.containsKey(fileInfo.name() + ".part0")) {
-            // multi part file sum up the size and check
-            int part = 0;
-            long totalSize = 0;
-            while (true) {
-                blobMetaData = blobs.get(fileInfo.name() + ".part" + part++);
-                if (blobMetaData == null) {
-                    break;
-                }
-                totalSize += blobMetaData.length();
-            }
-            if (totalSize != fileInfo.length()) {
-                return false;
-            }
-        } else {
-            // no file, not exact and not multipart
-            return false;
-        }
-        return true;
-    }
-
-    private CommitPoints buildCommitPoints(ImmutableMap<String, BlobMetaData> blobs) {
-        List<CommitPoint> commitPoints = Lists.newArrayList();
-        for (String name : blobs.keySet()) {
-            if (name.startsWith("commit-")) {
-                try {
-                    commitPoints.add(CommitPoints.fromXContent(blobContainer.readBlobFully(name)));
-                } catch (Exception e) {
-                    logger.warn("failed to read commit point [{}]", e, name);
-                }
-            }
-        }
-        return new CommitPoints(commitPoints);
-    }
-
-    private String fileNameFromGeneration(long generation) {
-        return "__" + Long.toString(generation, Character.MAX_RADIX);
-    }
-
-    private long findLatestFileNameGeneration(ImmutableMap<String, BlobMetaData> blobs) {
-        long generation = -1;
-        for (String name : blobs.keySet()) {
-            if (!name.startsWith("__")) {
-                continue;
-            }
-            if (name.contains(".part")) {
-                name = name.substring(0, name.indexOf(".part"));
-            }
-
-            try {
-                long currentGen = Long.parseLong(name.substring(2) /*__*/, Character.MAX_RADIX);
-                if (currentGen > generation) {
-                    generation = currentGen;
-                }
-            } catch (NumberFormatException e) {
-                logger.warn("file [{}] does not conform to the '__' schema");
-            }
-        }
-        return generation;
-    }
-}

+ 0 - 49
src/main/java/org/elasticsearch/index/gateway/fs/FsIndexGateway.java

@@ -1,49 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.gateway.fs;
-
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.gateway.Gateway;
-import org.elasticsearch.index.Index;
-import org.elasticsearch.index.gateway.IndexShardGateway;
-import org.elasticsearch.index.gateway.blobstore.BlobStoreIndexGateway;
-import org.elasticsearch.index.settings.IndexSettings;
-
-/**
- *
- */
-public class FsIndexGateway extends BlobStoreIndexGateway {
-
-    @Inject
-    public FsIndexGateway(Index index, @IndexSettings Settings indexSettings, Gateway gateway) {
-        super(index, indexSettings, gateway);
-    }
-
-    @Override
-    public String type() {
-        return "fs";
-    }
-
-    @Override
-    public Class<? extends IndexShardGateway> shardGatewayClass() {
-        return FsIndexShardGateway.class;
-    }
-}

+ 0 - 34
src/main/java/org/elasticsearch/index/gateway/fs/FsIndexGatewayModule.java

@@ -1,34 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.gateway.fs;
-
-import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.index.gateway.IndexGateway;
-
-/**
- *
- */
-public class FsIndexGatewayModule extends AbstractModule {
-
-    @Override
-    protected void configure() {
-        bind(IndexGateway.class).to(FsIndexGateway.class).asEagerSingleton();
-    }
-}

+ 0 - 89
src/main/java/org/elasticsearch/index/gateway/fs/FsIndexShardGateway.java

@@ -1,89 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.gateway.fs;
-
-import org.apache.lucene.store.Lock;
-import org.apache.lucene.store.NativeFSLockFactory;
-import org.elasticsearch.ElasticsearchIllegalStateException;
-import org.elasticsearch.common.blobstore.fs.AbstractFsBlobContainer;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.gateway.IndexGateway;
-import org.elasticsearch.index.gateway.blobstore.BlobStoreIndexShardGateway;
-import org.elasticsearch.index.settings.IndexSettings;
-import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.index.shard.service.IndexShard;
-import org.elasticsearch.index.store.Store;
-import org.elasticsearch.threadpool.ThreadPool;
-
-import java.io.IOException;
-
-/**
- *
- */
-public class FsIndexShardGateway extends BlobStoreIndexShardGateway {
-
-    private final boolean snapshotLock;
-
-    @Inject
-    public FsIndexShardGateway(ShardId shardId, @IndexSettings Settings indexSettings, ThreadPool threadPool, IndexGateway fsIndexGateway,
-                               IndexShard indexShard, Store store) {
-        super(shardId, indexSettings, threadPool, fsIndexGateway, indexShard, store);
-        this.snapshotLock = indexSettings.getAsBoolean("gateway.fs.snapshot_lock", true);
-    }
-
-    @Override
-    public String type() {
-        return "fs";
-    }
-
-    @Override
-    public SnapshotLock obtainSnapshotLock() throws Exception {
-        if (!snapshotLock) {
-            return NO_SNAPSHOT_LOCK;
-        }
-        AbstractFsBlobContainer fsBlobContainer = (AbstractFsBlobContainer) blobContainer;
-        NativeFSLockFactory lockFactory = new NativeFSLockFactory(fsBlobContainer.filePath());
-
-        Lock lock = lockFactory.makeLock("snapshot.lock");
-        boolean obtained = lock.obtain();
-        if (!obtained) {
-            throw new ElasticsearchIllegalStateException("failed to obtain snapshot lock [" + lock + "]");
-        }
-        return new FsSnapshotLock(lock);
-    }
-
-    public class FsSnapshotLock implements SnapshotLock {
-        private final Lock lock;
-
-        public FsSnapshotLock(Lock lock) {
-            this.lock = lock;
-        }
-
-        @Override
-        public void release() {
-            try {
-                lock.close();
-            } catch (IOException e) {
-                logger.warn("failed to release snapshot lock [{}]", e, lock);
-            }
-        }
-    }
-}

+ 0 - 30
src/main/java/org/elasticsearch/index/gateway/local/LocalIndexShardGateway.java

@@ -32,7 +32,6 @@ import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.gateway.IndexShardGateway;
 import org.elasticsearch.index.gateway.IndexShardGatewayRecoveryException;
 import org.elasticsearch.indices.recovery.RecoveryState;
-import org.elasticsearch.index.gateway.SnapshotStatus;
 import org.elasticsearch.index.settings.IndexSettings;
 import org.elasticsearch.index.shard.AbstractIndexShardComponent;
 import org.elasticsearch.index.shard.IndexShardState;
@@ -259,30 +258,6 @@ public class LocalIndexShardGateway extends AbstractIndexShardComponent implemen
         return "local";
     }
 
-    @Override
-    public SnapshotStatus snapshot(Snapshot snapshot) {
-        return null;
-    }
-
-    @Override
-    public SnapshotStatus lastSnapshotStatus() {
-        return null;
-    }
-
-    @Override
-    public SnapshotStatus currentSnapshotStatus() {
-        return null;
-    }
-
-    @Override
-    public boolean requiresSnapshot() {
-        return false;
-    }
-
-    @Override
-    public boolean requiresSnapshotScheduling() {
-        return false;
-    }
 
     @Override
     public void close() {
@@ -291,11 +266,6 @@ public class LocalIndexShardGateway extends AbstractIndexShardComponent implemen
         }
     }
 
-    @Override
-    public SnapshotLock obtainSnapshotLock() throws Exception {
-        return NO_SNAPSHOT_LOCK;
-    }
-
     class Sync implements Runnable {
         @Override
         public void run() {

+ 0 - 31
src/main/java/org/elasticsearch/index/gateway/none/NoneIndexShardGateway.java

@@ -25,7 +25,6 @@ import org.elasticsearch.gateway.none.NoneGateway;
 import org.elasticsearch.index.gateway.IndexShardGateway;
 import org.elasticsearch.index.gateway.IndexShardGatewayRecoveryException;
 import org.elasticsearch.indices.recovery.RecoveryState;
-import org.elasticsearch.index.gateway.SnapshotStatus;
 import org.elasticsearch.index.settings.IndexSettings;
 import org.elasticsearch.index.shard.AbstractIndexShardComponent;
 import org.elasticsearch.index.shard.ShardId;
@@ -81,37 +80,7 @@ public class NoneIndexShardGateway extends AbstractIndexShardComponent implement
         return NoneGateway.TYPE;
     }
 
-    @Override
-    public SnapshotStatus snapshot(Snapshot snapshot) {
-        return null;
-    }
-
-    @Override
-    public SnapshotStatus lastSnapshotStatus() {
-        return null;
-    }
-
-    @Override
-    public SnapshotStatus currentSnapshotStatus() {
-        return null;
-    }
-
-    @Override
-    public boolean requiresSnapshot() {
-        return false;
-    }
-
-    @Override
-    public boolean requiresSnapshotScheduling() {
-        return false;
-    }
-
     @Override
     public void close() {
     }
-
-    @Override
-    public SnapshotLock obtainSnapshotLock() throws Exception {
-        return NO_SNAPSHOT_LOCK;
-    }
 }

+ 0 - 6
src/main/java/org/elasticsearch/index/service/InternalIndexService.java

@@ -407,12 +407,6 @@ public class InternalIndexService extends AbstractIndexComponent implements Inde
             logger.debug("failed to close merge policy provider", e);
             // ignore
         }
-        try {
-            shardInjector.getInstance(IndexShardGatewayService.class).snapshotOnClose();
-        } catch (Throwable e) {
-            logger.debug("failed to snapshot index shard gateway on close", e);
-            // ignore
-        }
         try {
             shardInjector.getInstance(IndexShardGatewayService.class).close();
         } catch (Throwable e) {

+ 0 - 2
src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java

@@ -30,7 +30,6 @@ import org.elasticsearch.common.inject.AbstractModule;
 import org.elasticsearch.gateway.local.LocalGatewayAllocator;
 import org.elasticsearch.index.codec.CodecService;
 import org.elasticsearch.index.engine.internal.InternalEngine;
-import org.elasticsearch.index.gateway.IndexShardGatewayService;
 import org.elasticsearch.index.indexing.slowlog.ShardSlowLogIndexingService;
 import org.elasticsearch.index.merge.policy.LogByteSizeMergePolicyProvider;
 import org.elasticsearch.index.merge.policy.LogDocMergePolicyProvider;
@@ -67,7 +66,6 @@ public class IndexDynamicSettingsModule extends AbstractModule {
         indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_BLOCKS_READ);
         indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_BLOCKS_WRITE);
         indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_BLOCKS_METADATA);
-        indexDynamicSettings.addDynamicSetting(IndexShardGatewayService.INDEX_GATEWAY_SNAPSHOT_INTERVAL, Validator.TIME);
         indexDynamicSettings.addDynamicSetting(IndicesTTLService.INDEX_TTL_DISABLE_PURGE);
         indexDynamicSettings.addDynamicSetting(InternalIndexShard.INDEX_REFRESH_INTERVAL, Validator.TIME);
         indexDynamicSettings.addDynamicSetting(LocalGatewayAllocator.INDEX_RECOVERY_INITIAL_SHARDS);

+ 0 - 2
src/main/java/org/elasticsearch/index/shard/service/IndexShard.java

@@ -146,8 +146,6 @@ public interface IndexShard extends IndexShardComponent {
 
     void optimize(Engine.Optimize optimize) throws ElasticsearchException;
 
-    <T> T snapshot(Engine.SnapshotHandler<T> snapshotHandler) throws EngineException;
-
     SnapshotIndexCommit snapshotIndex() throws EngineException;
 
     void recover(Engine.RecoveryHandler recoveryHandler) throws EngineException;

+ 0 - 11
src/main/java/org/elasticsearch/index/shard/service/InternalIndexShard.java

@@ -599,17 +599,6 @@ public class InternalIndexShard extends AbstractIndexShardComponent implements I
         engine.optimize(optimize);
     }
 
-    @Override
-    public <T> T snapshot(Engine.SnapshotHandler<T> snapshotHandler) throws EngineException {
-        IndexShardState state = this.state; // one time volatile read
-        // we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine
-        if (state == IndexShardState.POST_RECOVERY || state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED) {
-            return engine.snapshot(snapshotHandler);
-        } else {
-            throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed");
-        }
-    }
-
     @Override
     public SnapshotIndexCommit snapshotIndex() throws EngineException {
         IndexShardState state = this.state; // one time volatile read

+ 0 - 3
src/main/java/org/elasticsearch/rest/action/RestActionModule.java

@@ -58,7 +58,6 @@ import org.elasticsearch.rest.action.admin.indices.delete.RestDeleteIndexAction;
 import org.elasticsearch.rest.action.admin.indices.exists.indices.RestIndicesExistsAction;
 import org.elasticsearch.rest.action.admin.indices.exists.types.RestTypesExistsAction;
 import org.elasticsearch.rest.action.admin.indices.flush.RestFlushAction;
-import org.elasticsearch.rest.action.admin.indices.gateway.snapshot.RestGatewaySnapshotAction;
 import org.elasticsearch.rest.action.admin.indices.mapping.delete.RestDeleteMappingAction;
 import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetFieldMappingAction;
 import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetMappingAction;
@@ -179,8 +178,6 @@ public class RestActionModule extends AbstractModule {
         bind(RestGetMappingAction.class).asEagerSingleton();
         bind(RestGetFieldMappingAction.class).asEagerSingleton();
 
-        bind(RestGatewaySnapshotAction.class).asEagerSingleton();
-
         bind(RestRefreshAction.class).asEagerSingleton();
         bind(RestFlushAction.class).asEagerSingleton();
         bind(RestOptimizeAction.class).asEagerSingleton();

+ 0 - 84
src/main/java/org/elasticsearch/rest/action/admin/indices/gateway/snapshot/RestGatewaySnapshotAction.java

@@ -1,84 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.rest.action.admin.indices.gateway.snapshot;
-
-import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequest;
-import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotResponse;
-import org.elasticsearch.action.support.IndicesOptions;
-import org.elasticsearch.client.Client;
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.rest.*;
-import org.elasticsearch.rest.action.support.RestXContentBuilder;
-
-import java.io.IOException;
-
-import static org.elasticsearch.rest.RestRequest.Method.POST;
-import static org.elasticsearch.rest.RestStatus.OK;
-import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastShardsHeader;
-
-/**
- *
- */
-@Deprecated
-public class RestGatewaySnapshotAction extends BaseRestHandler {
-
-    @Inject
-    public RestGatewaySnapshotAction(Settings settings, Client client, RestController controller) {
-        super(settings, client);
-        controller.registerHandler(POST, "/_gateway/snapshot", this);
-        controller.registerHandler(POST, "/{index}/_gateway/snapshot", this);
-    }
-
-    @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel) {
-        GatewaySnapshotRequest gatewaySnapshotRequest = new GatewaySnapshotRequest(Strings.splitStringByCommaToArray(request.param("index")));
-        gatewaySnapshotRequest.listenerThreaded(false);
-        gatewaySnapshotRequest.indicesOptions(IndicesOptions.fromRequest(request, gatewaySnapshotRequest.indicesOptions()));
-        client.admin().indices().gatewaySnapshot(gatewaySnapshotRequest, new ActionListener<GatewaySnapshotResponse>() {
-            @Override
-            public void onResponse(GatewaySnapshotResponse response) {
-                try {
-                    XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
-                    builder.startObject();
-
-                    buildBroadcastShardsHeader(builder, response);
-
-                    builder.endObject();
-                    channel.sendResponse(new XContentRestResponse(request, OK, builder));
-                } catch (Throwable e) {
-                    onFailure(e);
-                }
-            }
-
-            @Override
-            public void onFailure(Throwable e) {
-                try {
-                    channel.sendResponse(new XContentThrowableRestResponse(request, e));
-                } catch (IOException e1) {
-                    logger.error("Failed to send failure response", e1);
-                }
-            }
-        });
-    }
-}

+ 0 - 390
src/test/java/org/elasticsearch/gateway/fs/IndexGatewayTests.java

@@ -1,390 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.gateway.fs;
-
-import com.carrotsearch.randomizedtesting.annotations.Nightly;
-import com.google.common.base.Predicate;
-import org.apache.lucene.util.LuceneTestCase.Slow;
-import org.apache.lucene.util.SetOnce;
-import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
-import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
-import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
-import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
-import org.elasticsearch.action.admin.indices.status.IndexShardStatus;
-import org.elasticsearch.action.admin.indices.status.IndicesStatusResponse;
-import org.elasticsearch.action.admin.indices.status.ShardStatus;
-import org.elasticsearch.action.get.GetResponse;
-import org.elasticsearch.client.Requests;
-import org.elasticsearch.common.collect.MapBuilder;
-import org.elasticsearch.common.io.FileSystemUtils;
-import org.elasticsearch.common.settings.ImmutableSettings;
-import org.elasticsearch.common.settings.ImmutableSettings.Builder;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.env.Environment;
-import org.elasticsearch.indices.IndexAlreadyExistsException;
-import org.elasticsearch.test.ElasticsearchIntegrationTest;
-import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
-import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
-import org.junit.Test;
-
-import static org.elasticsearch.client.Requests.*;
-import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
-import static org.hamcrest.Matchers.*;
-
-/**
- *
- */
-@ClusterScope(scope=Scope.TEST, numNodes=0)
-@Slow
-public class IndexGatewayTests extends ElasticsearchIntegrationTest {
-
-    private String storeType;
-    private final SetOnce<Settings> settings = new SetOnce<Settings>();
-
-    @Override
-    protected Settings nodeSettings(int nodeOrdinal) {
-        if (settings.get() == null) {
-            Builder builder = ImmutableSettings.builder();
-            builder.put("cluster.routing.schedule", "100ms");
-            builder.put("gateway.type", "fs");
-            if (between(0, 5) == 0) {
-                builder.put("gateway.fs.buffer_size", between(1, 100) + "kb");
-            }
-            if (between(0, 5) == 0) {
-                builder.put("gateway.fs.chunk_size", between(1, 100) + "kb");
-            }
-            storeType = rarely() ? "ram" : "fs";
-            builder.put("index.store.type", storeType);
-            settings.set(builder.build());
-        }
-        return settings.get();
-    }
-
-    protected boolean isPersistentStorage() {
-        assertNotNull(storeType);
-        return "fs".equals(settings.get().get("index.store.type"));
-    }
-
-    @Test
-    public void testSnapshotOperations() throws Exception {
-        cluster().startNode(nodeSettings(0));
-
-        // get the environment, so we can clear the work dir when needed
-        Environment environment = cluster().getInstance(Environment.class);
-
-
-        logger.info("Running Cluster Health (waiting for node to startup properly)");
-        ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
-        logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
-        assertThat(clusterHealth.isTimedOut(), equalTo(false));
-        assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
-
-        // Translog tests
-
-        logger.info("Creating index [{}]", "test");
-        client().admin().indices().prepareCreate("test").execute().actionGet();
-
-        // create a mapping
-        PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("type1").setSource(mappingSource()).execute().actionGet();
-        assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
-
-        // verify that mapping is there
-        ClusterStateResponse clusterState = client().admin().cluster().state(clusterStateRequest()).actionGet();
-        assertThat(clusterState.getState().metaData().index("test").mapping("type1"), notNullValue());
-
-        // create two and delete the first
-        logger.info("Indexing #1");
-        client().index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet();
-        logger.info("Indexing #2");
-        client().index(Requests.indexRequest("test").type("type1").id("2").source(source("2", "test"))).actionGet();
-
-        // perform snapshot to the index
-        logger.info("Gateway Snapshot");
-        client().admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
-
-        logger.info("Deleting #1");
-        client().delete(deleteRequest("test").type("type1").id("1")).actionGet();
-
-        // perform snapshot to the index
-        logger.info("Gateway Snapshot");
-        client().admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
-        logger.info("Gateway Snapshot (should be a no op)");
-        // do it again, it should be a no op
-        client().admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
-
-        logger.info("Closing the server");
-        cluster().stopRandomNode();
-        logger.info("Starting the server, should recover from the gateway (only translog should be populated)");
-        cluster().startNode(nodeSettings(0));
-
-        logger.info("Running Cluster Health (wait for the shards to startup)");
-        clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
-        logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
-        assertThat(clusterHealth.isTimedOut(), equalTo(false));
-        assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
-
-        // verify that mapping is there
-        clusterState = client().admin().cluster().state(clusterStateRequest()).actionGet();
-        assertThat(clusterState.getState().metaData().index("test").mapping("type1"), notNullValue());
-
-        logger.info("Getting #1, should not exists");
-        GetResponse getResponse = client().get(getRequest("test").type("type1").id("1")).actionGet();
-        assertThat(getResponse.isExists(), equalTo(false));
-        logger.info("Getting #2");
-        getResponse = client().get(getRequest("test").type("type1").id("2")).actionGet();
-        assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
-
-        // Now flush and add some data (so we have index recovery as well)
-        logger.info("Flushing, so we have actual content in the index files (#2 should be in the index)");
-        client().admin().indices().flush(flushRequest("test")).actionGet();
-        logger.info("Indexing #3, so we have something in the translog as well");
-        client().index(Requests.indexRequest("test").type("type1").id("3").source(source("3", "test"))).actionGet();
-
-        logger.info("Gateway Snapshot");
-        client().admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
-        logger.info("Gateway Snapshot (should be a no op)");
-        client().admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
-
-        logger.info("Closing the server");
-        cluster().stopRandomNode();
-        logger.info("Starting the server, should recover from the gateway (both index and translog) and reuse work dir");
-        cluster().startNode(nodeSettings(0));
-
-        logger.info("Running Cluster Health (wait for the shards to startup)");
-        clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
-        logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
-        assertThat(clusterHealth.isTimedOut(), equalTo(false));
-        assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
-
-        logger.info("Getting #1, should not exists");
-        getResponse = client().get(getRequest("test").type("type1").id("1")).actionGet();
-        assertThat(getResponse.isExists(), equalTo(false));
-        logger.info("Getting #2 (not from the translog, but from the index)");
-        getResponse = client().get(getRequest("test").type("type1").id("2")).actionGet();
-        assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
-        logger.info("Getting #3 (from the translog)");
-        getResponse = client().get(getRequest("test").type("type1").id("3")).actionGet();
-        assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));
-
-        logger.info("Closing the server");
-        cluster().stopRandomNode();
-        logger.info("Clearing cluster data dir, so there will be a full recovery from the gateway");
-        FileSystemUtils.deleteRecursively(environment.dataWithClusterFiles());
-        logger.info("Starting the server, should recover from the gateway (both index and translog) without reusing work dir");
-        cluster().startNode(nodeSettings(0));
-
-        logger.info("Running Cluster Health (wait for the shards to startup)");
-        clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
-        logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
-        assertThat(clusterHealth.isTimedOut(), equalTo(false));
-        assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
-
-        logger.info("Getting #1, should not exists");
-        getResponse = client().get(getRequest("test").type("type1").id("1")).actionGet();
-        assertThat(getResponse.isExists(), equalTo(false));
-        logger.info("Getting #2 (not from the translog, but from the index)");
-        getResponse = client().get(getRequest("test").type("type1").id("2")).actionGet();
-        assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
-        logger.info("Getting #3 (from the translog)");
-        getResponse = client().get(getRequest("test").type("type1").id("3")).actionGet();
-        assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));
-
-
-        logger.info("Flushing, so we have actual content in the index files (#3 should be in the index now as well)");
-        client().admin().indices().flush(flushRequest("test")).actionGet();
-
-        logger.info("Gateway Snapshot");
-        client().admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
-        logger.info("Gateway Snapshot (should be a no op)");
-        client().admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
-
-        logger.info("Closing the server");
-        cluster().stopRandomNode();
-        logger.info("Starting the server, should recover from the gateway (just from the index, nothing in the translog)");
-        cluster().startNode(nodeSettings(0));
-
-        logger.info("Running Cluster Health (wait for the shards to startup)");
-        clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
-        logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
-        assertThat(clusterHealth.isTimedOut(), equalTo(false));
-        assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
-
-        logger.info("Getting #1, should not exists");
-        getResponse = client().get(getRequest("test").type("type1").id("1")).actionGet();
-        assertThat(getResponse.isExists(), equalTo(false));
-        logger.info("Getting #2 (not from the translog, but from the index)");
-        getResponse = client().get(getRequest("test").type("type1").id("2")).actionGet();
-        assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
-        logger.info("Getting #3 (not from the translog, but from the index)");
-        getResponse = client().get(getRequest("test").type("type1").id("3")).actionGet();
-        assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));
-
-        logger.info("Deleting the index");
-        client().admin().indices().delete(deleteIndexRequest("test")).actionGet();
-    }
-
-    @Test
-    @Nightly
-    public void testLoadWithFullRecovery() {
-        testLoad(true);
-    }
-
-    @Test
-    @Nightly
-    public void testLoadWithReuseRecovery() {
-        testLoad(false);
-    }
-
-    private void testLoad(boolean fullRecovery) {
-        logger.info("Running with fullRecover [{}]", fullRecovery);
-
-        cluster().startNode(nodeSettings(0));
-
-        logger.info("Running Cluster Health (waiting for node to startup properly)");
-        ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
-        logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
-        assertThat(clusterHealth.isTimedOut(), equalTo(false));
-        assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
-
-        // get the environment, so we can clear the work dir when needed
-        Environment environment = cluster().getInstance(Environment.class);
-
-        logger.info("--> creating test index ...");
-        client().admin().indices().prepareCreate("test").execute().actionGet();
-
-        logger.info("Running Cluster Health (wait for the shards to startup)");
-        clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
-        logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
-        assertThat(clusterHealth.isTimedOut(), equalTo(false));
-        assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
-
-
-        logger.info("--> refreshing and checking count");
-        client().admin().indices().prepareRefresh().execute().actionGet();
-        assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(0l));
-        long numDocs = between(100, rarely() ? 2000 : 1000);
-        logger.info("--> indexing " + numDocs + "  docs");
-        boolean hasSnapshoted = false;
-        boolean hasFlushed = false;
-        for (long i = 0; i < numDocs; i++) {
-            client().prepareIndex("test", "type1", Long.toString(i))
-                    .setCreate(true) // make sure we use create, so if we recover wrongly, we will get increments...
-                    .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + i).map()).execute().actionGet();
-
-             // snapshot every 100 so we get some actions going on in the gateway
-            if (rarely()) {
-                hasSnapshoted = true;
-                client().admin().indices().prepareGatewaySnapshot().execute().actionGet();
-            }
-            // flush every once is a while, so we get different data
-            if (rarely()) {
-                hasFlushed = true;
-                client().admin().indices().prepareFlush().execute().actionGet();
-            }
-        }
-        if (!hasSnapshoted) {
-            client().admin().indices().prepareGatewaySnapshot().execute().actionGet();
-        }
-
-        if (!hasFlushed)  {
-            client().admin().indices().prepareFlush().execute().actionGet();
-        }
-
-        logger.info("--> refreshing and checking count");
-        client().admin().indices().prepareRefresh().execute().actionGet();
-        assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(numDocs));
-
-
-        logger.info("--> closing the server");
-        cluster().stopRandomNode();
-        if (fullRecovery) {
-            logger.info("Clearing cluster data dir, so there will be a full recovery from the gateway");
-            FileSystemUtils.deleteRecursively(environment.dataWithClusterFiles());
-            logger.info("Starting the server, should recover from the gateway (both index and translog) without reusing work dir");
-        }
-
-        cluster().startNode(nodeSettings(0));
-
-        logger.info("--> running Cluster Health (wait for the shards to startup)");
-        clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
-        logger.info("--> done Cluster Health, status " + clusterHealth.getStatus());
-        assertThat(clusterHealth.isTimedOut(), equalTo(false));
-        assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
-
-        logger.info("--> checking count");
-        assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(numDocs));
-
-        logger.info("--> checking reuse / recovery status");
-        IndicesStatusResponse statusResponse = client().admin().indices().prepareStatus().setRecovery(true).execute().actionGet();
-        for (IndexShardStatus indexShardStatus : statusResponse.getIndex("test")) {
-            for (ShardStatus shardStatus : indexShardStatus) {
-                if (shardStatus.getShardRouting().primary()) {
-                    if (fullRecovery || !isPersistentStorage()) {
-                        assertThat(shardStatus.getGatewayRecoveryStatus().getReusedIndexSize().bytes(), equalTo(0l));
-                    } else {
-                        assertThat(shardStatus.getGatewayRecoveryStatus().getReusedIndexSize().bytes(), greaterThan(shardStatus.getGatewayRecoveryStatus().getIndexSize().bytes() - 8196 /* segments file and others */));
-                    }
-                }
-            }
-        }
-    }
-
-    private String mappingSource() {
-        return "{ type1 : { properties : { name : { type : \"string\" } } } }";
-    }
-
-    private String source(String id, String nameValue) {
-        return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" } }";
-    }
-
-    @Test
-    public void testRandom() {
-        testLoad(randomBoolean());
-    }
-
-    @Test
-    public void testIndexActions() throws Exception {
-        cluster().startNode(nodeSettings(0));
-
-        logger.info("Running Cluster Health (waiting for node to startup properly)");
-        ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
-        logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
-        assertThat(clusterHealth.isTimedOut(), equalTo(false));
-        assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
-
-        assertAcked(client().admin().indices().create(createIndexRequest("test")).actionGet());
-
-        cluster().stopRandomNode();
-        cluster().startNode(nodeSettings(0));
-        assertTrue("index should exists", awaitBusy(new Predicate<Object>() {
-            @Override
-            public boolean apply(Object input) {
-                try {
-                    client().admin().indices().create(createIndexRequest("test")).actionGet();
-                    return false;
-                } catch (IndexAlreadyExistsException e) {
-                    // all is well
-                    return true;
-                }
-            }
-        }));
-    }
-}

+ 1 - 60
src/test/java/org/elasticsearch/index/engine/internal/InternalEngineTests.java

@@ -40,7 +40,6 @@ import org.elasticsearch.index.codec.CodecService;
 import org.elasticsearch.index.deletionpolicy.KeepOnlyLastDeletionPolicy;
 import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy;
 import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
-import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommitExistsMatcher;
 import org.elasticsearch.index.engine.*;
 import org.elasticsearch.index.indexing.ShardIndexingService;
 import org.elasticsearch.index.indexing.slowlog.ShardSlowLogIndexingService;
@@ -75,7 +74,7 @@ import java.io.File;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
-import java.util.concurrent.*;
+import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicReference;
 
 import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
@@ -594,64 +593,6 @@ public class InternalEngineTests extends ElasticsearchTestCase {
         searchResult.release();
     }
 
-    @Test
-    public void testSimpleSnapshot() throws Exception {
-        // create a document
-        ParsedDocument doc1 = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_1, false);
-        engine.create(new Engine.Create(null, newUid("1"), doc1));
-
-        final ExecutorService executorService = Executors.newCachedThreadPool();
-
-        engine.snapshot(new Engine.SnapshotHandler<Void>() {
-            @Override
-            public Void snapshot(final SnapshotIndexCommit snapshotIndexCommit1, final Translog.Snapshot translogSnapshot1) {
-                MatcherAssert.assertThat(snapshotIndexCommit1, SnapshotIndexCommitExistsMatcher.snapshotIndexCommitExists());
-                assertThat(translogSnapshot1.hasNext(), equalTo(true));
-                Translog.Create create1 = (Translog.Create) translogSnapshot1.next();
-                assertThat(create1.source().toBytesArray(), equalTo(B_1.toBytesArray()));
-                assertThat(translogSnapshot1.hasNext(), equalTo(false));
-
-                Future<Object> future = executorService.submit(new Callable<Object>() {
-                    @Override
-                    public Object call() throws Exception {
-                        engine.flush(new Engine.Flush());
-                        ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_2, false);
-                        engine.create(new Engine.Create(null, newUid("2"), doc2));
-                        engine.flush(new Engine.Flush());
-                        ParsedDocument doc3 = testParsedDocument("3", "3", "test", null, -1, -1, testDocumentWithTextField(), Lucene.STANDARD_ANALYZER, B_3, false);
-                        engine.create(new Engine.Create(null, newUid("3"), doc3));
-                        return null;
-                    }
-                });
-
-                try {
-                    future.get();
-                } catch (Exception e) {
-                    e.printStackTrace();
-                    assertThat(e.getMessage(), false, equalTo(true));
-                }
-
-                MatcherAssert.assertThat(snapshotIndexCommit1, SnapshotIndexCommitExistsMatcher.snapshotIndexCommitExists());
-
-                engine.snapshot(new Engine.SnapshotHandler<Void>() {
-                    @Override
-                    public Void snapshot(SnapshotIndexCommit snapshotIndexCommit2, Translog.Snapshot translogSnapshot2) throws EngineException {
-                        MatcherAssert.assertThat(snapshotIndexCommit1, SnapshotIndexCommitExistsMatcher.snapshotIndexCommitExists());
-                        MatcherAssert.assertThat(snapshotIndexCommit2, SnapshotIndexCommitExistsMatcher.snapshotIndexCommitExists());
-                        assertThat(snapshotIndexCommit2.getSegmentsFileName(), not(equalTo(snapshotIndexCommit1.getSegmentsFileName())));
-                        assertThat(translogSnapshot2.hasNext(), equalTo(true));
-                        Translog.Create create3 = (Translog.Create) translogSnapshot2.next();
-                        assertThat(create3.source().toBytesArray(), equalTo(B_3.toBytesArray()));
-                        assertThat(translogSnapshot2.hasNext(), equalTo(false));
-                        return null;
-                    }
-                });
-                return null;
-            }
-        });
-
-        engine.close();
-    }
 
     @Test
     public void testSimpleRecover() throws Exception {

+ 0 - 14
src/test/java/org/elasticsearch/indices/IndicesOptionsTests.java

@@ -28,7 +28,6 @@ import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequestBuilder
 import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequestBuilder;
 import org.elasticsearch.action.admin.indices.exists.types.TypesExistsRequestBuilder;
 import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder;
-import org.elasticsearch.action.admin.indices.gateway.snapshot.GatewaySnapshotRequestBuilder;
 import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequestBuilder;
 import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequestBuilder;
 import org.elasticsearch.action.admin.indices.optimize.OptimizeRequestBuilder;
@@ -81,7 +80,6 @@ public class IndicesOptionsTests extends ElasticsearchIntegrationTest {
         verify(count("test1", "test2"), true);
         verify(clearCache("test1", "test2"), true);
         verify(_flush("test1", "test2"),true);
-        verify(gatewatSnapshot("test1", "test2"), true);
         verify(segments("test1", "test2"), true);
         verify(stats("test1", "test2"), true);
         verify(status("test1", "test2"), true);
@@ -106,7 +104,6 @@ public class IndicesOptionsTests extends ElasticsearchIntegrationTest {
         verify(count("test1", "test2").setIndicesOptions(options), true);
         verify(clearCache("test1", "test2").setIndicesOptions(options), true);
         verify(_flush("test1", "test2").setIndicesOptions(options),true);
-        verify(gatewatSnapshot("test1", "test2").setIndicesOptions(options), true);
         verify(segments("test1", "test2").setIndicesOptions(options), true);
         verify(stats("test1", "test2").setIndicesOptions(options), true);
         verify(status("test1", "test2").setIndicesOptions(options), true);
@@ -131,7 +128,6 @@ public class IndicesOptionsTests extends ElasticsearchIntegrationTest {
         verify(count("test1", "test2").setIndicesOptions(options), false);
         verify(clearCache("test1", "test2").setIndicesOptions(options), false);
         verify(_flush("test1", "test2").setIndicesOptions(options), false);
-        verify(gatewatSnapshot("test1", "test2").setIndicesOptions(options), false);
         verify(segments("test1", "test2").setIndicesOptions(options), false);
         verify(stats("test1", "test2").setIndicesOptions(options), false);
         verify(status("test1", "test2").setIndicesOptions(options), false);
@@ -158,7 +154,6 @@ public class IndicesOptionsTests extends ElasticsearchIntegrationTest {
         verify(count("test1", "test2").setIndicesOptions(options), false);
         verify(clearCache("test1", "test2").setIndicesOptions(options), false);
         verify(_flush("test1", "test2").setIndicesOptions(options),false);
-        verify(gatewatSnapshot("test1", "test2").setIndicesOptions(options), false);
         verify(segments("test1", "test2").setIndicesOptions(options), false);
         verify(stats("test1", "test2").setIndicesOptions(options), false);
         verify(status("test1", "test2").setIndicesOptions(options), false);
@@ -217,7 +212,6 @@ public class IndicesOptionsTests extends ElasticsearchIntegrationTest {
         verify(count(indices), false);
         verify(clearCache(indices), false);
         verify(_flush(indices),false);
-        verify(gatewatSnapshot(indices), false);
         verify(segments(indices), true);
         verify(stats(indices), false);
         verify(status(indices), false);
@@ -243,7 +237,6 @@ public class IndicesOptionsTests extends ElasticsearchIntegrationTest {
         verify(count(indices).setIndicesOptions(options), false);
         verify(clearCache(indices).setIndicesOptions(options), false);
         verify(_flush(indices).setIndicesOptions(options),false);
-        verify(gatewatSnapshot(indices).setIndicesOptions(options), false);
         verify(segments(indices).setIndicesOptions(options), false);
         verify(stats(indices).setIndicesOptions(options), false);
         verify(status(indices).setIndicesOptions(options), false);
@@ -272,7 +265,6 @@ public class IndicesOptionsTests extends ElasticsearchIntegrationTest {
         verify(count(indices), false, 1);
         verify(clearCache(indices), false);
         verify(_flush(indices),false);
-        verify(gatewatSnapshot(indices), false);
         verify(segments(indices), false);
         verify(stats(indices), false);
         verify(status(indices), false);
@@ -298,7 +290,6 @@ public class IndicesOptionsTests extends ElasticsearchIntegrationTest {
         verify(count(indices), false, 1);
         verify(clearCache(indices), false);
         verify(_flush(indices),false);
-        verify(gatewatSnapshot(indices), false);
         verify(segments(indices), true);
         verify(stats(indices), false);
         verify(status(indices), false);
@@ -324,7 +315,6 @@ public class IndicesOptionsTests extends ElasticsearchIntegrationTest {
         verify(count(indices).setIndicesOptions(options), false, 1);
         verify(clearCache(indices).setIndicesOptions(options), false);
         verify(_flush(indices).setIndicesOptions(options),false);
-        verify(gatewatSnapshot(indices).setIndicesOptions(options), false);
         verify(segments(indices).setIndicesOptions(options), false);
         verify(stats(indices).setIndicesOptions(options), false);
         verify(status(indices).setIndicesOptions(options), false);
@@ -774,10 +764,6 @@ public class IndicesOptionsTests extends ElasticsearchIntegrationTest {
         return client().admin().indices().prepareFlush(indices);
     }
 
-    private static GatewaySnapshotRequestBuilder gatewatSnapshot(String... indices) {
-        return client().admin().indices().prepareGatewaySnapshot(indices);
-    }
-
     private static IndicesSegmentsRequestBuilder segments(String... indices) {
         return client().admin().indices().prepareSegments(indices);
     }

+ 0 - 2
src/test/java/org/elasticsearch/stresstest/fullrestart/FullRestartStressTest.java

@@ -195,8 +195,6 @@ public class FullRestartStressTest {
                 bulk.execute().actionGet();
             }
 
-            client.client().admin().indices().prepareGatewaySnapshot().execute().actionGet();
-
             client.close();
             for (Node node : nodes) {
                 File[] nodeDatas = ((InternalNode) node).injector().getInstance(NodeEnvironment.class).nodeDataLocations();

+ 0 - 2
src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesRemoteStressTest.java

@@ -66,8 +66,6 @@ public class ManyIndicesRemoteStressTest {
             logger.info("DONE  index [{}]", i);
         }
 
-        client.admin().indices().prepareGatewaySnapshot().execute().actionGet();
-
         logger.info("closing node...");
         if (node != null) {
             node.close();

+ 0 - 2
src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesStressTest.java

@@ -62,8 +62,6 @@ public class ManyIndicesStressTest {
             logger.info("DONE  index [{}] ...", i);
         }
 
-        node.client().admin().indices().prepareGatewaySnapshot().execute().actionGet();
-
         logger.info("closing node...");
         node.close();
         logger.info("node closed");