@@ -26,8 +26,6 @@ import org.apache.lucene.index.IndexCommit;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.RateLimiter;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.ExceptionsHelper;
@@ -954,8 +952,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                                 final Map<String, BlobMetaData> blobs,
                                 final String reason) {
             final String indexGeneration = Integer.toString(fileListGeneration);
-            final String currentIndexGen = indexShardSnapshotsFormat.blobName(indexGeneration);
-
             final BlobStoreIndexShardSnapshots updatedSnapshots = new BlobStoreIndexShardSnapshots(snapshots);
             try {
                 // Delete temporary index files first, as we might otherwise fail in the next step creating the new index file if an earlier
@@ -998,7 +994,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                             snapshotId, shardId), e);
                 }
             } catch (IOException e) {
-                String message = "Failed to finalize " + reason + " with shard index [" + currentIndexGen + "]";
+                String message =
+                    "Failed to finalize " + reason + " with shard index [" + indexShardSnapshotsFormat.blobName(indexGeneration) + "]";
                 throw new IndexShardSnapshotFailedException(shardId, message, e);
             }
         }
@@ -1135,16 +1132,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                 List<BlobStoreIndexShardSnapshot.FileInfo> filesInfo = snapshots.findPhysicalIndexFiles(fileName);
                 if (filesInfo != null) {
                     for (BlobStoreIndexShardSnapshot.FileInfo fileInfo : filesInfo) {
-                        try {
-                            // in 1.3.3 we added additional hashes for .si / segments_N files
-                            // to ensure we don't double the space in the repo since old snapshots
-                            // don't have this hash we try to read that hash from the blob store
-                            // in a bwc compatible way.
-                            maybeRecalculateMetadataHash(blobContainer, fileInfo, metadata);
-                        } catch (Exception e) {
-                            logger.warn(() -> new ParameterizedMessage("{} Can't calculate hash from blob for file [{}] [{}]",
-                                shardId, fileInfo.physicalName(), fileInfo.metadata()), e);
-                        }
                         if (fileInfo.isSame(md) && snapshotFileExistsInBlobs(fileInfo, blobs)) {
                             // a commit point file with the same name, size and checksum was already copied to repository
                             // we will reuse it for this snapshot
@@ -1315,32 +1302,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
         }
     }
 
-    /**
-     * This is a BWC layer to ensure we update the snapshots metadata with the corresponding hashes before we compare them.
-     * The new logic for StoreFileMetaData reads the entire {@code .si} and {@code segments.n} files to strengthen the
-     * comparison of the files on a per-segment / per-commit level.
-     */
-    private static void maybeRecalculateMetadataHash(final BlobContainer blobContainer, final BlobStoreIndexShardSnapshot.FileInfo fileInfo,
-                                                     Store.MetadataSnapshot snapshot) throws Exception {
-        final StoreFileMetaData metadata;
-        if (fileInfo != null && (metadata = snapshot.get(fileInfo.physicalName())) != null) {
-            if (metadata.hash().length > 0 && fileInfo.metadata().hash().length == 0) {
-                // we have a hash - check if our repo has a hash too otherwise we have
-                // to calculate it.
-                // we might have multiple parts even though the file is small... make sure we read all of it.
-                try (InputStream stream = new PartSliceStream(blobContainer, fileInfo)) {
-                    BytesRefBuilder builder = new BytesRefBuilder();
-                    Store.MetadataSnapshot.hashFile(builder, stream, fileInfo.length());
-                    BytesRef hash = fileInfo.metadata().hash(); // reset the file infos metadata hash
-                    assert hash.length == 0;
-                    hash.bytes = builder.bytes();
-                    hash.offset = 0;
-                    hash.length = builder.length();
-                }
-            }
-        }
-    }
-
     private static final class PartSliceStream extends SlicedInputStream {
 
         private final BlobContainer container;