@@ -565,139 +565,6 @@ public class SharedBlobCacheServiceTests extends ESTestCase {
         }
     }

-    public void testFetchFullCacheEntry() throws Exception {
-        Settings settings = Settings.builder()
-            .put(NODE_NAME_SETTING.getKey(), "node")
-            .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(500)).getStringRep())
-            .put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(100)).getStringRep())
-            .put("path.home", createTempDir())
-            .build();
-
-        final var bulkTaskCount = new AtomicInteger(0);
-        final var threadPool = new TestThreadPool("test");
-        final var bulkExecutor = new StoppableExecutorServiceWrapper(threadPool.generic()) {
-            @Override
-            public void execute(Runnable command) {
-                super.execute(command);
-                bulkTaskCount.incrementAndGet();
-            }
-        };
-
-        try (
-            NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
-            var cacheService = new SharedBlobCacheService<>(
-                environment,
-                settings,
-                threadPool,
-                threadPool.executor(ThreadPool.Names.GENERIC),
-                BlobCacheMetrics.NOOP
-            )
-        ) {
-            {
-                final var cacheKey = generateCacheKey();
-                assertEquals(5, cacheService.freeRegionCount());
-                final long size = size(250);
-                AtomicLong bytesRead = new AtomicLong(size);
-                final PlainActionFuture<Void> future = new PlainActionFuture<>();
-                cacheService.maybeFetchFullEntry(
-                    cacheKey,
-                    size,
-                    (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
-                        completionListener,
-                        () -> {
-                            assert streamFactory == null : streamFactory;
-                            bytesRead.addAndGet(-length);
-                            progressUpdater.accept(length);
-                        }
-                    ),
-                    bulkExecutor,
-                    future
-                );
-
-                future.get(10, TimeUnit.SECONDS);
-                assertEquals(0L, bytesRead.get());
-                assertEquals(2, cacheService.freeRegionCount());
-                assertEquals(3, bulkTaskCount.get());
-            }
-            {
-                // a download that would use up all regions should not run
-                final var cacheKey = generateCacheKey();
-                assertEquals(2, cacheService.freeRegionCount());
-                var configured = cacheService.maybeFetchFullEntry(
-                    cacheKey,
-                    size(500),
-                    (ch, chPos, streamFactory, relPos, len, update, completionListener) -> completeWith(completionListener, () -> {
-                        throw new AssertionError("Should never reach here");
-                    }),
-                    bulkExecutor,
-                    ActionListener.noop()
-                );
-                assertFalse(configured);
-                assertEquals(2, cacheService.freeRegionCount());
-            }
-        }
-
-        threadPool.shutdown();
-    }
-
-    public void testFetchFullCacheEntryConcurrently() throws Exception {
-        Settings settings = Settings.builder()
-            .put(NODE_NAME_SETTING.getKey(), "node")
-            .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(500)).getStringRep())
-            .put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(100)).getStringRep())
-            .put("path.home", createTempDir())
-            .build();
-
-        final var threadPool = new TestThreadPool("test");
-        final var bulkExecutor = new StoppableExecutorServiceWrapper(threadPool.generic());
-
-        try (
-            NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
-            var cacheService = new SharedBlobCacheService<>(
-                environment,
-                settings,
-                threadPool,
-                threadPool.executor(ThreadPool.Names.GENERIC),
-                BlobCacheMetrics.NOOP
-            )
-        ) {
-
-            final long size = size(randomIntBetween(1, 100));
-            final Thread[] threads = new Thread[10];
-            for (int i = 0; i < threads.length; i++) {
-                threads[i] = new Thread(() -> {
-                    for (int j = 0; j < 1000; j++) {
-                        final var cacheKey = generateCacheKey();
-                        safeAwait(
-                            (ActionListener<Void> listener) -> cacheService.maybeFetchFullEntry(
-                                cacheKey,
-                                size,
-                                (
-                                    channel,
-                                    channelPos,
-                                    streamFactory,
-                                    relativePos,
-                                    length,
-                                    progressUpdater,
-                                    completionListener) -> completeWith(completionListener, () -> progressUpdater.accept(length)),
-                                bulkExecutor,
-                                listener
-                            )
-                        );
-                    }
-                });
-            }
-            for (Thread thread : threads) {
-                thread.start();
-            }
-            for (Thread thread : threads) {
-                thread.join();
-            }
-        } finally {
-            assertTrue(ThreadPool.terminate(threadPool, 10L, TimeUnit.SECONDS));
-        }
-    }
-
     public void testCacheSizeRejectedOnNonFrozenNodes() {
         String cacheSize = randomBoolean()
             ? ByteSizeValue.ofBytes(size(500)).getStringRep()
@@ -1130,6 +997,195 @@ public class SharedBlobCacheServiceTests extends ESTestCase {
         threadPool.shutdown();
     }

+    public void testFetchRegion() throws Exception {
+        final long cacheSize = size(500L);
+        final long regionSize = size(100L);
+        Settings settings = Settings.builder()
+            .put(NODE_NAME_SETTING.getKey(), "node")
+            .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(cacheSize).getStringRep())
+            .put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(regionSize).getStringRep())
+            .put("path.home", createTempDir())
+            .build();
+
+        final var bulkTaskCount = new AtomicInteger(0);
+        final var threadPool = new TestThreadPool("test");
+        final var bulkExecutor = new StoppableExecutorServiceWrapper(threadPool.generic()) {
+            @Override
+            public void execute(Runnable command) {
+                super.execute(command);
+                bulkTaskCount.incrementAndGet();
+            }
+        };
+
+        try (
+            NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
+            var cacheService = new SharedBlobCacheService<>(
+                environment,
+                settings,
+                threadPool,
+                threadPool.executor(ThreadPool.Names.GENERIC),
+                BlobCacheMetrics.NOOP
+            )
+        ) {
+            {
+                // fetch a single region
+                final var cacheKey = generateCacheKey();
+                assertEquals(5, cacheService.freeRegionCount());
+                final long blobLength = size(250); // 3 regions
+                AtomicLong bytesRead = new AtomicLong(0L);
+                final PlainActionFuture<Boolean> future = new PlainActionFuture<>();
+                cacheService.fetchRegion(
+                    cacheKey,
+                    0,
+                    blobLength,
+                    (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
+                        completionListener,
+                        () -> {
+                            assert streamFactory == null : streamFactory;
+                            bytesRead.addAndGet(length);
+                            progressUpdater.accept(length);
+                        }
+                    ),
+                    bulkExecutor,
+                    true,
+                    future
+                );
+
+                var fetched = future.get(10, TimeUnit.SECONDS);
+                assertThat("Region has been fetched", fetched, is(true));
+                assertEquals(regionSize, bytesRead.get());
+                assertEquals(4, cacheService.freeRegionCount());
+                assertEquals(1, bulkTaskCount.get());
+            }
+            {
+                // fetch multiple regions to use all the cache
+                final int remainingFreeRegions = cacheService.freeRegionCount();
+                assertEquals(4, cacheService.freeRegionCount());
+
+                final var cacheKey = generateCacheKey();
+                final long blobLength = regionSize * remainingFreeRegions;
+                AtomicLong bytesRead = new AtomicLong(0L);
+
+                final PlainActionFuture<Collection<Boolean>> future = new PlainActionFuture<>();
+                final var listener = new GroupedActionListener<>(remainingFreeRegions, future);
+                for (int region = 0; region < remainingFreeRegions; region++) {
+                    cacheService.fetchRegion(
+                        cacheKey,
+                        region,
+                        blobLength,
+                        (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
+                            completionListener,
+                            () -> {
+                                assert streamFactory == null : streamFactory;
+                                bytesRead.addAndGet(length);
+                                progressUpdater.accept(length);
+                            }
+                        ),
+                        bulkExecutor,
+                        true,
+                        listener
+                    );
+                }
+
+                var results = future.get(10, TimeUnit.SECONDS);
+                assertThat(results.stream().allMatch(result -> result), is(true));
+                assertEquals(blobLength, bytesRead.get());
+                assertEquals(0, cacheService.freeRegionCount());
+                assertEquals(1 + remainingFreeRegions, bulkTaskCount.get());
+            }
+            {
+                // cache fully used, no entry old enough to be evicted and force=false should not evict entries
+                assertEquals(0, cacheService.freeRegionCount());
+                final var cacheKey = generateCacheKey();
+                final PlainActionFuture<Boolean> future = new PlainActionFuture<>();
+                cacheService.fetchRegion(
+                    cacheKey,
+                    0,
+                    regionSize,
+                    (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
+                        completionListener,
+                        () -> {
+                            throw new AssertionError("should not be executed");
+                        }
+                    ),
+                    bulkExecutor,
+                    false,
+                    future
+                );
+                assertThat("Listener is immediately completed", future.isDone(), is(true));
+                assertThat("Region already exists in cache", future.get(), is(false));
+            }
+            {
+                // cache fully used, but force=true, so the cache should evict regions to make space for the requested regions
+                assertEquals(0, cacheService.freeRegionCount());
+                AtomicLong bytesRead = new AtomicLong(0L);
+                final var cacheKey = generateCacheKey();
+                final PlainActionFuture<Collection<Boolean>> future = new PlainActionFuture<>();
+                var regionsToFetch = randomIntBetween(1, (int) (cacheSize / regionSize));
+                final var listener = new GroupedActionListener<>(regionsToFetch, future);
+                long blobLength = regionsToFetch * regionSize;
+                for (int region = 0; region < regionsToFetch; region++) {
+                    cacheService.fetchRegion(
+                        cacheKey,
+                        region,
+                        blobLength,
+                        (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
+                            completionListener,
+                            () -> {
+                                assert streamFactory == null : streamFactory;
+                                bytesRead.addAndGet(length);
+                                progressUpdater.accept(length);
+                            }
+                        ),
+                        bulkExecutor,
+                        true,
+                        listener
+                    );
+                }
+
+                var results = future.get(10, TimeUnit.SECONDS);
+                assertThat(results.stream().allMatch(result -> result), is(true));
+                assertEquals(blobLength, bytesRead.get());
+                assertEquals(0, cacheService.freeRegionCount());
+                assertEquals(regionsToFetch + 5, bulkTaskCount.get());
+            }
+            {
+                cacheService.computeDecay();
+
+                // We explicitly called computeDecay, meaning that some regions must have been demoted to level 0,
+                // therefore there should be enough room to fetch the requested range regardless of the force flag.
+                final var cacheKey = generateCacheKey();
+                assertEquals(0, cacheService.freeRegionCount());
+                long blobLength = randomLongBetween(1L, regionSize);
+                AtomicLong bytesRead = new AtomicLong(0L);
+                final PlainActionFuture<Boolean> future = new PlainActionFuture<>();
+                cacheService.fetchRegion(
+                    cacheKey,
+                    0,
+                    blobLength,
+                    (channel, channelPos, ignore, relativePos, length, progressUpdater, completionListener) -> completeWith(
+                        completionListener,
+                        () -> {
+                            assert ignore == null : ignore;
+                            bytesRead.addAndGet(length);
+                            progressUpdater.accept(length);
+                        }
+                    ),
+                    bulkExecutor,
+                    randomBoolean(),
+                    future
+                );
+
+                var fetched = future.get(10, TimeUnit.SECONDS);
+                assertThat("Region has been fetched", fetched, is(true));
+                assertEquals(blobLength, bytesRead.get());
+                assertEquals(0, cacheService.freeRegionCount());
+            }
+        } finally {
+            TestThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
+        }
+    }
+
     public void testMaybeFetchRange() throws Exception {
         final long cacheSize = size(500L);
         final long regionSize = size(100L);
@@ -1301,6 +1357,208 @@ public class SharedBlobCacheServiceTests extends ESTestCase {
         threadPool.shutdown();
     }

+    public void testFetchRange() throws Exception {
+        final long cacheSize = size(500L);
+        final long regionSize = size(100L);
+        Settings settings = Settings.builder()
+            .put(NODE_NAME_SETTING.getKey(), "node")
+            .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(cacheSize).getStringRep())
+            .put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(regionSize).getStringRep())
+            .put("path.home", createTempDir())
+            .build();
+
+        final var bulkTaskCount = new AtomicInteger(0);
+        final var threadPool = new TestThreadPool("test");
+        final var bulkExecutor = new StoppableExecutorServiceWrapper(threadPool.generic()) {
+            @Override
+            public void execute(Runnable command) {
+                super.execute(command);
+                bulkTaskCount.incrementAndGet();
+            }
+        };
+
+        try (
+            NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
+            var cacheService = new SharedBlobCacheService<>(
+                environment,
+                settings,
+                threadPool,
+                threadPool.executor(ThreadPool.Names.GENERIC),
+                BlobCacheMetrics.NOOP
+            )
+        ) {
+            {
+                // fetch a random range in a random region of the blob
+                final var cacheKey = generateCacheKey();
+                assertEquals(5, cacheService.freeRegionCount());
+
+                // blobLength is 1024000 bytes and requires 3 regions
+                final long blobLength = size(250);
+                final var regions = List.of(
+                    // region 0: 0-409600
+                    ByteRange.of(cacheService.getRegionStart(0), cacheService.getRegionEnd(0)),
+                    // region 1: 409600-819200
+                    ByteRange.of(cacheService.getRegionStart(1), cacheService.getRegionEnd(1)),
+                    // region 2: 819200-1228800
+                    ByteRange.of(cacheService.getRegionStart(2), cacheService.getRegionEnd(2))
+                );
+
+                long pos = randomLongBetween(0, blobLength - 1L);
+                long len = randomLongBetween(1, blobLength - pos);
+                var range = ByteRange.of(pos, pos + len);
+                var region = between(0, regions.size() - 1);
+                var regionRange = cacheService.mapSubRangeToRegion(range, region);
+
+                var bytesCopied = new AtomicLong(0L);
+                var future = new PlainActionFuture<Boolean>();
+                cacheService.maybeFetchRange(
+                    cacheKey,
+                    region,
+                    range,
+                    blobLength,
+                    (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
+                        completionListener,
+                        () -> {
+                            assertThat(range.start() + relativePos, equalTo(cacheService.getRegionStart(region) + regionRange.start()));
+                            assertThat(channelPos, equalTo(Math.toIntExact(regionRange.start())));
+                            assertThat(length, equalTo(Math.toIntExact(regionRange.length())));
+                            bytesCopied.addAndGet(length);
+                        }
+                    ),
+                    bulkExecutor,
+                    future
+                );
+                var fetched = future.get(10, TimeUnit.SECONDS);
+
+                assertThat(regionRange.length(), equalTo(bytesCopied.get()));
+                if (regionRange.isEmpty()) {
+                    assertThat(fetched, is(false));
+                    assertEquals(5, cacheService.freeRegionCount());
+                    assertEquals(0, bulkTaskCount.get());
+                } else {
+                    assertThat(fetched, is(true));
+                    assertEquals(4, cacheService.freeRegionCount());
+                    assertEquals(1, bulkTaskCount.get());
+                }
+            }
+            {
+                // fetch multiple ranges to use all the cache
+                final int remainingFreeRegions = cacheService.freeRegionCount();
+                assertThat(remainingFreeRegions, greaterThanOrEqualTo(4));
+                bulkTaskCount.set(0);
+
+                final var cacheKey = generateCacheKey();
+                final long blobLength = regionSize * remainingFreeRegions;
+                AtomicLong bytesCopied = new AtomicLong(0L);
+
+                final PlainActionFuture<Collection<Boolean>> future = new PlainActionFuture<>();
+                final var listener = new GroupedActionListener<>(remainingFreeRegions, future);
+                for (int region = 0; region < remainingFreeRegions; region++) {
+                    cacheService.fetchRange(
+                        cacheKey,
+                        region,
+                        ByteRange.of(0L, blobLength),
+                        blobLength,
+                        (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
+                            completionListener,
+                            () -> bytesCopied.addAndGet(length)
+                        ),
+                        bulkExecutor,
+                        true,
+                        listener
+                    );
+                }
+
+                var results = future.get(10, TimeUnit.SECONDS);
+                assertThat(results.stream().allMatch(result -> result), is(true));
+                assertEquals(blobLength, bytesCopied.get());
+                assertEquals(0, cacheService.freeRegionCount());
+                assertEquals(remainingFreeRegions, bulkTaskCount.get());
+            }
+            {
+                // cache fully used, no entry old enough to be evicted and force=false
+                assertEquals(0, cacheService.freeRegionCount());
+                final var cacheKey = generateCacheKey();
+                final var blobLength = randomLongBetween(1L, regionSize);
+                final PlainActionFuture<Boolean> future = new PlainActionFuture<>();
+                cacheService.fetchRange(
+                    cacheKey,
+                    randomIntBetween(0, 10),
+                    ByteRange.of(0L, blobLength),
+                    blobLength,
+                    (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
+                        completionListener,
+                        () -> {
+                            throw new AssertionError("should not be executed");
+                        }
+                    ),
+                    bulkExecutor,
+                    false,
+                    future
+                );
+                assertThat("Listener is immediately completed", future.isDone(), is(true));
+                assertThat("Region already exists in cache", future.get(), is(false));
+            }
+            {
+                // cache fully used, since force=true the range should be populated
+                final var cacheKey = generateCacheKey();
+                assertEquals(0, cacheService.freeRegionCount());
+                long blobLength = randomLongBetween(1L, regionSize);
+                AtomicLong bytesCopied = new AtomicLong(0L);
+                final PlainActionFuture<Boolean> future = new PlainActionFuture<>();
+                cacheService.fetchRange(
+                    cacheKey,
+                    0,
+                    ByteRange.of(0L, blobLength),
+                    blobLength,
+                    (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
+                        completionListener,
+                        () -> bytesCopied.addAndGet(length)
+                    ),
+                    bulkExecutor,
+                    true,
+                    future
+                );
+
+                var fetched = future.get(10, TimeUnit.SECONDS);
+                assertThat("Region has been fetched", fetched, is(true));
+                assertEquals(blobLength, bytesCopied.get());
+                assertEquals(0, cacheService.freeRegionCount());
+            }
+            {
+                cacheService.computeDecay();
+
+                // We explicitly called computeDecay, meaning that some regions must have been demoted to level 0,
+                // therefore there should be enough room to fetch the requested range regardless of the force flag.
+                final var cacheKey = generateCacheKey();
+                assertEquals(0, cacheService.freeRegionCount());
+                long blobLength = randomLongBetween(1L, regionSize);
+                AtomicLong bytesCopied = new AtomicLong(0L);
+                final PlainActionFuture<Boolean> future = new PlainActionFuture<>();
+                cacheService.fetchRange(
+                    cacheKey,
+                    0,
+                    ByteRange.of(0L, blobLength),
+                    blobLength,
+                    (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith(
+                        completionListener,
+                        () -> bytesCopied.addAndGet(length)
+                    ),
+                    bulkExecutor,
+                    randomBoolean(),
+                    future
+                );
+
+                var fetched = future.get(10, TimeUnit.SECONDS);
+                assertThat("Region has been fetched", fetched, is(true));
+                assertEquals(blobLength, bytesCopied.get());
+                assertEquals(0, cacheService.freeRegionCount());
+            }
+        } finally {
+            TestThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
+        }
+    }
+
     public void testPopulate() throws Exception {
         final long regionSize = size(1L);
         Settings settings = Settings.builder()