InternalClusterInfoService.java

/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

package org.elasticsearch.cluster;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.action.admin.indices.stats.ShardStats;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.Consumer;

import static org.elasticsearch.core.Strings.format;

/**
 * InternalClusterInfoService provides the ClusterInfoService interface,
 * routinely updated on a timer. The timer can be dynamically changed by
 * setting the <code>cluster.info.update.interval</code> setting (defaulting
 * to 30 seconds). The InternalClusterInfoService only runs on the master node.
 * Listens for changes in the number of data nodes and immediately submits a
 * ClusterInfoUpdateJob if a node has been added.
 *
 * Every time the timer runs, if <code>cluster.routing.allocation.disk.threshold_enabled</code>
 * is enabled, gathers information about the disk usage and shard sizes across the cluster,
 * computes a new cluster info and notifies the registered listeners. If disk threshold
 * monitoring is disabled, listeners are called with an empty cluster info.
 */
public class InternalClusterInfoService implements ClusterInfoService, ClusterStateListener {

    private static final Logger logger = LogManager.getLogger(InternalClusterInfoService.class);

    public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING = Setting.timeSetting(
        "cluster.info.update.interval",
        TimeValue.timeValueSeconds(30),
        TimeValue.timeValueSeconds(10),
        Property.Dynamic,
        Property.NodeScope
    );

    public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING = Setting.positiveTimeSetting(
        "cluster.info.update.timeout",
        TimeValue.timeValueSeconds(15),
        Property.Dynamic,
        Property.NodeScope
    );

    private volatile boolean enabled;
    private volatile TimeValue updateFrequency;
    private volatile TimeValue fetchTimeout;

    private volatile Map<String, DiskUsage> leastAvailableSpaceUsages;
    private volatile Map<String, DiskUsage> mostAvailableSpaceUsages;
    private volatile IndicesStatsSummary indicesStatsSummary;

    private final ThreadPool threadPool;
    private final Client client;
    private final List<Consumer<ClusterInfo>> listeners = new CopyOnWriteArrayList<>();

    private final Object mutex = new Object();
    private final List<ActionListener<ClusterInfo>> nextRefreshListeners = new ArrayList<>();

    private AsyncRefresh currentRefresh;
    private RefreshScheduler refreshScheduler;

    public InternalClusterInfoService(Settings settings, ClusterService clusterService, ThreadPool threadPool, Client client) {
        this.leastAvailableSpaceUsages = Map.of();
        this.mostAvailableSpaceUsages = Map.of();
        this.indicesStatsSummary = IndicesStatsSummary.EMPTY;
        this.threadPool = threadPool;
        this.client = client;
        this.updateFrequency = INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.get(settings);
        this.fetchTimeout = INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.get(settings);
        this.enabled = DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.get(settings);
        ClusterSettings clusterSettings = clusterService.getClusterSettings();
        clusterSettings.addSettingsUpdateConsumer(INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING, this::setFetchTimeout);
        clusterSettings.addSettingsUpdateConsumer(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING, this::setUpdateFrequency);
        clusterSettings.addSettingsUpdateConsumer(
            DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING,
            this::setEnabled
        );
    }
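    // Usage sketch (illustrative only, not part of the original file): callers register a
    // Consumer<ClusterInfo> to be notified after each refresh, and may request an immediate
    // refresh themselves. The service instance below is hypothetical; in a real node it is
    // constructed and wired up by the node bootstrap code.
    //
    //     InternalClusterInfoService clusterInfoService = ...;
    //     clusterInfoService.addListener(clusterInfo -> {
    //         // react to the freshly computed ClusterInfo, e.g. inspect disk usages
    //     });
    //     clusterInfoService.refreshAsync(new PlainActionFuture<>()); // completes with the new ClusterInfo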
    private void setEnabled(boolean enabled) {
        this.enabled = enabled;
    }

    private void setFetchTimeout(TimeValue fetchTimeout) {
        this.fetchTimeout = fetchTimeout;
    }

    void setUpdateFrequency(TimeValue updateFrequency) {
        this.updateFrequency = updateFrequency;
    }
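    /**
     * Schedules periodic cluster info refreshes while this node is the elected master, stops
     * scheduling when it is not, and triggers an immediate refresh whenever a data node is added.
     */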
    @Override
    public void clusterChanged(ClusterChangedEvent event) {
        final Runnable newRefresh;
        synchronized (mutex) {
            if (event.localNodeMaster() == false) {
                refreshScheduler = null;
                return;
            }

            if (refreshScheduler == null) {
                logger.trace("elected as master, scheduling cluster info update tasks");
                refreshScheduler = new RefreshScheduler();
                nextRefreshListeners.add(refreshScheduler.getListener());
            }

            newRefresh = getNewRefresh();
            assert assertRefreshInvariant();
        }
        newRefresh.run();

        // Refresh if a data node was added
        for (DiscoveryNode addedNode : event.nodesDelta().addedNodes()) {
            if (addedNode.canContainData()) {
                refreshAsync(new PlainActionFuture<>());
                break;
            }
        }
    }
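    /**
     * A single in-flight refresh: collects node-level filesystem stats and shard-level store
     * stats in parallel, then publishes the combined result to the registered listeners once
     * both responses (or their failures) have been processed.
     */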
    private class AsyncRefresh {

        private final List<ActionListener<ClusterInfo>> thisRefreshListeners;
        private final CountDown countDown = new CountDown(2);

        AsyncRefresh(List<ActionListener<ClusterInfo>> thisRefreshListeners) {
            this.thisRefreshListeners = thisRefreshListeners;
        }

        void execute() {
            if (enabled == false) {
                logger.trace("skipping collecting info from cluster, notifying listeners with empty cluster info");
                leastAvailableSpaceUsages = Map.of();
                mostAvailableSpaceUsages = Map.of();
                indicesStatsSummary = IndicesStatsSummary.EMPTY;
                callListeners();
                return;
            }

            assert countDown.isCountedDown() == false;
            logger.trace("starting async refresh");

            final NodesStatsRequest nodesStatsRequest = new NodesStatsRequest("data:true");
            nodesStatsRequest.clear();
            nodesStatsRequest.addMetric(NodesStatsRequest.Metric.FS.metricName());
            nodesStatsRequest.timeout(fetchTimeout);
            client.admin().cluster().nodesStats(nodesStatsRequest, ActionListener.runAfter(new ActionListener<>() {
                @Override
                public void onResponse(NodesStatsResponse nodesStatsResponse) {
                    logger.trace("received node stats response");

                    for (final FailedNodeException failure : nodesStatsResponse.failures()) {
                        logger.warn(() -> "failed to retrieve stats for node [" + failure.nodeId() + "]", failure.getCause());
                    }

                    Map<String, DiskUsage> leastAvailableUsagesBuilder = new HashMap<>();
                    Map<String, DiskUsage> mostAvailableUsagesBuilder = new HashMap<>();
                    fillDiskUsagePerNode(
                        adjustNodesStats(nodesStatsResponse.getNodes()),
                        leastAvailableUsagesBuilder,
                        mostAvailableUsagesBuilder
                    );
                    leastAvailableSpaceUsages = Map.copyOf(leastAvailableUsagesBuilder);
                    mostAvailableSpaceUsages = Map.copyOf(mostAvailableUsagesBuilder);
                }

                @Override
                public void onFailure(Exception e) {
                    if (e instanceof ClusterBlockException) {
                        logger.trace("failed to retrieve node stats", e);
                    } else {
                        logger.warn("failed to retrieve node stats", e);
                    }
                    leastAvailableSpaceUsages = Map.of();
                    mostAvailableSpaceUsages = Map.of();
                }
            }, this::onStatsProcessed));

            final IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest();
            indicesStatsRequest.clear();
            indicesStatsRequest.store(true);
            indicesStatsRequest.indicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_CLOSED_HIDDEN);
            indicesStatsRequest.timeout(fetchTimeout);
            client.admin().indices().stats(indicesStatsRequest, ActionListener.runAfter(new ActionListener<>() {
                @Override
                public void onResponse(IndicesStatsResponse indicesStatsResponse) {
                    logger.trace("received indices stats response");

                    if (indicesStatsResponse.getShardFailures().length > 0) {
                        final Set<String> failedNodeIds = new HashSet<>();
                        for (final DefaultShardOperationFailedException shardFailure : indicesStatsResponse.getShardFailures()) {
                            if (shardFailure.getCause() instanceof final FailedNodeException failedNodeException) {
                                if (failedNodeIds.add(failedNodeException.nodeId())) {
                                    logger.warn(
                                        () -> format("failed to retrieve shard stats from node [%s]", failedNodeException.nodeId()),
                                        failedNodeException.getCause()
                                    );
                                }
                                logger.trace(
                                    () -> format(
                                        "failed to retrieve stats for shard [%s][%s]",
                                        shardFailure.index(),
                                        shardFailure.shardId()
                                    ),
                                    shardFailure.getCause()
                                );
                            } else {
                                logger.warn(
                                    () -> format(
                                        "failed to retrieve stats for shard [%s][%s]",
                                        shardFailure.index(),
                                        shardFailure.shardId()
                                    ),
                                    shardFailure.getCause()
                                );
                            }
                        }
                    }

                    final ShardStats[] stats = indicesStatsResponse.getShards();
                    final Map<String, Long> shardSizeByIdentifierBuilder = new HashMap<>();
                    final Map<ShardId, Long> shardDataSetSizeBuilder = new HashMap<>();
                    final Map<ShardRouting, String> dataPathByShardRoutingBuilder = new HashMap<>();
                    final Map<ClusterInfo.NodeAndPath, ClusterInfo.ReservedSpace.Builder> reservedSpaceBuilders = new HashMap<>();
                    buildShardLevelInfo(
                        stats,
                        shardSizeByIdentifierBuilder,
                        shardDataSetSizeBuilder,
                        dataPathByShardRoutingBuilder,
                        reservedSpaceBuilders
                    );

                    final Map<ClusterInfo.NodeAndPath, ClusterInfo.ReservedSpace> rsrvdSpace = new HashMap<>();
                    reservedSpaceBuilders.forEach((nodeAndPath, builder) -> rsrvdSpace.put(nodeAndPath, builder.build()));

                    indicesStatsSummary = new IndicesStatsSummary(
                        Map.copyOf(shardSizeByIdentifierBuilder),
                        Map.copyOf(shardDataSetSizeBuilder),
                        Map.copyOf(dataPathByShardRoutingBuilder),
                        Map.copyOf(rsrvdSpace)
                    );
                }

                @Override
                public void onFailure(Exception e) {
                    if (e instanceof ClusterBlockException) {
                        logger.trace("failed to retrieve indices stats", e);
                    } else {
                        logger.warn("failed to retrieve indices stats", e);
                    }
                    indicesStatsSummary = IndicesStatsSummary.EMPTY;
                }
            }, this::onStatsProcessed));
        }

        private void onStatsProcessed() {
            if (countDown.countDown()) {
                logger.trace("stats all received, computing cluster info and notifying listeners");
                callListeners();
            }
        }

        private void callListeners() {
            try {
                final ClusterInfo clusterInfo = getClusterInfo();
                boolean anyListeners = false;
                for (final Consumer<ClusterInfo> listener : listeners) {
                    anyListeners = true;
                    try {
                        logger.trace("notifying [{}] of new cluster info", listener);
                        listener.accept(clusterInfo);
                    } catch (Exception e) {
                        logger.info(() -> "failed to notify [" + listener + "] of new cluster info", e);
                    }
                }
                assert anyListeners : "expected to notify at least one listener";

                for (final ActionListener<ClusterInfo> listener : thisRefreshListeners) {
                    listener.onResponse(clusterInfo);
                }
            } finally {
                onRefreshComplete(this);
            }
        }
    }
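    /**
     * Called when the given refresh finishes; clears it as the current refresh and immediately
     * starts a new one if further refresh requests arrived while it was running.
     */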
    private void onRefreshComplete(AsyncRefresh completedRefresh) {
        final Runnable newRefresh;
        synchronized (mutex) {
            assert currentRefresh == completedRefresh;
            currentRefresh = null;

            // We only ever run one refresh at once; if another refresh was requested while this one was running then we must start another
            // to ensure that the stats it sees are up-to-date.
            newRefresh = getNewRefresh();
            assert assertRefreshInvariant();
        }
        newRefresh.run();
    }
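    /**
     * Under the mutex, decides whether a new refresh should start: returns a no-op if a refresh
     * is already running or nobody is waiting, otherwise captures the waiting listeners into a
     * new {@link AsyncRefresh} and returns a runnable that starts it (to be run outside the mutex).
     */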
    private Runnable getNewRefresh() {
        assert Thread.holdsLock(mutex) : "mutex not held";

        if (currentRefresh != null) {
            return () -> {};
        }

        if (nextRefreshListeners.isEmpty()) {
            return () -> {};
        }

        final ArrayList<ActionListener<ClusterInfo>> thisRefreshListeners = new ArrayList<>(nextRefreshListeners);
        nextRefreshListeners.clear();
        currentRefresh = new AsyncRefresh(thisRefreshListeners);
        return currentRefresh::execute;
    }

    private boolean assertRefreshInvariant() {
        assert Thread.holdsLock(mutex) : "mutex not held";
        // We never leave a refresh listener waiting unless we're already refreshing (which will pick up the waiting listener on completion)
        assert nextRefreshListeners.isEmpty() || currentRefresh != null;
        return true;
    }
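    /**
     * Re-schedules the next periodic refresh each time the previous one completes, for as long
     * as this scheduler is still the active one (i.e. this node is still the elected master).
     */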
    private class RefreshScheduler {

        ActionListener<ClusterInfo> getListener() {
            return ActionListener.wrap(() -> {
                if (shouldRefresh()) {
                    threadPool.scheduleUnlessShuttingDown(updateFrequency, ThreadPool.Names.SAME, () -> {
                        if (shouldRefresh()) {
                            refreshAsync(getListener());
                        }
                    });
                }
            });
        }

        private boolean shouldRefresh() {
            synchronized (mutex) {
                return refreshScheduler == this;
            }
        }
    }
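    /**
     * Returns the most recently computed {@link ClusterInfo}, assembled from the latest disk
     * usage and shard size snapshots. Cheap to call; does not trigger a refresh.
     */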
    @Override
    public ClusterInfo getClusterInfo() {
        final IndicesStatsSummary indicesStatsSummary = this.indicesStatsSummary; // single volatile read
        return new ClusterInfo(
            leastAvailableSpaceUsages,
            mostAvailableSpaceUsages,
            indicesStatsSummary.shardSizes,
            indicesStatsSummary.shardDataSetSizes,
            indicesStatsSummary.shardRoutingToDataPath,
            indicesStatsSummary.reservedSpace
        );
    }

    // allow tests to adjust the node stats on receipt
    List<NodeStats> adjustNodesStats(List<NodeStats> nodeStats) {
        return nodeStats;
    }
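    /**
     * Requests a refresh and registers the given listener to be completed with the resulting
     * {@link ClusterInfo}. If a refresh is already running, the listener is picked up by the
     * refresh that starts once the current one completes.
     */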
    void refreshAsync(ActionListener<ClusterInfo> future) {
        final Runnable newRefresh;
        synchronized (mutex) {
            nextRefreshListeners.add(future);
            newRefresh = getNewRefresh();
            assert assertRefreshInvariant();
        }
        newRefresh.run();
    }

    @Override
    public void addListener(Consumer<ClusterInfo> clusterInfoConsumer) {
        listeners.add(clusterInfoConsumer);
    }
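    /**
     * Translates the raw {@link ShardStats} into the per-shard maps used by {@link ClusterInfo}:
     * store size per shard copy, the largest data set size seen per {@link ShardId}, the data path
     * of each shard routing, and the reserved space accumulated per node and data path.
     */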
    static void buildShardLevelInfo(
        ShardStats[] stats,
        Map<String, Long> shardSizes,
        Map<ShardId, Long> shardDataSetSizeBuilder,
        Map<ShardRouting, String> newShardRoutingToDataPath,
        Map<ClusterInfo.NodeAndPath, ClusterInfo.ReservedSpace.Builder> reservedSpaceByShard
    ) {
        for (ShardStats s : stats) {
            final ShardRouting shardRouting = s.getShardRouting();
            newShardRoutingToDataPath.put(shardRouting, s.getDataPath());

            final StoreStats storeStats = s.getStats().getStore();
            if (storeStats == null) {
                continue;
            }
            final long size = storeStats.sizeInBytes();
            final long dataSetSize = storeStats.totalDataSetSizeInBytes();
            final long reserved = storeStats.getReservedSize().getBytes();

            final String shardIdentifier = ClusterInfo.shardIdentifierFromRouting(shardRouting);
            logger.trace("shard: {} size: {} reserved: {}", shardIdentifier, size, reserved);
            shardSizes.put(shardIdentifier, size);
            if (dataSetSize > shardDataSetSizeBuilder.getOrDefault(shardRouting.shardId(), -1L)) {
                shardDataSetSizeBuilder.put(shardRouting.shardId(), dataSetSize);
            }

            if (reserved != StoreStats.UNKNOWN_RESERVED_BYTES) {
                final ClusterInfo.ReservedSpace.Builder reservedSpaceBuilder = reservedSpaceByShard.computeIfAbsent(
                    new ClusterInfo.NodeAndPath(shardRouting.currentNodeId(), s.getDataPath()),
                    t -> new ClusterInfo.ReservedSpace.Builder()
                );
                reservedSpaceBuilder.add(shardRouting.shardId(), reserved);
            }
        }
    }
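    /**
     * For each node's filesystem stats, records two {@link DiskUsage} entries: one for the data
     * path with the least available space and one for the path with the most available space.
     * Nodes without filesystem stats, and paths reporting a negative total size, are skipped.
     */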
    static void fillDiskUsagePerNode(
        List<NodeStats> nodeStatsArray,
        Map<String, DiskUsage> newLeastAvailableUsages,
        Map<String, DiskUsage> newMostAvailableUsages
    ) {
        for (NodeStats nodeStats : nodeStatsArray) {
            if (nodeStats.getFs() == null) {
                logger.warn("node [{}/{}] did not return any filesystem stats", nodeStats.getNode().getName(), nodeStats.getNode().getId());
                continue;
            }

            FsInfo.Path leastAvailablePath = null;
            FsInfo.Path mostAvailablePath = null;
            for (FsInfo.Path info : nodeStats.getFs()) {
                if (leastAvailablePath == null) {
                    // noinspection ConstantConditions this assertion is for the benefit of readers, it's always true
                    assert mostAvailablePath == null;
                    mostAvailablePath = leastAvailablePath = info;
                } else if (leastAvailablePath.getAvailable().getBytes() > info.getAvailable().getBytes()) {
                    leastAvailablePath = info;
                } else if (mostAvailablePath.getAvailable().getBytes() < info.getAvailable().getBytes()) {
                    mostAvailablePath = info;
                }
            }
            if (leastAvailablePath == null) {
                // noinspection ConstantConditions this assertion is for the benefit of readers, it's always true
                assert mostAvailablePath == null;
                logger.warn("node [{}/{}] did not return any filesystem stats", nodeStats.getNode().getName(), nodeStats.getNode().getId());
                continue;
            }

            final String nodeId = nodeStats.getNode().getId();
            final String nodeName = nodeStats.getNode().getName();
            if (logger.isTraceEnabled()) {
                logger.trace(
                    "node [{}]: most available: total: {}, available: {} / least available: total: {}, available: {}",
                    nodeId,
                    mostAvailablePath.getTotal(),
                    mostAvailablePath.getAvailable(),
                    leastAvailablePath.getTotal(),
                    leastAvailablePath.getAvailable()
                );
            }

            if (leastAvailablePath.getTotal().getBytes() < 0) {
                if (logger.isTraceEnabled()) {
                    logger.trace(
                        "node: [{}] least available path has less than 0 total bytes of disk [{}], skipping",
                        nodeId,
                        leastAvailablePath.getTotal().getBytes()
                    );
                }
            } else {
                newLeastAvailableUsages.put(
                    nodeId,
                    new DiskUsage(
                        nodeId,
                        nodeName,
                        leastAvailablePath.getPath(),
                        leastAvailablePath.getTotal().getBytes(),
                        leastAvailablePath.getAvailable().getBytes()
                    )
                );
            }
            if (mostAvailablePath.getTotal().getBytes() < 0) {
                if (logger.isTraceEnabled()) {
                    logger.trace(
                        "node: [{}] most available path has less than 0 total bytes of disk [{}], skipping",
                        nodeId,
                        mostAvailablePath.getTotal().getBytes()
                    );
                }
            } else {
                newMostAvailableUsages.put(
                    nodeId,
                    new DiskUsage(
                        nodeId,
                        nodeName,
                        mostAvailablePath.getPath(),
                        mostAvailablePath.getTotal().getBytes(),
                        mostAvailablePath.getAvailable().getBytes()
                    )
                );
            }
        }
    }
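    /**
     * Immutable snapshot of the shard-level stats gathered by a refresh, kept in a single
     * volatile field so that readers see a consistent set of maps.
     */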
    private record IndicesStatsSummary(
        Map<String, Long> shardSizes,
        Map<ShardId, Long> shardDataSetSizes,
        Map<ShardRouting, String> shardRoutingToDataPath,
        Map<ClusterInfo.NodeAndPath, ClusterInfo.ReservedSpace> reservedSpace
    ) {
        static final IndicesStatsSummary EMPTY = new IndicesStatsSummary(Map.of(), Map.of(), Map.of(), Map.of());
    }
}