@@ -324,18 +324,6 @@ public class MachineLearningUsageTransportAction extends XPackUsageFeatureTransp
         dataframeAnalyticsUsage.put("analysis_counts", perAnalysisTypeCounterMap);
     }
 
-    private static void initializeStats(Map<String, Long> emptyStatsMap) {
-        emptyStatsMap.put("sum", 0L);
-        emptyStatsMap.put("min", 0L);
-        emptyStatsMap.put("max", 0L);
-    }
-
-    private static void updateStats(Map<String, Long> statsMap, Long value) {
-        statsMap.computeIfPresent("sum", (k, v) -> v + value);
-        statsMap.computeIfPresent("min", (k, v) -> Math.min(v, value));
-        statsMap.computeIfPresent("max", (k, v) -> Math.max(v, value));
-    }
-
     private void addInferenceUsage(ActionListener<Map<String, Object>> listener) {
         GetTrainedModelsAction.Request getModelsRequest = new GetTrainedModelsAction.Request(
             "*",
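Why the two hand-rolled helpers above could simply be deleted: initializeStats seeds "min" with 0L and updateStats folds each value in with Math.min, so for the non-negative ingest counters tracked here the reported minimum can never rise above 0. A minimal, self-contained sketch of the removed logic (the class name MinSeedDemo is invented for illustration):

    import java.util.HashMap;
    import java.util.Map;

    public class MinSeedDemo {
        public static void main(String[] args) {
            // Same shape as the removed initializeStats(): seed all three keys with 0L.
            Map<String, Long> stats = new HashMap<>();
            stats.put("sum", 0L);
            stats.put("min", 0L);
            stats.put("max", 0L);

            // Same shape as the removed updateStats(), applied to three samples.
            for (long value : new long[] { 7L, 3L, 12L }) {
                stats.computeIfPresent("sum", (k, v) -> v + value);
                stats.computeIfPresent("min", (k, v) -> Math.min(v, value));
                stats.computeIfPresent("max", (k, v) -> Math.max(v, value));
            }

            // sum=22 and max=12 are correct, but this prints 0 instead of 3:
            // Math.min(0L, value) can never exceed the 0L seed.
            System.out.println(stats.get("min"));
        }
    }

An accumulator that takes its minimum from the first observed value rather than a fixed seed sidesteps this, which is what the switch to StatsAccumulator in the next hunk buys.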
@@ -445,32 +433,34 @@ public class MachineLearningUsageTransportAction extends XPackUsageFeatureTransp
     // TODO separate out ours and users models possibly regression vs classification
     private void addInferenceIngestUsage(GetTrainedModelsStatsAction.Response statsResponse, Map<String, Object> inferenceUsage) {
         int pipelineCount = 0;
-        Map<String, Long> docCountStats = new HashMap<>(3);
-        Map<String, Long> timeStats = new HashMap<>(3);
-        Map<String, Long> failureStats = new HashMap<>(3);
-        initializeStats(docCountStats);
-        initializeStats(timeStats);
-        initializeStats(failureStats);
+        StatsAccumulator docCountStats = new StatsAccumulator();
+        StatsAccumulator timeStats = new StatsAccumulator();
+        StatsAccumulator failureStats = new StatsAccumulator();
 
         for (GetTrainedModelsStatsAction.Response.TrainedModelStats modelStats : statsResponse.getResources().results()) {
             pipelineCount += modelStats.getPipelineCount();
             IngestStats ingestStats = modelStats.getIngestStats();
-            long ingestCount = ingestStats.getTotalStats().getIngestCount();
-            long ingestTime = ingestStats.getTotalStats().getIngestTimeInMillis();
-            long failureCount = ingestStats.getTotalStats().getIngestFailedCount();
-            updateStats(docCountStats, ingestCount);
-            updateStats(timeStats, ingestTime);
-            updateStats(failureStats, failureCount);
+            docCountStats.add(ingestStats.getTotalStats().getIngestCount());
+            timeStats.add(ingestStats.getTotalStats().getIngestTimeInMillis());
+            failureStats.add(ingestStats.getTotalStats().getIngestFailedCount());
         }
 
         Map<String, Object> ingestUsage = new HashMap<>(6);
         ingestUsage.put("pipelines", createCountUsageEntry(pipelineCount));
-        ingestUsage.put("num_docs_processed", docCountStats);
-        ingestUsage.put("time_ms", timeStats);
-        ingestUsage.put("num_failures", failureStats);
+        ingestUsage.put("num_docs_processed", getMinMaxSumAsLongsFromStats(docCountStats));
+        ingestUsage.put("time_ms", getMinMaxSumAsLongsFromStats(timeStats));
+        ingestUsage.put("num_failures", getMinMaxSumAsLongsFromStats(failureStats));
         inferenceUsage.put("ingest_processors", Collections.singletonMap(MachineLearningFeatureSetUsage.ALL, ingestUsage));
     }
 
+    private Map<String, Object> getMinMaxSumAsLongsFromStats(StatsAccumulator stats) {
+        Map<String, Object> asMap = new HashMap<>(3);
+        asMap.put("sum", Double.valueOf(stats.getTotal()).longValue());
+        asMap.put("min", Double.valueOf(stats.getMin()).longValue());
+        asMap.put("max", Double.valueOf(stats.getMax()).longValue());
+        return asMap;
+    }
+
     private static int mlNodeCount(final ClusterState clusterState) {
         int mlNodeCount = 0;
         for (DiscoveryNode node : clusterState.getNodes()) {
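For context on the new code path: getMinMaxSumAsLongsFromStats only relies on the add, getTotal, getMin and getMax methods that appear in the hunk above. A rough stand-in with that surface, assuming the usual running min/max/total behaviour (SimpleStatsAccumulator is an illustrative sketch, not the x-pack class):

    // Illustrative stand-in for the StatsAccumulator surface used above.
    public class SimpleStatsAccumulator {
        private long count = 0;
        private double total = 0.0;
        private double min = 0.0;
        private double max = 0.0;

        public void add(double value) {
            // Seed min/max from the first observation instead of a fixed 0,
            // which is what the deleted initializeStats/updateStats got wrong.
            min = (count == 0) ? value : Math.min(min, value);
            max = (count == 0) ? value : Math.max(max, value);
            total += value;
            count++;
        }

        public double getTotal() { return total; }
        public double getMin() { return min; }
        public double getMax() { return max; }
    }

With that surface, Double.valueOf(stats.getTotal()).longValue() in the new helper truncates each double back to the long values the usage map reported before the change, so the response shape stays the same.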