dashboard.go 19 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584
  1. package analytics
  2. import (
  3. "context"
  4. "fmt"
  5. "sort"
  6. "time"
  7. "github.com/0xJacky/Nginx-UI/internal/nginx_log/searcher"
  8. "github.com/uozi-tech/cosy/logger"
  9. )
  10. // GetDashboardAnalytics generates comprehensive dashboard analytics
  11. func (s *service) GetDashboardAnalytics(ctx context.Context, req *DashboardQueryRequest) (*DashboardAnalytics, error) {
  12. if req == nil {
  13. return nil, fmt.Errorf("request cannot be nil")
  14. }
  15. if err := s.ValidateTimeRange(req.StartTime, req.EndTime); err != nil {
  16. return nil, fmt.Errorf("invalid time range: %w", err)
  17. }
  18. searchReq := &searcher.SearchRequest{
  19. StartTime: &req.StartTime,
  20. EndTime: &req.EndTime,
  21. LogPaths: req.LogPaths,
  22. IncludeFacets: true,
  23. FacetFields: []string{"browser", "os", "device_type", "ip"},
  24. FacetSize: 1000, // Reduced: we'll use cardinality counter for path_exact
  25. UseCache: true,
  26. SortBy: "timestamp",
  27. SortOrder: "desc",
  28. Limit: 0, // Don't fetch documents, use aggregations instead
  29. }
  30. // Execute search
  31. result, err := s.searcher.Search(ctx, searchReq)
  32. if err != nil {
  33. return nil, fmt.Errorf("failed to search logs for dashboard: %w", err)
  34. }
  35. // --- DIAGNOSTIC LOGGING ---
  36. logger.Debugf("Dashboard search completed. Total Hits: %d, Returned Hits: %d, Facets: %d",
  37. result.TotalHits, len(result.Hits), len(result.Facets))
  38. if result.TotalHits > uint64(len(result.Hits)) {
  39. logger.Warnf("Dashboard sampling: using %d/%d documents for time calculations (%.1f%% coverage)",
  40. len(result.Hits), result.TotalHits, float64(len(result.Hits))/float64(result.TotalHits)*100)
  41. }
  42. // --- END DIAGNOSTIC LOGGING ---
  43. // Initialize analytics with empty slices
  44. analytics := &DashboardAnalytics{}
  45. // Calculate analytics if we have results
  46. if result.TotalHits > 0 {
  47. // For now, use batch queries to get complete data
  48. analytics.HourlyStats = s.calculateHourlyStatsWithBatch(ctx, req)
  49. analytics.DailyStats = s.calculateDailyStatsWithBatch(ctx, req)
  50. // Use cardinality counter for efficient unique URLs counting
  51. analytics.TopURLs = s.calculateTopURLsWithCardinality(ctx, req)
  52. analytics.Browsers = s.calculateBrowserStats(result)
  53. analytics.OperatingSystems = s.calculateOSStats(result)
  54. analytics.Devices = s.calculateDeviceStats(result)
  55. } else {
  56. // Ensure slices are initialized even if there are no hits
  57. analytics.HourlyStats = make([]HourlyAccessStats, 0)
  58. analytics.DailyStats = make([]DailyAccessStats, 0)
  59. analytics.TopURLs = make([]URLAccessStats, 0)
  60. analytics.Browsers = make([]BrowserAccessStats, 0)
  61. analytics.OperatingSystems = make([]OSAccessStats, 0)
  62. analytics.Devices = make([]DeviceAccessStats, 0)
  63. }
  64. // Calculate summary with cardinality counting for accurate unique pages
  65. analytics.Summary = s.calculateDashboardSummaryWithCardinality(ctx, analytics, result, req)
  66. return analytics, nil
  67. }
  68. // calculateHourlyStats calculates hourly access statistics.
  69. // Returns 48 hours of data centered around the end_date to support all timezones.
  70. func (s *service) calculateHourlyStats(result *searcher.SearchResult, startTime, endTime int64) []HourlyAccessStats {
  71. // Use a map with timestamp as key for easier processing
  72. hourlyMap := make(map[int64]*HourlyAccessStats)
  73. uniqueIPsPerHour := make(map[int64]map[string]bool)
  74. // Calculate 48 hours range: from UTC end_date minus 12 hours to plus 36 hours
  75. // This covers UTC-12 to UTC+14 timezones
  76. endDate := time.Unix(endTime, 0).UTC()
  77. endDateStart := time.Date(endDate.Year(), endDate.Month(), endDate.Day(), 0, 0, 0, 0, time.UTC)
  78. // Create hourly buckets for 48 hours (12 hours before to 36 hours after the UTC date boundary)
  79. rangeStart := endDateStart.Add(-12 * time.Hour)
  80. rangeEnd := endDateStart.Add(36 * time.Hour)
  81. // Initialize hourly buckets
  82. for t := rangeStart; t.Before(rangeEnd); t = t.Add(time.Hour) {
  83. timestamp := t.Unix()
  84. hourlyMap[timestamp] = &HourlyAccessStats{
  85. Hour: t.Hour(),
  86. UV: 0,
  87. PV: 0,
  88. Timestamp: timestamp,
  89. }
  90. uniqueIPsPerHour[timestamp] = make(map[string]bool)
  91. }
  92. // Process search results - count hits within the 48-hour window
  93. for _, hit := range result.Hits {
  94. if timestampField, ok := hit.Fields["timestamp"]; ok {
  95. if timestampFloat, ok := timestampField.(float64); ok {
  96. timestamp := int64(timestampFloat)
  97. // Check if this hit falls within our 48-hour window
  98. if timestamp >= rangeStart.Unix() && timestamp < rangeEnd.Unix() {
  99. // Round down to the hour
  100. t := time.Unix(timestamp, 0).UTC()
  101. hourTimestamp := time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, time.UTC).Unix()
  102. if stats, exists := hourlyMap[hourTimestamp]; exists {
  103. stats.PV++
  104. if ipField, ok := hit.Fields["ip"]; ok {
  105. if ip, ok := ipField.(string); ok && ip != "" {
  106. if !uniqueIPsPerHour[hourTimestamp][ip] {
  107. uniqueIPsPerHour[hourTimestamp][ip] = true
  108. stats.UV++
  109. }
  110. }
  111. }
  112. }
  113. }
  114. }
  115. }
  116. }
  117. // Convert to slice and sort by timestamp
  118. var stats []HourlyAccessStats
  119. for _, stat := range hourlyMap {
  120. stats = append(stats, *stat)
  121. }
  122. sort.Slice(stats, func(i, j int) bool {
  123. return stats[i].Timestamp < stats[j].Timestamp
  124. })
  125. return stats
  126. }
  127. // calculateDailyStats calculates daily access statistics
  128. func (s *service) calculateDailyStats(result *searcher.SearchResult, startTime, endTime int64) []DailyAccessStats {
  129. dailyMap := make(map[string]*DailyAccessStats)
  130. uniqueIPsPerDay := make(map[string]map[string]bool)
  131. // Initialize daily buckets for the entire time range
  132. start := time.Unix(startTime, 0)
  133. end := time.Unix(endTime, 0)
  134. for t := start; t.Before(end) || t.Equal(end); t = t.AddDate(0, 0, 1) {
  135. dateStr := t.Format("2006-01-02")
  136. if _, exists := dailyMap[dateStr]; !exists {
  137. dailyMap[dateStr] = &DailyAccessStats{
  138. Date: dateStr,
  139. UV: 0,
  140. PV: 0,
  141. Timestamp: t.Unix(),
  142. }
  143. uniqueIPsPerDay[dateStr] = make(map[string]bool)
  144. }
  145. }
  146. // Process search results
  147. for _, hit := range result.Hits {
  148. if timestampField, ok := hit.Fields["timestamp"]; ok {
  149. if timestampFloat, ok := timestampField.(float64); ok {
  150. timestamp := int64(timestampFloat)
  151. t := time.Unix(timestamp, 0)
  152. dateStr := t.Format("2006-01-02")
  153. if stats, exists := dailyMap[dateStr]; exists {
  154. stats.PV++
  155. if ipField, ok := hit.Fields["ip"]; ok {
  156. if ip, ok := ipField.(string); ok && ip != "" {
  157. if !uniqueIPsPerDay[dateStr][ip] {
  158. uniqueIPsPerDay[dateStr][ip] = true
  159. stats.UV++
  160. }
  161. }
  162. }
  163. }
  164. }
  165. }
  166. }
  167. // Convert to slice and sort
  168. var stats []DailyAccessStats
  169. for _, stat := range dailyMap {
  170. stats = append(stats, *stat)
  171. }
  172. sort.Slice(stats, func(i, j int) bool {
  173. return stats[i].Timestamp < stats[j].Timestamp
  174. })
  175. return stats
  176. }
  177. // calculateTopURLs calculates top URL statistics from facets (legacy method)
  178. func (s *service) calculateTopURLs(result *searcher.SearchResult) []URLAccessStats {
  179. if facet, ok := result.Facets["path_exact"]; ok {
  180. logger.Infof("📊 Facet-based URL calculation: facet.Total=%d, TotalHits=%d",
  181. facet.Total, result.TotalHits)
  182. urlStats := calculateTopFieldStats(facet, int(result.TotalHits), func(term string, count int, percent float64) URLAccessStats {
  183. return URLAccessStats{URL: term, Visits: count, Percent: percent}
  184. })
  185. logger.Infof("📈 Calculated %d URL stats from facet", len(urlStats))
  186. return urlStats
  187. } else {
  188. logger.Errorf("❌ path_exact facet not found in search results")
  189. return []URLAccessStats{}
  190. }
  191. }
  192. // calculateTopURLsWithCardinality calculates top URL statistics using facet-based approach
  193. // Always returns actual top URLs with their visit counts instead of just a summary
  194. func (s *service) calculateTopURLsWithCardinality(ctx context.Context, req *DashboardQueryRequest) []URLAccessStats {
  195. // Always use facet-based calculation to get actual top URLs with visit counts
  196. searchReq := &searcher.SearchRequest{
  197. StartTime: &req.StartTime,
  198. EndTime: &req.EndTime,
  199. LogPaths: req.LogPaths,
  200. IncludeFacets: true,
  201. FacetFields: []string{"path_exact"},
  202. FacetSize: 100, // Reasonable facet size to get top URLs
  203. UseCache: true,
  204. }
  205. result, err := s.searcher.Search(ctx, searchReq)
  206. if err != nil {
  207. logger.Errorf("Failed to search for URL facets: %v", err)
  208. return []URLAccessStats{}
  209. }
  210. // Get actual top URLs with visit counts
  211. return s.calculateTopURLs(result)
  212. }
  213. // calculateBrowserStats calculates browser statistics from facets
  214. func (s *service) calculateBrowserStats(result *searcher.SearchResult) []BrowserAccessStats {
  215. return calculateTopFieldStats(result.Facets["browser"], int(result.TotalHits), func(term string, count int, percent float64) BrowserAccessStats {
  216. return BrowserAccessStats{Browser: term, Count: count, Percent: percent}
  217. })
  218. }
  219. // calculateOSStats calculates operating system statistics from facets
  220. func (s *service) calculateOSStats(result *searcher.SearchResult) []OSAccessStats {
  221. return calculateTopFieldStats(result.Facets["os"], int(result.TotalHits), func(term string, count int, percent float64) OSAccessStats {
  222. return OSAccessStats{OS: term, Count: count, Percent: percent}
  223. })
  224. }
  225. // calculateDeviceStats calculates device statistics from facets
  226. func (s *service) calculateDeviceStats(result *searcher.SearchResult) []DeviceAccessStats {
  227. return calculateTopFieldStats(result.Facets["device_type"], int(result.TotalHits), func(term string, count int, percent float64) DeviceAccessStats {
  228. return DeviceAccessStats{Device: term, Count: count, Percent: percent}
  229. })
  230. }
  231. // calculateTopFieldStats is a generic function to calculate top N items from a facet result.
  232. func calculateTopFieldStats[T any](
  233. facet *searcher.Facet,
  234. totalHits int,
  235. creator func(term string, count int, percent float64) T,
  236. ) []T {
  237. if facet == nil || totalHits == 0 {
  238. return []T{}
  239. }
  240. var items []T
  241. for _, term := range facet.Terms {
  242. percent := float64(term.Count) / float64(totalHits) * 100
  243. items = append(items, creator(term.Term, term.Count, percent))
  244. }
  245. return items
  246. }
  247. // calculateDashboardSummary calculates summary statistics
  248. func (s *service) calculateDashboardSummary(analytics *DashboardAnalytics, result *searcher.SearchResult) DashboardSummary {
  249. // Calculate total UV from IP facet, which is now reliable.
  250. totalUV := 0
  251. if result.Facets != nil {
  252. if ipFacet, ok := result.Facets["ip"]; ok {
  253. // The total number of unique terms in the facet is the UV count.
  254. totalUV = ipFacet.Total
  255. }
  256. }
  257. totalPV := int(result.TotalHits)
  258. // Calculate average daily UV and PV
  259. var avgDailyUV, avgDailyPV float64
  260. if len(analytics.DailyStats) > 0 {
  261. var sumPV int
  262. for _, daily := range analytics.DailyStats {
  263. sumPV += daily.PV
  264. }
  265. // Use total unique visitors divided by number of days for accurate daily UV average
  266. // The totalUV represents unique visitors across the entire period, not sum of daily UVs
  267. avgDailyUV = float64(totalUV) / float64(len(analytics.DailyStats))
  268. avgDailyPV = float64(sumPV) / float64(len(analytics.DailyStats))
  269. }
  270. // Find peak hour
  271. var peakHour, peakHourTraffic int
  272. for _, hourly := range analytics.HourlyStats {
  273. if hourly.PV > peakHourTraffic {
  274. peakHour = hourly.Hour
  275. peakHourTraffic = hourly.PV
  276. }
  277. }
  278. return DashboardSummary{
  279. TotalUV: totalUV,
  280. TotalPV: totalPV,
  281. AvgDailyUV: avgDailyUV,
  282. AvgDailyPV: avgDailyPV,
  283. PeakHour: peakHour,
  284. PeakHourTraffic: peakHourTraffic,
  285. }
  286. }
// calculateDashboardSummaryWithCardinality calculates enhanced summary statistics using cardinality counters.
//
// It starts from the facet-based summary produced by calculateDashboardSummary
// and, when a cardinality counter is available, replaces the facet-limited
// TotalUV (capped by FacetSize) with an exact unique-IP count over the same
// time range and log paths, recomputing AvgDailyUV to match. A second
// cardinality query on path_exact is used purely as a consistency check and
// is only logged, never stored. All cardinality failures are logged and the
// facet-based values are kept as the fallback.
func (s *service) calculateDashboardSummaryWithCardinality(ctx context.Context, analytics *DashboardAnalytics, result *searcher.SearchResult, req *DashboardQueryRequest) DashboardSummary {
	// Start with the basic summary but we'll override the UV calculation
	summary := s.calculateDashboardSummary(analytics, result)
	// Use cardinality counter for accurate unique visitor (UV) counting if available
	cardinalityCounter := s.getCardinalityCounter()
	if cardinalityCounter != nil {
		// Count unique IPs (visitors) using cardinality counter instead of limited facet
		uvCardReq := &searcher.CardinalityRequest{
			Field:     "ip",
			StartTime: &req.StartTime,
			EndTime:   &req.EndTime,
			LogPaths:  req.LogPaths,
		}
		if uvResult, err := cardinalityCounter.CountCardinality(ctx, uvCardReq); err == nil {
			// Override the facet-limited UV count with accurate cardinality count
			// NOTE(review): int() narrowing assumes cardinality fits in int —
			// safe on 64-bit platforms for realistic visitor counts.
			summary.TotalUV = int(uvResult.Cardinality)
			// Recalculate average daily UV with accurate count
			if len(analytics.DailyStats) > 0 {
				summary.AvgDailyUV = float64(summary.TotalUV) / float64(len(analytics.DailyStats))
			}
			// Log the improvement - handle case where IP facet might not exist
			facetUV := "N/A"
			if result.Facets != nil && result.Facets["ip"] != nil {
				facetUV = fmt.Sprintf("%d", result.Facets["ip"].Total)
			}
			logger.Infof("✓ Accurate UV count using CardinalityCounter: %d (was limited to %s by facet)",
				uvResult.Cardinality, facetUV)
		} else {
			// Keep the facet-based UV from calculateDashboardSummary on failure.
			logger.Errorf("Failed to count unique visitors with cardinality counter: %v", err)
		}
		// Also count unique pages for additional insights
		pageCardReq := &searcher.CardinalityRequest{
			Field:     "path_exact",
			StartTime: &req.StartTime,
			EndTime:   &req.EndTime,
			LogPaths:  req.LogPaths,
		}
		if pageResult, err := cardinalityCounter.CountCardinality(ctx, pageCardReq); err == nil {
			logger.Debugf("Accurate unique pages count: %d (vs Total PV: %d)", pageResult.Cardinality, summary.TotalPV)
			// Sanity check: unique pages can never legitimately exceed total
			// page views; a violation points at index/counter inconsistency.
			if pageResult.Cardinality <= uint64(summary.TotalPV) {
				logger.Infof("✓ Unique pages (%d) ≤ Total PV (%d) - data consistency verified", pageResult.Cardinality, summary.TotalPV)
			} else {
				logger.Warnf("⚠ Unique pages (%d) > Total PV (%d) - possible data inconsistency", pageResult.Cardinality, summary.TotalPV)
			}
		} else {
			logger.Errorf("Failed to count unique pages: %v", err)
		}
	} else {
		logger.Warnf("CardinalityCounter not available, UV count limited by facet size to %d", summary.TotalUV)
	}
	return summary
}
  340. // calculateDailyStatsWithBatch calculates daily statistics by fetching data in batches
  341. func (s *service) calculateDailyStatsWithBatch(ctx context.Context, req *DashboardQueryRequest) []DailyAccessStats {
  342. dailyMap := make(map[string]*DailyAccessStats)
  343. uniqueIPsPerDay := make(map[string]map[string]bool)
  344. // Initialize daily buckets for the entire time range
  345. start := time.Unix(req.StartTime, 0)
  346. end := time.Unix(req.EndTime, 0)
  347. for t := start; t.Before(end) || t.Equal(end); t = t.AddDate(0, 0, 1) {
  348. dateStr := t.Format("2006-01-02")
  349. if _, exists := dailyMap[dateStr]; !exists {
  350. dailyMap[dateStr] = &DailyAccessStats{
  351. Date: dateStr,
  352. UV: 0,
  353. PV: 0,
  354. Timestamp: t.Unix(),
  355. }
  356. uniqueIPsPerDay[dateStr] = make(map[string]bool)
  357. }
  358. }
  359. // Process data in batches to avoid memory issues
  360. batchSize := 10000
  361. offset := 0
  362. for {
  363. searchReq := &searcher.SearchRequest{
  364. StartTime: &req.StartTime,
  365. EndTime: &req.EndTime,
  366. LogPaths: req.LogPaths,
  367. Limit: batchSize,
  368. Offset: offset,
  369. Fields: []string{"timestamp", "ip"},
  370. UseCache: false, // Don't cache intermediate results
  371. }
  372. result, err := s.searcher.Search(ctx, searchReq)
  373. if err != nil {
  374. logger.Errorf("Failed to fetch batch at offset %d: %v", offset, err)
  375. break
  376. }
  377. // Process this batch of results
  378. for _, hit := range result.Hits {
  379. if timestampField, ok := hit.Fields["timestamp"]; ok {
  380. if timestampFloat, ok := timestampField.(float64); ok {
  381. timestamp := int64(timestampFloat)
  382. t := time.Unix(timestamp, 0)
  383. dateStr := t.Format("2006-01-02")
  384. if stats, exists := dailyMap[dateStr]; exists {
  385. stats.PV++
  386. if ipField, ok := hit.Fields["ip"]; ok {
  387. if ip, ok := ipField.(string); ok && ip != "" {
  388. if !uniqueIPsPerDay[dateStr][ip] {
  389. uniqueIPsPerDay[dateStr][ip] = true
  390. stats.UV++
  391. }
  392. }
  393. }
  394. }
  395. }
  396. }
  397. }
  398. // Check if we've processed all results
  399. if len(result.Hits) < batchSize {
  400. break
  401. }
  402. offset += batchSize
  403. // Log progress
  404. logger.Debugf("Processed %d/%d records for daily stats", offset, result.TotalHits)
  405. }
  406. // Convert to slice and sort
  407. var stats []DailyAccessStats
  408. for _, stat := range dailyMap {
  409. stats = append(stats, *stat)
  410. }
  411. sort.Slice(stats, func(i, j int) bool {
  412. return stats[i].Timestamp < stats[j].Timestamp
  413. })
  414. return stats
  415. }
  416. // calculateHourlyStatsWithBatch calculates hourly statistics by fetching data in batches
  417. func (s *service) calculateHourlyStatsWithBatch(ctx context.Context, req *DashboardQueryRequest) []HourlyAccessStats {
  418. // Use a map with timestamp as key for easier processing
  419. hourlyMap := make(map[int64]*HourlyAccessStats)
  420. uniqueIPsPerHour := make(map[int64]map[string]bool)
  421. // Calculate 48 hours range: from UTC end_date minus 12 hours to plus 36 hours
  422. // This covers UTC-12 to UTC+14 timezones
  423. endDate := time.Unix(req.EndTime, 0).UTC()
  424. endDateStart := time.Date(endDate.Year(), endDate.Month(), endDate.Day(), 0, 0, 0, 0, time.UTC)
  425. // Create hourly buckets for 48 hours (12 hours before to 36 hours after the UTC date boundary)
  426. rangeStart := endDateStart.Add(-12 * time.Hour)
  427. rangeEnd := endDateStart.Add(36 * time.Hour)
  428. // Initialize hourly buckets
  429. for t := rangeStart; t.Before(rangeEnd); t = t.Add(time.Hour) {
  430. timestamp := t.Unix()
  431. hourlyMap[timestamp] = &HourlyAccessStats{
  432. Hour: t.Hour(),
  433. UV: 0,
  434. PV: 0,
  435. Timestamp: timestamp,
  436. }
  437. uniqueIPsPerHour[timestamp] = make(map[string]bool)
  438. }
  439. // Process data in batches
  440. batchSize := 10000
  441. offset := 0
  442. // Adjust time range for hourly query
  443. hourlyStartTime := rangeStart.Unix()
  444. hourlyEndTime := rangeEnd.Unix()
  445. for {
  446. searchReq := &searcher.SearchRequest{
  447. StartTime: &hourlyStartTime,
  448. EndTime: &hourlyEndTime,
  449. LogPaths: req.LogPaths,
  450. Limit: batchSize,
  451. Offset: offset,
  452. Fields: []string{"timestamp", "ip"},
  453. UseCache: false,
  454. }
  455. result, err := s.searcher.Search(ctx, searchReq)
  456. if err != nil {
  457. logger.Errorf("Failed to fetch batch at offset %d: %v", offset, err)
  458. break
  459. }
  460. // Process this batch of results
  461. for _, hit := range result.Hits {
  462. if timestampField, ok := hit.Fields["timestamp"]; ok {
  463. if timestampFloat, ok := timestampField.(float64); ok {
  464. timestamp := int64(timestampFloat)
  465. // Round down to the hour
  466. t := time.Unix(timestamp, 0).UTC()
  467. hourTimestamp := time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, time.UTC).Unix()
  468. if stats, exists := hourlyMap[hourTimestamp]; exists {
  469. stats.PV++
  470. if ipField, ok := hit.Fields["ip"]; ok {
  471. if ip, ok := ipField.(string); ok && ip != "" {
  472. if !uniqueIPsPerHour[hourTimestamp][ip] {
  473. uniqueIPsPerHour[hourTimestamp][ip] = true
  474. stats.UV++
  475. }
  476. }
  477. }
  478. }
  479. }
  480. }
  481. }
  482. // Check if we've processed all results
  483. if len(result.Hits) < batchSize {
  484. break
  485. }
  486. offset += batchSize
  487. // Log progress
  488. logger.Debugf("Processed %d/%d records for hourly stats", offset, result.TotalHits)
  489. }
  490. // Convert to slice and sort by timestamp
  491. var stats []HourlyAccessStats
  492. for _, stat := range hourlyMap {
  493. stats = append(stats, *stat)
  494. }
  495. sort.Slice(stats, func(i, j int) bool {
  496. return stats[i].Timestamp < stats[j].Timestamp
  497. })
  498. return stats
  499. }