// optimized_time_series.go
package analytics

import (
    "context"
    "fmt"
    "math"
    "sort"
    "sync"
    "time"
)
// SearchRequest and related types from the searcher package (simplified).
type SearchRequest struct {
    StartTime     *int64   `json:"start_time,omitempty"`
    EndTime       *int64   `json:"end_time,omitempty"`
    LogPaths      []string `json:"log_paths,omitempty"`
    Limit         int      `json:"limit"`
    IncludeFacets bool     `json:"include_facets,omitempty"`
    IncludeStats  bool     `json:"include_stats,omitempty"`
    UseCache      bool     `json:"use_cache,omitempty"`
}

type SearchResult struct {
    Hits      []*SearchHit `json:"hits"`
    TotalHits uint64       `json:"total_hits"`
    Stats     *SearchStats `json:"stats,omitempty"`
}

type SearchHit struct {
    Fields map[string]interface{} `json:"fields"`
}

type SearchStats struct {
    TotalBytes int64 `json:"total_bytes"`
}

type AggregationRequest struct{}
type AggregationResult struct{}
type Suggestion struct{}

// Searcher interface (simplified).
type Searcher interface {
    Search(ctx context.Context, req *SearchRequest) (*SearchResult, error)
    Aggregate(ctx context.Context, req *AggregationRequest) (*AggregationResult, error)
    Suggest(ctx context.Context, text string, field string, size int) ([]*Suggestion, error)
    Analyze(ctx context.Context, text string, analyzer string) ([]string, error)
    ClearCache() error
}
// OptimizedTimeSeriesProcessor provides high-performance time-series analytics.
type OptimizedTimeSeriesProcessor struct {
    bucketPools map[int64]*BucketPool
    visitorSets map[int64]*VisitorSetPool
    resultCache *TimeSeriesCache
    mutex       sync.RWMutex
}

// NewOptimizedTimeSeriesProcessor creates a new optimized processor.
func NewOptimizedTimeSeriesProcessor() *OptimizedTimeSeriesProcessor {
    return &OptimizedTimeSeriesProcessor{
        bucketPools: make(map[int64]*BucketPool),
        visitorSets: make(map[int64]*VisitorSetPool),
        resultCache: NewTimeSeriesCache(1000, 1800), // 1000 entries, 30-minute TTL
    }
}
// BucketPool provides pooled time-bucket maps for aggregation.
type BucketPool struct {
    buckets sync.Pool
}

// NewBucketPool creates a new bucket pool.
func NewBucketPool() *BucketPool {
    return &BucketPool{
        buckets: sync.Pool{
            New: func() interface{} {
                return make(map[int64]*TimeBucket, 1000)
            },
        },
    }
}

// Get retrieves a bucket map from the pool.
func (bp *BucketPool) Get() map[int64]*TimeBucket {
    return bp.buckets.Get().(map[int64]*TimeBucket)
}

// Put clears a bucket map and returns it to the pool.
func (bp *BucketPool) Put(buckets map[int64]*TimeBucket) {
    for k := range buckets {
        delete(buckets, k)
    }
    bp.buckets.Put(buckets)
}
// TimeBucket represents an optimized time bucket for aggregation.
type TimeBucket struct {
    Timestamp        int64
    RequestCount     int64
    BytesTransferred int64
    UniqueVisitors   map[string]struct{} // struct{} values make this a zero-byte set
    StatusCodes      map[int]int64
    Methods          map[string]int64
    Paths            map[string]int64
}

// NewTimeBucket creates a new time bucket with pre-sized maps.
func NewTimeBucket(timestamp int64) *TimeBucket {
    return &TimeBucket{
        Timestamp:      timestamp,
        UniqueVisitors: make(map[string]struct{}, 100),
        StatusCodes:    make(map[int]int64, 10),
        Methods:        make(map[string]int64, 5),
        Paths:          make(map[string]int64, 20),
    }
}

// AddEntry records one log entry in the bucket.
func (tb *TimeBucket) AddEntry(ip string, status int, method string, path string, bytes int64) {
    tb.RequestCount++
    tb.BytesTransferred += bytes
    tb.UniqueVisitors[ip] = struct{}{}
    tb.StatusCodes[status]++
    tb.Methods[method]++
    tb.Paths[path]++
}

// GetUniqueVisitorCount returns the count of unique visitors.
func (tb *TimeBucket) GetUniqueVisitorCount() int {
    return len(tb.UniqueVisitors)
}
// VisitorSetPool provides pooled visitor sets.
type VisitorSetPool struct {
    sets sync.Pool
}

// NewVisitorSetPool creates a new visitor set pool.
func NewVisitorSetPool() *VisitorSetPool {
    return &VisitorSetPool{
        sets: sync.Pool{
            New: func() interface{} {
                return make(map[string]struct{}, 1000)
            },
        },
    }
}

// Get retrieves a visitor set from the pool.
func (vsp *VisitorSetPool) Get() map[string]struct{} {
    return vsp.sets.Get().(map[string]struct{})
}

// Put clears a visitor set and returns it to the pool.
func (vsp *VisitorSetPool) Put(set map[string]struct{}) {
    for k := range set {
        delete(set, k)
    }
    vsp.sets.Put(set)
}
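
// The visitorSets pool is initialized by the processor but not yet wired into
// the query paths below. A minimal sketch (an assumption about intended use,
// not original behavior) of the borrow/return pattern:
func exampleVisitorSetUsage(pool *VisitorSetPool, ips []string) int {
    set := pool.Get()
    defer pool.Put(set) // cleared and recycled for the next caller
    for _, ip := range ips {
        set[ip] = struct{}{}
    }
    return len(set)
}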
// TimeSeriesCache provides TTL-based caching for time-series results.
type TimeSeriesCache struct {
    cache      map[string]*CachedTimeSeriesResult
    maxSize    int
    ttlSeconds int64
    mutex      sync.RWMutex
}

// CachedTimeSeriesResult represents a cached time-series result.
type CachedTimeSeriesResult struct {
    Data        interface{}
    Timestamp   int64
    AccessCount int64
}

// NewTimeSeriesCache creates a new time-series cache.
func NewTimeSeriesCache(maxSize int, ttlSeconds int64) *TimeSeriesCache {
    return &TimeSeriesCache{
        cache:      make(map[string]*CachedTimeSeriesResult),
        maxSize:    maxSize,
        ttlSeconds: ttlSeconds,
    }
}
// Get retrieves a cached result, expiring entries past their TTL.
func (tsc *TimeSeriesCache) Get(key string) (interface{}, bool) {
    tsc.mutex.RLock()
    result, exists := tsc.cache[key]
    tsc.mutex.RUnlock()
    if !exists {
        return nil, false
    }
    // Check TTL.
    if getCurrentTimestamp()-result.Timestamp > tsc.ttlSeconds {
        tsc.Delete(key)
        return nil, false
    }
    // Update the access count under the write lock.
    tsc.mutex.Lock()
    result.AccessCount++
    tsc.mutex.Unlock()
    return result.Data, true
}
// Put stores a result in the cache, evicting the oldest entry at capacity.
func (tsc *TimeSeriesCache) Put(key string, data interface{}) {
    tsc.mutex.Lock()
    defer tsc.mutex.Unlock()
    if len(tsc.cache) >= tsc.maxSize {
        tsc.evictLRU()
    }
    tsc.cache[key] = &CachedTimeSeriesResult{
        Data:        data,
        Timestamp:   getCurrentTimestamp(),
        AccessCount: 1,
    }
}

// Delete removes a cached result.
func (tsc *TimeSeriesCache) Delete(key string) {
    tsc.mutex.Lock()
    defer tsc.mutex.Unlock()
    delete(tsc.cache, key)
}
// evictLRU evicts the entry with the oldest insertion timestamp. This is an
// approximation of LRU: access counts are tracked but insertion time decides.
func (tsc *TimeSeriesCache) evictLRU() {
    var lruKey string
    var lruTimestamp int64 = math.MaxInt64
    for key, result := range tsc.cache {
        if result.Timestamp < lruTimestamp {
            lruTimestamp = result.Timestamp
            lruKey = key
        }
    }
    if lruKey != "" {
        delete(tsc.cache, lruKey)
    }
}
// getCurrentTimestamp returns the current Unix timestamp.
func getCurrentTimestamp() int64 {
    return time.Now().Unix()
}
// OptimizedGetVisitorsByTime computes unique visitors per time bucket.
func (otsp *OptimizedTimeSeriesProcessor) OptimizedGetVisitorsByTime(
    ctx context.Context,
    req *VisitorsByTimeRequest,
    s Searcher,
) (*VisitorsByTime, error) {
    // Check the result cache first.
    cacheKey := generateCacheKey("visitors_by_time", req)
    if cached, found := otsp.resultCache.Get(cacheKey); found {
        return cached.(*VisitorsByTime), nil
    }
    // Prepare the search request.
    searchReq := &SearchRequest{
        StartTime:     &req.StartTime,
        EndTime:       &req.EndTime,
        LogPaths:      req.LogPaths,
        Limit:         0,
        IncludeFacets: false,
        UseCache:      true,
    }
    result, err := s.Search(ctx, searchReq)
    if err != nil {
        return nil, err
    }
    interval := int64(req.IntervalSeconds)
    if interval <= 0 {
        interval = 60 // default: 1 minute
    }
    // Borrow a pooled bucket map for this interval.
    bucketPool := otsp.getBucketPool(interval)
    buckets := bucketPool.Get()
    defer bucketPool.Put(buckets)
    // Bucket each hit by truncated timestamp and record its IP.
    for _, hit := range result.Hits {
        if timestampField, ok := hit.Fields["timestamp"]; ok {
            if timestampFloat, ok := timestampField.(float64); ok {
                timestamp := int64(timestampFloat)
                bucketTime := (timestamp / interval) * interval
                bucket := buckets[bucketTime]
                if bucket == nil {
                    bucket = NewTimeBucket(bucketTime)
                    buckets[bucketTime] = bucket
                }
                if ip, ok := hit.Fields["ip"].(string); ok {
                    bucket.UniqueVisitors[ip] = struct{}{}
                }
            }
        }
    }
    // Convert buckets to a sorted series.
    visitorsByTime := make([]TimeValue, 0, len(buckets))
    for _, bucket := range buckets {
        visitorsByTime = append(visitorsByTime, TimeValue{
            Timestamp: bucket.Timestamp,
            Value:     len(bucket.UniqueVisitors),
        })
    }
    sort.Slice(visitorsByTime, func(i, j int) bool {
        return visitorsByTime[i].Timestamp < visitorsByTime[j].Timestamp
    })
    resultData := &VisitorsByTime{Data: visitorsByTime}
    // Cache the result.
    otsp.resultCache.Put(cacheKey, resultData)
    return resultData, nil
}
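
// The sketch below drives the processor end to end with a stub Searcher.
// The stub and its sample hits are hypothetical, for illustration only.
type stubSearcher struct {
    hits []*SearchHit
}

func (s *stubSearcher) Search(ctx context.Context, req *SearchRequest) (*SearchResult, error) {
    return &SearchResult{Hits: s.hits, TotalHits: uint64(len(s.hits))}, nil
}
func (s *stubSearcher) Aggregate(ctx context.Context, req *AggregationRequest) (*AggregationResult, error) {
    return &AggregationResult{}, nil
}
func (s *stubSearcher) Suggest(ctx context.Context, text, field string, size int) ([]*Suggestion, error) {
    return nil, nil
}
func (s *stubSearcher) Analyze(ctx context.Context, text, analyzer string) ([]string, error) {
    return nil, nil
}
func (s *stubSearcher) ClearCache() error { return nil }

func exampleVisitorsByTime() (*VisitorsByTime, error) {
    s := &stubSearcher{hits: []*SearchHit{
        {Fields: map[string]interface{}{"timestamp": float64(1000), "ip": "10.0.0.1"}},
        {Fields: map[string]interface{}{"timestamp": float64(1010), "ip": "10.0.0.2"}},
        {Fields: map[string]interface{}{"timestamp": float64(1100), "ip": "10.0.0.1"}},
    }}
    otsp := NewOptimizedTimeSeriesProcessor()
    req := &VisitorsByTimeRequest{StartTime: 900, EndTime: 1200, IntervalSeconds: 60}
    // Yields two 60-second buckets: 2 unique visitors, then 1.
    return otsp.OptimizedGetVisitorsByTime(context.Background(), req, s)
}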
// OptimizedGetTrafficByTime computes request, byte, and visitor metrics per time bucket.
func (otsp *OptimizedTimeSeriesProcessor) OptimizedGetTrafficByTime(
    ctx context.Context,
    req *TrafficByTimeRequest,
    s Searcher,
) (*TrafficByTime, error) {
    // Check the result cache first.
    cacheKey := generateCacheKey("traffic_by_time", req)
    if cached, found := otsp.resultCache.Get(cacheKey); found {
        return cached.(*TrafficByTime), nil
    }
    searchReq := &SearchRequest{
        StartTime:    &req.StartTime,
        EndTime:      &req.EndTime,
        LogPaths:     req.LogPaths,
        Limit:        0,
        IncludeStats: true,
        UseCache:     true,
    }
    result, err := s.Search(ctx, searchReq)
    if err != nil {
        return nil, err
    }
    interval := int64(req.IntervalSeconds)
    if interval <= 0 {
        interval = 300 // default: 5 minutes
    }
    // Borrow a pooled bucket map for this interval.
    bucketPool := otsp.getBucketPool(interval)
    buckets := bucketPool.Get()
    defer bucketPool.Put(buckets)
    // Bucket each hit and accumulate its metrics.
    for _, hit := range result.Hits {
        if timestampField, ok := hit.Fields["timestamp"]; ok {
            if timestampFloat, ok := timestampField.(float64); ok {
                timestamp := int64(timestampFloat)
                bucketTime := (timestamp / interval) * interval
                bucket := buckets[bucketTime]
                if bucket == nil {
                    bucket = NewTimeBucket(bucketTime)
                    buckets[bucketTime] = bucket
                }
                // Extract fields with type assertions; missing fields stay zero-valued.
                var ip, method, path string
                var status int
                var bytes int64
                if v, ok := hit.Fields["ip"].(string); ok {
                    ip = v
                }
                if v, ok := hit.Fields["method"].(string); ok {
                    method = v
                }
                if v, ok := hit.Fields["path"].(string); ok {
                    path = v
                }
                if v, ok := hit.Fields["status"].(float64); ok {
                    status = int(v)
                }
                if v, ok := hit.Fields["bytes_sent"].(float64); ok {
                    bytes = int64(v)
                }
                bucket.AddEntry(ip, status, method, path, bytes)
            }
        }
    }
    // Convert buckets to a sorted series.
    trafficData := make([]TrafficTimeValue, 0, len(buckets))
    for _, bucket := range buckets {
        trafficData = append(trafficData, TrafficTimeValue{
            Timestamp:      bucket.Timestamp,
            Requests:       bucket.RequestCount,
            Bytes:          bucket.BytesTransferred,
            UniqueVisitors: len(bucket.UniqueVisitors),
        })
    }
    sort.Slice(trafficData, func(i, j int) bool {
        return trafficData[i].Timestamp < trafficData[j].Timestamp
    })
    resultData := &TrafficByTime{Data: trafficData}
    // Cache the result.
    otsp.resultCache.Put(cacheKey, resultData)
    return resultData, nil
}
// HyperLogLog provides cardinality estimation for unique visitors.
type HyperLogLog struct {
    buckets []uint8
    b       uint8  // number of bits used for the bucket index
    m       uint32 // number of buckets (2^b)
}

// NewHyperLogLog creates a new HyperLogLog counter. Typical precisions are
// 4-16; higher precision means more buckets and lower estimation error.
func NewHyperLogLog(precision uint8) *HyperLogLog {
    b := precision
    m := uint32(1) << b
    return &HyperLogLog{
        buckets: make([]uint8, m),
        b:       b,
        m:       m,
    }
}

// Add adds a value to the HyperLogLog.
func (hll *HyperLogLog) Add(value string) {
    hash := hashString(value)
    j := hash >> (32 - hll.b) // first b bits select the bucket
    w := hash << hll.b        // remaining 32-b bits, left-aligned
    // rho(w): position of the leftmost 1-bit, capped at 32-b+1 (the cap only
    // applies when all payload bits are zero).
    lz := countLeadingZeros(w) + 1
    if lz > uint8(32-hll.b)+1 {
        lz = uint8(32-hll.b) + 1
    }
    if lz > hll.buckets[j] {
        hll.buckets[j] = lz
    }
}
// Count estimates the cardinality.
func (hll *HyperLogLog) Count() uint64 {
    // Compute m*m in float64: the uint32 product overflows at precision 16.
    rawEstimate := hll.alpha() * float64(hll.m) * float64(hll.m) / hll.sumOfPowers()
    if rawEstimate <= 2.5*float64(hll.m) {
        // Small-range correction: linear counting over empty buckets.
        zeros := 0
        for _, bucket := range hll.buckets {
            if bucket == 0 {
                zeros++
            }
        }
        if zeros != 0 {
            return uint64(float64(hll.m) * logValue(float64(hll.m)/float64(zeros)))
        }
    }
    return uint64(rawEstimate)
}
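
// A minimal usage sketch: estimate unique IPs without storing them. With
// precision 10 (1024 buckets) the standard error is roughly 1.04/sqrt(1024),
// i.e. about 3%. The function name and precision are illustrative choices.
func exampleHLLCount(ips []string) uint64 {
    hll := NewHyperLogLog(10)
    for _, ip := range ips {
        hll.Add(ip)
    }
    return hll.Count()
}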
// alpha returns the bias-correction constant for the bucket count.
func (hll *HyperLogLog) alpha() float64 {
    switch hll.m {
    case 16:
        return 0.673
    case 32:
        return 0.697
    case 64:
        return 0.709
    default:
        return 0.7213 / (1.0 + 1.079/float64(hll.m))
    }
}

// sumOfPowers returns the sum of 2^-bucket over all buckets.
func (hll *HyperLogLog) sumOfPowers() float64 {
    sum := 0.0
    for _, bucket := range hll.buckets {
        sum += 1.0 / float64(uint32(1)<<bucket)
    }
    return sum
}
// hashString computes a 32-bit FNV-1a hash of s (equivalent to hash/fnv's New32a).
func hashString(s string) uint32 {
    var hash uint32 = 2166136261
    for i := 0; i < len(s); i++ {
        hash ^= uint32(s[i])
        hash *= 16777619
    }
    return hash
}
// countLeadingZeros counts leading zero bits in a 32-bit integer
// (equivalent to math/bits.LeadingZeros32).
func countLeadingZeros(x uint32) uint8 {
    if x == 0 {
        return 32
    }
    n := uint8(0)
    if x <= 0x0000FFFF {
        n += 16
        x <<= 16
    }
    if x <= 0x00FFFFFF {
        n += 8
        x <<= 8
    }
    if x <= 0x0FFFFFFF {
        n += 4
        x <<= 4
    }
    if x <= 0x3FFFFFFF {
        n += 2
        x <<= 2
    }
    if x <= 0x7FFFFFFF {
        n++
    }
    return n
}
// logValue returns the natural logarithm, used by the HLL small-range correction.
func logValue(x float64) float64 {
    if x <= 0 {
        return 0
    }
    return math.Log(x)
}
// getBucketPool gets or creates a bucket pool for the given interval.
func (otsp *OptimizedTimeSeriesProcessor) getBucketPool(interval int64) *BucketPool {
    otsp.mutex.RLock()
    pool, exists := otsp.bucketPools[interval]
    otsp.mutex.RUnlock()
    if !exists {
        otsp.mutex.Lock()
        // Double-check after acquiring the write lock.
        if pool, exists = otsp.bucketPools[interval]; !exists {
            pool = NewBucketPool()
            otsp.bucketPools[interval] = pool
        }
        otsp.mutex.Unlock()
    }
    return pool
}
// generateCacheKey derives a cache key from the request parameters. fmt's
// value formatting is enough to distinguish requests here; production code
// might hash instead.
func generateCacheKey(prefix string, req interface{}) string {
    return fmt.Sprintf("%s|%+v", prefix, req)
}
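
// A compact variant (a sketch, not original behavior) that hashes the
// formatted request with the FNV helper defined above, keeping keys short:
func generateHashedCacheKey(prefix string, req interface{}) string {
    return fmt.Sprintf("%s_%08x", prefix, hashString(fmt.Sprintf("%+v", req)))
}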
// Request/result types used by the processors above; the visitor types mirror
// the traffic types.
type VisitorsByTimeRequest struct {
    StartTime       int64
    EndTime         int64
    LogPaths        []string
    IntervalSeconds int
}

type VisitorsByTime struct {
    Data []TimeValue `json:"data"`
}

type TimeValue struct {
    Timestamp int64 `json:"timestamp"`
    Value     int   `json:"value"`
}

type TrafficByTimeRequest struct {
    StartTime       int64
    EndTime         int64
    LogPaths        []string
    IntervalSeconds int
}

type TrafficByTime struct {
    Data []TrafficTimeValue `json:"data"`
}

type TrafficTimeValue struct {
    Timestamp      int64 `json:"timestamp"`
    Requests       int64 `json:"requests"`
    Bytes          int64 `json:"bytes"`
    UniqueVisitors int   `json:"unique_visitors"`
}
// AdvancedTimeSeriesProcessor layers anomaly detection and trend analysis on
// top of the optimized processor.
type AdvancedTimeSeriesProcessor struct {
    *OptimizedTimeSeriesProcessor
    anomalyThreshold float64
    trendWindow      int
}

// NewAdvancedTimeSeriesProcessor creates an advanced processor.
func NewAdvancedTimeSeriesProcessor() *AdvancedTimeSeriesProcessor {
    return &AdvancedTimeSeriesProcessor{
        OptimizedTimeSeriesProcessor: NewOptimizedTimeSeriesProcessor(),
        anomalyThreshold:             2.0, // flag beyond 2 standard deviations
        trendWindow:                  10,  // 10 data points for trend
    }
}
// DetectAnomalies flags points that deviate from a trailing moving window by
// more than anomalyThreshold standard deviations.
func (atsp *AdvancedTimeSeriesProcessor) DetectAnomalies(data []TimeValue) []AnomalyPoint {
    const windowSize = 5
    if len(data) <= windowSize {
        return nil
    }
    anomalies := make([]AnomalyPoint, 0)
    for i := windowSize; i < len(data); i++ {
        // Mean and variance over the trailing window.
        sum, sumSq := 0.0, 0.0
        for j := i - windowSize; j < i; j++ {
            val := float64(data[j].Value)
            sum += val
            sumSq += val * val
        }
        mean := sum / float64(windowSize)
        variance := (sumSq / float64(windowSize)) - (mean * mean)
        if variance < 0 {
            variance = 0 // guard against floating-point rounding
        }
        stdDev := math.Sqrt(variance)
        // Flag the current value if it deviates beyond the threshold.
        deviation := math.Abs(float64(data[i].Value) - mean)
        if stdDev > 0 && deviation > atsp.anomalyThreshold*stdDev {
            anomalies = append(anomalies, AnomalyPoint{
                Timestamp: data[i].Timestamp,
                Value:     data[i].Value,
                Expected:  int(mean),
                Deviation: deviation / stdDev,
            })
        }
    }
    return anomalies
}
// AnomalyPoint represents a detected anomaly.
type AnomalyPoint struct {
    Timestamp int64   `json:"timestamp"`
    Value     int     `json:"value"`
    Expected  int     `json:"expected"`
    Deviation float64 `json:"deviation"`
}
// CalculateTrend calculates trend direction and strength via simple linear regression.
func (atsp *AdvancedTimeSeriesProcessor) CalculateTrend(data []TimeValue) TrendAnalysis {
    if len(data) < 2 {
        return TrendAnalysis{Direction: "insufficient_data"}
    }
    n := float64(len(data))
    sumX, sumY, sumXY, sumXX := 0.0, 0.0, 0.0, 0.0
    for i, point := range data {
        x := float64(i)
        y := float64(point.Value)
        sumX += x
        sumY += y
        sumXY += x * y
        sumXX += x * x
    }
    // Least-squares slope.
    slope := (n*sumXY - sumX*sumY) / (n*sumXX - sumX*sumX)
    direction := "stable"
    if slope > 0.1 {
        direction = "increasing"
    } else if slope < -0.1 {
        direction = "decreasing"
    }
    // Trend strength: a simplified R-squared-like score, normalized to [0, 1).
    strength := slope * slope / (slope*slope + 1)
    return TrendAnalysis{
        Direction: direction,
        Strength:  strength,
        Slope:     slope,
    }
}
// TrendAnalysis represents trend analysis results.
type TrendAnalysis struct {
    Direction string  `json:"direction"`
    Strength  float64 `json:"strength"`
    Slope     float64 `json:"slope"`
}
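
// A small end-to-end sketch of the advanced helpers on synthetic data; the
// values and the injected spike are hypothetical, for illustration only.
func exampleTrendAndAnomalies() (TrendAnalysis, []AnomalyPoint) {
    data := make([]TimeValue, 0, 12)
    for i := 0; i < 12; i++ {
        v := 100 + 2*i // gently increasing baseline
        if i == 8 {
            v = 500 // injected spike that DetectAnomalies should flag
        }
        data = append(data, TimeValue{Timestamp: int64(i * 60), Value: v})
    }
    atsp := NewAdvancedTimeSeriesProcessor()
    return atsp.CalculateTrend(data), atsp.DetectAnomalies(data)
}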