parallel_indexer.go 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279
  1. package indexer
  2. import (
  3. "bufio"
  4. "compress/gzip"
  5. "context"
  6. "fmt"
  7. "io"
  8. "os"
  9. "path/filepath"
  10. "runtime"
  11. "strings"
  12. "sync"
  13. "sync/atomic"
  14. "time"
  15. "github.com/blevesearch/bleve/v2"
  16. "github.com/uozi-tech/cosy/logger"
  17. "github.com/0xJacky/Nginx-UI/internal/nginx_log/utils"
  18. )
// ParallelIndexer provides high-performance parallel indexing with sharding.
// Jobs are submitted to jobQueue, processed by a fixed pool of workers, and
// their results are drained from resultQueue by processResults.
type ParallelIndexer struct {
	config       *Config
	shardManager ShardManager
	metrics      MetricsCollector

	// Worker management
	workers     []*indexWorker
	jobQueue    chan *IndexJob    // buffered to config.MaxQueueSize; closed during shutdown
	resultQueue chan *IndexResult // buffered to config.WorkerCount; closed after workers exit

	// State management
	ctx     context.Context
	cancel  context.CancelFunc
	wg      sync.WaitGroup
	running int32 // 1 while started, 0 otherwise; accessed atomically

	// Cleanup control
	stopOnce       sync.Once
	channelsClosed int32 // CAS guard so jobQueue/resultQueue are closed at most once

	// Statistics
	stats      *IndexStats
	statsMutex sync.RWMutex // guards stats

	// Optimization
	lastOptimized       int64 // unix seconds of last Optimize run; accessed atomically
	optimizing          int32 // CAS guard: 1 while an Optimize run is in flight
	adaptiveOptimizer   *AdaptiveOptimizer
	zeroAllocProcessor  *ZeroAllocBatchProcessor
	optimizationEnabled bool

	// Dynamic shard awareness
	dynamicAwareness *DynamicShardAwareness
}
// indexWorker represents a single indexing worker. Each worker consumes jobs
// from the parent indexer's jobQueue and reports per-worker statistics.
type indexWorker struct {
	id         int
	indexer    *ParallelIndexer // parent indexer owning the queues and context
	stats      *WorkerStats
	statsMutex sync.RWMutex // guards stats updates from status changes
}
  55. // NewParallelIndexer creates a new parallel indexer with dynamic shard awareness
  56. func NewParallelIndexer(config *Config, shardManager ShardManager) *ParallelIndexer {
  57. if config == nil {
  58. config = DefaultIndexerConfig()
  59. }
  60. ctx, cancel := context.WithCancel(context.Background())
  61. // Initialize dynamic shard awareness
  62. dynamicAwareness := NewDynamicShardAwareness(config)
  63. // If no shard manager provided, use dynamic awareness to detect optimal type
  64. var actualShardManager ShardManager
  65. if shardManager == nil {
  66. detected, err := dynamicAwareness.DetectAndSetupShardManager()
  67. if err != nil {
  68. logger.Warnf("Failed to setup dynamic shard manager, using default: %v", err)
  69. detected = NewDefaultShardManager(config)
  70. detected.(*DefaultShardManager).Initialize()
  71. }
  72. // Type assertion to ShardManager interface
  73. if sm, ok := detected.(ShardManager); ok {
  74. actualShardManager = sm
  75. } else {
  76. // Fallback to default
  77. actualShardManager = NewDefaultShardManager(config)
  78. actualShardManager.(*DefaultShardManager).Initialize()
  79. }
  80. } else {
  81. actualShardManager = shardManager
  82. }
  83. indexer := &ParallelIndexer{
  84. config: config,
  85. shardManager: actualShardManager,
  86. metrics: NewDefaultMetricsCollector(),
  87. jobQueue: make(chan *IndexJob, config.MaxQueueSize),
  88. resultQueue: make(chan *IndexResult, config.WorkerCount),
  89. ctx: ctx,
  90. cancel: cancel,
  91. stats: &IndexStats{
  92. WorkerStats: make([]*WorkerStats, config.WorkerCount),
  93. },
  94. adaptiveOptimizer: NewAdaptiveOptimizer(config),
  95. zeroAllocProcessor: NewZeroAllocBatchProcessor(config),
  96. optimizationEnabled: true, // Enable optimizations by default
  97. dynamicAwareness: dynamicAwareness,
  98. }
  99. // Initialize workers
  100. indexer.workers = make([]*indexWorker, config.WorkerCount)
  101. for i := 0; i < config.WorkerCount; i++ {
  102. indexer.workers[i] = &indexWorker{
  103. id: i,
  104. indexer: indexer,
  105. stats: &WorkerStats{
  106. ID: i,
  107. Status: WorkerStatusIdle,
  108. },
  109. }
  110. indexer.stats.WorkerStats[i] = indexer.workers[i].stats
  111. }
  112. return indexer
  113. }
  114. // Start begins the indexer operation
  115. func (pi *ParallelIndexer) Start(ctx context.Context) error {
  116. if !atomic.CompareAndSwapInt32(&pi.running, 0, 1) {
  117. return fmt.Errorf("indexer not started")
  118. }
  119. // Initialize shard manager
  120. if err := pi.shardManager.Initialize(); err != nil {
  121. atomic.StoreInt32(&pi.running, 0)
  122. return fmt.Errorf("failed to initialize shard manager: %w", err)
  123. }
  124. // Start workers
  125. for _, worker := range pi.workers {
  126. pi.wg.Add(1)
  127. go worker.run()
  128. }
  129. // Start result processor
  130. pi.wg.Add(1)
  131. go pi.processResults()
  132. // Start optimization routine if enabled
  133. if pi.config.OptimizeInterval > 0 {
  134. pi.wg.Add(1)
  135. go pi.optimizationRoutine()
  136. }
  137. // Start metrics collection if enabled
  138. if pi.config.EnableMetrics {
  139. pi.wg.Add(1)
  140. go pi.metricsRoutine()
  141. }
  142. // Start adaptive optimizer if enabled
  143. if pi.optimizationEnabled && pi.adaptiveOptimizer != nil {
  144. if err := pi.adaptiveOptimizer.Start(); err != nil {
  145. logger.Warnf("Failed to start adaptive optimizer: %v", err)
  146. }
  147. }
  148. // Start dynamic shard awareness monitoring if enabled
  149. if pi.dynamicAwareness != nil {
  150. pi.dynamicAwareness.StartMonitoring(ctx)
  151. if pi.dynamicAwareness.IsDynamic() {
  152. logger.Info("Dynamic shard management is active with automatic scaling")
  153. } else {
  154. logger.Info("Static shard management is active")
  155. }
  156. }
  157. return nil
  158. }
// Stop gracefully stops the indexer. It is idempotent via stopOnce.
//
// Shutdown order is deliberate:
//  1. CAS running 1->0 so new submissions are rejected.
//  2. Cancel the context so background routines observe shutdown.
//  3. Stop the adaptive optimizer.
//  4. Close jobQueue (guarded by channelsClosed so it is closed at most once,
//     even if DestroyAllIndexes already closed it), wait for workers, then
//     close resultQueue.
//  5. Close the shard manager, which closes all shards and stops Bleve's
//     internal worker goroutines.
//
// Flushing is intentionally NOT performed here; call FlushAll before Stop.
func (pi *ParallelIndexer) Stop() error {
	var stopErr error
	pi.stopOnce.Do(func() {
		// Set running to 0
		if !atomic.CompareAndSwapInt32(&pi.running, 1, 0) {
			logger.Warnf("[ParallelIndexer] Stop called but indexer already stopped")
			stopErr = fmt.Errorf("indexer already stopped")
			return
		}
		// Cancel context to stop all routines
		pi.cancel()
		// Stop adaptive optimizer
		if pi.adaptiveOptimizer != nil {
			pi.adaptiveOptimizer.Stop()
		}
		// Close channels safely if they haven't been closed yet
		if atomic.CompareAndSwapInt32(&pi.channelsClosed, 0, 1) {
			// Close job queue to stop accepting new jobs
			close(pi.jobQueue)
			// Wait for all workers to finish
			pi.wg.Wait()
			// Close result queue
			close(pi.resultQueue)
		} else {
			// If channels are already closed, just wait for workers
			pi.wg.Wait()
		}
		// Skip flush during stop - shards may already be closed by searcher
		// FlushAll should be called before Stop() if needed
		// Close the shard manager - this will close all shards and stop Bleve worker goroutines
		// This is critical to prevent goroutine leaks from Bleve's internal workers
		if pi.shardManager != nil {
			if err := pi.shardManager.Close(); err != nil {
				logger.Errorf("Failed to close shard manager: %v", err)
				stopErr = err
			}
		}
	})
	return stopErr
}
  200. // IndexDocument indexes a single document
  201. func (pi *ParallelIndexer) IndexDocument(ctx context.Context, doc *Document) error {
  202. return pi.IndexDocuments(ctx, []*Document{doc})
  203. }
  204. // IndexDocuments indexes multiple documents
  205. func (pi *ParallelIndexer) IndexDocuments(ctx context.Context, docs []*Document) error {
  206. if !pi.IsHealthy() {
  207. return fmt.Errorf("indexer not started")
  208. }
  209. if len(docs) == 0 {
  210. return nil
  211. }
  212. // Create job
  213. job := &IndexJob{
  214. Documents: docs,
  215. Priority: PriorityNormal,
  216. }
  217. // Submit job and wait for completion
  218. done := make(chan error, 1)
  219. job.Callback = func(err error) {
  220. done <- err
  221. }
  222. select {
  223. case pi.jobQueue <- job:
  224. select {
  225. case err := <-done:
  226. return err
  227. case <-ctx.Done():
  228. return ctx.Err()
  229. }
  230. case <-ctx.Done():
  231. return ctx.Err()
  232. case <-pi.ctx.Done():
  233. return fmt.Errorf("indexer stopped")
  234. }
  235. }
  236. // IndexDocumentAsync indexes a document asynchronously
  237. func (pi *ParallelIndexer) IndexDocumentAsync(doc *Document, callback func(error)) {
  238. pi.IndexDocumentsAsync([]*Document{doc}, callback)
  239. }
  240. // IndexDocumentsAsync indexes multiple documents asynchronously
  241. func (pi *ParallelIndexer) IndexDocumentsAsync(docs []*Document, callback func(error)) {
  242. if !pi.IsHealthy() {
  243. if callback != nil {
  244. callback(fmt.Errorf("indexer not started"))
  245. }
  246. return
  247. }
  248. if len(docs) == 0 {
  249. if callback != nil {
  250. callback(nil)
  251. }
  252. return
  253. }
  254. job := &IndexJob{
  255. Documents: docs,
  256. Priority: PriorityNormal,
  257. Callback: callback,
  258. }
  259. select {
  260. case pi.jobQueue <- job:
  261. // Job queued successfully
  262. case <-pi.ctx.Done():
  263. if callback != nil {
  264. callback(fmt.Errorf("indexer stopped"))
  265. }
  266. default:
  267. // Queue is full
  268. if callback != nil {
  269. callback(fmt.Errorf("queue is full"))
  270. }
  271. }
  272. }
  273. // StartBatch returns a new batch writer with adaptive batch size
  274. func (pi *ParallelIndexer) StartBatch() BatchWriterInterface {
  275. batchSize := pi.config.BatchSize
  276. if pi.adaptiveOptimizer != nil {
  277. batchSize = pi.adaptiveOptimizer.GetOptimalBatchSize()
  278. }
  279. return NewBatchWriter(pi, batchSize)
  280. }
  281. // GetOptimizationStats returns current optimization statistics
  282. func (pi *ParallelIndexer) GetOptimizationStats() AdaptiveOptimizationStats {
  283. if pi.adaptiveOptimizer != nil {
  284. return pi.adaptiveOptimizer.GetOptimizationStats()
  285. }
  286. return AdaptiveOptimizationStats{}
  287. }
  288. // GetPoolStats returns object pool statistics
  289. func (pi *ParallelIndexer) GetPoolStats() PoolStats {
  290. if pi.zeroAllocProcessor != nil {
  291. return pi.zeroAllocProcessor.GetPoolStats()
  292. }
  293. return PoolStats{}
  294. }
  295. // EnableOptimizations enables or disables adaptive optimizations
  296. func (pi *ParallelIndexer) EnableOptimizations(enabled bool) {
  297. pi.optimizationEnabled = enabled
  298. if !enabled && pi.adaptiveOptimizer != nil {
  299. pi.adaptiveOptimizer.Stop()
  300. } else if enabled && pi.adaptiveOptimizer != nil && atomic.LoadInt32(&pi.running) == 1 {
  301. pi.adaptiveOptimizer.Start()
  302. }
  303. }
  304. // GetDynamicShardInfo returns information about dynamic shard management
  305. func (pi *ParallelIndexer) GetDynamicShardInfo() *DynamicShardInfo {
  306. if pi.dynamicAwareness == nil {
  307. return &DynamicShardInfo{
  308. IsEnabled: false,
  309. IsActive: false,
  310. ShardCount: pi.config.ShardCount,
  311. ShardType: "static",
  312. }
  313. }
  314. isDynamic := pi.dynamicAwareness.IsDynamic()
  315. shardManager := pi.dynamicAwareness.GetCurrentShardManager()
  316. info := &DynamicShardInfo{
  317. IsEnabled: true,
  318. IsActive: isDynamic,
  319. ShardCount: pi.config.ShardCount,
  320. ShardType: "static",
  321. }
  322. if isDynamic {
  323. info.ShardType = "dynamic"
  324. if enhancedManager, ok := shardManager.(*EnhancedDynamicShardManager); ok {
  325. info.TargetShardCount = enhancedManager.GetTargetShardCount()
  326. info.IsScaling = enhancedManager.IsScalingInProgress()
  327. info.AutoScaleEnabled = enhancedManager.IsAutoScaleEnabled()
  328. // Get scaling recommendation
  329. recommendation := enhancedManager.GetScalingRecommendations()
  330. info.Recommendation = recommendation
  331. // Get shard health
  332. info.ShardHealth = enhancedManager.GetShardHealth()
  333. }
  334. }
  335. // Get performance analysis
  336. analysis := pi.dynamicAwareness.GetPerformanceAnalysis()
  337. info.PerformanceAnalysis = &analysis
  338. return info
  339. }
// DynamicShardInfo contains information about dynamic shard management status,
// as reported by GetDynamicShardInfo. The omitempty fields are only populated
// when dynamic (enhanced) shard management is active.
type DynamicShardInfo struct {
	IsEnabled           bool                       `json:"is_enabled"`
	IsActive            bool                       `json:"is_active"`
	ShardType           string                     `json:"shard_type"` // "static" or "dynamic"
	ShardCount          int                        `json:"shard_count"`
	TargetShardCount    int                        `json:"target_shard_count,omitempty"`
	IsScaling           bool                       `json:"is_scaling,omitempty"`
	AutoScaleEnabled    bool                       `json:"auto_scale_enabled,omitempty"`
	Recommendation      *ScalingRecommendation     `json:"recommendation,omitempty"`
	ShardHealth         map[int]*ShardHealthStatus `json:"shard_health,omitempty"`
	PerformanceAnalysis *PerformanceAnalysis       `json:"performance_analysis,omitempty"`
}
  353. // FlushAll flushes all pending operations
  354. func (pi *ParallelIndexer) FlushAll() error {
  355. // Check if indexer is still running
  356. if atomic.LoadInt32(&pi.running) != 1 {
  357. return fmt.Errorf("indexer not running")
  358. }
  359. // Get all shards and flush them
  360. shards := pi.shardManager.GetAllShards()
  361. var errs []error
  362. for i, shard := range shards {
  363. if shard == nil {
  364. continue
  365. }
  366. // Force flush by creating and immediately deleting a temporary document
  367. batch := shard.NewBatch()
  368. // Use efficient string building instead of fmt.Sprintf
  369. tempIDBuf := make([]byte, 0, 64)
  370. tempIDBuf = append(tempIDBuf, "_flush_temp_"...)
  371. tempIDBuf = utils.AppendInt(tempIDBuf, i)
  372. tempIDBuf = append(tempIDBuf, '_')
  373. tempIDBuf = utils.AppendInt(tempIDBuf, int(time.Now().UnixNano()))
  374. tempID := utils.BytesToStringUnsafe(tempIDBuf)
  375. batch.Index(tempID, map[string]interface{}{"_temp": true})
  376. if err := shard.Batch(batch); err != nil {
  377. errs = append(errs, fmt.Errorf("failed to flush shard %d: %w", i, err))
  378. continue
  379. }
  380. // Delete the temporary document
  381. shard.Delete(tempID)
  382. }
  383. if len(errs) > 0 {
  384. return fmt.Errorf("flush errors: %v", errs)
  385. }
  386. return nil
  387. }
  388. // Optimize triggers optimization of all shards
  389. func (pi *ParallelIndexer) Optimize() error {
  390. if !atomic.CompareAndSwapInt32(&pi.optimizing, 0, 1) {
  391. return fmt.Errorf("optimization already in progress")
  392. }
  393. defer atomic.StoreInt32(&pi.optimizing, 0)
  394. startTime := time.Now()
  395. stats := pi.shardManager.GetShardStats()
  396. var errs []error
  397. for _, stat := range stats {
  398. if err := pi.shardManager.OptimizeShard(stat.ID); err != nil {
  399. errs = append(errs, fmt.Errorf("failed to optimize shard %d: %w", stat.ID, err))
  400. }
  401. }
  402. // Update optimization stats
  403. pi.statsMutex.Lock()
  404. if pi.stats.OptimizationStats == nil {
  405. pi.stats.OptimizationStats = &OptimizationStats{}
  406. }
  407. pi.stats.OptimizationStats.LastRun = time.Now().Unix()
  408. pi.stats.OptimizationStats.Duration = time.Since(startTime)
  409. pi.stats.OptimizationStats.Success = len(errs) == 0
  410. pi.stats.LastOptimized = time.Now().Unix()
  411. pi.statsMutex.Unlock()
  412. atomic.StoreInt64(&pi.lastOptimized, time.Now().Unix())
  413. if len(errs) > 0 {
  414. return fmt.Errorf("optimization errors: %v", errs)
  415. }
  416. // Record optimization metrics
  417. pi.metrics.RecordOptimization(time.Since(startTime), len(errs) == 0)
  418. return nil
  419. }
// IndexLogFile reads and indexes a single log file line by line.
// Each line is parsed via ParseLogLine; unparseable lines are logged and
// skipped. Document IDs are "<filePath>-<lineOrdinal>". The batch writer
// auto-flushes as it fills; a final Flush commits the remainder.
//
// NOTE(review): bufio.Scanner's default 64KiB token limit will abort the scan
// on longer lines — confirm log lines stay below this, or raise via Buffer().
func (pi *ParallelIndexer) IndexLogFile(filePath string) error {
	if !pi.IsHealthy() {
		return fmt.Errorf("indexer not healthy")
	}
	file, err := os.Open(filePath)
	if err != nil {
		return fmt.Errorf("failed to open log file %s: %w", filePath, err)
	}
	defer file.Close()
	// Use a batch writer for efficient indexing
	batch := pi.StartBatch()
	scanner := bufio.NewScanner(file)
	docCount := 0
	for scanner.Scan() {
		line := scanner.Text()
		if line == "" {
			continue
		}
		// In a real implementation, parse the log line into a structured format
		// For now, we create a simple document
		logDoc, err := ParseLogLine(line) // Assuming a parser function exists
		if err != nil {
			logger.Warnf("Skipping line due to parse error in file %s: %v", filePath, err)
			continue
		}
		logDoc.FilePath = filePath
		// Use efficient string building for document ID: "<path>-<count>"
		docIDBuf := make([]byte, 0, len(filePath)+16)
		docIDBuf = append(docIDBuf, filePath...)
		docIDBuf = append(docIDBuf, '-')
		docIDBuf = utils.AppendInt(docIDBuf, int(docCount))
		doc := &Document{
			ID:     utils.BytesToStringUnsafe(docIDBuf),
			Fields: logDoc,
		}
		if err := batch.Add(doc); err != nil {
			// This indicates an auto-flush occurred and failed.
			// Log the error and stop processing this file to avoid further issues.
			return fmt.Errorf("failed to add document to batch for %s (auto-flush might have failed): %w", filePath, err)
		}
		docCount++
	}
	// Scanner errors (I/O failure, over-long line) surface here, not in Scan().
	if err := scanner.Err(); err != nil {
		return fmt.Errorf("error reading log file %s: %w", filePath, err)
	}
	if _, err := batch.Flush(); err != nil {
		return fmt.Errorf("failed to flush batch for %s: %w", filePath, err)
	}
	return nil
}
  471. // GetStats returns current indexer statistics
  472. func (pi *ParallelIndexer) GetStats() *IndexStats {
  473. pi.statsMutex.RLock()
  474. defer pi.statsMutex.RUnlock()
  475. // Update shard stats
  476. shardStats := pi.shardManager.GetShardStats()
  477. pi.stats.Shards = shardStats
  478. pi.stats.ShardCount = len(shardStats)
  479. var totalDocs uint64
  480. var totalSize int64
  481. for _, shard := range shardStats {
  482. totalDocs += shard.DocumentCount
  483. totalSize += shard.Size
  484. }
  485. pi.stats.TotalDocuments = totalDocs
  486. pi.stats.TotalSize = totalSize
  487. pi.stats.QueueSize = len(pi.jobQueue)
  488. // Calculate memory usage
  489. var memStats runtime.MemStats
  490. runtime.ReadMemStats(&memStats)
  491. pi.stats.MemoryUsage = int64(memStats.Alloc)
  492. // Copy stats to avoid race conditions
  493. statsCopy := *pi.stats
  494. return &statsCopy
  495. }
  496. // IsRunning returns whether the indexer is currently running
  497. func (pi *ParallelIndexer) IsRunning() bool {
  498. return atomic.LoadInt32(&pi.running) != 0
  499. }
  500. // GetShardInfo returns information about a specific shard
  501. func (pi *ParallelIndexer) GetShardInfo(shardID int) (*ShardInfo, error) {
  502. shardStats := pi.shardManager.GetShardStats()
  503. for _, stat := range shardStats {
  504. if stat.ID == shardID {
  505. return stat, nil
  506. }
  507. }
  508. return nil, fmt.Errorf("%s: %d", ErrShardNotFound, shardID)
  509. }
  510. // IsHealthy checks if the indexer is running and healthy
  511. func (pi *ParallelIndexer) IsHealthy() bool {
  512. if atomic.LoadInt32(&pi.running) != 1 {
  513. return false
  514. }
  515. // Check shard manager health
  516. return pi.shardManager.HealthCheck() == nil
  517. }
// GetConfig returns the current configuration.
// Note: this is the live pointer, not a copy — callers must not mutate it;
// use UpdateConfig for runtime changes.
func (pi *ParallelIndexer) GetConfig() *Config {
	return pi.config
}
// GetAllShards returns all managed shards, delegating to the shard manager.
func (pi *ParallelIndexer) GetAllShards() []bleve.Index {
	return pi.shardManager.GetAllShards()
}
  526. // DeleteIndexByLogGroup deletes all index entries for a specific log group (base path and its rotated files)
  527. func (pi *ParallelIndexer) DeleteIndexByLogGroup(basePath string, logFileManager interface{}) error {
  528. if !pi.IsHealthy() {
  529. return fmt.Errorf("indexer not healthy")
  530. }
  531. // Get all file paths for this log group from the database
  532. if logFileManager == nil {
  533. return fmt.Errorf("log file manager is required")
  534. }
  535. lfm, ok := logFileManager.(GroupFileProvider)
  536. if !ok {
  537. return fmt.Errorf("log file manager does not support GetFilePathsForGroup")
  538. }
  539. filesToDelete, err := lfm.GetFilePathsForGroup(basePath)
  540. if err != nil {
  541. return fmt.Errorf("failed to get file paths for log group %s: %w", basePath, err)
  542. }
  543. logger.Infof("Deleting index entries for log group %s, files: %v", basePath, filesToDelete)
  544. // Delete documents from all shards for these files
  545. shards := pi.shardManager.GetAllShards()
  546. var deleteErrors []error
  547. for _, shard := range shards {
  548. // Search for documents with matching file_path
  549. for _, filePath := range filesToDelete {
  550. query := bleve.NewTermQuery(filePath)
  551. query.SetField("file_path")
  552. searchRequest := bleve.NewSearchRequest(query)
  553. searchRequest.Size = 1000 // Process in batches
  554. searchRequest.Fields = []string{"file_path"}
  555. for {
  556. searchResult, err := shard.Search(searchRequest)
  557. if err != nil {
  558. deleteErrors = append(deleteErrors, fmt.Errorf("failed to search for documents in file %s: %w", filePath, err))
  559. break
  560. }
  561. if len(searchResult.Hits) == 0 {
  562. break // No more documents to delete
  563. }
  564. // Delete documents in batch
  565. batch := shard.NewBatch()
  566. for _, hit := range searchResult.Hits {
  567. batch.Delete(hit.ID)
  568. }
  569. if err := shard.Batch(batch); err != nil {
  570. deleteErrors = append(deleteErrors, fmt.Errorf("failed to delete batch for file %s: %w", filePath, err))
  571. }
  572. // If we got fewer results than requested, we're done
  573. if len(searchResult.Hits) < searchRequest.Size {
  574. break
  575. }
  576. // Continue from where we left off
  577. searchRequest.From += searchRequest.Size
  578. }
  579. }
  580. }
  581. if len(deleteErrors) > 0 {
  582. return fmt.Errorf("encountered %d errors during deletion: %v", len(deleteErrors), deleteErrors[0])
  583. }
  584. logger.Infof("Successfully deleted index entries for log group: %s", basePath)
  585. return nil
  586. }
// DestroyAllIndexes closes and deletes all index data from disk, then
// re-initializes the context and channels so the indexer can be restarted.
//
// NOTE(review): reassigning pi.ctx/pi.jobQueue/pi.resultQueue is only safe if
// no other goroutine is concurrently reading those fields — confirm callers
// serialize this with Start/Stop.
func (pi *ParallelIndexer) DestroyAllIndexes(parentCtx context.Context) error {
	// Stop all background routines before deleting files
	pi.cancel()
	pi.wg.Wait()
	// Safely close channels if they haven't been closed yet (shared CAS guard
	// with Stop, so double-close panics are avoided whichever runs first).
	if atomic.CompareAndSwapInt32(&pi.channelsClosed, 0, 1) {
		close(pi.jobQueue)
		close(pi.resultQueue)
	}
	atomic.StoreInt32(&pi.running, 0) // Mark as not running
	var destructionErr error
	// Only the default shard manager knows how to remove its on-disk data.
	if manager, ok := pi.shardManager.(*DefaultShardManager); ok {
		destructionErr = manager.Destroy()
	} else {
		destructionErr = fmt.Errorf("shard manager does not support destruction")
	}
	// Re-initialize context and channels for a potential restart using parent context
	pi.ctx, pi.cancel = context.WithCancel(parentCtx)
	pi.jobQueue = make(chan *IndexJob, pi.config.MaxQueueSize)
	pi.resultQueue = make(chan *IndexResult, pi.config.WorkerCount)
	atomic.StoreInt32(&pi.channelsClosed, 0) // Reset the channel closed flag
	return destructionErr
}
// IndexLogGroup finds all files related to a base log path (e.g., rotated
// logs) by globbing "<basePath>*" and indexes each of them.
// It returns a map of [filePath -> docCount], and the min/max timestamps
// found across all files. Files that fail to index are logged and skipped;
// an empty group returns all-nil values without error.
func (pi *ParallelIndexer) IndexLogGroup(basePath string) (map[string]uint64, *time.Time, *time.Time, error) {
	if !pi.IsHealthy() {
		return nil, nil, nil, fmt.Errorf("indexer not healthy")
	}
	// Find all files belonging to this log group by globbing
	globPath := basePath + "*"
	matches, err := filepath.Glob(globPath)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("failed to glob for log files with base %s: %w", basePath, err)
	}
	// filepath.Glob might not match the base file itself if it has no extension,
	// so we check for it explicitly and add it to the list.
	info, err := os.Stat(basePath)
	if err == nil && info.Mode().IsRegular() {
		matches = append(matches, basePath)
	}
	// Deduplicate file list (the base path may now appear twice) and drop
	// anything that is not a regular file — Glob can match directories.
	seen := make(map[string]struct{})
	uniqueFiles := make([]string, 0)
	for _, match := range matches {
		if _, ok := seen[match]; !ok {
			info, err := os.Stat(match)
			if err == nil && info.Mode().IsRegular() {
				seen[match] = struct{}{}
				uniqueFiles = append(uniqueFiles, match)
			}
		}
	}
	if len(uniqueFiles) == 0 {
		logger.Warnf("No actual log file found for group: %s", basePath)
		return nil, nil, nil, nil
	}
	logger.Infof("Found %d file(s) for log group %s: %v", len(uniqueFiles), basePath, uniqueFiles)
	docsCountMap := make(map[string]uint64)
	var overallMinTime, overallMaxTime *time.Time
	for _, filePath := range uniqueFiles {
		docsIndexed, minTime, maxTime, err := pi.indexSingleFile(filePath)
		if err != nil {
			logger.Warnf("Failed to index file '%s' in group '%s', skipping: %v", filePath, basePath, err)
			continue // Continue with the next file
		}
		docsCountMap[filePath] = docsIndexed
		// Widen the overall [min, max] timestamp window with this file's range.
		if minTime != nil {
			if overallMinTime == nil || minTime.Before(*overallMinTime) {
				overallMinTime = minTime
			}
		}
		if maxTime != nil {
			if overallMaxTime == nil || maxTime.After(*overallMaxTime) {
				overallMaxTime = maxTime
			}
		}
	}
	return docsCountMap, overallMinTime, overallMaxTime, nil
}
// indexSingleFile contains the logic to process one physical log file,
// transparently decompressing ".gz" files. It returns the number of documents
// indexed from the file, and the min/max timestamps observed.
//
// NOTE(review): bufio.Scanner's default 64KiB token limit will abort the scan
// on longer lines — confirm log lines stay below this, or raise via Buffer().
func (pi *ParallelIndexer) indexSingleFile(filePath string) (uint64, *time.Time, *time.Time, error) {
	file, err := os.Open(filePath)
	if err != nil {
		return 0, nil, nil, fmt.Errorf("failed to open log file %s: %w", filePath, err)
	}
	defer file.Close()
	var reader io.Reader = file
	// Handle gzipped files
	if strings.HasSuffix(filePath, ".gz") {
		gz, err := gzip.NewReader(file)
		if err != nil {
			return 0, nil, nil, fmt.Errorf("failed to create gzip reader for %s: %w", filePath, err)
		}
		defer gz.Close()
		reader = gz
	}
	logger.Infof("Starting to process file: %s", filePath)
	batch := pi.StartBatch()
	scanner := bufio.NewScanner(reader)
	docCount := 0
	var minTime, maxTime *time.Time
	for scanner.Scan() {
		line := scanner.Text()
		if line == "" {
			continue
		}
		logDoc, err := ParseLogLine(line)
		if err != nil {
			logger.Warnf("Skipping line due to parse error in file %s: %v", filePath, err)
			continue
		}
		logDoc.FilePath = filePath
		// Track min/max timestamps (logDoc.Timestamp is unix seconds here).
		ts := time.Unix(logDoc.Timestamp, 0)
		if minTime == nil || ts.Before(*minTime) {
			minTime = &ts
		}
		if maxTime == nil || ts.After(*maxTime) {
			maxTime = &ts
		}
		// Use efficient string building for document ID: "<path>-<count>"
		docIDBuf := make([]byte, 0, len(filePath)+16)
		docIDBuf = append(docIDBuf, filePath...)
		docIDBuf = append(docIDBuf, '-')
		docIDBuf = utils.AppendInt(docIDBuf, int(docCount))
		doc := &Document{
			ID:     utils.BytesToStringUnsafe(docIDBuf),
			Fields: logDoc,
		}
		if err := batch.Add(doc); err != nil {
			// This indicates an auto-flush occurred and failed.
			// Log the error and stop processing this file to avoid further issues.
			return uint64(docCount), minTime, maxTime, fmt.Errorf("failed to add document to batch for %s (auto-flush might have failed): %w", filePath, err)
		}
		docCount++
	}
	// Scanner errors (I/O failure, over-long line) surface here, not in Scan().
	if err := scanner.Err(); err != nil {
		return uint64(docCount), minTime, maxTime, fmt.Errorf("error reading log file %s: %w", filePath, err)
	}
	logger.Infof("Finished processing file: %s. Total lines processed: %d", filePath, docCount)
	// Only flush when something was added; an empty flush is unnecessary work.
	if docCount > 0 {
		if _, err := batch.Flush(); err != nil {
			return uint64(docCount), minTime, maxTime, fmt.Errorf("failed to flush batch for %s: %w", filePath, err)
		}
	}
	return uint64(docCount), minTime, maxTime, nil
}
  738. // UpdateConfig updates the indexer configuration
  739. func (pi *ParallelIndexer) UpdateConfig(config *Config) error {
  740. // Only allow updating certain configuration parameters while running
  741. pi.config.BatchSize = config.BatchSize
  742. pi.config.FlushInterval = config.FlushInterval
  743. pi.config.EnableMetrics = config.EnableMetrics
  744. return nil
  745. }
  746. // Worker implementation
  747. func (w *indexWorker) run() {
  748. defer w.indexer.wg.Done()
  749. w.updateStatus(WorkerStatusIdle)
  750. for {
  751. select {
  752. case job, ok := <-w.indexer.jobQueue:
  753. if !ok {
  754. return // Channel closed, worker should exit
  755. }
  756. w.updateStatus(WorkerStatusBusy)
  757. result := w.processJob(job)
  758. // Send result
  759. select {
  760. case w.indexer.resultQueue <- result:
  761. case <-w.indexer.ctx.Done():
  762. return
  763. }
  764. // Execute callback if provided
  765. if job.Callback != nil {
  766. var err error
  767. if result.Failed > 0 {
  768. err = fmt.Errorf("indexing failed for %d documents", result.Failed)
  769. }
  770. job.Callback(err)
  771. }
  772. w.updateStatus(WorkerStatusIdle)
  773. case <-w.indexer.ctx.Done():
  774. return
  775. }
  776. }
  777. }
  778. func (w *indexWorker) processJob(job *IndexJob) *IndexResult {
  779. startTime := time.Now()
  780. result := &IndexResult{
  781. Processed: len(job.Documents),
  782. }
  783. // Group documents by shard
  784. shardDocs := make(map[int][]*Document)
  785. for _, doc := range job.Documents {
  786. if doc.ID == "" {
  787. result.Failed++
  788. continue
  789. }
  790. _, shardID, err := w.indexer.shardManager.GetShard(doc.ID)
  791. if err != nil {
  792. result.Failed++
  793. continue
  794. }
  795. shardDocs[shardID] = append(shardDocs[shardID], doc)
  796. }
  797. // Index documents per shard
  798. for shardID, docs := range shardDocs {
  799. if err := w.indexShardDocuments(shardID, docs); err != nil {
  800. result.Failed += len(docs)
  801. } else {
  802. result.Succeeded += len(docs)
  803. }
  804. }
  805. result.Duration = time.Since(startTime)
  806. if result.Processed > 0 {
  807. result.ErrorRate = float64(result.Failed) / float64(result.Processed)
  808. result.Throughput = float64(result.Processed) / result.Duration.Seconds()
  809. }
  810. // Update worker stats
  811. w.statsMutex.Lock()
  812. w.stats.ProcessedJobs++
  813. w.stats.ProcessedDocs += int64(result.Processed)
  814. w.stats.ErrorCount += int64(result.Failed)
  815. w.stats.LastActive = time.Now().Unix()
  816. // Update average latency (simple moving average)
  817. if w.stats.AverageLatency == 0 {
  818. w.stats.AverageLatency = result.Duration
  819. } else {
  820. w.stats.AverageLatency = (w.stats.AverageLatency + result.Duration) / 2
  821. }
  822. w.statsMutex.Unlock()
  823. return result
  824. }
  825. func (w *indexWorker) indexShardDocuments(shardID int, docs []*Document) error {
  826. shard, err := w.indexer.shardManager.GetShardByID(shardID)
  827. if err != nil {
  828. return err
  829. }
  830. batch := shard.NewBatch()
  831. for _, doc := range docs {
  832. // Convert LogDocument to map for Bleve indexing
  833. docMap := w.logDocumentToMap(doc.Fields)
  834. batch.Index(doc.ID, docMap)
  835. }
  836. if err := shard.Batch(batch); err != nil {
  837. return fmt.Errorf("failed to index batch for shard %d: %w", shardID, err)
  838. }
  839. return nil
  840. }
  841. // logDocumentToMap converts LogDocument to map[string]interface{} for Bleve
  842. func (w *indexWorker) logDocumentToMap(doc *LogDocument) map[string]interface{} {
  843. docMap := map[string]interface{}{
  844. "timestamp": doc.Timestamp,
  845. "ip": doc.IP,
  846. "method": doc.Method,
  847. "path": doc.Path,
  848. "path_exact": doc.PathExact,
  849. "status": doc.Status,
  850. "bytes_sent": doc.BytesSent,
  851. "file_path": doc.FilePath,
  852. "raw": doc.Raw,
  853. }
  854. // Add optional fields only if they have values
  855. if doc.RegionCode != "" {
  856. docMap["region_code"] = doc.RegionCode
  857. }
  858. if doc.Province != "" {
  859. docMap["province"] = doc.Province
  860. }
  861. if doc.City != "" {
  862. docMap["city"] = doc.City
  863. }
  864. if doc.Protocol != "" {
  865. docMap["protocol"] = doc.Protocol
  866. }
  867. if doc.Referer != "" {
  868. docMap["referer"] = doc.Referer
  869. }
  870. if doc.UserAgent != "" {
  871. docMap["user_agent"] = doc.UserAgent
  872. }
  873. if doc.Browser != "" {
  874. docMap["browser"] = doc.Browser
  875. }
  876. if doc.BrowserVer != "" {
  877. docMap["browser_version"] = doc.BrowserVer
  878. }
  879. if doc.OS != "" {
  880. docMap["os"] = doc.OS
  881. }
  882. if doc.OSVersion != "" {
  883. docMap["os_version"] = doc.OSVersion
  884. }
  885. if doc.DeviceType != "" {
  886. docMap["device_type"] = doc.DeviceType
  887. }
  888. if doc.RequestTime > 0 {
  889. docMap["request_time"] = doc.RequestTime
  890. }
  891. if doc.UpstreamTime != nil {
  892. docMap["upstream_time"] = *doc.UpstreamTime
  893. }
  894. return docMap
  895. }
  896. func (w *indexWorker) updateStatus(status string) {
  897. w.statsMutex.Lock()
  898. w.stats.Status = status
  899. w.statsMutex.Unlock()
  900. }
  901. // Background routines
  902. func (pi *ParallelIndexer) processResults() {
  903. defer pi.wg.Done()
  904. for {
  905. select {
  906. case result := <-pi.resultQueue:
  907. if result != nil {
  908. pi.metrics.RecordIndexOperation(
  909. result.Processed,
  910. result.Duration,
  911. result.Failed == 0,
  912. )
  913. }
  914. case <-pi.ctx.Done():
  915. return
  916. }
  917. }
  918. }
  919. func (pi *ParallelIndexer) optimizationRoutine() {
  920. defer pi.wg.Done()
  921. ticker := time.NewTicker(pi.config.OptimizeInterval)
  922. defer ticker.Stop()
  923. for {
  924. select {
  925. case <-ticker.C:
  926. if atomic.LoadInt32(&pi.optimizing) == 0 {
  927. go pi.Optimize() // Run in background to avoid blocking
  928. }
  929. case <-pi.ctx.Done():
  930. return
  931. }
  932. }
  933. }
  934. func (pi *ParallelIndexer) metricsRoutine() {
  935. defer pi.wg.Done()
  936. ticker := time.NewTicker(10 * time.Second)
  937. defer ticker.Stop()
  938. for {
  939. select {
  940. case <-ticker.C:
  941. pi.updateMetrics()
  942. case <-pi.ctx.Done():
  943. return
  944. }
  945. }
  946. }
  947. func (pi *ParallelIndexer) updateMetrics() {
  948. pi.statsMutex.Lock()
  949. defer pi.statsMutex.Unlock()
  950. // Update indexing rate based on recent activity
  951. metrics := pi.metrics.GetMetrics()
  952. pi.stats.IndexingRate = metrics.IndexingRate
  953. }
// IndexLogGroupWithProgress indexes a log group with progress tracking.
// A "log group" is the set of files matching basePath* (rotated logs,
// optionally gzipped) plus basePath itself. It returns a map of file
// path -> documents indexed, and the earliest/latest log timestamps
// observed across all files. A nil progressConfig disables tracking.
// Individual file failures are logged and skipped, not returned.
func (pi *ParallelIndexer) IndexLogGroupWithProgress(basePath string, progressConfig *ProgressConfig) (map[string]uint64, *time.Time, *time.Time, error) {
	if !pi.IsHealthy() {
		return nil, nil, nil, fmt.Errorf("indexer not healthy")
	}
	// Create progress tracker if config is provided
	var progressTracker *ProgressTracker
	if progressConfig != nil {
		progressTracker = NewProgressTracker(basePath, progressConfig)
	}
	// Find all files belonging to this log group by globbing
	globPath := basePath + "*"
	matches, err := filepath.Glob(globPath)
	if err != nil {
		// Mark tracking as cancelled so observers are not left hanging.
		if progressTracker != nil {
			progressTracker.Cancel(fmt.Sprintf("glob failed: %v", err))
		}
		return nil, nil, nil, fmt.Errorf("failed to glob for log files with base %s: %w", basePath, err)
	}
	// filepath.Glob might not match the base file itself if it has no extension,
	// so we check for it explicitly and add it to the list.
	info, err := os.Stat(basePath)
	if err == nil && info.Mode().IsRegular() {
		matches = append(matches, basePath)
	}
	// Deduplicate file list
	seen := make(map[string]struct{})
	uniqueFiles := make([]string, 0)
	for _, match := range matches {
		if _, ok := seen[match]; !ok {
			// Further check if it's a file, not a directory. Glob can match dirs.
			info, err := os.Stat(match)
			if err == nil && info.Mode().IsRegular() {
				seen[match] = struct{}{}
				uniqueFiles = append(uniqueFiles, match)
			}
		}
	}
	// No files at all is treated as a no-op success, not an error.
	if len(uniqueFiles) == 0 {
		logger.Warnf("No actual log file found for group: %s", basePath)
		if progressTracker != nil {
			progressTracker.Cancel("no files found")
		}
		return nil, nil, nil, nil
	}
	logger.Infof("Found %d file(s) for log group %s: %v", len(uniqueFiles), basePath, uniqueFiles)
	// Set up progress tracking for all files
	if progressTracker != nil {
		for _, filePath := range uniqueFiles {
			isCompressed := IsCompressedFile(filePath)
			progressTracker.AddFile(filePath, isCompressed)
			// Get file size and estimate lines
			if stat, err := os.Stat(filePath); err == nil {
				progressTracker.SetFileSize(filePath, stat.Size())
				// Estimate lines for progress calculation
				if estimatedLines, err := EstimateFileLines(context.Background(), filePath, stat.Size(), isCompressed); err == nil {
					progressTracker.SetFileEstimate(filePath, estimatedLines)
				}
			}
		}
	}
	docsCountMap := make(map[string]uint64)
	var overallMinTime, overallMaxTime *time.Time
	// Process each file with progress tracking
	for _, filePath := range uniqueFiles {
		if progressTracker != nil {
			progressTracker.StartFile(filePath)
		}
		docsIndexed, minTime, maxTime, err := pi.indexSingleFileWithProgress(filePath, progressTracker)
		if err != nil {
			// Best-effort per file: record the failure and keep going.
			logger.Warnf("Failed to index file '%s' in group '%s', skipping: %v", filePath, basePath, err)
			if progressTracker != nil {
				progressTracker.FailFile(filePath, err.Error())
			}
			continue // Continue with the next file
		}
		docsCountMap[filePath] = docsIndexed
		if progressTracker != nil {
			progressTracker.CompleteFile(filePath, int64(docsIndexed))
		}
		// Fold this file's time range into the group-wide min/max.
		if minTime != nil {
			if overallMinTime == nil || minTime.Before(*overallMinTime) {
				overallMinTime = minTime
			}
		}
		if maxTime != nil {
			if overallMaxTime == nil || maxTime.After(*overallMaxTime) {
				overallMaxTime = maxTime
			}
		}
	}
	return docsCountMap, overallMinTime, overallMaxTime, nil
}
  1047. // indexSingleFileWithProgress indexes a single file with progress updates
  1048. func (pi *ParallelIndexer) indexSingleFileWithProgress(filePath string, progressTracker *ProgressTracker) (uint64, *time.Time, *time.Time, error) {
  1049. // If no progress tracker, just call the original method
  1050. if progressTracker == nil {
  1051. return pi.indexSingleFile(filePath)
  1052. }
  1053. // Call the original indexing method to do the actual indexing work
  1054. docsIndexed, minTime, maxTime, err := pi.indexSingleFile(filePath)
  1055. if err != nil {
  1056. return 0, nil, nil, err
  1057. }
  1058. // Just do one final progress update when done - no artificial delays
  1059. if progressTracker != nil && docsIndexed > 0 {
  1060. if strings.HasSuffix(filePath, ".gz") {
  1061. progressTracker.UpdateFileProgress(filePath, int64(docsIndexed))
  1062. } else {
  1063. // Estimate position based on average line size
  1064. estimatedPos := int64(docsIndexed * 150) // Assume ~150 bytes per line
  1065. progressTracker.UpdateFileProgress(filePath, int64(docsIndexed), estimatedPos)
  1066. }
  1067. }
  1068. // Return the actual timestamps from the original method
  1069. return docsIndexed, minTime, maxTime, nil
  1070. }