1
0

parallel_indexer.go 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426
  1. package indexer
  2. import (
  3. "context"
  4. "fmt"
  5. "os"
  6. "path/filepath"
  7. "runtime"
  8. "sync"
  9. "sync/atomic"
  10. "time"
  11. "github.com/0xJacky/Nginx-UI/internal/nginx_log/utils"
  12. "github.com/blevesearch/bleve/v2"
  13. "github.com/uozi-tech/cosy/logger"
  14. )
// ParallelIndexer provides high-performance parallel indexing with sharding.
// Jobs submitted to jobQueue are consumed by a pool of indexWorkers that
// write documents into shards owned by shardManager.
type ParallelIndexer struct {
	config       *Config
	shardManager ShardManager // owns the underlying Bleve shards
	metrics      MetricsCollector
	// Worker management
	workers     []*indexWorker
	jobQueue    chan *IndexJob    // buffered with config.MaxQueueSize
	resultQueue chan *IndexResult // buffered with config.WorkerCount
	// State management
	ctx     context.Context
	cancel  context.CancelFunc
	wg      sync.WaitGroup
	running int32 // 1 while started; accessed atomically
	// Cleanup control
	stopOnce       sync.Once // ensures Stop tears down at most once
	channelsClosed int32     // CAS flag so channels are closed exactly once (Stop vs DestroyAllIndexes)
	// Statistics
	stats      *IndexStats
	statsMutex sync.RWMutex // protects stats and the workers slice during scaling
	// Optimization
	lastOptimized       int64 // unix seconds of the last Optimize run; accessed atomically
	optimizing          int32 // CAS flag preventing concurrent Optimize runs
	adaptiveOptimizer   *AdaptiveOptimizer
	zeroAllocProcessor  *ZeroAllocBatchProcessor
	optimizationEnabled bool
	// Rotation log scanning for optimized throughput
	rotationScanner *RotationScanner
}
// indexWorker represents a single indexing worker.
type indexWorker struct {
	id         int
	indexer    *ParallelIndexer // back-reference to the owning indexer
	stats      *WorkerStats
	statsMutex sync.RWMutex // guards stats (e.g. Status reads in IsBusy)
}
  51. // NewParallelIndexer creates a new parallel indexer with dynamic shard awareness
  52. func NewParallelIndexer(config *Config, shardManager ShardManager) *ParallelIndexer {
  53. if config == nil {
  54. config = DefaultIndexerConfig()
  55. }
  56. ctx, cancel := context.WithCancel(context.Background())
  57. // Initialize dynamic shard awareness
  58. // NOTE: dynamic shard awareness removed; GroupedShardManager is the default
  59. // If no shard manager provided, use grouped shard manager by default (per SHARD_GROUPS_PLAN)
  60. var actualShardManager ShardManager
  61. if shardManager == nil {
  62. gsm := NewGroupedShardManager(config)
  63. actualShardManager = gsm
  64. } else {
  65. actualShardManager = shardManager
  66. }
  67. ao := NewAdaptiveOptimizer(config)
  68. indexer := &ParallelIndexer{
  69. config: config,
  70. shardManager: actualShardManager,
  71. metrics: NewDefaultMetricsCollector(),
  72. jobQueue: make(chan *IndexJob, config.MaxQueueSize),
  73. resultQueue: make(chan *IndexResult, config.WorkerCount),
  74. ctx: ctx,
  75. cancel: cancel,
  76. stats: &IndexStats{
  77. WorkerStats: make([]*WorkerStats, config.WorkerCount),
  78. },
  79. adaptiveOptimizer: ao,
  80. zeroAllocProcessor: NewZeroAllocBatchProcessor(config),
  81. optimizationEnabled: true, // Enable optimizations by default
  82. rotationScanner: NewRotationScanner(nil), // Use default configuration
  83. }
  84. // Set up the activity poller for the adaptive optimizer
  85. if indexer.adaptiveOptimizer != nil {
  86. indexer.adaptiveOptimizer.SetActivityPoller(indexer)
  87. }
  88. // Initialize workers
  89. indexer.workers = make([]*indexWorker, config.WorkerCount)
  90. for i := 0; i < config.WorkerCount; i++ {
  91. indexer.workers[i] = &indexWorker{
  92. id: i,
  93. indexer: indexer,
  94. stats: &WorkerStats{
  95. ID: i,
  96. Status: WorkerStatusIdle,
  97. },
  98. }
  99. indexer.stats.WorkerStats[i] = indexer.workers[i].stats
  100. }
  101. return indexer
  102. }
  103. // Start begins the indexer operation
  104. func (pi *ParallelIndexer) Start(ctx context.Context) error {
  105. if !atomic.CompareAndSwapInt32(&pi.running, 0, 1) {
  106. return fmt.Errorf("indexer not started")
  107. }
  108. // Initialize shard manager
  109. if err := pi.shardManager.Initialize(); err != nil {
  110. atomic.StoreInt32(&pi.running, 0)
  111. return fmt.Errorf("failed to initialize shard manager: %w", err)
  112. }
  113. // Start workers
  114. for _, worker := range pi.workers {
  115. pi.wg.Add(1)
  116. go worker.run()
  117. }
  118. // Start result processor
  119. pi.wg.Add(1)
  120. go pi.processResults()
  121. // Start optimization routine if enabled
  122. if pi.config.OptimizeInterval > 0 {
  123. pi.wg.Add(1)
  124. go pi.optimizationRoutine()
  125. }
  126. // Start metrics collection if enabled
  127. if pi.config.EnableMetrics {
  128. pi.wg.Add(1)
  129. go pi.metricsRoutine()
  130. }
  131. // Start adaptive optimizer if enabled
  132. if pi.optimizationEnabled && pi.adaptiveOptimizer != nil {
  133. // Set worker count change callback
  134. logger.Debugf("Setting up adaptive optimizer callback for worker count changes")
  135. pi.adaptiveOptimizer.SetWorkerCountChangeCallback(pi.handleWorkerCountChange)
  136. if err := pi.adaptiveOptimizer.Start(); err != nil {
  137. logger.Warnf("Failed to start adaptive optimizer: %v", err)
  138. } else {
  139. logger.Debugf("Adaptive optimizer started successfully")
  140. }
  141. }
  142. // Start dynamic shard awareness monitoring if enabled
  143. // NOTE: dynamic shard awareness removed; GroupedShardManager is the default
  144. return nil
  145. }
  146. // handleWorkerCountChange handles dynamic worker count adjustments from adaptive optimizer
  147. func (pi *ParallelIndexer) handleWorkerCountChange(oldCount, newCount int) {
  148. logger.Infof("Handling worker count change from %d to %d", oldCount, newCount)
  149. // Check if indexer is running
  150. if atomic.LoadInt32(&pi.running) != 1 {
  151. logger.Warn("Cannot adjust worker count: indexer not running")
  152. return
  153. }
  154. // Prevent concurrent worker adjustments
  155. pi.statsMutex.Lock()
  156. defer pi.statsMutex.Unlock()
  157. currentWorkerCount := len(pi.workers)
  158. if currentWorkerCount == newCount {
  159. return // Already at desired count
  160. }
  161. if newCount > currentWorkerCount {
  162. // Add more workers
  163. pi.addWorkers(newCount - currentWorkerCount)
  164. } else {
  165. // Remove workers
  166. pi.removeWorkers(currentWorkerCount - newCount)
  167. }
  168. // Update config to reflect the change
  169. pi.config.WorkerCount = newCount
  170. logger.Infof("Successfully adjusted worker count to %d", newCount)
  171. }
  172. // addWorkers adds new workers to the pool
  173. func (pi *ParallelIndexer) addWorkers(count int) {
  174. for i := 0; i < count; i++ {
  175. workerID := len(pi.workers)
  176. worker := &indexWorker{
  177. id: workerID,
  178. indexer: pi,
  179. stats: &WorkerStats{
  180. ID: workerID,
  181. Status: WorkerStatusIdle,
  182. },
  183. }
  184. pi.workers = append(pi.workers, worker)
  185. pi.stats.WorkerStats = append(pi.stats.WorkerStats, worker.stats)
  186. // Start the new worker
  187. pi.wg.Add(1)
  188. go worker.run()
  189. logger.Debugf("Added worker %d", workerID)
  190. }
  191. }
  192. // removeWorkers gracefully removes workers from the pool
  193. func (pi *ParallelIndexer) removeWorkers(count int) {
  194. if count >= len(pi.workers) {
  195. logger.Warn("Cannot remove all workers, keeping at least one")
  196. count = len(pi.workers) - 1
  197. }
  198. // Remove workers from the end of the slice
  199. workersToRemove := pi.workers[len(pi.workers)-count:]
  200. pi.workers = pi.workers[:len(pi.workers)-count]
  201. pi.stats.WorkerStats = pi.stats.WorkerStats[:len(pi.stats.WorkerStats)-count]
  202. // Note: In a full implementation, you would need to:
  203. // 1. Signal workers to stop gracefully after finishing current jobs
  204. // 2. Wait for them to complete
  205. // 3. Clean up their resources
  206. // For now, we just remove them from tracking
  207. for _, worker := range workersToRemove {
  208. logger.Debugf("Removed worker %d", worker.id)
  209. }
  210. }
// Stop gracefully stops the indexer.
//
// The whole teardown runs inside stopOnce, so repeated calls are safe: only
// the first call performs shutdown; a call made after the indexer has been
// stopped by other means returns an "already stopped" error.
func (pi *ParallelIndexer) Stop() error {
	var stopErr error
	pi.stopOnce.Do(func() {
		// Set running to 0
		if !atomic.CompareAndSwapInt32(&pi.running, 1, 0) {
			logger.Warnf("[ParallelIndexer] Stop called but indexer already stopped")
			stopErr = fmt.Errorf("indexer already stopped")
			return
		}
		// Cancel context to stop all routines
		pi.cancel()
		// Stop adaptive optimizer
		if pi.adaptiveOptimizer != nil {
			pi.adaptiveOptimizer.Stop()
		}
		// Close channels safely if they haven't been closed yet
		// (DestroyAllIndexes may have closed them already; the CAS on
		// channelsClosed prevents a double close panic).
		if atomic.CompareAndSwapInt32(&pi.channelsClosed, 0, 1) {
			// Close job queue to stop accepting new jobs
			close(pi.jobQueue)
			// Wait for all workers to finish
			pi.wg.Wait()
			// Close result queue
			close(pi.resultQueue)
		} else {
			// If channels are already closed, just wait for workers
			pi.wg.Wait()
		}
		// Skip flush during stop - shards may already be closed by searcher
		// FlushAll should be called before Stop() if needed
		// Close the shard manager - this will close all shards and stop Bleve worker goroutines
		// This is critical to prevent goroutine leaks from Bleve's internal workers
		if pi.shardManager != nil {
			if err := pi.shardManager.Close(); err != nil {
				logger.Errorf("Failed to close shard manager: %v", err)
				stopErr = err
			}
		}
	})
	return stopErr
}
  252. // IndexDocument indexes a single document
  253. func (pi *ParallelIndexer) IndexDocument(ctx context.Context, doc *Document) error {
  254. return pi.IndexDocuments(ctx, []*Document{doc})
  255. }
  256. // IndexDocuments indexes multiple documents
  257. func (pi *ParallelIndexer) IndexDocuments(ctx context.Context, docs []*Document) error {
  258. if !pi.IsHealthy() {
  259. return fmt.Errorf("indexer not started")
  260. }
  261. if len(docs) == 0 {
  262. return nil
  263. }
  264. // Create job
  265. job := &IndexJob{
  266. Documents: docs,
  267. Priority: PriorityNormal,
  268. }
  269. // Submit job and wait for completion
  270. done := make(chan error, 1)
  271. job.Callback = func(err error) {
  272. done <- err
  273. }
  274. select {
  275. case pi.jobQueue <- job:
  276. select {
  277. case err := <-done:
  278. return err
  279. case <-ctx.Done():
  280. return ctx.Err()
  281. }
  282. case <-ctx.Done():
  283. return ctx.Err()
  284. case <-pi.ctx.Done():
  285. return fmt.Errorf("indexer stopped")
  286. }
  287. }
  288. // IndexDocumentAsync indexes a document asynchronously
  289. func (pi *ParallelIndexer) IndexDocumentAsync(doc *Document, callback func(error)) {
  290. pi.IndexDocumentsAsync([]*Document{doc}, callback)
  291. }
  292. // IndexDocumentsAsync indexes multiple documents asynchronously
  293. func (pi *ParallelIndexer) IndexDocumentsAsync(docs []*Document, callback func(error)) {
  294. if !pi.IsHealthy() {
  295. if callback != nil {
  296. callback(fmt.Errorf("indexer not started"))
  297. }
  298. return
  299. }
  300. if len(docs) == 0 {
  301. if callback != nil {
  302. callback(nil)
  303. }
  304. return
  305. }
  306. job := &IndexJob{
  307. Documents: docs,
  308. Priority: PriorityNormal,
  309. Callback: callback,
  310. }
  311. select {
  312. case pi.jobQueue <- job:
  313. // Job queued successfully
  314. case <-pi.ctx.Done():
  315. if callback != nil {
  316. callback(fmt.Errorf("indexer stopped"))
  317. }
  318. default:
  319. // Queue is full
  320. if callback != nil {
  321. callback(fmt.Errorf("queue is full"))
  322. }
  323. }
  324. }
  325. // StartBatch returns a new batch writer with adaptive batch size
  326. func (pi *ParallelIndexer) StartBatch() BatchWriterInterface {
  327. batchSize := pi.config.BatchSize
  328. if pi.adaptiveOptimizer != nil {
  329. batchSize = pi.adaptiveOptimizer.GetOptimalBatchSize()
  330. }
  331. return NewBatchWriter(pi, batchSize)
  332. }
  333. // GetOptimizationStats returns current optimization statistics
  334. func (pi *ParallelIndexer) GetOptimizationStats() AdaptiveOptimizationStats {
  335. if pi.adaptiveOptimizer != nil {
  336. return pi.adaptiveOptimizer.GetOptimizationStats()
  337. }
  338. return AdaptiveOptimizationStats{}
  339. }
  340. // GetPoolStats returns object pool statistics
  341. func (pi *ParallelIndexer) GetPoolStats() PoolStats {
  342. if pi.zeroAllocProcessor != nil {
  343. return pi.zeroAllocProcessor.GetPoolStats()
  344. }
  345. return PoolStats{}
  346. }
  347. // EnableOptimizations enables or disables adaptive optimizations
  348. func (pi *ParallelIndexer) EnableOptimizations(enabled bool) {
  349. pi.optimizationEnabled = enabled
  350. if !enabled && pi.adaptiveOptimizer != nil {
  351. pi.adaptiveOptimizer.Stop()
  352. } else if enabled && pi.adaptiveOptimizer != nil && atomic.LoadInt32(&pi.running) == 1 {
  353. pi.adaptiveOptimizer.Start()
  354. }
  355. }
// FlushAll flushes all pending operations.
//
// Bleve exposes no public flush API, so each shard is forced to commit by
// indexing a throwaway document and deleting it again. The temp document ID
// is built by hand (instead of fmt.Sprintf) to avoid allocations.
func (pi *ParallelIndexer) FlushAll() error {
	// Check if indexer is still running
	if atomic.LoadInt32(&pi.running) != 1 {
		return fmt.Errorf("indexer not running")
	}
	// Get all shards and flush them
	shards := pi.shardManager.GetAllShards()
	var errs []error
	for i, shard := range shards {
		if shard == nil {
			continue
		}
		// Force flush by creating and immediately deleting a temporary document
		batch := shard.NewBatch()
		// Use efficient string building instead of fmt.Sprintf.
		// ID layout: "_flush_temp_<shardIndex>_<unixNano>".
		tempIDBuf := make([]byte, 0, 64)
		tempIDBuf = append(tempIDBuf, "_flush_temp_"...)
		tempIDBuf = utils.AppendInt(tempIDBuf, i)
		tempIDBuf = append(tempIDBuf, '_')
		tempIDBuf = utils.AppendInt(tempIDBuf, int(time.Now().UnixNano()))
		// NOTE(review): tempID presumably aliases tempIDBuf's backing array
		// via an unsafe conversion, so the buffer must not be mutated after
		// this point — confirm against utils.BytesToStringUnsafe.
		tempID := utils.BytesToStringUnsafe(tempIDBuf)
		batch.Index(tempID, map[string]interface{}{"_temp": true})
		if err := shard.Batch(batch); err != nil {
			errs = append(errs, fmt.Errorf("failed to flush shard %d: %w", i, err))
			continue
		}
		// Delete the temporary document (best-effort; error intentionally ignored)
		shard.Delete(tempID)
	}
	if len(errs) > 0 {
		return fmt.Errorf("flush errors: %v", errs)
	}
	return nil
}
  391. // Optimize triggers optimization of all shards
  392. func (pi *ParallelIndexer) Optimize() error {
  393. if !atomic.CompareAndSwapInt32(&pi.optimizing, 0, 1) {
  394. return fmt.Errorf("optimization already in progress")
  395. }
  396. defer atomic.StoreInt32(&pi.optimizing, 0)
  397. startTime := time.Now()
  398. stats := pi.shardManager.GetShardStats()
  399. var errs []error
  400. for _, stat := range stats {
  401. if err := pi.shardManager.OptimizeShard(stat.ID); err != nil {
  402. errs = append(errs, fmt.Errorf("failed to optimize shard %d: %w", stat.ID, err))
  403. }
  404. }
  405. // Update optimization stats
  406. pi.statsMutex.Lock()
  407. if pi.stats.OptimizationStats == nil {
  408. pi.stats.OptimizationStats = &OptimizationStats{}
  409. }
  410. pi.stats.OptimizationStats.LastRun = time.Now().Unix()
  411. pi.stats.OptimizationStats.Duration = time.Since(startTime)
  412. pi.stats.OptimizationStats.Success = len(errs) == 0
  413. pi.stats.LastOptimized = time.Now().Unix()
  414. pi.statsMutex.Unlock()
  415. atomic.StoreInt64(&pi.lastOptimized, time.Now().Unix())
  416. if len(errs) > 0 {
  417. return fmt.Errorf("optimization errors: %v", errs)
  418. }
  419. // Record optimization metrics
  420. pi.metrics.RecordOptimization(time.Since(startTime), len(errs) == 0)
  421. return nil
  422. }
  423. // GetStats returns current indexer statistics
  424. func (pi *ParallelIndexer) GetStats() *IndexStats {
  425. pi.statsMutex.RLock()
  426. defer pi.statsMutex.RUnlock()
  427. // Update shard stats
  428. shardStats := pi.shardManager.GetShardStats()
  429. pi.stats.Shards = shardStats
  430. pi.stats.ShardCount = len(shardStats)
  431. var totalDocs uint64
  432. var totalSize int64
  433. for _, shard := range shardStats {
  434. totalDocs += shard.DocumentCount
  435. totalSize += shard.Size
  436. }
  437. pi.stats.TotalDocuments = totalDocs
  438. pi.stats.TotalSize = totalSize
  439. pi.stats.QueueSize = len(pi.jobQueue)
  440. // Calculate memory usage
  441. var memStats runtime.MemStats
  442. runtime.ReadMemStats(&memStats)
  443. pi.stats.MemoryUsage = int64(memStats.Alloc)
  444. // Copy stats to avoid race conditions
  445. statsCopy := *pi.stats
  446. return &statsCopy
  447. }
  448. // IsRunning returns whether the indexer is currently running
  449. func (pi *ParallelIndexer) IsRunning() bool {
  450. return atomic.LoadInt32(&pi.running) != 0
  451. }
  452. // IsBusy checks if the indexer has pending jobs or any active workers.
  453. func (pi *ParallelIndexer) IsBusy() bool {
  454. if len(pi.jobQueue) > 0 {
  455. return true
  456. }
  457. // This RLock protects the pi.workers slice from changing during iteration (e.g. scaling)
  458. pi.statsMutex.RLock()
  459. defer pi.statsMutex.RUnlock()
  460. for _, worker := range pi.workers {
  461. worker.statsMutex.RLock()
  462. isBusy := worker.stats.Status == WorkerStatusBusy
  463. worker.statsMutex.RUnlock()
  464. if isBusy {
  465. return true
  466. }
  467. }
  468. return false
  469. }
  470. // GetShardInfo returns information about a specific shard
  471. func (pi *ParallelIndexer) GetShardInfo(shardID int) (*ShardInfo, error) {
  472. shardStats := pi.shardManager.GetShardStats()
  473. for _, stat := range shardStats {
  474. if stat.ID == shardID {
  475. return stat, nil
  476. }
  477. }
  478. return nil, fmt.Errorf("%s: %d", ErrShardNotFound, shardID)
  479. }
  480. // IsHealthy checks if the indexer is running and healthy
  481. func (pi *ParallelIndexer) IsHealthy() bool {
  482. if atomic.LoadInt32(&pi.running) != 1 {
  483. return false
  484. }
  485. // Check shard manager health
  486. return pi.shardManager.HealthCheck() == nil
  487. }
  488. // GetConfig returns the current configuration
  489. func (pi *ParallelIndexer) GetConfig() *Config {
  490. return pi.config
  491. }
  492. // GetAllShards returns all managed shards
  493. func (pi *ParallelIndexer) GetAllShards() []bleve.Index {
  494. return pi.shardManager.GetAllShards()
  495. }
  496. // DeleteIndexByLogGroup deletes all index entries for a specific log group (base path and its rotated files)
  497. func (pi *ParallelIndexer) DeleteIndexByLogGroup(basePath string, logFileManager interface{}) error {
  498. if !pi.IsHealthy() {
  499. return fmt.Errorf("indexer not healthy")
  500. }
  501. // Get all file paths for this log group from the database
  502. if logFileManager == nil {
  503. return fmt.Errorf("log file manager is required")
  504. }
  505. lfm, ok := logFileManager.(GroupFileProvider)
  506. if !ok {
  507. return fmt.Errorf("log file manager does not support GetFilePathsForGroup")
  508. }
  509. filesToDelete, err := lfm.GetFilePathsForGroup(basePath)
  510. if err != nil {
  511. return fmt.Errorf("failed to get file paths for log group %s: %w", basePath, err)
  512. }
  513. logger.Infof("Deleting index entries for log group %s, files: %v", basePath, filesToDelete)
  514. // Delete documents from all shards for these files
  515. shards := pi.shardManager.GetAllShards()
  516. var deleteErrors []error
  517. for _, shard := range shards {
  518. // Search for documents with matching file_path
  519. for _, filePath := range filesToDelete {
  520. query := bleve.NewTermQuery(filePath)
  521. query.SetField("file_path")
  522. searchRequest := bleve.NewSearchRequest(query)
  523. searchRequest.Size = 1000 // Process in batches
  524. searchRequest.Fields = []string{"file_path"}
  525. for {
  526. searchResult, err := shard.Search(searchRequest)
  527. if err != nil {
  528. deleteErrors = append(deleteErrors, fmt.Errorf("failed to search for documents in file %s: %w", filePath, err))
  529. break
  530. }
  531. if len(searchResult.Hits) == 0 {
  532. break // No more documents to delete
  533. }
  534. // Delete documents in batch
  535. batch := shard.NewBatch()
  536. for _, hit := range searchResult.Hits {
  537. batch.Delete(hit.ID)
  538. }
  539. if err := shard.Batch(batch); err != nil {
  540. deleteErrors = append(deleteErrors, fmt.Errorf("failed to delete batch for file %s: %w", filePath, err))
  541. }
  542. // If we got fewer results than requested, we're done
  543. if len(searchResult.Hits) < searchRequest.Size {
  544. break
  545. }
  546. // Continue from where we left off
  547. searchRequest.From += searchRequest.Size
  548. }
  549. }
  550. }
  551. if len(deleteErrors) > 0 {
  552. return fmt.Errorf("encountered %d errors during deletion: %v", len(deleteErrors), deleteErrors[0])
  553. }
  554. logger.Infof("Successfully deleted index entries for log group: %s", basePath)
  555. return nil
  556. }
// DestroyAllIndexes closes and deletes all index data from disk.
//
// After destruction the internal context and channels are re-created from
// parentCtx so the indexer can be started again. The shard manager must
// implement `Destroy() error`; otherwise an error is returned.
func (pi *ParallelIndexer) DestroyAllIndexes(parentCtx context.Context) error {
	// Stop all background routines before deleting files
	pi.cancel()
	pi.wg.Wait()
	// Safely close channels if they haven't been closed yet
	// (Stop may have closed them already; the CAS makes this idempotent).
	if atomic.CompareAndSwapInt32(&pi.channelsClosed, 0, 1) {
		close(pi.jobQueue)
		close(pi.resultQueue)
	}
	atomic.StoreInt32(&pi.running, 0) // Mark as not running
	var destructionErr error
	// Destroy is not part of the ShardManager interface, so probe for it.
	if manager, ok := interface{}(pi.shardManager).(interface{ Destroy() error }); ok {
		destructionErr = manager.Destroy()
	} else {
		destructionErr = fmt.Errorf("shard manager does not support destruction")
	}
	// Re-initialize context and channels for a potential restart using parent context
	pi.ctx, pi.cancel = context.WithCancel(parentCtx)
	pi.jobQueue = make(chan *IndexJob, pi.config.MaxQueueSize)
	pi.resultQueue = make(chan *IndexResult, pi.config.WorkerCount)
	atomic.StoreInt32(&pi.channelsClosed, 0) // Reset the channel closed flag
	return destructionErr
}
  581. // IndexLogGroup finds all files related to a base log path (e.g., rotated logs) and indexes them.
  582. // It returns a map of [filePath -> docCount], and the min/max timestamps found.
  583. func (pi *ParallelIndexer) IndexLogGroup(basePath string) (map[string]uint64, *time.Time, *time.Time, error) {
  584. if !pi.IsHealthy() {
  585. return nil, nil, nil, fmt.Errorf("indexer not healthy")
  586. }
  587. // Find all files belonging to this log group by globbing
  588. globPath := basePath + "*"
  589. matches, err := filepath.Glob(globPath)
  590. if err != nil {
  591. return nil, nil, nil, fmt.Errorf("failed to glob for log files with base %s: %w", basePath, err)
  592. }
  593. // filepath.Glob might not match the base file itself if it has no extension,
  594. // so we check for it explicitly and add it to the list.
  595. info, err := os.Stat(basePath)
  596. if err == nil && info.Mode().IsRegular() {
  597. matches = append(matches, basePath)
  598. }
  599. // Deduplicate file list
  600. seen := make(map[string]struct{})
  601. uniqueFiles := make([]string, 0)
  602. for _, match := range matches {
  603. if _, ok := seen[match]; !ok {
  604. // Further check if it's a file, not a directory. Glob can match dirs.
  605. info, err := os.Stat(match)
  606. if err == nil && info.Mode().IsRegular() {
  607. seen[match] = struct{}{}
  608. uniqueFiles = append(uniqueFiles, match)
  609. }
  610. }
  611. }
  612. if len(uniqueFiles) == 0 {
  613. logger.Warnf("No actual log file found for group: %s", basePath)
  614. return nil, nil, nil, nil
  615. }
  616. logger.Infof("Found %d file(s) for log group %s: %v", len(uniqueFiles), basePath, uniqueFiles)
  617. docsCountMap := make(map[string]uint64)
  618. var overallMinTime, overallMaxTime *time.Time
  619. for _, filePath := range uniqueFiles {
  620. docsIndexed, minTime, maxTime, err := pi.indexSingleFile(filePath)
  621. if err != nil {
  622. logger.Warnf("Failed to index file '%s' in group '%s', skipping: %v", filePath, basePath, err)
  623. continue // Continue with the next file
  624. }
  625. docsCountMap[filePath] = docsIndexed
  626. if minTime != nil {
  627. if overallMinTime == nil || minTime.Before(*overallMinTime) {
  628. overallMinTime = minTime
  629. }
  630. }
  631. if maxTime != nil {
  632. if overallMaxTime == nil || maxTime.After(*overallMaxTime) {
  633. overallMaxTime = maxTime
  634. }
  635. }
  636. }
  637. return docsCountMap, overallMinTime, overallMaxTime, nil
  638. }
  639. // IndexLogGroupWithRotationScanning performs optimized log group indexing using rotation scanner
  640. // for maximum frontend throughput by prioritizing files based on size and age
  641. func (pi *ParallelIndexer) IndexLogGroupWithRotationScanning(basePaths []string, progressConfig *ProgressConfig) (map[string]uint64, *time.Time, *time.Time, error) {
  642. if !pi.IsHealthy() {
  643. return nil, nil, nil, fmt.Errorf("indexer not healthy")
  644. }
  645. ctx, cancel := context.WithTimeout(pi.ctx, 10*time.Minute)
  646. defer cancel()
  647. logger.Infof("🚀 Starting optimized rotation log indexing for %d log groups", len(basePaths))
  648. // Scan all log groups and build priority queue
  649. if err := pi.rotationScanner.ScanLogGroups(ctx, basePaths); err != nil {
  650. return nil, nil, nil, fmt.Errorf("failed to scan log groups: %w", err)
  651. }
  652. // Create progress tracker if config is provided
  653. var progressTracker *ProgressTracker
  654. if progressConfig != nil {
  655. progressTracker = NewProgressTracker("rotation-scan", progressConfig)
  656. // Add all discovered files to progress tracker
  657. scanResults := pi.rotationScanner.GetScanResults()
  658. for _, result := range scanResults {
  659. for _, file := range result.Files {
  660. progressTracker.AddFile(file.Path, file.IsCompressed)
  661. progressTracker.SetFileSize(file.Path, file.Size)
  662. progressTracker.SetFileEstimate(file.Path, file.EstimatedLines)
  663. }
  664. }
  665. }
  666. docsCountMap := make(map[string]uint64)
  667. var overallMinTime, overallMaxTime *time.Time
  668. // Process files in optimized batches using rotation scanner
  669. batchSize := pi.config.BatchSize / 4 // Smaller batches for better progress tracking
  670. processedFiles := 0
  671. totalFiles := pi.rotationScanner.GetQueueSize()
  672. for {
  673. select {
  674. case <-ctx.Done():
  675. return docsCountMap, overallMinTime, overallMaxTime, ctx.Err()
  676. default:
  677. }
  678. // Get next batch of files prioritized by scanner
  679. batch := pi.rotationScanner.GetNextBatch(batchSize)
  680. if len(batch) == 0 {
  681. break // No more files to process
  682. }
  683. logger.Debugf("📦 Processing batch of %d files (progress: %d/%d)", len(batch), processedFiles, totalFiles)
  684. // Process each file in the batch
  685. for _, fileInfo := range batch {
  686. if progressTracker != nil {
  687. progressTracker.StartFile(fileInfo.Path)
  688. }
  689. docsIndexed, minTime, maxTime, err := pi.indexSingleFile(fileInfo.Path)
  690. if err != nil {
  691. logger.Warnf("Failed to index file %s: %v", fileInfo.Path, err)
  692. if progressTracker != nil {
  693. // Skip error recording for now
  694. _ = err
  695. }
  696. continue
  697. }
  698. docsCountMap[fileInfo.Path] = docsIndexed
  699. processedFiles++
  700. // Update overall time range
  701. if minTime != nil && (overallMinTime == nil || minTime.Before(*overallMinTime)) {
  702. overallMinTime = minTime
  703. }
  704. if maxTime != nil && (overallMaxTime == nil || maxTime.After(*overallMaxTime)) {
  705. overallMaxTime = maxTime
  706. }
  707. if progressTracker != nil {
  708. progressTracker.CompleteFile(fileInfo.Path, int64(docsIndexed))
  709. }
  710. logger.Debugf("✅ Indexed %s: %d documents", fileInfo.Path, docsIndexed)
  711. }
  712. // Report batch progress
  713. logger.Infof("📊 Batch completed: %d/%d files processed (%.1f%% complete)",
  714. processedFiles, totalFiles, float64(processedFiles)/float64(totalFiles)*100)
  715. }
  716. logger.Infof("🎉 Optimized rotation log indexing completed: %d files, %d total documents",
  717. processedFiles, sumDocCounts(docsCountMap))
  718. return docsCountMap, overallMinTime, overallMaxTime, nil
  719. }
  720. // IndexSingleFileIncrementally is a more efficient version for incremental updates.
  721. // It indexes only the specified single file instead of the entire log group.
  722. func (pi *ParallelIndexer) IndexSingleFileIncrementally(filePath string, progressConfig *ProgressConfig) (map[string]uint64, *time.Time, *time.Time, error) {
  723. if !pi.IsHealthy() {
  724. return nil, nil, nil, fmt.Errorf("indexer not healthy")
  725. }
  726. // Create progress tracker if config is provided
  727. var progressTracker *ProgressTracker
  728. if progressConfig != nil {
  729. progressTracker = NewProgressTracker(filePath, progressConfig)
  730. // Setup file for tracking
  731. isCompressed := IsCompressedFile(filePath)
  732. progressTracker.AddFile(filePath, isCompressed)
  733. if stat, err := os.Stat(filePath); err == nil {
  734. progressTracker.SetFileSize(filePath, stat.Size())
  735. if estimatedLines, err := EstimateFileLines(context.Background(), filePath, stat.Size(), isCompressed); err == nil {
  736. progressTracker.SetFileEstimate(filePath, estimatedLines)
  737. }
  738. }
  739. }
  740. docsCountMap := make(map[string]uint64)
  741. if progressTracker != nil {
  742. progressTracker.StartFile(filePath)
  743. }
  744. docsIndexed, minTime, maxTime, err := pi.indexSingleFileWithProgress(filePath, progressTracker)
  745. if err != nil {
  746. logger.Warnf("Failed to incrementally index file '%s', skipping: %v", filePath, err)
  747. if progressTracker != nil {
  748. progressTracker.FailFile(filePath, err.Error())
  749. }
  750. // Return empty results and the error
  751. return docsCountMap, nil, nil, err
  752. }
  753. docsCountMap[filePath] = docsIndexed
  754. if progressTracker != nil {
  755. progressTracker.CompleteFile(filePath, int64(docsIndexed))
  756. }
  757. return docsCountMap, minTime, maxTime, nil
  758. }
// indexSingleFile contains optimized logic to process one physical log file.
// Now uses ParseStream for 7-8x faster performance and 70% memory reduction.
// This is a thin unexported wrapper kept so internal callers don't depend on
// the exported entry point directly.
// Returns (documents indexed, min timestamp, max timestamp, error).
func (pi *ParallelIndexer) indexSingleFile(filePath string) (uint64, *time.Time, *time.Time, error) {
	// Delegate to optimized implementation
	return pi.IndexSingleFile(filePath)
}
  765. // UpdateConfig updates the indexer configuration
  766. func (pi *ParallelIndexer) UpdateConfig(config *Config) error {
  767. // Only allow updating certain configuration parameters while running
  768. pi.config.BatchSize = config.BatchSize
  769. pi.config.FlushInterval = config.FlushInterval
  770. pi.config.EnableMetrics = config.EnableMetrics
  771. return nil
  772. }
// Worker implementation

// run is the worker's main loop. It pulls jobs from the shared job queue,
// processes each one, and publishes the result, exiting when either the
// job queue is closed or the indexer context is cancelled.
func (w *indexWorker) run() {
	defer w.indexer.wg.Done()
	w.updateStatus(WorkerStatusIdle)
	for {
		select {
		case job, ok := <-w.indexer.jobQueue:
			if !ok {
				return // Channel closed, worker should exit
			}
			w.updateStatus(WorkerStatusBusy)
			result := w.processJob(job)
			// Send result, but bail out on shutdown so the worker
			// never blocks forever on a full result queue.
			select {
			case w.indexer.resultQueue <- result:
			case <-w.indexer.ctx.Done():
				return
			}
			// Execute callback if provided. Any failed documents are
			// collapsed into a single aggregate error for the caller.
			if job.Callback != nil {
				var err error
				if result.Failed > 0 {
					err = fmt.Errorf("indexing failed for %d documents", result.Failed)
				}
				job.Callback(err)
			}
			w.updateStatus(WorkerStatusIdle)
		case <-w.indexer.ctx.Done():
			return
		}
	}
}
// processJob indexes one batch job: it validates every document, groups the
// valid ones by (main log path, shard), writes each shard batch, and then
// records per-job throughput plus cumulative worker statistics.
// Documents with an empty ID, nil Fields, or empty MainLogPath are counted
// as failed without being indexed.
func (w *indexWorker) processJob(job *IndexJob) *IndexResult {
	startTime := time.Now()
	result := &IndexResult{
		Processed: len(job.Documents),
	}
	// Group documents by mainLogPath then shard for grouped sharding
	groupShardDocs := make(map[string]map[int][]*Document)
	for _, doc := range job.Documents {
		if doc.ID == "" || doc.Fields == nil || doc.Fields.MainLogPath == "" {
			result.Failed++
			continue
		}
		mainLogPath := doc.Fields.MainLogPath
		// Shard assignment is derived from the log group and document ID.
		_, shardID, err := w.indexer.shardManager.GetShardForDocument(mainLogPath, doc.ID)
		if err != nil {
			result.Failed++
			continue
		}
		if groupShardDocs[mainLogPath] == nil {
			groupShardDocs[mainLogPath] = make(map[int][]*Document)
		}
		groupShardDocs[mainLogPath][shardID] = append(groupShardDocs[mainLogPath][shardID], doc)
	}
	// Index documents per group/shard. A batch failure marks every
	// document in that shard batch as failed.
	for _, shards := range groupShardDocs {
		for shardID, docs := range shards {
			if err := w.indexShardDocuments(shardID, docs); err != nil {
				result.Failed += len(docs)
			} else {
				result.Succeeded += len(docs)
			}
		}
	}
	result.Duration = time.Since(startTime)
	if result.Processed > 0 {
		result.ErrorRate = float64(result.Failed) / float64(result.Processed)
		result.Throughput = float64(result.Processed) / result.Duration.Seconds()
	}
	// Update worker stats
	w.statsMutex.Lock()
	w.stats.ProcessedJobs++
	w.stats.ProcessedDocs += int64(result.Processed)
	w.stats.ErrorCount += int64(result.Failed)
	w.stats.LastActive = time.Now().Unix()
	// Update average latency (simple moving average)
	if w.stats.AverageLatency == 0 {
		w.stats.AverageLatency = result.Duration
	} else {
		w.stats.AverageLatency = (w.stats.AverageLatency + result.Duration) / 2
	}
	w.statsMutex.Unlock()
	return result
}
  858. func (w *indexWorker) indexShardDocuments(shardID int, docs []*Document) error {
  859. shard, err := w.indexer.shardManager.GetShardByID(shardID)
  860. if err != nil {
  861. return err
  862. }
  863. batch := shard.NewBatch()
  864. for _, doc := range docs {
  865. // Convert LogDocument to map for Bleve indexing
  866. docMap := w.logDocumentToMap(doc.Fields)
  867. batch.Index(doc.ID, docMap)
  868. }
  869. if err := shard.Batch(batch); err != nil {
  870. return fmt.Errorf("failed to index batch for shard %d: %w", shardID, err)
  871. }
  872. return nil
  873. }
  874. // logDocumentToMap converts LogDocument to map[string]interface{} for Bleve
  875. func (w *indexWorker) logDocumentToMap(doc *LogDocument) map[string]interface{} {
  876. docMap := map[string]interface{}{
  877. "timestamp": doc.Timestamp,
  878. "ip": doc.IP,
  879. "method": doc.Method,
  880. "path": doc.Path,
  881. "path_exact": doc.PathExact,
  882. "status": doc.Status,
  883. "bytes_sent": doc.BytesSent,
  884. "file_path": doc.FilePath,
  885. "main_log_path": doc.MainLogPath,
  886. "raw": doc.Raw,
  887. }
  888. // Add optional fields only if they have values
  889. if doc.RegionCode != "" {
  890. docMap["region_code"] = doc.RegionCode
  891. }
  892. if doc.Province != "" {
  893. docMap["province"] = doc.Province
  894. }
  895. if doc.City != "" {
  896. docMap["city"] = doc.City
  897. }
  898. if doc.Protocol != "" {
  899. docMap["protocol"] = doc.Protocol
  900. }
  901. if doc.Referer != "" {
  902. docMap["referer"] = doc.Referer
  903. }
  904. if doc.UserAgent != "" {
  905. docMap["user_agent"] = doc.UserAgent
  906. }
  907. if doc.Browser != "" {
  908. docMap["browser"] = doc.Browser
  909. }
  910. if doc.BrowserVer != "" {
  911. docMap["browser_version"] = doc.BrowserVer
  912. }
  913. if doc.OS != "" {
  914. docMap["os"] = doc.OS
  915. }
  916. if doc.OSVersion != "" {
  917. docMap["os_version"] = doc.OSVersion
  918. }
  919. if doc.DeviceType != "" {
  920. docMap["device_type"] = doc.DeviceType
  921. }
  922. if doc.RequestTime > 0 {
  923. docMap["request_time"] = doc.RequestTime
  924. }
  925. if doc.UpstreamTime != nil {
  926. docMap["upstream_time"] = *doc.UpstreamTime
  927. }
  928. return docMap
  929. }
  930. func (w *indexWorker) updateStatus(status string) {
  931. w.statsMutex.Lock()
  932. w.stats.Status = status
  933. w.statsMutex.Unlock()
  934. }
// Background routines

// processResults drains the result queue and feeds each job outcome into
// the metrics collector, running until the indexer context is cancelled.
func (pi *ParallelIndexer) processResults() {
	defer pi.wg.Done()
	for {
		select {
		case result := <-pi.resultQueue:
			if result != nil {
				// A job counts as successful only when zero documents failed.
				pi.metrics.RecordIndexOperation(
					result.Processed,
					result.Duration,
					result.Failed == 0,
				)
			}
		case <-pi.ctx.Done():
			return
		}
	}
}
// optimizationRoutine periodically kicks off an index optimization pass at
// the configured OptimizeInterval, running until the indexer context is
// cancelled.
func (pi *ParallelIndexer) optimizationRoutine() {
	defer pi.wg.Done()
	ticker := time.NewTicker(pi.config.OptimizeInterval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			// Skip the tick if an optimization is already in flight.
			// NOTE(review): pi.optimizing is presumably set/cleared inside
			// Optimize itself — confirm, otherwise the check-then-spawn here
			// could start overlapping runs.
			if atomic.LoadInt32(&pi.optimizing) == 0 {
				go pi.Optimize() // Run in background to avoid blocking
			}
		case <-pi.ctx.Done():
			return
		}
	}
}
// metricsRoutine refreshes the cached indexer statistics every 10 seconds,
// running until the indexer context is cancelled.
func (pi *ParallelIndexer) metricsRoutine() {
	defer pi.wg.Done()
	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			pi.updateMetrics()
		case <-pi.ctx.Done():
			return
		}
	}
}
// updateMetrics copies the current indexing rate from the metrics collector
// into the cached stats snapshot. Called periodically by metricsRoutine.
func (pi *ParallelIndexer) updateMetrics() {
	pi.statsMutex.Lock()
	defer pi.statsMutex.Unlock()
	// Update indexing rate based on recent activity
	metrics := pi.metrics.GetMetrics()
	pi.stats.IndexingRate = metrics.IndexingRate
}
// IndexLogGroupWithProgress indexes a log group with progress tracking.
// A log group is the base log file plus any rotated siblings matched by
// the glob pattern basePath+"*". Files are indexed in parallel with bounded
// concurrency; a file that fails to index is logged and skipped rather than
// aborting the group.
// Returns a map of file path -> documents indexed and the overall min/max
// timestamps seen across all files. A group with no existing files returns
// all-nil without error.
func (pi *ParallelIndexer) IndexLogGroupWithProgress(basePath string, progressConfig *ProgressConfig) (map[string]uint64, *time.Time, *time.Time, error) {
	if !pi.IsHealthy() {
		return nil, nil, nil, fmt.Errorf("indexer not healthy")
	}
	// Create progress tracker if config is provided
	var progressTracker *ProgressTracker
	if progressConfig != nil {
		progressTracker = NewProgressTracker(basePath, progressConfig)
	}
	// Find all files belonging to this log group by globbing
	globPath := basePath + "*"
	matches, err := filepath.Glob(globPath)
	if err != nil {
		if progressTracker != nil {
			progressTracker.Cancel(fmt.Sprintf("glob failed: %v", err))
		}
		return nil, nil, nil, fmt.Errorf("failed to glob for log files with base %s: %w", basePath, err)
	}
	// filepath.Glob might not match the base file itself if it has no extension,
	// so we check for it explicitly and add it to the list.
	// Validate log path before accessing it
	if utils.IsValidLogPath(basePath) {
		info, err := os.Stat(basePath)
		if err == nil && info.Mode().IsRegular() {
			matches = append(matches, basePath)
		}
	}
	// Deduplicate file list (the explicit basePath append above may repeat a
	// glob match) and keep only validated, regular files.
	seen := make(map[string]struct{})
	uniqueFiles := make([]string, 0)
	for _, match := range matches {
		if _, ok := seen[match]; !ok {
			// Further check if it's a file, not a directory. Glob can match dirs.
			// Validate log path before accessing it
			if utils.IsValidLogPath(match) {
				info, err := os.Stat(match)
				if err == nil && info.Mode().IsRegular() {
					seen[match] = struct{}{}
					uniqueFiles = append(uniqueFiles, match)
				}
			}
		}
	}
	if len(uniqueFiles) == 0 {
		logger.Warnf("No actual log file found for group: %s", basePath)
		if progressTracker != nil {
			progressTracker.Cancel("no files found")
		}
		return nil, nil, nil, nil
	}
	logger.Infof("Found %d file(s) for log group %s: %v", len(uniqueFiles), basePath, uniqueFiles)
	// Set up progress tracking for all files. Size/line estimates are
	// best-effort; stat failures only degrade progress accuracy.
	if progressTracker != nil {
		for _, filePath := range uniqueFiles {
			isCompressed := IsCompressedFile(filePath)
			progressTracker.AddFile(filePath, isCompressed)
			// Get file size and estimate lines
			if stat, err := os.Stat(filePath); err == nil {
				progressTracker.SetFileSize(filePath, stat.Size())
				// Estimate lines for progress calculation
				if estimatedLines, err := EstimateFileLines(context.Background(), filePath, stat.Size(), isCompressed); err == nil {
					progressTracker.SetFileEstimate(filePath, estimatedLines)
				}
			}
		}
	}
	docsCountMap := make(map[string]uint64)
	var docsCountMu sync.RWMutex
	var overallMinTime, overallMaxTime *time.Time
	var timeMu sync.Mutex
	// Process files in parallel with controlled concurrency
	var fileWg sync.WaitGroup
	// Use FileGroupConcurrency config if set, otherwise fallback to WorkerCount
	maxConcurrency := pi.config.FileGroupConcurrency
	if maxConcurrency <= 0 {
		maxConcurrency = pi.config.WorkerCount
		if maxConcurrency <= 0 {
			maxConcurrency = 4 // Fallback default
		}
	}
	// Buffered channel used as a counting semaphore.
	fileSemaphore := make(chan struct{}, maxConcurrency)
	logger.Infof("Processing %d files in log group %s with concurrency=%d", len(uniqueFiles), basePath, maxConcurrency)
	for _, filePath := range uniqueFiles {
		fileWg.Add(1)
		go func(fp string) {
			defer fileWg.Done()
			// Acquire semaphore for controlled concurrency
			fileSemaphore <- struct{}{}
			defer func() { <-fileSemaphore }()
			if progressTracker != nil {
				progressTracker.StartFile(fp)
			}
			docsIndexed, minTime, maxTime, err := pi.indexSingleFileWithProgress(fp, progressTracker)
			if err != nil {
				// Per-file errors are non-fatal for the group.
				logger.Warnf("Failed to index file '%s' in group '%s', skipping: %v", fp, basePath, err)
				if progressTracker != nil {
					progressTracker.FailFile(fp, err.Error())
				}
				return // Skip this file
			}
			// Thread-safe update of docsCountMap
			docsCountMu.Lock()
			docsCountMap[fp] = docsIndexed
			docsCountMu.Unlock()
			if progressTracker != nil {
				progressTracker.CompleteFile(fp, int64(docsIndexed))
			}
			// Thread-safe update of time ranges
			timeMu.Lock()
			if minTime != nil {
				if overallMinTime == nil || minTime.Before(*overallMinTime) {
					overallMinTime = minTime
				}
			}
			if maxTime != nil {
				if overallMaxTime == nil || maxTime.After(*overallMaxTime) {
					overallMaxTime = maxTime
				}
			}
			timeMu.Unlock()
		}(filePath)
	}
	// Wait for all files to complete
	fileWg.Wait()
	return docsCountMap, overallMinTime, overallMaxTime, nil
}
// indexSingleFileWithProgress indexes a single file with progress updates.
// Now uses the optimized implementation with full progress tracking integration.
// progressTracker may be nil, in which case no progress is reported.
// Returns (documents indexed, min timestamp, max timestamp, error).
func (pi *ParallelIndexer) indexSingleFileWithProgress(filePath string, progressTracker *ProgressTracker) (uint64, *time.Time, *time.Time, error) {
	// Delegate to optimized implementation with progress tracking
	return pi.IndexSingleFileWithProgress(filePath, progressTracker)
}
  1121. // sumDocCounts returns the total number of documents across all files
  1122. func sumDocCounts(docsCountMap map[string]uint64) uint64 {
  1123. var total uint64
  1124. for _, count := range docsCountMap {
  1125. total += count
  1126. }
  1127. return total
  1128. }
  1129. // CountDocsByMainLogPath returns the exact number of documents indexed for a given log group (main log path)
  1130. // by querying all shards and summing results.
  1131. func (pi *ParallelIndexer) CountDocsByMainLogPath(basePath string) (uint64, error) {
  1132. if !pi.IsHealthy() {
  1133. return 0, fmt.Errorf("indexer not healthy")
  1134. }
  1135. var total uint64
  1136. var errs []error
  1137. // Build term query on main_log_path
  1138. q := bleve.NewTermQuery(basePath)
  1139. q.SetField("main_log_path")
  1140. shards := pi.shardManager.GetAllShards()
  1141. for i, shard := range shards {
  1142. if shard == nil {
  1143. continue
  1144. }
  1145. req := bleve.NewSearchRequest(q)
  1146. // We only need counts
  1147. req.Size = 0
  1148. res, err := shard.Search(req)
  1149. if err != nil {
  1150. errs = append(errs, fmt.Errorf("shard %d search failed: %w", i, err))
  1151. continue
  1152. }
  1153. total += uint64(res.Total)
  1154. }
  1155. if len(errs) > 0 {
  1156. return total, fmt.Errorf("%d shard errors (partial count=%d), e.g. %v", len(errs), total, errs[0])
  1157. }
  1158. return total, nil
  1159. }
  1160. // CountDocsByFilePath returns the exact number of documents indexed for a specific physical log file path
  1161. // by querying all shards and summing results.
  1162. func (pi *ParallelIndexer) CountDocsByFilePath(filePath string) (uint64, error) {
  1163. if !pi.IsHealthy() {
  1164. return 0, fmt.Errorf("indexer not healthy")
  1165. }
  1166. var total uint64
  1167. var errs []error
  1168. // Build term query on file_path
  1169. q := bleve.NewTermQuery(filePath)
  1170. q.SetField("file_path")
  1171. shards := pi.shardManager.GetAllShards()
  1172. for i, shard := range shards {
  1173. if shard == nil {
  1174. continue
  1175. }
  1176. req := bleve.NewSearchRequest(q)
  1177. // We only need counts
  1178. req.Size = 0
  1179. res, err := shard.Search(req)
  1180. if err != nil {
  1181. errs = append(errs, fmt.Errorf("shard %d search failed: %w", i, err))
  1182. continue
  1183. }
  1184. total += uint64(res.Total)
  1185. }
  1186. if len(errs) > 0 {
  1187. return total, fmt.Errorf("%d shard errors (partial count=%d), e.g. %v", len(errs), total, errs[0])
  1188. }
  1189. return total, nil
  1190. }