parallel_indexer.go

package indexer

import (
	"bufio"
	"compress/gzip"
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/blevesearch/bleve/v2"
	"github.com/uozi-tech/cosy/logger"

	"github.com/0xJacky/Nginx-UI/internal/nginx_log/utils"
)
// ParallelIndexer provides high-performance parallel indexing with sharding
type ParallelIndexer struct {
	config       *Config
	shardManager ShardManager
	metrics      MetricsCollector

	// Worker management
	workers     []*indexWorker
	jobQueue    chan *IndexJob
	resultQueue chan *IndexResult

	// State management
	ctx     context.Context
	cancel  context.CancelFunc
	wg      sync.WaitGroup
	running int32

	// Cleanup control
	stopOnce       sync.Once
	channelsClosed int32

	// Statistics
	stats      *IndexStats
	statsMutex sync.RWMutex

	// Optimization
	lastOptimized int64
	optimizing    int32
}

// indexWorker represents a single indexing worker
type indexWorker struct {
	id         int
	indexer    *ParallelIndexer
	stats      *WorkerStats
	statsMutex sync.RWMutex
}
// NewParallelIndexer creates a new parallel indexer
func NewParallelIndexer(config *Config, shardManager ShardManager) *ParallelIndexer {
	if config == nil {
		config = DefaultIndexerConfig()
	}

	ctx, cancel := context.WithCancel(context.Background())

	indexer := &ParallelIndexer{
		config:       config,
		shardManager: shardManager,
		metrics:      NewDefaultMetricsCollector(),
		jobQueue:     make(chan *IndexJob, config.MaxQueueSize),
		resultQueue:  make(chan *IndexResult, config.WorkerCount),
		ctx:          ctx,
		cancel:       cancel,
		stats: &IndexStats{
			WorkerStats: make([]*WorkerStats, config.WorkerCount),
		},
	}

	// Initialize workers
	indexer.workers = make([]*indexWorker, config.WorkerCount)
	for i := 0; i < config.WorkerCount; i++ {
		indexer.workers[i] = &indexWorker{
			id:      i,
			indexer: indexer,
			stats: &WorkerStats{
				ID:     i,
				Status: WorkerStatusIdle,
			},
		}
		indexer.stats.WorkerStats[i] = indexer.workers[i].stats
	}

	return indexer
}
// Start begins the indexer operation
func (pi *ParallelIndexer) Start(ctx context.Context) error {
	if !atomic.CompareAndSwapInt32(&pi.running, 0, 1) {
		return fmt.Errorf("indexer already started")
	}

	// Initialize shard manager
	if err := pi.shardManager.Initialize(); err != nil {
		atomic.StoreInt32(&pi.running, 0)
		return fmt.Errorf("failed to initialize shard manager: %w", err)
	}

	// Start workers
	for _, worker := range pi.workers {
		pi.wg.Add(1)
		go worker.run()
	}

	// Start result processor
	pi.wg.Add(1)
	go pi.processResults()

	// Start optimization routine if enabled
	if pi.config.OptimizeInterval > 0 {
		pi.wg.Add(1)
		go pi.optimizationRoutine()
	}

	// Start metrics collection if enabled
	if pi.config.EnableMetrics {
		pi.wg.Add(1)
		go pi.metricsRoutine()
	}

	return nil
}
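
// exampleStartAndIndex is an illustrative sketch (not called anywhere in this package) of the
// intended lifecycle: construct the indexer, Start it, submit parsed log lines, flush, and stop.
// The ShardManager is assumed to be supplied by the caller; the document IDs are placeholder
// example values, not the scheme used elsewhere in this file.
func exampleStartAndIndex(ctx context.Context, sm ShardManager, lines []string) error {
	idx := NewParallelIndexer(DefaultIndexerConfig(), sm)
	if err := idx.Start(ctx); err != nil {
		return err
	}
	defer idx.Stop()

	docs := make([]*Document, 0, len(lines))
	for i, line := range lines {
		logDoc, err := ParseLogLine(line)
		if err != nil {
			continue // skip unparsable lines, mirroring IndexLogFile
		}
		docs = append(docs, &Document{ID: fmt.Sprintf("example-%d", i), Fields: logDoc})
	}
	if err := idx.IndexDocuments(ctx, docs); err != nil {
		return err
	}
	return idx.FlushAll()
}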
// Stop gracefully stops the indexer
func (pi *ParallelIndexer) Stop() error {
	var stopErr error
	pi.stopOnce.Do(func() {
		// Set running to 0
		if !atomic.CompareAndSwapInt32(&pi.running, 1, 0) {
			logger.Warnf("[ParallelIndexer] Stop called but indexer already stopped")
			stopErr = fmt.Errorf("indexer already stopped")
			return
		}

		// Cancel context to stop all routines
		pi.cancel()

		// Close channels safely if they haven't been closed yet
		if atomic.CompareAndSwapInt32(&pi.channelsClosed, 0, 1) {
			// Close job queue to stop accepting new jobs
			close(pi.jobQueue)
			// Wait for all workers to finish
			pi.wg.Wait()
			// Close result queue
			close(pi.resultQueue)
		} else {
			// If channels are already closed, just wait for workers
			pi.wg.Wait()
		}

		// Skip flushing during stop - the shards may already be closed by the searcher;
		// FlushAll should be called before Stop() if needed.
		// The shard manager is also not closed here, because the shards might still be
		// in use by the searcher; they are closed when the searcher is stopped.
	})
	return stopErr
}
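
// exampleShutdown is an illustrative sketch of the shutdown order described in the comments
// above: flush pending work first (while the indexer is still running), then stop the workers.
func exampleShutdown(pi *ParallelIndexer) {
	if err := pi.FlushAll(); err != nil {
		logger.Warnf("flush before stop failed: %v", err)
	}
	if err := pi.Stop(); err != nil {
		logger.Warnf("stop failed: %v", err)
	}
}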
// IndexDocument indexes a single document
func (pi *ParallelIndexer) IndexDocument(ctx context.Context, doc *Document) error {
	return pi.IndexDocuments(ctx, []*Document{doc})
}

// IndexDocuments indexes multiple documents
func (pi *ParallelIndexer) IndexDocuments(ctx context.Context, docs []*Document) error {
	if !pi.IsHealthy() {
		return fmt.Errorf("indexer not healthy")
	}
	if len(docs) == 0 {
		return nil
	}

	// Create job
	job := &IndexJob{
		Documents: docs,
		Priority:  PriorityNormal,
	}

	// Submit job and wait for completion
	done := make(chan error, 1)
	job.Callback = func(err error) {
		done <- err
	}

	select {
	case pi.jobQueue <- job:
		select {
		case err := <-done:
			return err
		case <-ctx.Done():
			return ctx.Err()
		}
	case <-ctx.Done():
		return ctx.Err()
	case <-pi.ctx.Done():
		return fmt.Errorf("indexer stopped")
	}
}
// IndexDocumentAsync indexes a document asynchronously
func (pi *ParallelIndexer) IndexDocumentAsync(doc *Document, callback func(error)) {
	pi.IndexDocumentsAsync([]*Document{doc}, callback)
}

// IndexDocumentsAsync indexes multiple documents asynchronously
func (pi *ParallelIndexer) IndexDocumentsAsync(docs []*Document, callback func(error)) {
	if !pi.IsHealthy() {
		if callback != nil {
			callback(fmt.Errorf("indexer not healthy"))
		}
		return
	}
	if len(docs) == 0 {
		if callback != nil {
			callback(nil)
		}
		return
	}

	job := &IndexJob{
		Documents: docs,
		Priority:  PriorityNormal,
		Callback:  callback,
	}

	select {
	case pi.jobQueue <- job:
		// Job queued successfully
	case <-pi.ctx.Done():
		if callback != nil {
			callback(fmt.Errorf("indexer stopped"))
		}
	default:
		// Queue is full
		if callback != nil {
			callback(fmt.Errorf("queue is full"))
		}
	}
}

// StartBatch returns a new batch writer
func (pi *ParallelIndexer) StartBatch() BatchWriterInterface {
	return NewBatchWriter(pi, pi.config.BatchSize)
}
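
// exampleBatchWriter is an illustrative sketch of how StartBatch is meant to be used: Add
// documents one at a time (Add may trigger an internal auto-flush) and call Flush once at the
// end, mirroring the pattern in IndexLogFile below.
func exampleBatchWriter(pi *ParallelIndexer, docs []*Document) error {
	bw := pi.StartBatch()
	for _, doc := range docs {
		if err := bw.Add(doc); err != nil {
			// A failed Add means an auto-flush failed; stop early rather than keep queueing.
			return err
		}
	}
	_, err := bw.Flush()
	return err
}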
// FlushAll flushes all pending operations
func (pi *ParallelIndexer) FlushAll() error {
	// Check if indexer is still running
	if atomic.LoadInt32(&pi.running) != 1 {
		return fmt.Errorf("indexer not running")
	}

	// Get all shards and flush them
	shards := pi.shardManager.GetAllShards()
	var errs []error

	for i, shard := range shards {
		if shard == nil {
			continue
		}

		// Force a flush by indexing and immediately deleting a temporary document
		batch := shard.NewBatch()

		// Use efficient string building instead of fmt.Sprintf
		tempIDBuf := make([]byte, 0, 64)
		tempIDBuf = append(tempIDBuf, "_flush_temp_"...)
		tempIDBuf = utils.AppendInt(tempIDBuf, i)
		tempIDBuf = append(tempIDBuf, '_')
		tempIDBuf = utils.AppendInt(tempIDBuf, int(time.Now().UnixNano()))
		tempID := utils.BytesToStringUnsafe(tempIDBuf)

		batch.Index(tempID, map[string]interface{}{"_temp": true})
		if err := shard.Batch(batch); err != nil {
			errs = append(errs, fmt.Errorf("failed to flush shard %d: %w", i, err))
			continue
		}

		// Delete the temporary document (best effort; the error is intentionally ignored)
		shard.Delete(tempID)
	}

	if len(errs) > 0 {
		return fmt.Errorf("flush errors: %v", errs)
	}
	return nil
}
// Optimize triggers optimization of all shards
func (pi *ParallelIndexer) Optimize() error {
	if !atomic.CompareAndSwapInt32(&pi.optimizing, 0, 1) {
		return fmt.Errorf("optimization already in progress")
	}
	defer atomic.StoreInt32(&pi.optimizing, 0)

	startTime := time.Now()
	stats := pi.shardManager.GetShardStats()
	var errs []error

	for _, stat := range stats {
		if err := pi.shardManager.OptimizeShard(stat.ID); err != nil {
			errs = append(errs, fmt.Errorf("failed to optimize shard %d: %w", stat.ID, err))
		}
	}

	// Update optimization stats
	pi.statsMutex.Lock()
	if pi.stats.OptimizationStats == nil {
		pi.stats.OptimizationStats = &OptimizationStats{}
	}
	pi.stats.OptimizationStats.LastRun = time.Now().Unix()
	pi.stats.OptimizationStats.Duration = time.Since(startTime)
	pi.stats.OptimizationStats.Success = len(errs) == 0
	pi.stats.LastOptimized = time.Now().Unix()
	pi.statsMutex.Unlock()

	atomic.StoreInt64(&pi.lastOptimized, time.Now().Unix())

	// Record optimization metrics (for failed runs as well as successful ones)
	pi.metrics.RecordOptimization(time.Since(startTime), len(errs) == 0)

	if len(errs) > 0 {
		return fmt.Errorf("optimization errors: %v", errs)
	}
	return nil
}
// IndexLogFile reads and indexes a single log file
func (pi *ParallelIndexer) IndexLogFile(filePath string) error {
	if !pi.IsHealthy() {
		return fmt.Errorf("indexer not healthy")
	}

	file, err := os.Open(filePath)
	if err != nil {
		return fmt.Errorf("failed to open log file %s: %w", filePath, err)
	}
	defer file.Close()

	// Use a batch writer for efficient indexing
	batch := pi.StartBatch()
	scanner := bufio.NewScanner(file)
	docCount := 0

	for scanner.Scan() {
		line := scanner.Text()
		if line == "" {
			continue
		}

		// In a real implementation, parse the log line into a structured format.
		// For now, we create a simple document.
		logDoc, err := ParseLogLine(line) // Assuming a parser function exists
		if err != nil {
			logger.Warnf("Skipping line due to parse error in file %s: %v", filePath, err)
			continue
		}
		logDoc.FilePath = filePath

		// Use efficient string building for the document ID
		docIDBuf := make([]byte, 0, len(filePath)+16)
		docIDBuf = append(docIDBuf, filePath...)
		docIDBuf = append(docIDBuf, '-')
		docIDBuf = utils.AppendInt(docIDBuf, docCount)

		doc := &Document{
			ID:     utils.BytesToStringUnsafe(docIDBuf),
			Fields: logDoc,
		}

		if err := batch.Add(doc); err != nil {
			// This indicates an auto-flush occurred and failed.
			// Log the error and stop processing this file to avoid further issues.
			return fmt.Errorf("failed to add document to batch for %s (auto-flush might have failed): %w", filePath, err)
		}
		docCount++
	}

	if err := scanner.Err(); err != nil {
		return fmt.Errorf("error reading log file %s: %w", filePath, err)
	}

	if _, err := batch.Flush(); err != nil {
		return fmt.Errorf("failed to flush batch for %s: %w", filePath, err)
	}
	return nil
}
// GetStats returns current indexer statistics
func (pi *ParallelIndexer) GetStats() *IndexStats {
	// A write lock is required here because the shared stats struct is updated below.
	pi.statsMutex.Lock()
	defer pi.statsMutex.Unlock()

	// Update shard stats
	shardStats := pi.shardManager.GetShardStats()
	pi.stats.Shards = shardStats
	pi.stats.ShardCount = len(shardStats)

	var totalDocs uint64
	var totalSize int64
	for _, shard := range shardStats {
		totalDocs += shard.DocumentCount
		totalSize += shard.Size
	}
	pi.stats.TotalDocuments = totalDocs
	pi.stats.TotalSize = totalSize
	pi.stats.QueueSize = len(pi.jobQueue)

	// Calculate memory usage
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	pi.stats.MemoryUsage = int64(memStats.Alloc)

	// Copy stats to avoid race conditions
	statsCopy := *pi.stats
	return &statsCopy
}
// IsRunning returns whether the indexer is currently running
func (pi *ParallelIndexer) IsRunning() bool {
	return atomic.LoadInt32(&pi.running) != 0
}

// GetShardInfo returns information about a specific shard
func (pi *ParallelIndexer) GetShardInfo(shardID int) (*ShardInfo, error) {
	shardStats := pi.shardManager.GetShardStats()
	for _, stat := range shardStats {
		if stat.ID == shardID {
			return stat, nil
		}
	}
	return nil, fmt.Errorf("%s: %d", ErrShardNotFound, shardID)
}

// IsHealthy checks if the indexer is running and healthy
func (pi *ParallelIndexer) IsHealthy() bool {
	if atomic.LoadInt32(&pi.running) != 1 {
		return false
	}
	// Check shard manager health
	return pi.shardManager.HealthCheck() == nil
}

// GetConfig returns the current configuration
func (pi *ParallelIndexer) GetConfig() *Config {
	return pi.config
}

// GetAllShards returns all managed shards
func (pi *ParallelIndexer) GetAllShards() []bleve.Index {
	return pi.shardManager.GetAllShards()
}
// DeleteIndexByLogGroup deletes all index entries for a specific log group (base path and its rotated files)
func (pi *ParallelIndexer) DeleteIndexByLogGroup(basePath string, logFileManager interface{}) error {
	if !pi.IsHealthy() {
		return fmt.Errorf("indexer not healthy")
	}

	// Get all file paths for this log group from the database
	if logFileManager == nil {
		return fmt.Errorf("log file manager is required")
	}
	lfm, ok := logFileManager.(interface {
		GetFilePathsForGroup(string) ([]string, error)
	})
	if !ok {
		return fmt.Errorf("log file manager does not support GetFilePathsForGroup")
	}

	filesToDelete, err := lfm.GetFilePathsForGroup(basePath)
	if err != nil {
		return fmt.Errorf("failed to get file paths for log group %s: %w", basePath, err)
	}

	logger.Infof("Deleting index entries for log group %s, files: %v", basePath, filesToDelete)

	// Delete documents from all shards for these files
	shards := pi.shardManager.GetAllShards()
	var deleteErrors []error

	for _, shard := range shards {
		// Search for documents with a matching file_path
		for _, filePath := range filesToDelete {
			query := bleve.NewTermQuery(filePath)
			query.SetField("file_path")

			searchRequest := bleve.NewSearchRequest(query)
			searchRequest.Size = 1000 // Process in batches
			searchRequest.Fields = []string{"file_path"}

			for {
				searchResult, err := shard.Search(searchRequest)
				if err != nil {
					deleteErrors = append(deleteErrors, fmt.Errorf("failed to search for documents in file %s: %w", filePath, err))
					break
				}
				if len(searchResult.Hits) == 0 {
					break // No more documents to delete
				}

				// Delete documents in batch
				batch := shard.NewBatch()
				for _, hit := range searchResult.Hits {
					batch.Delete(hit.ID)
				}
				if err := shard.Batch(batch); err != nil {
					deleteErrors = append(deleteErrors, fmt.Errorf("failed to delete batch for file %s: %w", filePath, err))
					break // Stop paging this file to avoid looping on a failing batch
				}

				// If we got fewer results than requested, we're done
				if len(searchResult.Hits) < searchRequest.Size {
					break
				}
				// Deleted documents no longer match the query, so the next search
				// (still starting from offset 0) returns the next batch.
			}
		}
	}

	if len(deleteErrors) > 0 {
		return fmt.Errorf("encountered %d errors during deletion, first error: %v", len(deleteErrors), deleteErrors[0])
	}

	logger.Infof("Successfully deleted index entries for log group: %s", basePath)
	return nil
}
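
// exampleLogGroupLister is an illustrative sketch of the minimal shape DeleteIndexByLogGroup
// expects from its logFileManager argument: any value with a GetFilePathsForGroup method
// satisfies the interface assertion above. The rotated-file suffix below is a placeholder;
// a real implementation would look the paths up in the database.
type exampleLogGroupLister struct{}

func (exampleLogGroupLister) GetFilePathsForGroup(basePath string) ([]string, error) {
	return []string{basePath, basePath + ".1"}, nil
}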
// DestroyAllIndexes closes and deletes all index data from disk.
func (pi *ParallelIndexer) DestroyAllIndexes(parentCtx context.Context) error {
	// Stop all background routines before deleting files
	pi.cancel()
	pi.wg.Wait()

	// Safely close channels if they haven't been closed yet
	if atomic.CompareAndSwapInt32(&pi.channelsClosed, 0, 1) {
		close(pi.jobQueue)
		close(pi.resultQueue)
	}
	atomic.StoreInt32(&pi.running, 0) // Mark as not running

	var destructionErr error
	if manager, ok := pi.shardManager.(*DefaultShardManager); ok {
		destructionErr = manager.Destroy()
	} else {
		destructionErr = fmt.Errorf("shard manager does not support destruction")
	}

	// Re-initialize context and channels for a potential restart using the parent context
	pi.ctx, pi.cancel = context.WithCancel(parentCtx)
	pi.jobQueue = make(chan *IndexJob, pi.config.MaxQueueSize)
	pi.resultQueue = make(chan *IndexResult, pi.config.WorkerCount)
	atomic.StoreInt32(&pi.channelsClosed, 0) // Reset the channel closed flag

	return destructionErr
}
// IndexLogGroup finds all files related to a base log path (e.g., rotated logs) and indexes them.
// It returns a map of [filePath -> docCount], and the min/max timestamps found.
func (pi *ParallelIndexer) IndexLogGroup(basePath string) (map[string]uint64, *time.Time, *time.Time, error) {
	if !pi.IsHealthy() {
		return nil, nil, nil, fmt.Errorf("indexer not healthy")
	}

	// Find all files belonging to this log group by globbing
	globPath := basePath + "*"
	matches, err := filepath.Glob(globPath)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("failed to glob for log files with base %s: %w", basePath, err)
	}

	// filepath.Glob might not match the base file itself if it has no extension,
	// so we check for it explicitly and add it to the list.
	info, err := os.Stat(basePath)
	if err == nil && info.Mode().IsRegular() {
		matches = append(matches, basePath)
	}

	// Deduplicate the file list
	seen := make(map[string]struct{})
	uniqueFiles := make([]string, 0)
	for _, match := range matches {
		if _, ok := seen[match]; !ok {
			// Further check that it's a file, not a directory; Glob can match dirs.
			info, err := os.Stat(match)
			if err == nil && info.Mode().IsRegular() {
				seen[match] = struct{}{}
				uniqueFiles = append(uniqueFiles, match)
			}
		}
	}

	if len(uniqueFiles) == 0 {
		logger.Warnf("No actual log file found for group: %s", basePath)
		return nil, nil, nil, nil
	}

	logger.Infof("Found %d file(s) for log group %s: %v", len(uniqueFiles), basePath, uniqueFiles)

	docsCountMap := make(map[string]uint64)
	var overallMinTime, overallMaxTime *time.Time

	for _, filePath := range uniqueFiles {
		docsIndexed, minTime, maxTime, err := pi.indexSingleFile(filePath)
		if err != nil {
			logger.Warnf("Failed to index file '%s' in group '%s', skipping: %v", filePath, basePath, err)
			continue // Continue with the next file
		}
		docsCountMap[filePath] = docsIndexed

		if minTime != nil {
			if overallMinTime == nil || minTime.Before(*overallMinTime) {
				overallMinTime = minTime
			}
		}
		if maxTime != nil {
			if overallMaxTime == nil || maxTime.After(*overallMaxTime) {
				overallMaxTime = maxTime
			}
		}
	}

	return docsCountMap, overallMinTime, overallMaxTime, nil
}
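
// exampleIndexLogGroup is an illustrative sketch of consuming IndexLogGroup's results: per-file
// document counts plus the overall time range covered by the group. The log path is a placeholder.
func exampleIndexLogGroup(pi *ParallelIndexer) {
	counts, minTime, maxTime, err := pi.IndexLogGroup("/var/log/nginx/access.log")
	if err != nil {
		logger.Warnf("indexing log group failed: %v", err)
		return
	}
	for path, n := range counts {
		logger.Infof("%s: %d documents indexed", path, n)
	}
	if minTime != nil && maxTime != nil {
		logger.Infof("time range: %s - %s", minTime.Format(time.RFC3339), maxTime.Format(time.RFC3339))
	}
}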
// indexSingleFile contains the logic to process one physical log file.
// It returns the number of documents indexed from the file, and the min/max timestamps.
func (pi *ParallelIndexer) indexSingleFile(filePath string) (uint64, *time.Time, *time.Time, error) {
	file, err := os.Open(filePath)
	if err != nil {
		return 0, nil, nil, fmt.Errorf("failed to open log file %s: %w", filePath, err)
	}
	defer file.Close()

	var reader io.Reader = file

	// Handle gzipped files
	if strings.HasSuffix(filePath, ".gz") {
		gz, err := gzip.NewReader(file)
		if err != nil {
			return 0, nil, nil, fmt.Errorf("failed to create gzip reader for %s: %w", filePath, err)
		}
		defer gz.Close()
		reader = gz
	}

	logger.Infof("Starting to process file: %s", filePath)

	batch := pi.StartBatch()
	scanner := bufio.NewScanner(reader)
	docCount := 0
	var minTime, maxTime *time.Time

	for scanner.Scan() {
		line := scanner.Text()
		if line == "" {
			continue
		}

		logDoc, err := ParseLogLine(line)
		if err != nil {
			logger.Warnf("Skipping line due to parse error in file %s: %v", filePath, err)
			continue
		}
		logDoc.FilePath = filePath

		// Track min/max timestamps
		ts := time.Unix(logDoc.Timestamp, 0)
		if minTime == nil || ts.Before(*minTime) {
			minTime = &ts
		}
		if maxTime == nil || ts.After(*maxTime) {
			maxTime = &ts
		}

		// Use efficient string building for the document ID
		docIDBuf := make([]byte, 0, len(filePath)+16)
		docIDBuf = append(docIDBuf, filePath...)
		docIDBuf = append(docIDBuf, '-')
		docIDBuf = utils.AppendInt(docIDBuf, docCount)

		doc := &Document{
			ID:     utils.BytesToStringUnsafe(docIDBuf),
			Fields: logDoc,
		}

		if err := batch.Add(doc); err != nil {
			// This indicates an auto-flush occurred and failed.
			// Log the error and stop processing this file to avoid further issues.
			return uint64(docCount), minTime, maxTime, fmt.Errorf("failed to add document to batch for %s (auto-flush might have failed): %w", filePath, err)
		}
		docCount++
	}

	if err := scanner.Err(); err != nil {
		return uint64(docCount), minTime, maxTime, fmt.Errorf("error reading log file %s: %w", filePath, err)
	}

	logger.Infof("Finished processing file: %s. Total lines processed: %d", filePath, docCount)

	if docCount > 0 {
		if _, err := batch.Flush(); err != nil {
			return uint64(docCount), minTime, maxTime, fmt.Errorf("failed to flush batch for %s: %w", filePath, err)
		}
	}

	return uint64(docCount), minTime, maxTime, nil
}
// UpdateConfig updates the indexer configuration.
// Only certain parameters can be updated while the indexer is running.
func (pi *ParallelIndexer) UpdateConfig(config *Config) error {
	pi.config.BatchSize = config.BatchSize
	pi.config.FlushInterval = config.FlushInterval
	pi.config.EnableMetrics = config.EnableMetrics
	return nil
}
// Worker implementation

func (w *indexWorker) run() {
	defer w.indexer.wg.Done()
	w.updateStatus(WorkerStatusIdle)

	for {
		select {
		case job, ok := <-w.indexer.jobQueue:
			if !ok {
				return // Channel closed, worker should exit
			}

			w.updateStatus(WorkerStatusBusy)
			result := w.processJob(job)

			// Send result
			select {
			case w.indexer.resultQueue <- result:
			case <-w.indexer.ctx.Done():
				return
			}

			// Execute callback if provided
			if job.Callback != nil {
				var err error
				if result.Failed > 0 {
					err = fmt.Errorf("indexing failed for %d documents", result.Failed)
				}
				job.Callback(err)
			}

			w.updateStatus(WorkerStatusIdle)

		case <-w.indexer.ctx.Done():
			return
		}
	}
}
func (w *indexWorker) processJob(job *IndexJob) *IndexResult {
	startTime := time.Now()
	result := &IndexResult{
		Processed: len(job.Documents),
	}

	// Group documents by shard
	shardDocs := make(map[int][]*Document)
	for _, doc := range job.Documents {
		if doc.ID == "" {
			result.Failed++
			continue
		}
		_, shardID, err := w.indexer.shardManager.GetShard(doc.ID)
		if err != nil {
			result.Failed++
			continue
		}
		shardDocs[shardID] = append(shardDocs[shardID], doc)
	}

	// Index documents per shard
	for shardID, docs := range shardDocs {
		if err := w.indexShardDocuments(shardID, docs); err != nil {
			result.Failed += len(docs)
		} else {
			result.Succeeded += len(docs)
		}
	}

	result.Duration = time.Since(startTime)
	if result.Processed > 0 {
		result.ErrorRate = float64(result.Failed) / float64(result.Processed)
		result.Throughput = float64(result.Processed) / result.Duration.Seconds()
	}

	// Update worker stats
	w.statsMutex.Lock()
	w.stats.ProcessedJobs++
	w.stats.ProcessedDocs += int64(result.Processed)
	w.stats.ErrorCount += int64(result.Failed)
	w.stats.LastActive = time.Now().Unix()
	// Update average latency (simple moving average)
	if w.stats.AverageLatency == 0 {
		w.stats.AverageLatency = result.Duration
	} else {
		w.stats.AverageLatency = (w.stats.AverageLatency + result.Duration) / 2
	}
	w.statsMutex.Unlock()

	return result
}
func (w *indexWorker) indexShardDocuments(shardID int, docs []*Document) error {
	shard, err := w.indexer.shardManager.GetShardByID(shardID)
	if err != nil {
		return err
	}

	batch := shard.NewBatch()
	for _, doc := range docs {
		// Convert LogDocument to a map for Bleve indexing
		docMap := w.logDocumentToMap(doc.Fields)
		batch.Index(doc.ID, docMap)
	}

	if err := shard.Batch(batch); err != nil {
		return fmt.Errorf("failed to index batch for shard %d: %w", shardID, err)
	}
	return nil
}
// logDocumentToMap converts LogDocument to map[string]interface{} for Bleve
func (w *indexWorker) logDocumentToMap(doc *LogDocument) map[string]interface{} {
	docMap := map[string]interface{}{
		"timestamp":  doc.Timestamp,
		"ip":         doc.IP,
		"method":     doc.Method,
		"path":       doc.Path,
		"path_exact": doc.PathExact,
		"status":     doc.Status,
		"bytes_sent": doc.BytesSent,
		"file_path":  doc.FilePath,
		"raw":        doc.Raw,
	}

	// Add optional fields only if they have values
	if doc.RegionCode != "" {
		docMap["region_code"] = doc.RegionCode
	}
	if doc.Province != "" {
		docMap["province"] = doc.Province
	}
	if doc.City != "" {
		docMap["city"] = doc.City
	}
	if doc.Protocol != "" {
		docMap["protocol"] = doc.Protocol
	}
	if doc.Referer != "" {
		docMap["referer"] = doc.Referer
	}
	if doc.UserAgent != "" {
		docMap["user_agent"] = doc.UserAgent
	}
	if doc.Browser != "" {
		docMap["browser"] = doc.Browser
	}
	if doc.BrowserVer != "" {
		docMap["browser_version"] = doc.BrowserVer
	}
	if doc.OS != "" {
		docMap["os"] = doc.OS
	}
	if doc.OSVersion != "" {
		docMap["os_version"] = doc.OSVersion
	}
	if doc.DeviceType != "" {
		docMap["device_type"] = doc.DeviceType
	}
	if doc.RequestTime > 0 {
		docMap["request_time"] = doc.RequestTime
	}
	if doc.UpstreamTime != nil {
		docMap["upstream_time"] = *doc.UpstreamTime
	}

	return docMap
}

func (w *indexWorker) updateStatus(status string) {
	w.statsMutex.Lock()
	w.stats.Status = status
	w.statsMutex.Unlock()
}
// Background routines

func (pi *ParallelIndexer) processResults() {
	defer pi.wg.Done()

	for {
		select {
		case result := <-pi.resultQueue:
			if result != nil {
				pi.metrics.RecordIndexOperation(
					result.Processed,
					result.Duration,
					result.Failed == 0,
				)
			}
		case <-pi.ctx.Done():
			return
		}
	}
}

func (pi *ParallelIndexer) optimizationRoutine() {
	defer pi.wg.Done()

	ticker := time.NewTicker(pi.config.OptimizeInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			if atomic.LoadInt32(&pi.optimizing) == 0 {
				go pi.Optimize() // Run in the background to avoid blocking the ticker
			}
		case <-pi.ctx.Done():
			return
		}
	}
}

func (pi *ParallelIndexer) metricsRoutine() {
	defer pi.wg.Done()

	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			pi.updateMetrics()
		case <-pi.ctx.Done():
			return
		}
	}
}

func (pi *ParallelIndexer) updateMetrics() {
	pi.statsMutex.Lock()
	defer pi.statsMutex.Unlock()

	// Update indexing rate based on recent activity
	metrics := pi.metrics.GetMetrics()
	pi.stats.IndexingRate = metrics.IndexingRate
}
// IndexLogGroupWithProgress indexes a log group with progress tracking
func (pi *ParallelIndexer) IndexLogGroupWithProgress(basePath string, progressConfig *ProgressConfig) (map[string]uint64, *time.Time, *time.Time, error) {
	if !pi.IsHealthy() {
		return nil, nil, nil, fmt.Errorf("indexer not healthy")
	}

	// Create a progress tracker if a config is provided
	var progressTracker *ProgressTracker
	if progressConfig != nil {
		progressTracker = NewProgressTracker(basePath, progressConfig)
	}

	// Find all files belonging to this log group by globbing
	globPath := basePath + "*"
	matches, err := filepath.Glob(globPath)
	if err != nil {
		if progressTracker != nil {
			progressTracker.Cancel(fmt.Sprintf("glob failed: %v", err))
		}
		return nil, nil, nil, fmt.Errorf("failed to glob for log files with base %s: %w", basePath, err)
	}

	// filepath.Glob might not match the base file itself if it has no extension,
	// so we check for it explicitly and add it to the list.
	info, err := os.Stat(basePath)
	if err == nil && info.Mode().IsRegular() {
		matches = append(matches, basePath)
	}

	// Deduplicate the file list
	seen := make(map[string]struct{})
	uniqueFiles := make([]string, 0)
	for _, match := range matches {
		if _, ok := seen[match]; !ok {
			// Further check that it's a file, not a directory; Glob can match dirs.
			info, err := os.Stat(match)
			if err == nil && info.Mode().IsRegular() {
				seen[match] = struct{}{}
				uniqueFiles = append(uniqueFiles, match)
			}
		}
	}

	if len(uniqueFiles) == 0 {
		logger.Warnf("No actual log file found for group: %s", basePath)
		if progressTracker != nil {
			progressTracker.Cancel("no files found")
		}
		return nil, nil, nil, nil
	}

	logger.Infof("Found %d file(s) for log group %s: %v", len(uniqueFiles), basePath, uniqueFiles)

	// Set up progress tracking for all files
	if progressTracker != nil {
		for _, filePath := range uniqueFiles {
			isCompressed := IsCompressedFile(filePath)
			progressTracker.AddFile(filePath, isCompressed)

			// Get the file size and estimate the line count
			if stat, err := os.Stat(filePath); err == nil {
				progressTracker.SetFileSize(filePath, stat.Size())
				// Estimate lines for progress calculation
				if estimatedLines, err := EstimateFileLines(context.Background(), filePath, stat.Size(), isCompressed); err == nil {
					progressTracker.SetFileEstimate(filePath, estimatedLines)
				}
			}
		}
	}

	docsCountMap := make(map[string]uint64)
	var overallMinTime, overallMaxTime *time.Time

	// Process each file with progress tracking
	for _, filePath := range uniqueFiles {
		if progressTracker != nil {
			progressTracker.StartFile(filePath)
		}

		docsIndexed, minTime, maxTime, err := pi.indexSingleFileWithProgress(filePath, progressTracker)
		if err != nil {
			logger.Warnf("Failed to index file '%s' in group '%s', skipping: %v", filePath, basePath, err)
			if progressTracker != nil {
				progressTracker.FailFile(filePath, err.Error())
			}
			continue // Continue with the next file
		}

		docsCountMap[filePath] = docsIndexed
		if progressTracker != nil {
			progressTracker.CompleteFile(filePath, int64(docsIndexed))
		}

		if minTime != nil {
			if overallMinTime == nil || minTime.Before(*overallMinTime) {
				overallMinTime = minTime
			}
		}
		if maxTime != nil {
			if overallMaxTime == nil || maxTime.After(*overallMaxTime) {
				overallMaxTime = maxTime
			}
		}
	}

	return docsCountMap, overallMinTime, overallMaxTime, nil
}
// indexSingleFileWithProgress indexes a single file with progress updates
func (pi *ParallelIndexer) indexSingleFileWithProgress(filePath string, progressTracker *ProgressTracker) (uint64, *time.Time, *time.Time, error) {
	// If there is no progress tracker, just call the original method
	if progressTracker == nil {
		return pi.indexSingleFile(filePath)
	}

	// Call the original indexing method to do the actual indexing work
	docsIndexed, minTime, maxTime, err := pi.indexSingleFile(filePath)
	if err != nil {
		return 0, nil, nil, err
	}

	// Do a single final progress update when done - no artificial delays
	if docsIndexed > 0 {
		if strings.HasSuffix(filePath, ".gz") {
			progressTracker.UpdateFileProgress(filePath, int64(docsIndexed))
		} else {
			// Estimate the file position based on an average line size of ~150 bytes
			estimatedPos := int64(docsIndexed * 150)
			progressTracker.UpdateFileProgress(filePath, int64(docsIndexed), estimatedPos)
		}
	}

	// Return the actual timestamps from the original method
	return docsIndexed, minTime, maxTime, nil
}