parallel_indexer.go

package indexer

import (
	"bufio"
	"compress/gzip"
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/blevesearch/bleve/v2"
	"github.com/uozi-tech/cosy/logger"

	"github.com/0xJacky/Nginx-UI/internal/nginx_log/utils"
)

// ParallelIndexer provides high-performance parallel indexing with sharding
type ParallelIndexer struct {
	config       *Config
	shardManager ShardManager
	metrics      MetricsCollector

	// Worker management
	workers     []*indexWorker
	jobQueue    chan *IndexJob
	resultQueue chan *IndexResult

	// State management
	ctx     context.Context
	cancel  context.CancelFunc
	wg      sync.WaitGroup
	running int32

	// Statistics
	stats      *IndexStats
	statsMutex sync.RWMutex

	// Optimization
	lastOptimized int64
	optimizing    int32
}

// indexWorker represents a single indexing worker
type indexWorker struct {
	id         int
	indexer    *ParallelIndexer
	stats      *WorkerStats
	statsMutex sync.RWMutex
}
// NewParallelIndexer creates a new parallel indexer
func NewParallelIndexer(config *Config, shardManager ShardManager) *ParallelIndexer {
	if config == nil {
		config = DefaultIndexerConfig()
	}

	ctx, cancel := context.WithCancel(context.Background())

	indexer := &ParallelIndexer{
		config:       config,
		shardManager: shardManager,
		metrics:      NewDefaultMetricsCollector(),
		jobQueue:     make(chan *IndexJob, config.MaxQueueSize),
		resultQueue:  make(chan *IndexResult, config.WorkerCount),
		ctx:          ctx,
		cancel:       cancel,
		stats: &IndexStats{
			WorkerStats: make([]*WorkerStats, config.WorkerCount),
		},
	}

	// Initialize workers
	indexer.workers = make([]*indexWorker, config.WorkerCount)
	for i := 0; i < config.WorkerCount; i++ {
		indexer.workers[i] = &indexWorker{
			id:      i,
			indexer: indexer,
			stats: &WorkerStats{
				ID:     i,
				Status: WorkerStatusIdle,
			},
		}
		indexer.stats.WorkerStats[i] = indexer.workers[i].stats
	}

	return indexer
}
// Start begins the indexer operation
func (pi *ParallelIndexer) Start(ctx context.Context) error {
	if !atomic.CompareAndSwapInt32(&pi.running, 0, 1) {
		return fmt.Errorf("indexer already running")
	}

	// Initialize shard manager
	if err := pi.shardManager.Initialize(); err != nil {
		atomic.StoreInt32(&pi.running, 0)
		return fmt.Errorf("failed to initialize shard manager: %w", err)
	}

	// Start workers
	for _, worker := range pi.workers {
		pi.wg.Add(1)
		go worker.run()
	}

	// Start result processor
	pi.wg.Add(1)
	go pi.processResults()

	// Start optimization routine if enabled
	if pi.config.OptimizeInterval > 0 {
		pi.wg.Add(1)
		go pi.optimizationRoutine()
	}

	// Start metrics collection if enabled
	if pi.config.EnableMetrics {
		pi.wg.Add(1)
		go pi.metricsRoutine()
	}

	return nil
}
// Stop gracefully stops the indexer
func (pi *ParallelIndexer) Stop() error {
	if !atomic.CompareAndSwapInt32(&pi.running, 1, 0) {
		return fmt.Errorf("indexer not running")
	}

	// Cancel context to stop all routines
	pi.cancel()

	// Close job queue to stop accepting new jobs
	close(pi.jobQueue)

	// Wait for all workers to finish
	pi.wg.Wait()

	// Close result queue
	close(pi.resultQueue)

	// Flush all remaining data
	if err := pi.FlushAll(); err != nil {
		return fmt.Errorf("failed to flush during stop: %w", err)
	}

	return nil
}
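
// The sketch below illustrates the intended construct/start/stop lifecycle of the
// indexer. It is not part of the public API: the ShardManager is taken as a
// parameter because its concrete constructor lives elsewhere in this package, and
// the document contents are placeholder values.
func exampleIndexerLifecycle(shardManager ShardManager) error {
	// Passing a nil config makes NewParallelIndexer fall back to DefaultIndexerConfig.
	indexer := NewParallelIndexer(nil, shardManager)

	// Start spawns the workers, the result processor and, depending on the
	// configuration, the optimization and metrics routines.
	if err := indexer.Start(context.Background()); err != nil {
		return err
	}
	// Stop cancels the background routines, drains the workers and flushes all shards.
	defer func() {
		if err := indexer.Stop(); err != nil {
			logger.Warnf("failed to stop indexer: %v", err)
		}
	}()

	// Index one placeholder document synchronously.
	doc := &Document{
		ID: "example-doc-1",
		Fields: &LogDocument{
			Timestamp: time.Now().Unix(),
			Raw:       "example log line",
		},
	}
	return indexer.IndexDocument(context.Background(), doc)
}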
// IndexDocument indexes a single document
func (pi *ParallelIndexer) IndexDocument(ctx context.Context, doc *Document) error {
	return pi.IndexDocuments(ctx, []*Document{doc})
}

// IndexDocuments indexes multiple documents
func (pi *ParallelIndexer) IndexDocuments(ctx context.Context, docs []*Document) error {
	if !pi.IsHealthy() {
		return fmt.Errorf("indexer not healthy")
	}
	if len(docs) == 0 {
		return nil
	}

	// Create job
	job := &IndexJob{
		Documents: docs,
		Priority:  PriorityNormal,
	}

	// Submit job and wait for completion
	done := make(chan error, 1)
	job.Callback = func(err error) {
		done <- err
	}

	select {
	case pi.jobQueue <- job:
		select {
		case err := <-done:
			return err
		case <-ctx.Done():
			return ctx.Err()
		}
	case <-ctx.Done():
		return ctx.Err()
	case <-pi.ctx.Done():
		return fmt.Errorf("indexer stopped")
	}
}
// IndexDocumentAsync indexes a document asynchronously
func (pi *ParallelIndexer) IndexDocumentAsync(doc *Document, callback func(error)) {
	pi.IndexDocumentsAsync([]*Document{doc}, callback)
}

// IndexDocumentsAsync indexes multiple documents asynchronously
func (pi *ParallelIndexer) IndexDocumentsAsync(docs []*Document, callback func(error)) {
	if !pi.IsHealthy() {
		if callback != nil {
			callback(fmt.Errorf("indexer not healthy"))
		}
		return
	}
	if len(docs) == 0 {
		if callback != nil {
			callback(nil)
		}
		return
	}

	job := &IndexJob{
		Documents: docs,
		Priority:  PriorityNormal,
		Callback:  callback,
	}

	select {
	case pi.jobQueue <- job:
		// Job queued successfully
	case <-pi.ctx.Done():
		if callback != nil {
			callback(fmt.Errorf("indexer stopped"))
		}
	default:
		// Queue is full
		if callback != nil {
			callback(fmt.Errorf("queue is full"))
		}
	}
}
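
// Illustrative sketch of the asynchronous path: the callback runs once the queued
// job has been processed (or rejected). The done channel is only a demonstration
// device and is not part of the indexer API.
func exampleAsyncIndexing(pi *ParallelIndexer, docs []*Document) error {
	done := make(chan error, 1)
	pi.IndexDocumentsAsync(docs, func(err error) {
		done <- err
	})
	// A real caller could also select on a context or timeout here.
	return <-done
}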
// StartBatch returns a new batch writer
func (pi *ParallelIndexer) StartBatch() BatchWriterInterface {
	return NewBatchWriter(pi, pi.config.BatchSize)
}
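
// Illustrative sketch of batch indexing. It assumes the BatchWriterInterface
// returned by StartBatch behaves as it is used elsewhere in this file: Add may
// auto-flush once the configured batch size is reached, and Flush pushes out
// whatever remains.
func exampleBatchIndexing(pi *ParallelIndexer, docs []*Document) error {
	batch := pi.StartBatch()
	for _, doc := range docs {
		if err := batch.Add(doc); err != nil {
			return fmt.Errorf("failed to add document %s to batch: %w", doc.ID, err)
		}
	}
	if _, err := batch.Flush(); err != nil {
		return fmt.Errorf("failed to flush batch: %w", err)
	}
	return nil
}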
// FlushAll flushes all pending operations
func (pi *ParallelIndexer) FlushAll() error {
	// Get all shards and flush them
	shards := pi.shardManager.GetAllShards()
	var errs []error

	for i, shard := range shards {
		if shard == nil {
			continue
		}

		// Force flush by creating and immediately deleting a temporary document
		batch := shard.NewBatch()

		// Use efficient string building instead of fmt.Sprintf
		tempIDBuf := make([]byte, 0, 64)
		tempIDBuf = append(tempIDBuf, "_flush_temp_"...)
		tempIDBuf = utils.AppendInt(tempIDBuf, i)
		tempIDBuf = append(tempIDBuf, '_')
		tempIDBuf = utils.AppendInt(tempIDBuf, int(time.Now().UnixNano()))
		tempID := utils.BytesToStringUnsafe(tempIDBuf)

		batch.Index(tempID, map[string]interface{}{"_temp": true})
		if err := shard.Batch(batch); err != nil {
			errs = append(errs, fmt.Errorf("failed to flush shard %d: %w", i, err))
			continue
		}

		// Delete the temporary document
		shard.Delete(tempID)
	}

	if len(errs) > 0 {
		return fmt.Errorf("flush errors: %v", errs)
	}
	return nil
}
// Optimize triggers optimization of all shards
func (pi *ParallelIndexer) Optimize() error {
	if !atomic.CompareAndSwapInt32(&pi.optimizing, 0, 1) {
		return fmt.Errorf("optimization already in progress")
	}
	defer atomic.StoreInt32(&pi.optimizing, 0)

	startTime := time.Now()
	stats := pi.shardManager.GetShardStats()
	var errs []error

	for _, stat := range stats {
		if err := pi.shardManager.OptimizeShard(stat.ID); err != nil {
			errs = append(errs, fmt.Errorf("failed to optimize shard %d: %w", stat.ID, err))
		}
	}

	// Update optimization stats
	pi.statsMutex.Lock()
	if pi.stats.OptimizationStats == nil {
		pi.stats.OptimizationStats = &OptimizationStats{}
	}
	pi.stats.OptimizationStats.LastRun = time.Now().Unix()
	pi.stats.OptimizationStats.Duration = time.Since(startTime)
	pi.stats.OptimizationStats.Success = len(errs) == 0
	pi.stats.LastOptimized = time.Now().Unix()
	pi.statsMutex.Unlock()

	atomic.StoreInt64(&pi.lastOptimized, time.Now().Unix())

	// Record optimization metrics for both successful and failed runs
	pi.metrics.RecordOptimization(time.Since(startTime), len(errs) == 0)

	if len(errs) > 0 {
		return fmt.Errorf("optimization errors: %v", errs)
	}
	return nil
}
// IndexLogFile reads and indexes a single log file
func (pi *ParallelIndexer) IndexLogFile(filePath string) error {
	if !pi.IsHealthy() {
		return fmt.Errorf("indexer not healthy")
	}

	file, err := os.Open(filePath)
	if err != nil {
		return fmt.Errorf("failed to open log file %s: %w", filePath, err)
	}
	defer file.Close()

	// Use a batch writer for efficient indexing
	batch := pi.StartBatch()
	scanner := bufio.NewScanner(file)
	docCount := 0

	for scanner.Scan() {
		line := scanner.Text()
		if line == "" {
			continue
		}

		// In a real implementation, parse the log line into a structured format
		// For now, we create a simple document
		logDoc, err := ParseLogLine(line) // Assuming a parser function exists
		if err != nil {
			logger.Warnf("Skipping line due to parse error in file %s: %v", filePath, err)
			continue
		}
		logDoc.FilePath = filePath

		// Use efficient string building for document ID
		docIDBuf := make([]byte, 0, len(filePath)+16)
		docIDBuf = append(docIDBuf, filePath...)
		docIDBuf = append(docIDBuf, '-')
		docIDBuf = utils.AppendInt(docIDBuf, docCount)

		doc := &Document{
			ID:     utils.BytesToStringUnsafe(docIDBuf),
			Fields: logDoc,
		}

		if err := batch.Add(doc); err != nil {
			// This indicates an auto-flush occurred and failed.
			// Log the error and stop processing this file to avoid further issues.
			return fmt.Errorf("failed to add document to batch for %s (auto-flush might have failed): %w", filePath, err)
		}
		docCount++
	}

	if err := scanner.Err(); err != nil {
		return fmt.Errorf("error reading log file %s: %w", filePath, err)
	}

	if _, err := batch.Flush(); err != nil {
		return fmt.Errorf("failed to flush batch for %s: %w", filePath, err)
	}
	return nil
}
// GetStats returns current indexer statistics
func (pi *ParallelIndexer) GetStats() *IndexStats {
	// Take the write lock: the shard, queue and memory fields below are updated in place.
	pi.statsMutex.Lock()
	defer pi.statsMutex.Unlock()

	// Update shard stats
	shardStats := pi.shardManager.GetShardStats()
	pi.stats.Shards = shardStats
	pi.stats.ShardCount = len(shardStats)

	var totalDocs uint64
	var totalSize int64
	for _, shard := range shardStats {
		totalDocs += shard.DocumentCount
		totalSize += shard.Size
	}
	pi.stats.TotalDocuments = totalDocs
	pi.stats.TotalSize = totalSize
	pi.stats.QueueSize = len(pi.jobQueue)

	// Calculate memory usage
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	pi.stats.MemoryUsage = int64(memStats.Alloc)

	// Copy stats to avoid race conditions
	statsCopy := *pi.stats
	return &statsCopy
}
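
// Illustrative sketch of reading the statistics snapshot. Only fields populated by
// GetStats above are used; the log line is a placeholder.
func exampleLogIndexerStats(pi *ParallelIndexer) {
	stats := pi.GetStats()
	logger.Infof("indexer stats: %d documents across %d shards, queue=%d, mem=%d bytes",
		stats.TotalDocuments, stats.ShardCount, stats.QueueSize, stats.MemoryUsage)
}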
// IsRunning returns whether the indexer is currently running
func (pi *ParallelIndexer) IsRunning() bool {
	return atomic.LoadInt32(&pi.running) != 0
}

// GetShardInfo returns information about a specific shard
func (pi *ParallelIndexer) GetShardInfo(shardID int) (*ShardInfo, error) {
	shardStats := pi.shardManager.GetShardStats()
	for _, stat := range shardStats {
		if stat.ID == shardID {
			return stat, nil
		}
	}
	return nil, fmt.Errorf("%s: %d", ErrShardNotFound, shardID)
}

// IsHealthy checks if the indexer is running and healthy
func (pi *ParallelIndexer) IsHealthy() bool {
	if atomic.LoadInt32(&pi.running) != 1 {
		return false
	}
	// Check shard manager health
	return pi.shardManager.HealthCheck() == nil
}

// GetConfig returns the current configuration
func (pi *ParallelIndexer) GetConfig() *Config {
	return pi.config
}

// GetAllShards returns all managed shards
func (pi *ParallelIndexer) GetAllShards() []bleve.Index {
	return pi.shardManager.GetAllShards()
}
// DestroyAllIndexes closes and deletes all index data from disk.
func (pi *ParallelIndexer) DestroyAllIndexes() error {
	// Stop all background routines before deleting files
	pi.cancel()
	pi.wg.Wait()
	close(pi.jobQueue)
	close(pi.resultQueue)
	atomic.StoreInt32(&pi.running, 0) // Mark as not running

	var destructionErr error
	if manager, ok := pi.shardManager.(*DefaultShardManager); ok {
		destructionErr = manager.Destroy()
	} else {
		destructionErr = fmt.Errorf("shard manager does not support destruction")
	}

	// Re-initialize context and channels for a potential restart
	pi.ctx, pi.cancel = context.WithCancel(context.Background())
	pi.jobQueue = make(chan *IndexJob, pi.config.MaxQueueSize)
	pi.resultQueue = make(chan *IndexResult, pi.config.WorkerCount)

	return destructionErr
}
// IndexLogGroup finds all files related to a base log path (e.g., rotated logs) and indexes them.
// It returns a map of [filePath -> docCount], and the min/max timestamps found.
func (pi *ParallelIndexer) IndexLogGroup(basePath string) (map[string]uint64, *time.Time, *time.Time, error) {
	if !pi.IsHealthy() {
		return nil, nil, nil, fmt.Errorf("indexer not healthy")
	}

	// Find all files belonging to this log group by globbing
	globPath := basePath + "*"
	matches, err := filepath.Glob(globPath)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("failed to glob for log files with base %s: %w", basePath, err)
	}

	// filepath.Glob might not match the base file itself if it has no extension,
	// so we check for it explicitly and add it to the list.
	info, err := os.Stat(basePath)
	if err == nil && info.Mode().IsRegular() {
		matches = append(matches, basePath)
	}

	// Deduplicate file list
	seen := make(map[string]struct{})
	uniqueFiles := make([]string, 0)
	for _, match := range matches {
		if _, ok := seen[match]; !ok {
			// Further check if it's a file, not a directory. Glob can match dirs.
			info, err := os.Stat(match)
			if err == nil && info.Mode().IsRegular() {
				seen[match] = struct{}{}
				uniqueFiles = append(uniqueFiles, match)
			}
		}
	}

	if len(uniqueFiles) == 0 {
		logger.Warnf("No actual log file found for group: %s", basePath)
		return nil, nil, nil, nil
	}

	logger.Infof("Found %d file(s) for log group %s: %v", len(uniqueFiles), basePath, uniqueFiles)

	docsCountMap := make(map[string]uint64)
	var overallMinTime, overallMaxTime *time.Time

	for _, filePath := range uniqueFiles {
		docsIndexed, minTime, maxTime, err := pi.indexSingleFile(filePath)
		if err != nil {
			logger.Warnf("Failed to index file '%s' in group '%s', skipping: %v", filePath, basePath, err)
			continue // Continue with the next file
		}
		docsCountMap[filePath] = docsIndexed

		if minTime != nil {
			if overallMinTime == nil || minTime.Before(*overallMinTime) {
				overallMinTime = minTime
			}
		}
		if maxTime != nil {
			if overallMaxTime == nil || maxTime.After(*overallMaxTime) {
				overallMaxTime = maxTime
			}
		}
	}

	return docsCountMap, overallMinTime, overallMaxTime, nil
}
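
// Illustrative sketch of indexing a rotated log group (access.log, access.log.1,
// access.log.2.gz, ...). The base path is hypothetical; the per-file counts and the
// overall time range come directly from IndexLogGroup's return values.
func exampleIndexAccessLogGroup(pi *ParallelIndexer) error {
	docsByFile, minTime, maxTime, err := pi.IndexLogGroup("/var/log/nginx/access.log")
	if err != nil {
		return err
	}
	for filePath, count := range docsByFile {
		logger.Infof("indexed %d documents from %s", count, filePath)
	}
	if minTime != nil && maxTime != nil {
		logger.Infof("log group time range: %s to %s",
			minTime.Format(time.RFC3339), maxTime.Format(time.RFC3339))
	}
	return nil
}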
// indexSingleFile contains the logic to process one physical log file.
// It returns the number of documents indexed from the file, and the min/max timestamps.
func (pi *ParallelIndexer) indexSingleFile(filePath string) (uint64, *time.Time, *time.Time, error) {
	file, err := os.Open(filePath)
	if err != nil {
		return 0, nil, nil, fmt.Errorf("failed to open log file %s: %w", filePath, err)
	}
	defer file.Close()

	var reader io.Reader = file

	// Handle gzipped files
	if strings.HasSuffix(filePath, ".gz") {
		gz, err := gzip.NewReader(file)
		if err != nil {
			return 0, nil, nil, fmt.Errorf("failed to create gzip reader for %s: %w", filePath, err)
		}
		defer gz.Close()
		reader = gz
	}

	logger.Infof("Starting to process file: %s", filePath)

	batch := pi.StartBatch()
	scanner := bufio.NewScanner(reader)
	docCount := 0
	var minTime, maxTime *time.Time

	for scanner.Scan() {
		line := scanner.Text()
		if line == "" {
			continue
		}

		logDoc, err := ParseLogLine(line)
		if err != nil {
			logger.Warnf("Skipping line due to parse error in file %s: %v", filePath, err)
			continue
		}
		logDoc.FilePath = filePath

		// Track min/max timestamps
		ts := time.Unix(logDoc.Timestamp, 0)
		if minTime == nil || ts.Before(*minTime) {
			minTime = &ts
		}
		if maxTime == nil || ts.After(*maxTime) {
			maxTime = &ts
		}

		// Use efficient string building for document ID
		docIDBuf := make([]byte, 0, len(filePath)+16)
		docIDBuf = append(docIDBuf, filePath...)
		docIDBuf = append(docIDBuf, '-')
		docIDBuf = utils.AppendInt(docIDBuf, docCount)

		doc := &Document{
			ID:     utils.BytesToStringUnsafe(docIDBuf),
			Fields: logDoc,
		}

		if err := batch.Add(doc); err != nil {
			// This indicates an auto-flush occurred and failed.
			// Log the error and stop processing this file to avoid further issues.
			return uint64(docCount), minTime, maxTime, fmt.Errorf("failed to add document to batch for %s (auto-flush might have failed): %w", filePath, err)
		}
		docCount++
	}

	if err := scanner.Err(); err != nil {
		return uint64(docCount), minTime, maxTime, fmt.Errorf("error reading log file %s: %w", filePath, err)
	}

	logger.Infof("Finished processing file: %s. Total lines processed: %d", filePath, docCount)

	if docCount > 0 {
		if _, err := batch.Flush(); err != nil {
			return uint64(docCount), minTime, maxTime, fmt.Errorf("failed to flush batch for %s: %w", filePath, err)
		}
	}

	return uint64(docCount), minTime, maxTime, nil
}
// UpdateConfig updates the indexer configuration
func (pi *ParallelIndexer) UpdateConfig(config *Config) error {
	// Only allow updating certain configuration parameters while running
	pi.config.BatchSize = config.BatchSize
	pi.config.FlushInterval = config.FlushInterval
	pi.config.EnableMetrics = config.EnableMetrics
	return nil
}
// Worker implementation

func (w *indexWorker) run() {
	defer w.indexer.wg.Done()
	w.updateStatus(WorkerStatusIdle)

	for {
		select {
		case job, ok := <-w.indexer.jobQueue:
			if !ok {
				return // Channel closed, worker should exit
			}

			w.updateStatus(WorkerStatusBusy)
			result := w.processJob(job)

			// Send result
			select {
			case w.indexer.resultQueue <- result:
			case <-w.indexer.ctx.Done():
				return
			}

			// Execute callback if provided
			if job.Callback != nil {
				var err error
				if result.Failed > 0 {
					err = fmt.Errorf("indexing failed for %d documents", result.Failed)
				}
				job.Callback(err)
			}

			w.updateStatus(WorkerStatusIdle)

		case <-w.indexer.ctx.Done():
			return
		}
	}
}
func (w *indexWorker) processJob(job *IndexJob) *IndexResult {
	startTime := time.Now()
	result := &IndexResult{
		Processed: len(job.Documents),
	}

	// Group documents by shard
	shardDocs := make(map[int][]*Document)
	for _, doc := range job.Documents {
		if doc.ID == "" {
			result.Failed++
			continue
		}
		_, shardID, err := w.indexer.shardManager.GetShard(doc.ID)
		if err != nil {
			result.Failed++
			continue
		}
		shardDocs[shardID] = append(shardDocs[shardID], doc)
	}

	// Index documents per shard
	for shardID, docs := range shardDocs {
		if err := w.indexShardDocuments(shardID, docs); err != nil {
			result.Failed += len(docs)
		} else {
			result.Succeeded += len(docs)
		}
	}

	result.Duration = time.Since(startTime)
	if result.Processed > 0 {
		result.ErrorRate = float64(result.Failed) / float64(result.Processed)
		result.Throughput = float64(result.Processed) / result.Duration.Seconds()
	}

	// Update worker stats
	w.statsMutex.Lock()
	w.stats.ProcessedJobs++
	w.stats.ProcessedDocs += int64(result.Processed)
	w.stats.ErrorCount += int64(result.Failed)
	w.stats.LastActive = time.Now().Unix()
	// Update average latency (simple moving average)
	if w.stats.AverageLatency == 0 {
		w.stats.AverageLatency = result.Duration
	} else {
		w.stats.AverageLatency = (w.stats.AverageLatency + result.Duration) / 2
	}
	w.statsMutex.Unlock()

	return result
}
func (w *indexWorker) indexShardDocuments(shardID int, docs []*Document) error {
	shard, err := w.indexer.shardManager.GetShardByID(shardID)
	if err != nil {
		return err
	}

	batch := shard.NewBatch()
	for _, doc := range docs {
		// Convert LogDocument to map for Bleve indexing
		docMap := w.logDocumentToMap(doc.Fields)
		batch.Index(doc.ID, docMap)
	}

	if err := shard.Batch(batch); err != nil {
		return fmt.Errorf("failed to index batch for shard %d: %w", shardID, err)
	}
	return nil
}
// logDocumentToMap converts LogDocument to map[string]interface{} for Bleve
func (w *indexWorker) logDocumentToMap(doc *LogDocument) map[string]interface{} {
	docMap := map[string]interface{}{
		"timestamp":  doc.Timestamp,
		"ip":         doc.IP,
		"method":     doc.Method,
		"path":       doc.Path,
		"path_exact": doc.PathExact,
		"status":     doc.Status,
		"bytes_sent": doc.BytesSent,
		"file_path":  doc.FilePath,
		"raw":        doc.Raw,
	}

	// Add optional fields only if they have values
	if doc.RegionCode != "" {
		docMap["region_code"] = doc.RegionCode
	}
	if doc.Province != "" {
		docMap["province"] = doc.Province
	}
	if doc.City != "" {
		docMap["city"] = doc.City
	}
	if doc.Protocol != "" {
		docMap["protocol"] = doc.Protocol
	}
	if doc.Referer != "" {
		docMap["referer"] = doc.Referer
	}
	if doc.UserAgent != "" {
		docMap["user_agent"] = doc.UserAgent
	}
	if doc.Browser != "" {
		docMap["browser"] = doc.Browser
	}
	if doc.BrowserVer != "" {
		docMap["browser_version"] = doc.BrowserVer
	}
	if doc.OS != "" {
		docMap["os"] = doc.OS
	}
	if doc.OSVersion != "" {
		docMap["os_version"] = doc.OSVersion
	}
	if doc.DeviceType != "" {
		docMap["device_type"] = doc.DeviceType
	}
	if doc.RequestTime > 0 {
		docMap["request_time"] = doc.RequestTime
	}
	if doc.UpstreamTime != nil {
		docMap["upstream_time"] = *doc.UpstreamTime
	}

	return docMap
}
func (w *indexWorker) updateStatus(status string) {
	w.statsMutex.Lock()
	w.stats.Status = status
	w.statsMutex.Unlock()
}

// Background routines

func (pi *ParallelIndexer) processResults() {
	defer pi.wg.Done()

	for {
		select {
		case result := <-pi.resultQueue:
			if result != nil {
				pi.metrics.RecordIndexOperation(
					result.Processed,
					result.Duration,
					result.Failed == 0,
				)
			}
		case <-pi.ctx.Done():
			return
		}
	}
}
func (pi *ParallelIndexer) optimizationRoutine() {
	defer pi.wg.Done()

	ticker := time.NewTicker(pi.config.OptimizeInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			if atomic.LoadInt32(&pi.optimizing) == 0 {
				go pi.Optimize() // Run in background to avoid blocking
			}
		case <-pi.ctx.Done():
			return
		}
	}
}

func (pi *ParallelIndexer) metricsRoutine() {
	defer pi.wg.Done()

	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			pi.updateMetrics()
		case <-pi.ctx.Done():
			return
		}
	}
}

func (pi *ParallelIndexer) updateMetrics() {
	pi.statsMutex.Lock()
	defer pi.statsMutex.Unlock()

	// Update indexing rate based on recent activity
	metrics := pi.metrics.GetMetrics()
	pi.stats.IndexingRate = metrics.IndexingRate
}
// IndexLogGroupWithProgress indexes a log group with progress tracking
func (pi *ParallelIndexer) IndexLogGroupWithProgress(basePath string, progressConfig *ProgressConfig) (map[string]uint64, *time.Time, *time.Time, error) {
	if !pi.IsHealthy() {
		return nil, nil, nil, fmt.Errorf("indexer not healthy")
	}

	// Create progress tracker if config is provided
	var progressTracker *ProgressTracker
	if progressConfig != nil {
		progressTracker = NewProgressTracker(basePath, progressConfig)
	}

	// Find all files belonging to this log group by globbing
	globPath := basePath + "*"
	matches, err := filepath.Glob(globPath)
	if err != nil {
		if progressTracker != nil {
			progressTracker.Cancel(fmt.Sprintf("glob failed: %v", err))
		}
		return nil, nil, nil, fmt.Errorf("failed to glob for log files with base %s: %w", basePath, err)
	}

	// filepath.Glob might not match the base file itself if it has no extension,
	// so we check for it explicitly and add it to the list.
	info, err := os.Stat(basePath)
	if err == nil && info.Mode().IsRegular() {
		matches = append(matches, basePath)
	}

	// Deduplicate file list
	seen := make(map[string]struct{})
	uniqueFiles := make([]string, 0)
	for _, match := range matches {
		if _, ok := seen[match]; !ok {
			// Further check if it's a file, not a directory. Glob can match dirs.
			info, err := os.Stat(match)
			if err == nil && info.Mode().IsRegular() {
				seen[match] = struct{}{}
				uniqueFiles = append(uniqueFiles, match)
			}
		}
	}

	if len(uniqueFiles) == 0 {
		logger.Warnf("No actual log file found for group: %s", basePath)
		if progressTracker != nil {
			progressTracker.Cancel("no files found")
		}
		return nil, nil, nil, nil
	}

	logger.Infof("Found %d file(s) for log group %s: %v", len(uniqueFiles), basePath, uniqueFiles)

	// Set up progress tracking for all files
	if progressTracker != nil {
		for _, filePath := range uniqueFiles {
			isCompressed := IsCompressedFile(filePath)
			progressTracker.AddFile(filePath, isCompressed)

			// Get file size and estimate lines
			if stat, err := os.Stat(filePath); err == nil {
				progressTracker.SetFileSize(filePath, stat.Size())
				// Estimate lines for progress calculation
				if estimatedLines, err := EstimateFileLines(context.Background(), filePath, stat.Size(), isCompressed); err == nil {
					progressTracker.SetFileEstimate(filePath, estimatedLines)
				}
			}
		}
	}

	docsCountMap := make(map[string]uint64)
	var overallMinTime, overallMaxTime *time.Time

	// Process each file with progress tracking
	for _, filePath := range uniqueFiles {
		if progressTracker != nil {
			progressTracker.StartFile(filePath)
		}

		docsIndexed, minTime, maxTime, err := pi.indexSingleFileWithProgress(filePath, progressTracker)
		if err != nil {
			logger.Warnf("Failed to index file '%s' in group '%s', skipping: %v", filePath, basePath, err)
			if progressTracker != nil {
				progressTracker.FailFile(filePath, err.Error())
			}
			continue // Continue with the next file
		}
		docsCountMap[filePath] = docsIndexed

		if progressTracker != nil {
			progressTracker.CompleteFile(filePath, int64(docsIndexed))
		}

		if minTime != nil {
			if overallMinTime == nil || minTime.Before(*overallMinTime) {
				overallMinTime = minTime
			}
		}
		if maxTime != nil {
			if overallMaxTime == nil || maxTime.After(*overallMaxTime) {
				overallMaxTime = maxTime
			}
		}
	}

	return docsCountMap, overallMinTime, overallMaxTime, nil
}
// indexSingleFileWithProgress indexes a single file with progress updates
func (pi *ParallelIndexer) indexSingleFileWithProgress(filePath string, progressTracker *ProgressTracker) (uint64, *time.Time, *time.Time, error) {
	// If no progress tracker is provided, fall back to the plain indexing path
	if progressTracker == nil {
		return pi.indexSingleFile(filePath)
	}

	// Delegate the actual indexing work to indexSingleFile
	docsIndexed, minTime, maxTime, err := pi.indexSingleFile(filePath)
	if err != nil {
		return 0, nil, nil, err
	}

	// Emit a single final progress update once the file is done; no artificial delays
	if docsIndexed > 0 {
		if strings.HasSuffix(filePath, ".gz") {
			progressTracker.UpdateFileProgress(filePath, int64(docsIndexed))
		} else {
			// Estimate the byte position from the line count, assuming ~150 bytes per line
			estimatedPos := int64(docsIndexed * 150)
			progressTracker.UpdateFileProgress(filePath, int64(docsIndexed), estimatedPos)
		}
	}

	// Return the timestamps reported by indexSingleFile
	return docsIndexed, minTime, maxTime, nil
}