parallel_indexer.go

package indexer

import (
	"bufio"
	"compress/gzip"
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/blevesearch/bleve/v2"
	"github.com/uozi-tech/cosy/logger"

	"github.com/0xJacky/Nginx-UI/internal/nginx_log/utils"
)
// ParallelIndexer provides high-performance parallel indexing with sharding
type ParallelIndexer struct {
	config       *Config
	shardManager ShardManager
	metrics      MetricsCollector

	// Worker management
	workers     []*indexWorker
	jobQueue    chan *IndexJob
	resultQueue chan *IndexResult

	// State management
	ctx     context.Context
	cancel  context.CancelFunc
	wg      sync.WaitGroup
	running int32

	// Statistics
	stats      *IndexStats
	statsMutex sync.RWMutex

	// Optimization
	lastOptimized int64
	optimizing    int32
}

// indexWorker represents a single indexing worker
type indexWorker struct {
	id         int
	indexer    *ParallelIndexer
	stats      *WorkerStats
	statsMutex sync.RWMutex
}
// NewParallelIndexer creates a new parallel indexer
func NewParallelIndexer(config *Config, shardManager ShardManager) *ParallelIndexer {
	if config == nil {
		config = DefaultIndexerConfig()
	}

	ctx, cancel := context.WithCancel(context.Background())

	indexer := &ParallelIndexer{
		config:       config,
		shardManager: shardManager,
		metrics:      NewDefaultMetricsCollector(),
		jobQueue:     make(chan *IndexJob, config.MaxQueueSize),
		resultQueue:  make(chan *IndexResult, config.WorkerCount),
		ctx:          ctx,
		cancel:       cancel,
		stats: &IndexStats{
			WorkerStats: make([]*WorkerStats, config.WorkerCount),
		},
	}

	// Initialize workers
	indexer.workers = make([]*indexWorker, config.WorkerCount)
	for i := 0; i < config.WorkerCount; i++ {
		indexer.workers[i] = &indexWorker{
			id:      i,
			indexer: indexer,
			stats: &WorkerStats{
				ID:     i,
				Status: WorkerStatusIdle,
			},
		}
		indexer.stats.WorkerStats[i] = indexer.workers[i].stats
	}

	return indexer
}
// Start begins the indexer operation
func (pi *ParallelIndexer) Start(ctx context.Context) error {
	if !atomic.CompareAndSwapInt32(&pi.running, 0, 1) {
		return fmt.Errorf("indexer already started")
	}

	// Initialize shard manager
	if err := pi.shardManager.Initialize(); err != nil {
		atomic.StoreInt32(&pi.running, 0)
		return fmt.Errorf("failed to initialize shard manager: %w", err)
	}

	// Start workers
	for _, worker := range pi.workers {
		pi.wg.Add(1)
		go worker.run()
	}

	// Start result processor
	pi.wg.Add(1)
	go pi.processResults()

	// Start optimization routine if enabled
	if pi.config.OptimizeInterval > 0 {
		pi.wg.Add(1)
		go pi.optimizationRoutine()
	}

	// Start metrics collection if enabled
	if pi.config.EnableMetrics {
		pi.wg.Add(1)
		go pi.metricsRoutine()
	}

	return nil
}
// Stop gracefully stops the indexer
func (pi *ParallelIndexer) Stop() error {
	if !atomic.CompareAndSwapInt32(&pi.running, 1, 0) {
		logger.Warnf("[ParallelIndexer] Stop called but indexer already stopped")
		return fmt.Errorf("indexer already stopped")
	}

	// Cancel context to stop all routines
	pi.cancel()

	// Close job queue to stop accepting new jobs
	close(pi.jobQueue)

	// Wait for all workers to finish
	pi.wg.Wait()

	// Close result queue
	close(pi.resultQueue)

	// Flush all remaining data
	if err := pi.FlushAll(); err != nil {
		logger.Errorf("[ParallelIndexer] Failed to flush during stop: %v", err)
		// Don't return the error here; continue with cleanup
	}

	// The shard manager is intentionally not closed here: the shards might still
	// be in use by the searcher and will be closed when the searcher is stopped.
	return nil
}
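// Illustrative lifecycle (a sketch, not part of the package API): construct the
// indexer with a ShardManager implementation, start it, submit documents, and
// stop it. The shardManager and line values below are assumed to exist elsewhere.
//
//	indexer := NewParallelIndexer(DefaultIndexerConfig(), shardManager)
//	if err := indexer.Start(context.Background()); err != nil {
//		logger.Errorf("start failed: %v", err)
//		return
//	}
//	defer indexer.Stop()
//
//	docs := []*Document{{ID: "access.log-0", Fields: &LogDocument{Raw: line}}}
//	if err := indexer.IndexDocuments(context.Background(), docs); err != nil {
//		logger.Errorf("indexing failed: %v", err)
//	}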
// IndexDocument indexes a single document
func (pi *ParallelIndexer) IndexDocument(ctx context.Context, doc *Document) error {
	return pi.IndexDocuments(ctx, []*Document{doc})
}

// IndexDocuments indexes multiple documents
func (pi *ParallelIndexer) IndexDocuments(ctx context.Context, docs []*Document) error {
	if !pi.IsHealthy() {
		return fmt.Errorf("indexer not healthy")
	}
	if len(docs) == 0 {
		return nil
	}

	// Create job
	job := &IndexJob{
		Documents: docs,
		Priority:  PriorityNormal,
	}

	// Submit job and wait for completion
	done := make(chan error, 1)
	job.Callback = func(err error) {
		done <- err
	}

	select {
	case pi.jobQueue <- job:
		select {
		case err := <-done:
			return err
		case <-ctx.Done():
			return ctx.Err()
		}
	case <-ctx.Done():
		return ctx.Err()
	case <-pi.ctx.Done():
		return fmt.Errorf("indexer stopped")
	}
}
// IndexDocumentAsync indexes a document asynchronously
func (pi *ParallelIndexer) IndexDocumentAsync(doc *Document, callback func(error)) {
	pi.IndexDocumentsAsync([]*Document{doc}, callback)
}

// IndexDocumentsAsync indexes multiple documents asynchronously
func (pi *ParallelIndexer) IndexDocumentsAsync(docs []*Document, callback func(error)) {
	if !pi.IsHealthy() {
		if callback != nil {
			callback(fmt.Errorf("indexer not healthy"))
		}
		return
	}
	if len(docs) == 0 {
		if callback != nil {
			callback(nil)
		}
		return
	}

	job := &IndexJob{
		Documents: docs,
		Priority:  PriorityNormal,
		Callback:  callback,
	}

	select {
	case pi.jobQueue <- job:
		// Job queued successfully
	case <-pi.ctx.Done():
		if callback != nil {
			callback(fmt.Errorf("indexer stopped"))
		}
	default:
		// Queue is full
		if callback != nil {
			callback(fmt.Errorf("queue is full"))
		}
	}
}
// StartBatch returns a new batch writer
func (pi *ParallelIndexer) StartBatch() BatchWriterInterface {
	return NewBatchWriter(pi, pi.config.BatchSize)
}
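// Illustrative batch usage (a sketch): Add buffers documents and may auto-flush
// once the configured batch size is reached; a final Flush commits the remainder.
//
//	batch := indexer.StartBatch()
//	for _, doc := range docs {
//		if err := batch.Add(doc); err != nil {
//			return err // a failed auto-flush surfaces here
//		}
//	}
//	if _, err := batch.Flush(); err != nil {
//		return err
//	}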
// FlushAll flushes all pending operations
func (pi *ParallelIndexer) FlushAll() error {
	// Get all shards and flush them
	shards := pi.shardManager.GetAllShards()
	var errs []error

	for i, shard := range shards {
		if shard == nil {
			continue
		}

		// Force a flush by indexing and immediately deleting a temporary document
		batch := shard.NewBatch()

		// Use efficient string building instead of fmt.Sprintf
		tempIDBuf := make([]byte, 0, 64)
		tempIDBuf = append(tempIDBuf, "_flush_temp_"...)
		tempIDBuf = utils.AppendInt(tempIDBuf, i)
		tempIDBuf = append(tempIDBuf, '_')
		tempIDBuf = utils.AppendInt(tempIDBuf, int(time.Now().UnixNano()))
		tempID := utils.BytesToStringUnsafe(tempIDBuf)

		batch.Index(tempID, map[string]interface{}{"_temp": true})
		if err := shard.Batch(batch); err != nil {
			errs = append(errs, fmt.Errorf("failed to flush shard %d: %w", i, err))
			continue
		}

		// Delete the temporary document
		shard.Delete(tempID)
	}

	if len(errs) > 0 {
		return fmt.Errorf("flush errors: %v", errs)
	}
	return nil
}
// Optimize triggers optimization of all shards
func (pi *ParallelIndexer) Optimize() error {
	if !atomic.CompareAndSwapInt32(&pi.optimizing, 0, 1) {
		return fmt.Errorf("optimization already in progress")
	}
	defer atomic.StoreInt32(&pi.optimizing, 0)

	startTime := time.Now()
	stats := pi.shardManager.GetShardStats()

	var errs []error
	for _, stat := range stats {
		if err := pi.shardManager.OptimizeShard(stat.ID); err != nil {
			errs = append(errs, fmt.Errorf("failed to optimize shard %d: %w", stat.ID, err))
		}
	}

	// Update optimization stats
	pi.statsMutex.Lock()
	if pi.stats.OptimizationStats == nil {
		pi.stats.OptimizationStats = &OptimizationStats{}
	}
	pi.stats.OptimizationStats.LastRun = time.Now().Unix()
	pi.stats.OptimizationStats.Duration = time.Since(startTime)
	pi.stats.OptimizationStats.Success = len(errs) == 0
	pi.stats.LastOptimized = time.Now().Unix()
	pi.statsMutex.Unlock()

	atomic.StoreInt64(&pi.lastOptimized, time.Now().Unix())

	// Record optimization metrics for both success and failure
	pi.metrics.RecordOptimization(time.Since(startTime), len(errs) == 0)

	if len(errs) > 0 {
		return fmt.Errorf("optimization errors: %v", errs)
	}
	return nil
}
// IndexLogFile reads and indexes a single log file
func (pi *ParallelIndexer) IndexLogFile(filePath string) error {
	if !pi.IsHealthy() {
		return fmt.Errorf("indexer not healthy")
	}

	file, err := os.Open(filePath)
	if err != nil {
		return fmt.Errorf("failed to open log file %s: %w", filePath, err)
	}
	defer file.Close()

	// Use a batch writer for efficient indexing
	batch := pi.StartBatch()
	scanner := bufio.NewScanner(file)
	docCount := 0

	for scanner.Scan() {
		line := scanner.Text()
		if line == "" {
			continue
		}

		// Parse the log line into a structured document
		logDoc, err := ParseLogLine(line)
		if err != nil {
			logger.Warnf("Skipping line due to parse error in file %s: %v", filePath, err)
			continue
		}
		logDoc.FilePath = filePath

		// Use efficient string building for the document ID
		docIDBuf := make([]byte, 0, len(filePath)+16)
		docIDBuf = append(docIDBuf, filePath...)
		docIDBuf = append(docIDBuf, '-')
		docIDBuf = utils.AppendInt(docIDBuf, docCount)

		doc := &Document{
			ID:     utils.BytesToStringUnsafe(docIDBuf),
			Fields: logDoc,
		}

		if err := batch.Add(doc); err != nil {
			// An error here indicates that an auto-flush occurred and failed.
			// Stop processing this file to avoid further issues.
			return fmt.Errorf("failed to add document to batch for %s (auto-flush might have failed): %w", filePath, err)
		}
		docCount++
	}

	if err := scanner.Err(); err != nil {
		return fmt.Errorf("error reading log file %s: %w", filePath, err)
	}

	if _, err := batch.Flush(); err != nil {
		return fmt.Errorf("failed to flush batch for %s: %w", filePath, err)
	}
	return nil
}
// GetStats returns current indexer statistics
func (pi *ParallelIndexer) GetStats() *IndexStats {
	// Take the write lock: this method mutates the cached stats before copying them.
	pi.statsMutex.Lock()
	defer pi.statsMutex.Unlock()

	// Update shard stats
	shardStats := pi.shardManager.GetShardStats()
	pi.stats.Shards = shardStats
	pi.stats.ShardCount = len(shardStats)

	var totalDocs uint64
	var totalSize int64
	for _, shard := range shardStats {
		totalDocs += shard.DocumentCount
		totalSize += shard.Size
	}
	pi.stats.TotalDocuments = totalDocs
	pi.stats.TotalSize = totalSize
	pi.stats.QueueSize = len(pi.jobQueue)

	// Calculate memory usage
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	pi.stats.MemoryUsage = int64(memStats.Alloc)

	// Copy stats to avoid race conditions
	statsCopy := *pi.stats
	return &statsCopy
}
// IsRunning returns whether the indexer is currently running
func (pi *ParallelIndexer) IsRunning() bool {
	return atomic.LoadInt32(&pi.running) != 0
}

// GetShardInfo returns information about a specific shard
func (pi *ParallelIndexer) GetShardInfo(shardID int) (*ShardInfo, error) {
	shardStats := pi.shardManager.GetShardStats()
	for _, stat := range shardStats {
		if stat.ID == shardID {
			return stat, nil
		}
	}
	return nil, fmt.Errorf("%s: %d", ErrShardNotFound, shardID)
}

// IsHealthy checks if the indexer is running and healthy
func (pi *ParallelIndexer) IsHealthy() bool {
	if atomic.LoadInt32(&pi.running) != 1 {
		return false
	}
	// Check shard manager health
	return pi.shardManager.HealthCheck() == nil
}

// GetConfig returns the current configuration
func (pi *ParallelIndexer) GetConfig() *Config {
	return pi.config
}

// GetAllShards returns all managed shards
func (pi *ParallelIndexer) GetAllShards() []bleve.Index {
	return pi.shardManager.GetAllShards()
}
// DeleteIndexByLogGroup deletes all index entries for a specific log group (base path and its rotated files)
func (pi *ParallelIndexer) DeleteIndexByLogGroup(basePath string, logFileManager interface{}) error {
	if !pi.IsHealthy() {
		return fmt.Errorf("indexer not healthy")
	}

	// Get all file paths for this log group from the database
	if logFileManager == nil {
		return fmt.Errorf("log file manager is required")
	}
	lfm, ok := logFileManager.(interface {
		GetFilePathsForGroup(string) ([]string, error)
	})
	if !ok {
		return fmt.Errorf("log file manager does not support GetFilePathsForGroup")
	}

	filesToDelete, err := lfm.GetFilePathsForGroup(basePath)
	if err != nil {
		return fmt.Errorf("failed to get file paths for log group %s: %w", basePath, err)
	}
	logger.Infof("Deleting index entries for log group %s, files: %v", basePath, filesToDelete)

	// Delete documents from all shards for these files
	shards := pi.shardManager.GetAllShards()
	var deleteErrors []error

	for _, shard := range shards {
		// Search for documents with a matching file_path
		for _, filePath := range filesToDelete {
			query := bleve.NewTermQuery(filePath)
			query.SetField("file_path")

			searchRequest := bleve.NewSearchRequest(query)
			searchRequest.Size = 1000 // Process in batches
			searchRequest.Fields = []string{"file_path"}

			for {
				searchResult, err := shard.Search(searchRequest)
				if err != nil {
					deleteErrors = append(deleteErrors, fmt.Errorf("failed to search for documents in file %s: %w", filePath, err))
					break
				}
				if len(searchResult.Hits) == 0 {
					break // No more documents to delete
				}

				// Delete documents in batch
				batch := shard.NewBatch()
				for _, hit := range searchResult.Hits {
					batch.Delete(hit.ID)
				}
				if err := shard.Batch(batch); err != nil {
					deleteErrors = append(deleteErrors, fmt.Errorf("failed to delete batch for file %s: %w", filePath, err))
					break // Avoid re-fetching the same documents forever
				}

				// If we got fewer results than requested, we're done
				if len(searchResult.Hits) < searchRequest.Size {
					break
				}
				// The deleted documents no longer match, so the next iteration
				// searches from the beginning again instead of advancing From.
			}
		}
	}

	if len(deleteErrors) > 0 {
		return fmt.Errorf("encountered %d errors during deletion: %v", len(deleteErrors), deleteErrors[0])
	}

	logger.Infof("Successfully deleted index entries for log group: %s", basePath)
	return nil
}
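// Illustrative caller (a sketch): any value implementing GetFilePathsForGroup
// satisfies the duck-typed logFileManager parameter. The groupLister type and
// the paths below are assumptions for the example, not part of this package.
//
//	type groupLister struct{ paths map[string][]string }
//
//	func (g *groupLister) GetFilePathsForGroup(basePath string) ([]string, error) {
//		return g.paths[basePath], nil
//	}
//
//	err := indexer.DeleteIndexByLogGroup("/var/log/nginx/access.log", &groupLister{
//		paths: map[string][]string{
//			"/var/log/nginx/access.log": {
//				"/var/log/nginx/access.log",
//				"/var/log/nginx/access.log.1",
//				"/var/log/nginx/access.log.2.gz",
//			},
//		},
//	})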
// DestroyAllIndexes closes and deletes all index data from disk.
func (pi *ParallelIndexer) DestroyAllIndexes() error {
	// Stop all background routines before deleting files
	pi.cancel()
	pi.wg.Wait()
	close(pi.jobQueue)
	close(pi.resultQueue)
	atomic.StoreInt32(&pi.running, 0) // Mark as not running

	var destructionErr error
	if manager, ok := pi.shardManager.(*DefaultShardManager); ok {
		destructionErr = manager.Destroy()
	} else {
		destructionErr = fmt.Errorf("shard manager does not support destruction")
	}

	// Re-initialize context and channels for a potential restart
	pi.ctx, pi.cancel = context.WithCancel(context.Background())
	pi.jobQueue = make(chan *IndexJob, pi.config.MaxQueueSize)
	pi.resultQueue = make(chan *IndexResult, pi.config.WorkerCount)

	return destructionErr
}
// IndexLogGroup finds all files related to a base log path (e.g., rotated logs) and indexes them.
// It returns a map of [filePath -> docCount] and the min/max timestamps found.
func (pi *ParallelIndexer) IndexLogGroup(basePath string) (map[string]uint64, *time.Time, *time.Time, error) {
	if !pi.IsHealthy() {
		return nil, nil, nil, fmt.Errorf("indexer not healthy")
	}

	// Find all files belonging to this log group by globbing
	globPath := basePath + "*"
	matches, err := filepath.Glob(globPath)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("failed to glob for log files with base %s: %w", basePath, err)
	}

	// filepath.Glob might not match the base file itself if it has no extension,
	// so we check for it explicitly and add it to the list.
	info, err := os.Stat(basePath)
	if err == nil && info.Mode().IsRegular() {
		matches = append(matches, basePath)
	}

	// Deduplicate the file list
	seen := make(map[string]struct{})
	uniqueFiles := make([]string, 0)
	for _, match := range matches {
		if _, ok := seen[match]; !ok {
			// Further check that it's a regular file, not a directory; Glob can match dirs.
			info, err := os.Stat(match)
			if err == nil && info.Mode().IsRegular() {
				seen[match] = struct{}{}
				uniqueFiles = append(uniqueFiles, match)
			}
		}
	}

	if len(uniqueFiles) == 0 {
		logger.Warnf("No actual log file found for group: %s", basePath)
		return nil, nil, nil, nil
	}

	logger.Infof("Found %d file(s) for log group %s: %v", len(uniqueFiles), basePath, uniqueFiles)

	docsCountMap := make(map[string]uint64)
	var overallMinTime, overallMaxTime *time.Time

	for _, filePath := range uniqueFiles {
		docsIndexed, minTime, maxTime, err := pi.indexSingleFile(filePath)
		if err != nil {
			logger.Warnf("Failed to index file '%s' in group '%s', skipping: %v", filePath, basePath, err)
			continue // Continue with the next file
		}
		docsCountMap[filePath] = docsIndexed

		if minTime != nil {
			if overallMinTime == nil || minTime.Before(*overallMinTime) {
				overallMinTime = minTime
			}
		}
		if maxTime != nil {
			if overallMaxTime == nil || maxTime.After(*overallMaxTime) {
				overallMaxTime = maxTime
			}
		}
	}

	return docsCountMap, overallMinTime, overallMaxTime, nil
}
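// Illustrative call (a sketch): index an access log together with its rotated
// and gzipped siblings, then report per-file document counts and the covered
// time range. The path below is an assumption for the example.
//
//	counts, minTime, maxTime, err := indexer.IndexLogGroup("/var/log/nginx/access.log")
//	if err != nil {
//		logger.Errorf("index log group failed: %v", err)
//		return
//	}
//	for path, n := range counts {
//		logger.Infof("indexed %d documents from %s", n, path)
//	}
//	if minTime != nil && maxTime != nil {
//		logger.Infof("time range: %s - %s", minTime, maxTime)
//	}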
// indexSingleFile contains the logic to process one physical log file.
// It returns the number of documents indexed from the file and the min/max timestamps.
func (pi *ParallelIndexer) indexSingleFile(filePath string) (uint64, *time.Time, *time.Time, error) {
	file, err := os.Open(filePath)
	if err != nil {
		return 0, nil, nil, fmt.Errorf("failed to open log file %s: %w", filePath, err)
	}
	defer file.Close()

	var reader io.Reader = file
	// Handle gzipped files
	if strings.HasSuffix(filePath, ".gz") {
		gz, err := gzip.NewReader(file)
		if err != nil {
			return 0, nil, nil, fmt.Errorf("failed to create gzip reader for %s: %w", filePath, err)
		}
		defer gz.Close()
		reader = gz
	}

	logger.Infof("Starting to process file: %s", filePath)

	batch := pi.StartBatch()
	scanner := bufio.NewScanner(reader)
	docCount := 0
	var minTime, maxTime *time.Time

	for scanner.Scan() {
		line := scanner.Text()
		if line == "" {
			continue
		}

		logDoc, err := ParseLogLine(line)
		if err != nil {
			logger.Warnf("Skipping line due to parse error in file %s: %v", filePath, err)
			continue
		}
		logDoc.FilePath = filePath

		// Track min/max timestamps
		ts := time.Unix(logDoc.Timestamp, 0)
		if minTime == nil || ts.Before(*minTime) {
			minTime = &ts
		}
		if maxTime == nil || ts.After(*maxTime) {
			maxTime = &ts
		}

		// Use efficient string building for the document ID
		docIDBuf := make([]byte, 0, len(filePath)+16)
		docIDBuf = append(docIDBuf, filePath...)
		docIDBuf = append(docIDBuf, '-')
		docIDBuf = utils.AppendInt(docIDBuf, docCount)

		doc := &Document{
			ID:     utils.BytesToStringUnsafe(docIDBuf),
			Fields: logDoc,
		}

		if err := batch.Add(doc); err != nil {
			// An error here indicates that an auto-flush occurred and failed.
			// Stop processing this file to avoid further issues.
			return uint64(docCount), minTime, maxTime, fmt.Errorf("failed to add document to batch for %s (auto-flush might have failed): %w", filePath, err)
		}
		docCount++
	}

	if err := scanner.Err(); err != nil {
		return uint64(docCount), minTime, maxTime, fmt.Errorf("error reading log file %s: %w", filePath, err)
	}

	logger.Infof("Finished processing file: %s. Total lines processed: %d", filePath, docCount)

	if docCount > 0 {
		if _, err := batch.Flush(); err != nil {
			return uint64(docCount), minTime, maxTime, fmt.Errorf("failed to flush batch for %s: %w", filePath, err)
		}
	}

	return uint64(docCount), minTime, maxTime, nil
}
// UpdateConfig updates the indexer configuration
func (pi *ParallelIndexer) UpdateConfig(config *Config) error {
	// Only allow updating certain configuration parameters while running
	pi.config.BatchSize = config.BatchSize
	pi.config.FlushInterval = config.FlushInterval
	pi.config.EnableMetrics = config.EnableMetrics
	return nil
}
// Worker implementation

func (w *indexWorker) run() {
	defer w.indexer.wg.Done()
	w.updateStatus(WorkerStatusIdle)

	for {
		select {
		case job, ok := <-w.indexer.jobQueue:
			if !ok {
				return // Channel closed, worker should exit
			}
			w.updateStatus(WorkerStatusBusy)
			result := w.processJob(job)

			// Send result
			select {
			case w.indexer.resultQueue <- result:
			case <-w.indexer.ctx.Done():
				return
			}

			// Execute callback if provided
			if job.Callback != nil {
				var err error
				if result.Failed > 0 {
					err = fmt.Errorf("indexing failed for %d documents", result.Failed)
				}
				job.Callback(err)
			}

			w.updateStatus(WorkerStatusIdle)
		case <-w.indexer.ctx.Done():
			return
		}
	}
}
func (w *indexWorker) processJob(job *IndexJob) *IndexResult {
	startTime := time.Now()
	result := &IndexResult{
		Processed: len(job.Documents),
	}

	// Group documents by shard
	shardDocs := make(map[int][]*Document)
	for _, doc := range job.Documents {
		if doc.ID == "" {
			result.Failed++
			continue
		}
		_, shardID, err := w.indexer.shardManager.GetShard(doc.ID)
		if err != nil {
			result.Failed++
			continue
		}
		shardDocs[shardID] = append(shardDocs[shardID], doc)
	}

	// Index documents per shard
	for shardID, docs := range shardDocs {
		if err := w.indexShardDocuments(shardID, docs); err != nil {
			result.Failed += len(docs)
		} else {
			result.Succeeded += len(docs)
		}
	}

	result.Duration = time.Since(startTime)
	if result.Processed > 0 {
		result.ErrorRate = float64(result.Failed) / float64(result.Processed)
		result.Throughput = float64(result.Processed) / result.Duration.Seconds()
	}

	// Update worker stats
	w.statsMutex.Lock()
	w.stats.ProcessedJobs++
	w.stats.ProcessedDocs += int64(result.Processed)
	w.stats.ErrorCount += int64(result.Failed)
	w.stats.LastActive = time.Now().Unix()
	// Update average latency (simple moving average)
	if w.stats.AverageLatency == 0 {
		w.stats.AverageLatency = result.Duration
	} else {
		w.stats.AverageLatency = (w.stats.AverageLatency + result.Duration) / 2
	}
	w.statsMutex.Unlock()

	return result
}
func (w *indexWorker) indexShardDocuments(shardID int, docs []*Document) error {
	shard, err := w.indexer.shardManager.GetShardByID(shardID)
	if err != nil {
		return err
	}

	batch := shard.NewBatch()
	for _, doc := range docs {
		// Convert LogDocument to a map for Bleve indexing
		docMap := w.logDocumentToMap(doc.Fields)
		batch.Index(doc.ID, docMap)
	}

	if err := shard.Batch(batch); err != nil {
		return fmt.Errorf("failed to index batch for shard %d: %w", shardID, err)
	}
	return nil
}
// logDocumentToMap converts a LogDocument to a map[string]interface{} for Bleve
func (w *indexWorker) logDocumentToMap(doc *LogDocument) map[string]interface{} {
	docMap := map[string]interface{}{
		"timestamp":  doc.Timestamp,
		"ip":         doc.IP,
		"method":     doc.Method,
		"path":       doc.Path,
		"path_exact": doc.PathExact,
		"status":     doc.Status,
		"bytes_sent": doc.BytesSent,
		"file_path":  doc.FilePath,
		"raw":        doc.Raw,
	}

	// Add optional fields only if they have values
	if doc.RegionCode != "" {
		docMap["region_code"] = doc.RegionCode
	}
	if doc.Province != "" {
		docMap["province"] = doc.Province
	}
	if doc.City != "" {
		docMap["city"] = doc.City
	}
	if doc.Protocol != "" {
		docMap["protocol"] = doc.Protocol
	}
	if doc.Referer != "" {
		docMap["referer"] = doc.Referer
	}
	if doc.UserAgent != "" {
		docMap["user_agent"] = doc.UserAgent
	}
	if doc.Browser != "" {
		docMap["browser"] = doc.Browser
	}
	if doc.BrowserVer != "" {
		docMap["browser_version"] = doc.BrowserVer
	}
	if doc.OS != "" {
		docMap["os"] = doc.OS
	}
	if doc.OSVersion != "" {
		docMap["os_version"] = doc.OSVersion
	}
	if doc.DeviceType != "" {
		docMap["device_type"] = doc.DeviceType
	}
	if doc.RequestTime > 0 {
		docMap["request_time"] = doc.RequestTime
	}
	if doc.UpstreamTime != nil {
		docMap["upstream_time"] = *doc.UpstreamTime
	}

	return docMap
}
func (w *indexWorker) updateStatus(status string) {
	w.statsMutex.Lock()
	w.stats.Status = status
	w.statsMutex.Unlock()
}
// Background routines

func (pi *ParallelIndexer) processResults() {
	defer pi.wg.Done()

	for {
		select {
		case result := <-pi.resultQueue:
			if result != nil {
				pi.metrics.RecordIndexOperation(
					result.Processed,
					result.Duration,
					result.Failed == 0,
				)
			}
		case <-pi.ctx.Done():
			return
		}
	}
}

func (pi *ParallelIndexer) optimizationRoutine() {
	defer pi.wg.Done()

	ticker := time.NewTicker(pi.config.OptimizeInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			if atomic.LoadInt32(&pi.optimizing) == 0 {
				go pi.Optimize() // Run in the background to avoid blocking the ticker
			}
		case <-pi.ctx.Done():
			return
		}
	}
}

func (pi *ParallelIndexer) metricsRoutine() {
	defer pi.wg.Done()

	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			pi.updateMetrics()
		case <-pi.ctx.Done():
			return
		}
	}
}

func (pi *ParallelIndexer) updateMetrics() {
	pi.statsMutex.Lock()
	defer pi.statsMutex.Unlock()

	// Update indexing rate based on recent activity
	metrics := pi.metrics.GetMetrics()
	pi.stats.IndexingRate = metrics.IndexingRate
}
// IndexLogGroupWithProgress indexes a log group with progress tracking
func (pi *ParallelIndexer) IndexLogGroupWithProgress(basePath string, progressConfig *ProgressConfig) (map[string]uint64, *time.Time, *time.Time, error) {
	if !pi.IsHealthy() {
		return nil, nil, nil, fmt.Errorf("indexer not healthy")
	}

	// Create a progress tracker if a config is provided
	var progressTracker *ProgressTracker
	if progressConfig != nil {
		progressTracker = NewProgressTracker(basePath, progressConfig)
	}

	// Find all files belonging to this log group by globbing
	globPath := basePath + "*"
	matches, err := filepath.Glob(globPath)
	if err != nil {
		if progressTracker != nil {
			progressTracker.Cancel(fmt.Sprintf("glob failed: %v", err))
		}
		return nil, nil, nil, fmt.Errorf("failed to glob for log files with base %s: %w", basePath, err)
	}

	// filepath.Glob might not match the base file itself if it has no extension,
	// so we check for it explicitly and add it to the list.
	info, err := os.Stat(basePath)
	if err == nil && info.Mode().IsRegular() {
		matches = append(matches, basePath)
	}

	// Deduplicate the file list
	seen := make(map[string]struct{})
	uniqueFiles := make([]string, 0)
	for _, match := range matches {
		if _, ok := seen[match]; !ok {
			// Further check that it's a regular file, not a directory; Glob can match dirs.
			info, err := os.Stat(match)
			if err == nil && info.Mode().IsRegular() {
				seen[match] = struct{}{}
				uniqueFiles = append(uniqueFiles, match)
			}
		}
	}

	if len(uniqueFiles) == 0 {
		logger.Warnf("No actual log file found for group: %s", basePath)
		if progressTracker != nil {
			progressTracker.Cancel("no files found")
		}
		return nil, nil, nil, nil
	}

	logger.Infof("Found %d file(s) for log group %s: %v", len(uniqueFiles), basePath, uniqueFiles)

	// Set up progress tracking for all files
	if progressTracker != nil {
		for _, filePath := range uniqueFiles {
			isCompressed := IsCompressedFile(filePath)
			progressTracker.AddFile(filePath, isCompressed)

			// Get the file size and estimate the line count
			if stat, err := os.Stat(filePath); err == nil {
				progressTracker.SetFileSize(filePath, stat.Size())
				// Estimate lines for progress calculation
				if estimatedLines, err := EstimateFileLines(context.Background(), filePath, stat.Size(), isCompressed); err == nil {
					progressTracker.SetFileEstimate(filePath, estimatedLines)
				}
			}
		}
	}

	docsCountMap := make(map[string]uint64)
	var overallMinTime, overallMaxTime *time.Time

	// Process each file with progress tracking
	for _, filePath := range uniqueFiles {
		if progressTracker != nil {
			progressTracker.StartFile(filePath)
		}

		docsIndexed, minTime, maxTime, err := pi.indexSingleFileWithProgress(filePath, progressTracker)
		if err != nil {
			logger.Warnf("Failed to index file '%s' in group '%s', skipping: %v", filePath, basePath, err)
			if progressTracker != nil {
				progressTracker.FailFile(filePath, err.Error())
			}
			continue // Continue with the next file
		}
		docsCountMap[filePath] = docsIndexed

		if progressTracker != nil {
			progressTracker.CompleteFile(filePath, int64(docsIndexed))
		}

		if minTime != nil {
			if overallMinTime == nil || minTime.Before(*overallMinTime) {
				overallMinTime = minTime
			}
		}
		if maxTime != nil {
			if overallMaxTime == nil || maxTime.After(*overallMaxTime) {
				overallMaxTime = maxTime
			}
		}
	}

	return docsCountMap, overallMinTime, overallMaxTime, nil
}
// indexSingleFileWithProgress indexes a single file with progress updates
func (pi *ParallelIndexer) indexSingleFileWithProgress(filePath string, progressTracker *ProgressTracker) (uint64, *time.Time, *time.Time, error) {
	// If there is no progress tracker, fall back to the plain method
	if progressTracker == nil {
		return pi.indexSingleFile(filePath)
	}

	// Call the plain indexing method to do the actual work
	docsIndexed, minTime, maxTime, err := pi.indexSingleFile(filePath)
	if err != nil {
		return 0, nil, nil, err
	}

	// Do one final progress update when done - no artificial delays
	if docsIndexed > 0 {
		if strings.HasSuffix(filePath, ".gz") {
			progressTracker.UpdateFileProgress(filePath, int64(docsIndexed))
		} else {
			// Estimate the file position assuming an average line size of ~150 bytes
			estimatedPos := int64(docsIndexed * 150)
			progressTracker.UpdateFileProgress(filePath, int64(docsIndexed), estimatedPos)
		}
	}

	// Return the actual timestamps from the underlying method
	return docsIndexed, minTime, maxTime, nil
}