log_file_manager.go 21 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617
  1. package indexer
  2. import (
  3. "fmt"
  4. "os"
  5. "path/filepath"
  6. "regexp"
  7. "sort"
  8. "strings"
  9. "sync"
  10. "time"
  11. "github.com/0xJacky/Nginx-UI/internal/nginx_log/utils"
  12. "github.com/0xJacky/Nginx-UI/model"
  13. "github.com/uozi-tech/cosy/logger"
  14. )
// Legacy constants for backward compatibility - use IndexStatus enum in types.go instead

// NginxLogCache represents a cached log entry discovered from the nginx
// configuration (i.e. an access_log/error_log directive).
type NginxLogCache struct {
	Path       string `json:"path"`        // Path to the log file
	Type       string `json:"type"`        // Type of log: "access" or "error"
	Name       string `json:"name"`        // Name of the log file
	ConfigFile string `json:"config_file"` // Path to the configuration file that contains this log directive
}
// NginxLogWithIndex represents a log file with its index status information.
// It merges config-discovered data (NginxLogCache) with persisted index
// metadata, and is the shape returned to the UI by the Get* methods below.
type NginxLogWithIndex struct {
	Path           string `json:"path"`                       // Path to the log file
	Type           string `json:"type"`                       // Type of log: "access" or "error"
	Name           string `json:"name"`                       // Name of the log file
	ConfigFile     string `json:"config_file"`                // Path to the configuration file
	IndexStatus    string `json:"index_status"`               // Index status: indexed, indexing, not_indexed, queued, error
	LastModified   int64  `json:"last_modified,omitempty"`    // Unix timestamp of last modification time
	LastSize       int64  `json:"last_size,omitempty"`        // Last known size of the file
	LastIndexed    int64  `json:"last_indexed,omitempty"`     // Unix timestamp when the file was last indexed
	IndexStartTime int64  `json:"index_start_time,omitempty"` // Unix timestamp when the last indexing operation started
	IndexDuration  int64  `json:"index_duration,omitempty"`   // Duration of last indexing operation in milliseconds
	IsCompressed   bool   `json:"is_compressed"`              // Whether the file is compressed (.gz/.bz2)
	HasTimeRange   bool   `json:"has_timerange"`              // Whether time range is available
	TimeRangeStart int64  `json:"timerange_start,omitempty"`  // Unix timestamp of start of time range in the log
	TimeRangeEnd   int64  `json:"timerange_end,omitempty"`    // Unix timestamp of end of time range in the log
	DocumentCount  uint64 `json:"document_count,omitempty"`   // Number of indexed documents from this file

	// Enhanced status tracking fields
	ErrorMessage  string `json:"error_message,omitempty"`  // Error message if indexing failed
	ErrorTime     int64  `json:"error_time,omitempty"`     // Unix timestamp when error occurred
	RetryCount    int    `json:"retry_count,omitempty"`    // Number of retry attempts
	QueuePosition int    `json:"queue_position,omitempty"` // Position in indexing queue
}
// LogFileManager manages nginx log file discovery and index status.
// logCache is guarded by cacheMutex, indexingStatus by indexingMutex;
// the two locks are never held for callers across method boundaries.
type LogFileManager struct {
	logCache       map[string]*NginxLogCache // config-discovered logs keyed by path
	cacheMutex     sync.RWMutex              // guards logCache
	persistence    *PersistenceManager       // database-backed index metadata
	indexingStatus map[string]bool           // paths currently being indexed (presence == indexing)
	indexingMutex  sync.RWMutex              // guards indexingStatus
	indexer        *ParallelIndexer          // optional; set via SetIndexer for exact doc counts
}
  55. // NewLogFileManager creates a new log file manager
  56. func NewLogFileManager() *LogFileManager {
  57. return &LogFileManager{
  58. logCache: make(map[string]*NginxLogCache),
  59. persistence: NewPersistenceManager(DefaultIncrementalConfig()),
  60. indexingStatus: make(map[string]bool),
  61. }
  62. }
// SetIndexer injects the running ParallelIndexer so we can query exact doc counts before persisting.
// May be nil; SaveIndexMetadata checks for nil/health before using it.
func (lm *LogFileManager) SetIndexer(pi *ParallelIndexer) {
	lm.indexer = pi
}
  67. // AddLogPath adds a log path to the log cache with the source config file
  68. func (lm *LogFileManager) AddLogPath(path, logType, name, configFile string) {
  69. lm.cacheMutex.Lock()
  70. defer lm.cacheMutex.Unlock()
  71. lm.logCache[path] = &NginxLogCache{
  72. Path: path,
  73. Type: logType,
  74. Name: name,
  75. ConfigFile: configFile,
  76. }
  77. }
  78. // RemoveLogPathsFromConfig removes all log paths associated with a specific config file
  79. func (lm *LogFileManager) RemoveLogPathsFromConfig(configFile string) {
  80. lm.cacheMutex.Lock()
  81. defer lm.cacheMutex.Unlock()
  82. for path, logEntry := range lm.logCache {
  83. if logEntry.ConfigFile == configFile {
  84. delete(lm.logCache, path)
  85. }
  86. }
  87. }
  88. // GetAllLogPaths returns all cached log paths, optionally filtered
  89. func (lm *LogFileManager) GetAllLogPaths(filters ...func(*NginxLogCache) bool) []*NginxLogCache {
  90. lm.cacheMutex.RLock()
  91. defer lm.cacheMutex.RUnlock()
  92. var logs []*NginxLogCache
  93. for _, logEntry := range lm.logCache {
  94. // Apply all filters
  95. include := true
  96. for _, filter := range filters {
  97. if !filter(logEntry) {
  98. include = false
  99. break
  100. }
  101. }
  102. if include {
  103. // Create a copy to avoid race conditions
  104. logCopy := *logEntry
  105. logs = append(logs, &logCopy)
  106. }
  107. }
  108. return logs
  109. }
  110. // SetIndexingStatus sets the indexing status for a specific file path
  111. func (lm *LogFileManager) SetIndexingStatus(path string, isIndexing bool) {
  112. lm.indexingMutex.Lock()
  113. defer lm.indexingMutex.Unlock()
  114. if isIndexing {
  115. lm.indexingStatus[path] = true
  116. } else {
  117. delete(lm.indexingStatus, path)
  118. }
  119. }
  120. // GetIndexingFiles returns a list of files currently being indexed
  121. func (lm *LogFileManager) GetIndexingFiles() []string {
  122. lm.indexingMutex.RLock()
  123. defer lm.indexingMutex.RUnlock()
  124. var files []string
  125. for path := range lm.indexingStatus {
  126. files = append(files, path)
  127. }
  128. return files
  129. }
  130. // getBaseLogName determines the base log file name for grouping rotated files
  131. func getBaseLogName(filePath string) string {
  132. dir := filepath.Dir(filePath)
  133. filename := filepath.Base(filePath)
  134. // Remove compression extensions first
  135. filename = strings.TrimSuffix(filename, ".gz")
  136. filename = strings.TrimSuffix(filename, ".bz2")
  137. // Handle numbered rotation (access.log.1, access.log.2, etc.)
  138. if match := regexp.MustCompile(`^(.+)\.(\d+)$`).FindStringSubmatch(filename); len(match) > 1 {
  139. baseFilename := match[1]
  140. return filepath.Join(dir, baseFilename)
  141. }
  142. // Handle date rotation suffixes
  143. parts := strings.Split(filename, ".")
  144. if len(parts) >= 2 {
  145. lastPart := parts[len(parts)-1]
  146. if isDatePattern(lastPart) {
  147. baseFilename := strings.Join(parts[:len(parts)-1], ".")
  148. // If the base doesn't end with .log, add it
  149. if !strings.HasSuffix(baseFilename, ".log") {
  150. baseFilename += ".log"
  151. }
  152. return filepath.Join(dir, baseFilename)
  153. }
  154. }
  155. // If it already looks like a base log file, return as-is
  156. return filePath
  157. }
// GetAllLogsWithIndexGrouped returns logs grouped by their base name (e.g., access.log includes access.log.1, access.log.2.gz etc.)
//
// The result is built in three phases:
//  1. merge config-cache entries with persisted index records into one map;
//  2. apply the caller's filters;
//  3. fold entries into per-group aggregates keyed by getBaseLogName, with a
//     status priority of indexing > queued > indexed > error > not_indexed.
//
// Output order is deterministic (sorted by group path).
func (lm *LogFileManager) GetAllLogsWithIndexGrouped(filters ...func(*NginxLogWithIndex) bool) []*NginxLogWithIndex {
	lm.cacheMutex.RLock()
	defer lm.cacheMutex.RUnlock()

	// Get all logs from both cache (config files) and persistence (indexed files)
	allLogsMap := make(map[string]*NginxLogWithIndex)

	// First, get logs from the cache (these are from nginx config)
	for _, cache := range lm.logCache {
		logWithIndex := &NginxLogWithIndex{
			Path:         cache.Path,
			Type:         cache.Type,
			Name:         cache.Name,
			ConfigFile:   cache.ConfigFile,
			IndexStatus:  string(IndexStatusNotIndexed),
			IsCompressed: false,
			HasTimeRange: false,
		}
		allLogsMap[cache.Path] = logWithIndex
	}

	// Get persistence indexes and update status
	persistenceIndexes, err := lm.persistence.GetAllLogIndexes()
	if err != nil {
		// Best-effort: fall back to config-cache data only.
		logger.Warnf("Failed to get persistence indexes: %v", err)
		persistenceIndexes = []*model.NginxLogIndex{}
	}

	// Add all indexed files from persistence (including rotated files)
	for _, idx := range persistenceIndexes {
		if _, exists := allLogsMap[idx.Path]; !exists {
			// This is a rotated file not in config cache, create entry for it.
			// Type is guessed from the path; "error" anywhere in the path
			// marks it as an error log, otherwise it is treated as access.
			logType := "access"
			if strings.Contains(idx.Path, "error") {
				logType = "error"
			}
			logWithIndex := &NginxLogWithIndex{
				Path:        idx.Path,
				Type:        logType,
				Name:        filepath.Base(idx.Path),
				ConfigFile:  "",
				IndexStatus: string(IndexStatusNotIndexed),
			}
			allLogsMap[idx.Path] = logWithIndex
		}

		// Update index status from persistence data
		logWithIndex := allLogsMap[idx.Path]
		logWithIndex.LastModified = idx.LastModified.Unix()
		logWithIndex.LastSize = idx.LastSize
		logWithIndex.LastIndexed = idx.LastIndexed.Unix()
		if idx.IndexStartTime != nil {
			logWithIndex.IndexStartTime = idx.IndexStartTime.Unix()
		}
		if idx.IndexDuration != nil {
			logWithIndex.IndexDuration = *idx.IndexDuration
		}
		logWithIndex.DocumentCount = idx.DocumentCount
		// Set queue position if available
		logWithIndex.QueuePosition = idx.QueuePosition
		// Set error message if available
		logWithIndex.ErrorMessage = idx.ErrorMessage
		if idx.ErrorTime != nil {
			logWithIndex.ErrorTime = idx.ErrorTime.Unix()
		}
		logWithIndex.RetryCount = idx.RetryCount

		// Use the index status from the database if it's set
		if idx.IndexStatus != "" {
			logWithIndex.IndexStatus = idx.IndexStatus
		} else {
			// Fallback to determining status if not set in DB.
			// Note: indexingMutex is taken briefly while cacheMutex is held;
			// no code path takes them in the opposite order, so no deadlock.
			lm.indexingMutex.RLock()
			isIndexing := lm.indexingStatus[idx.Path]
			lm.indexingMutex.RUnlock()
			if isIndexing {
				logWithIndex.IndexStatus = string(IndexStatusIndexing)
			} else if !idx.LastIndexed.IsZero() {
				// If file has been indexed (regardless of document count), it's indexed
				logWithIndex.IndexStatus = string(IndexStatusIndexed)
			}
		}

		// Set time range if available (both bounds must be present and non-zero)
		if idx.TimeRangeStart != nil && idx.TimeRangeEnd != nil && !idx.TimeRangeStart.IsZero() && !idx.TimeRangeEnd.IsZero() {
			logWithIndex.HasTimeRange = true
			logWithIndex.TimeRangeStart = idx.TimeRangeStart.Unix()
			logWithIndex.TimeRangeEnd = idx.TimeRangeEnd.Unix()
		}
		logWithIndex.IsCompressed = strings.HasSuffix(idx.Path, ".gz") || strings.HasSuffix(idx.Path, ".bz2")
	}

	// Convert to slice and apply filters
	var logs []*NginxLogWithIndex
	for _, log := range allLogsMap {
		// Apply all filters
		include := true
		for _, filter := range filters {
			if !filter(log) {
				include = false
				break
			}
		}
		if include {
			logs = append(logs, log)
		}
	}

	// Group by base log name with stable aggregation
	groupedMap := make(map[string]*NginxLogWithIndex)
	// Sort logs by path first to ensure consistent processing order
	sort.Slice(logs, func(i, j int) bool {
		return logs[i].Path < logs[j].Path
	})
	for _, log := range logs {
		baseLogName := getBaseLogName(log.Path)
		if existing, exists := groupedMap[baseLogName]; exists {
			// Check if current log is a main log path record (already aggregated)
			// or if existing record is a main log path record
			logIsMainPath := (log.Path == baseLogName)
			existingIsMainPath := (existing.Path == baseLogName)
			if logIsMainPath && !existingIsMainPath {
				// Current log is the main aggregated record, replace existing
				groupedLog := *log
				groupedLog.Path = baseLogName
				groupedLog.Name = filepath.Base(baseLogName)
				groupedMap[baseLogName] = &groupedLog
			} else if !logIsMainPath && existingIsMainPath {
				// Existing is main record, keep it, don't accumulate
				// Only update status if needed
				if log.IndexStatus == string(IndexStatusIndexing) {
					existing.IndexStatus = string(IndexStatusIndexing)
				}
			} else if !logIsMainPath && !existingIsMainPath {
				// Both are individual files, accumulate normally.
				// The most recently indexed file supplies the timestamps.
				if log.LastIndexed > existing.LastIndexed {
					existing.LastModified = log.LastModified
					existing.LastIndexed = log.LastIndexed
					existing.IndexStartTime = log.IndexStartTime
					existing.IndexDuration = log.IndexDuration
				}
				existing.DocumentCount += log.DocumentCount
				existing.LastSize += log.LastSize
				// Update status with priority: indexing > queued > indexed > error > not_indexed
				if log.IndexStatus == string(IndexStatusIndexing) {
					existing.IndexStatus = string(IndexStatusIndexing)
				} else if log.IndexStatus == string(IndexStatusQueued) &&
					existing.IndexStatus != string(IndexStatusIndexing) {
					existing.IndexStatus = string(IndexStatusQueued)
					// Keep the queue position from the queued log
					if log.QueuePosition > 0 {
						existing.QueuePosition = log.QueuePosition
					}
				} else if log.IndexStatus == string(IndexStatusIndexed) &&
					existing.IndexStatus != string(IndexStatusIndexing) &&
					existing.IndexStatus != string(IndexStatusQueued) {
					existing.IndexStatus = string(IndexStatusIndexed)
				} else if log.IndexStatus == string(IndexStatusError) &&
					existing.IndexStatus != string(IndexStatusIndexing) &&
					existing.IndexStatus != string(IndexStatusQueued) &&
					existing.IndexStatus != string(IndexStatusIndexed) {
					existing.IndexStatus = string(IndexStatusError)
					existing.ErrorMessage = log.ErrorMessage
					existing.ErrorTime = log.ErrorTime
				}
				// Widen the group's time range to cover this file's range.
				if log.HasTimeRange {
					if !existing.HasTimeRange {
						existing.HasTimeRange = true
						existing.TimeRangeStart = log.TimeRangeStart
						existing.TimeRangeEnd = log.TimeRangeEnd
					} else {
						if log.TimeRangeStart > 0 && (existing.TimeRangeStart == 0 || log.TimeRangeStart < existing.TimeRangeStart) {
							existing.TimeRangeStart = log.TimeRangeStart
						}
						if log.TimeRangeEnd > existing.TimeRangeEnd {
							existing.TimeRangeEnd = log.TimeRangeEnd
						}
					}
				}
			} else if logIsMainPath && existingIsMainPath {
				// If both are main paths, use the one with more recent LastIndexed
				if log.LastIndexed > existing.LastIndexed {
					groupedLog := *log
					groupedLog.Path = baseLogName
					groupedLog.Name = filepath.Base(baseLogName)
					groupedMap[baseLogName] = &groupedLog
				}
			}
		} else {
			// Create new entry with base log name as path for grouping
			groupedLog := *log
			groupedLog.Path = baseLogName
			groupedLog.Name = filepath.Base(baseLogName)
			// Preserve queue position and error info for the grouped log
			groupedLog.QueuePosition = log.QueuePosition
			groupedLog.ErrorMessage = log.ErrorMessage
			groupedLog.ErrorTime = log.ErrorTime
			groupedLog.RetryCount = log.RetryCount
			groupedMap[baseLogName] = &groupedLog
		}
	}

	// Convert map to slice with consistent ordering
	var result []*NginxLogWithIndex
	// Create a sorted list of keys to ensure consistent order
	var keys []string
	for key := range groupedMap {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	// Build result in consistent order
	for _, key := range keys {
		result = append(result, groupedMap[key])
	}

	// --- START DIAGNOSTIC LOGGING ---
	logger.Debugf("===== FINAL GROUPED LIST =====")
	for _, fLog := range result {
		logger.Debugf("Final Group: Path=%s, DocCount=%d, Status=%s", fLog.Path, fLog.DocumentCount, fLog.IndexStatus)
	}
	logger.Debugf("===============================")
	// --- END DIAGNOSTIC LOGGING ---
	return result
}
// SaveIndexMetadata saves the metadata for a log group after an indexing operation.
// It creates a new record for the base log path.
//
// documentCount is the caller-observed count and is replaced by an exact
// count queried from the Bleve index when the injected indexer is healthy.
// minTime/maxTime widen (never shrink) any previously persisted time range.
func (lm *LogFileManager) SaveIndexMetadata(basePath string, documentCount uint64, startTime time.Time, duration time.Duration, minTime *time.Time, maxTime *time.Time) error {
	// We want to save the metadata against the base path (the "log group").
	// We get or create a record for this specific path.
	logIndex, err := lm.persistence.GetLogIndex(basePath)
	if err != nil {
		// If the error is anything other than "not found", it's a real problem.
		// GetLogIndex is designed to return a new object if not found, so this should be rare.
		return fmt.Errorf("could not get or create log index for '%s': %w", basePath, err)
	}

	// Get file stats to update LastModified and LastSize.
	// Validate log path before accessing it; stat failures are deliberately
	// ignored (the previous values are kept).
	if utils.IsValidLogPath(basePath) {
		if fileInfo, err := os.Stat(basePath); err == nil {
			logIndex.LastModified = fileInfo.ModTime()
			logIndex.LastSize = fileInfo.Size()
		}
	}

	// If indexer is available and healthy, query Bleve for exact document count
	if lm.indexer != nil && lm.indexer.IsHealthy() {
		// Decide whether this path is a main log path (group) or a specific file
		mainPath := getMainLogPathFromFile(basePath)
		if mainPath == basePath {
			if exact, err := lm.indexer.CountDocsByMainLogPath(basePath); err == nil {
				documentCount = exact
			} else {
				// Count failure is non-fatal: keep the caller-provided count.
				logger.Warnf("Falling back to provided documentCount for group %s due to count error: %v", basePath, err)
			}
		} else {
			if exact, err := lm.indexer.CountDocsByFilePath(basePath); err == nil {
				documentCount = exact
			} else {
				logger.Warnf("Falling back to provided documentCount for file %s due to count error: %v", basePath, err)
			}
		}
	}

	// Update the record with the (possibly corrected) metadata
	logIndex.DocumentCount = documentCount
	logIndex.LastIndexed = time.Now()
	logIndex.IndexStartTime = &startTime
	durationMs := duration.Milliseconds()
	logIndex.IndexDuration = &durationMs

	// Merge time ranges: preserve existing historical range and expand if necessary
	// This prevents incremental indexing from losing historical time range data
	if minTime != nil {
		if logIndex.TimeRangeStart == nil || minTime.Before(*logIndex.TimeRangeStart) {
			logIndex.TimeRangeStart = minTime
		}
	}
	if maxTime != nil {
		if logIndex.TimeRangeEnd == nil || maxTime.After(*logIndex.TimeRangeEnd) {
			logIndex.TimeRangeEnd = maxTime
		}
	}

	// Save the updated record to the database
	return lm.persistence.SaveLogIndex(logIndex)
}
// DeleteIndexMetadataByGroup deletes all database records for a given log group.
// basePath is the main log path for the group; deletion is delegated to the
// persistence layer, which matches every rotated file in the group.
func (lm *LogFileManager) DeleteIndexMetadataByGroup(basePath string) error {
	// The basePath is the main log path for the group.
	return lm.persistence.DeleteLogIndexesByGroup(basePath)
}
// DeleteAllIndexMetadata deletes all index metadata from the database.
// The in-memory logCache and indexingStatus maps are left untouched.
func (lm *LogFileManager) DeleteAllIndexMetadata() error {
	return lm.persistence.DeleteAllLogIndexes()
}
  439. // GetLogByPath returns the full NginxLogWithIndex struct for a given base path.
  440. func (lm *LogFileManager) GetLogByPath(basePath string) (*NginxLogWithIndex, error) {
  441. // This is not the most efficient way, but it's reliable.
  442. // It ensures we get the same grouped and aggregated data the UI sees.
  443. allLogs := lm.GetAllLogsWithIndexGrouped()
  444. for _, log := range allLogs {
  445. if log.Path == basePath {
  446. return log, nil
  447. }
  448. }
  449. return nil, fmt.Errorf("log group with base path not found: %s", basePath)
  450. }
  451. // GetFilePathsForGroup returns all physical file paths for a given log group base path.
  452. func (lm *LogFileManager) GetFilePathsForGroup(basePath string) ([]string, error) {
  453. // Query the database for all log indexes with matching main_log_path
  454. logIndexes, err := lm.persistence.GetLogIndexesByGroup(basePath)
  455. if err != nil {
  456. return nil, fmt.Errorf("failed to get log indexes for group %s: %w", basePath, err)
  457. }
  458. // Extract file paths from the database records
  459. filePaths := make([]string, 0, len(logIndexes))
  460. for _, logIndex := range logIndexes {
  461. filePaths = append(filePaths, logIndex.Path)
  462. }
  463. return filePaths, nil
  464. }
// GetPersistence returns the persistence manager for advanced operations.
// The returned manager is shared, not a copy.
func (lm *LogFileManager) GetPersistence() *PersistenceManager {
	return lm.persistence
}
// GetAllLogsWithIndex returns all cached log paths with their index status (non-grouped).
// Unlike GetAllLogsWithIndexGrouped, only config-cached paths are returned;
// rotated files known solely to the persistence layer are not included.
func (lm *LogFileManager) GetAllLogsWithIndex(filters ...func(*NginxLogWithIndex) bool) []*NginxLogWithIndex {
	lm.cacheMutex.RLock()
	defer lm.cacheMutex.RUnlock()

	result := make([]*NginxLogWithIndex, 0, len(lm.logCache))

	// Get persistence indexes
	persistenceIndexes, err := lm.persistence.GetAllLogIndexes()
	if err != nil {
		// Best-effort: continue with config-cache data only.
		logger.Warnf("Failed to get persistence indexes: %v", err)
		persistenceIndexes = []*model.NginxLogIndex{}
	}

	// Create a map of persistence indexes for quick lookup
	persistenceMap := make(map[string]*model.NginxLogIndex)
	for _, idx := range persistenceIndexes {
		persistenceMap[idx.Path] = idx
	}

	// Process cached logs (from nginx config)
	for _, cache := range lm.logCache {
		logWithIndex := &NginxLogWithIndex{
			Path:         cache.Path,
			Type:         cache.Type,
			Name:         cache.Name,
			ConfigFile:   cache.ConfigFile,
			IndexStatus:  string(IndexStatusNotIndexed),
			IsCompressed: strings.HasSuffix(cache.Path, ".gz") || strings.HasSuffix(cache.Path, ".bz2"),
		}

		// Update with persistence data if available
		if idx, exists := persistenceMap[cache.Path]; exists {
			logWithIndex.LastModified = idx.LastModified.Unix()
			logWithIndex.LastSize = idx.LastSize
			logWithIndex.LastIndexed = idx.LastIndexed.Unix()
			if idx.IndexStartTime != nil {
				logWithIndex.IndexStartTime = idx.IndexStartTime.Unix()
			}
			if idx.IndexDuration != nil {
				logWithIndex.IndexDuration = *idx.IndexDuration
			}
			logWithIndex.DocumentCount = idx.DocumentCount

			// Determine status: live in-progress flag wins over persisted state.
			lm.indexingMutex.RLock()
			isIndexing := lm.indexingStatus[cache.Path]
			lm.indexingMutex.RUnlock()
			if isIndexing {
				logWithIndex.IndexStatus = string(IndexStatusIndexing)
			} else if !idx.LastIndexed.IsZero() {
				// If file has been indexed (regardless of document count), it's indexed
				logWithIndex.IndexStatus = string(IndexStatusIndexed)
			}

			// Set time range if available (both bounds present and non-zero)
			if idx.TimeRangeStart != nil && idx.TimeRangeEnd != nil && !idx.TimeRangeStart.IsZero() && !idx.TimeRangeEnd.IsZero() {
				logWithIndex.HasTimeRange = true
				logWithIndex.TimeRangeStart = idx.TimeRangeStart.Unix()
				logWithIndex.TimeRangeEnd = idx.TimeRangeEnd.Unix()
			}
		}

		// Apply filters; all must accept the entry.
		include := true
		for _, filter := range filters {
			if !filter(logWithIndex) {
				include = false
				break
			}
		}
		if include {
			result = append(result, logWithIndex)
		}
	}
	return result
}