log_file_manager.go

package indexer

import (
	"fmt"
	"os"
	"path/filepath"
	"regexp"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/0xJacky/Nginx-UI/model"
	"github.com/uozi-tech/cosy/logger"
)

// Legacy constants for backward compatibility - use IndexStatus enum in types.go instead

// NginxLogCache represents a cached log entry from nginx configuration
type NginxLogCache struct {
	Path       string `json:"path"`        // Path to the log file
	Type       string `json:"type"`        // Type of log: "access" or "error"
	Name       string `json:"name"`        // Name of the log file
	ConfigFile string `json:"config_file"` // Path to the configuration file that contains this log directive
}

// NginxLogWithIndex represents a log file with its index status information
type NginxLogWithIndex struct {
	Path           string `json:"path"`                       // Path to the log file
	Type           string `json:"type"`                       // Type of log: "access" or "error"
	Name           string `json:"name"`                       // Name of the log file
	ConfigFile     string `json:"config_file"`                // Path to the configuration file
	IndexStatus    string `json:"index_status"`               // Index status: indexed, indexing, not_indexed, queued, error
	LastModified   int64  `json:"last_modified,omitempty"`    // Unix timestamp of last modification time
	LastSize       int64  `json:"last_size,omitempty"`        // Last known size of the file
	LastIndexed    int64  `json:"last_indexed,omitempty"`     // Unix timestamp when the file was last indexed
	IndexStartTime int64  `json:"index_start_time,omitempty"` // Unix timestamp when the last indexing operation started
	IndexDuration  int64  `json:"index_duration,omitempty"`   // Duration of last indexing operation in milliseconds
	IsCompressed   bool   `json:"is_compressed"`              // Whether the file is compressed
	HasTimeRange   bool   `json:"has_timerange"`              // Whether time range is available
	TimeRangeStart int64  `json:"timerange_start,omitempty"`  // Unix timestamp of start of time range in the log
	TimeRangeEnd   int64  `json:"timerange_end,omitempty"`    // Unix timestamp of end of time range in the log
	DocumentCount  uint64 `json:"document_count,omitempty"`   // Number of indexed documents from this file

	// Enhanced status tracking fields
	ErrorMessage  string `json:"error_message,omitempty"`  // Error message if indexing failed
	ErrorTime     int64  `json:"error_time,omitempty"`     // Unix timestamp when error occurred
	RetryCount    int    `json:"retry_count,omitempty"`    // Number of retry attempts
	QueuePosition int    `json:"queue_position,omitempty"` // Position in indexing queue
}

// LogFileManager manages nginx log file discovery and index status
type LogFileManager struct {
	logCache       map[string]*NginxLogCache
	cacheMutex     sync.RWMutex
	persistence    *PersistenceManager
	indexingStatus map[string]bool
	indexingMutex  sync.RWMutex
}

// NewLogFileManager creates a new log file manager
func NewLogFileManager() *LogFileManager {
	return &LogFileManager{
		logCache:       make(map[string]*NginxLogCache),
		persistence:    NewPersistenceManager(DefaultIncrementalConfig()),
		indexingStatus: make(map[string]bool),
	}
}
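
// Illustrative usage sketch (not part of the original file; the paths are
// hypothetical): construct a manager and register a log path discovered while
// parsing an nginx config.
//
//	lm := NewLogFileManager()
//	lm.AddLogPath("/var/log/nginx/access.log", "access", "access.log", "/etc/nginx/sites-enabled/default")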

// AddLogPath adds a log path to the log cache with the source config file
func (lm *LogFileManager) AddLogPath(path, logType, name, configFile string) {
	lm.cacheMutex.Lock()
	defer lm.cacheMutex.Unlock()

	lm.logCache[path] = &NginxLogCache{
		Path:       path,
		Type:       logType,
		Name:       name,
		ConfigFile: configFile,
	}
}

// RemoveLogPathsFromConfig removes all log paths associated with a specific config file
func (lm *LogFileManager) RemoveLogPathsFromConfig(configFile string) {
	lm.cacheMutex.Lock()
	defer lm.cacheMutex.Unlock()

	for path, logEntry := range lm.logCache {
		if logEntry.ConfigFile == configFile {
			delete(lm.logCache, path)
		}
	}
}

// GetAllLogPaths returns all cached log paths, optionally filtered
func (lm *LogFileManager) GetAllLogPaths(filters ...func(*NginxLogCache) bool) []*NginxLogCache {
	lm.cacheMutex.RLock()
	defer lm.cacheMutex.RUnlock()

	var logs []*NginxLogCache
	for _, logEntry := range lm.logCache {
		// Apply all filters
		include := true
		for _, filter := range filters {
			if !filter(logEntry) {
				include = false
				break
			}
		}
		if include {
			// Create a copy to avoid race conditions
			logCopy := *logEntry
			logs = append(logs, &logCopy)
		}
	}
	return logs
}
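
// Illustrative filter usage (hypothetical): because filters are variadic
// predicates, callers can compose them freely, e.g. to list only access logs.
//
//	accessLogs := lm.GetAllLogPaths(func(c *NginxLogCache) bool {
//		return c.Type == "access"
//	})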

// SetIndexingStatus sets the indexing status for a specific file path
func (lm *LogFileManager) SetIndexingStatus(path string, isIndexing bool) {
	lm.indexingMutex.Lock()
	defer lm.indexingMutex.Unlock()

	if isIndexing {
		lm.indexingStatus[path] = true
	} else {
		delete(lm.indexingStatus, path)
	}
}

// GetIndexingFiles returns a list of files currently being indexed
func (lm *LogFileManager) GetIndexingFiles() []string {
	lm.indexingMutex.RLock()
	defer lm.indexingMutex.RUnlock()

	var files []string
	for path := range lm.indexingStatus {
		files = append(files, path)
	}
	return files
}
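
// Illustrative lifecycle sketch (hypothetical caller): the flag is set for
// the duration of an indexing run and cleared afterwards; clearing deletes
// the map entry rather than storing false, so GetIndexingFiles only ever
// sees in-flight paths.
//
//	lm.SetIndexingStatus(path, true)
//	defer lm.SetIndexingStatus(path, false)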

// getBaseLogName determines the base log file name for grouping rotated files
func getBaseLogName(filePath string) string {
	dir := filepath.Dir(filePath)
	filename := filepath.Base(filePath)

	// Remove compression extensions first
	filename = strings.TrimSuffix(filename, ".gz")
	filename = strings.TrimSuffix(filename, ".bz2")

	// Handle numbered rotation (access.log.1, access.log.2, etc.)
	if match := regexp.MustCompile(`^(.+)\.(\d+)$`).FindStringSubmatch(filename); len(match) > 1 {
		baseFilename := match[1]
		return filepath.Join(dir, baseFilename)
	}

	// Handle date rotation suffixes
	parts := strings.Split(filename, ".")
	if len(parts) >= 2 {
		lastPart := parts[len(parts)-1]
		if isDatePattern(lastPart) {
			baseFilename := strings.Join(parts[:len(parts)-1], ".")
			// If the base doesn't end with .log, add it
			if !strings.HasSuffix(baseFilename, ".log") {
				baseFilename += ".log"
			}
			return filepath.Join(dir, baseFilename)
		}
	}

	// If it already looks like a base log file, return as-is
	return filePath
}
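
// Illustrative mappings for getBaseLogName (paths are hypothetical; the date
// case assumes isDatePattern, defined elsewhere in this package, matches the
// suffix):
//
//	/var/log/nginx/access.log.1      -> /var/log/nginx/access.log
//	/var/log/nginx/access.log.2.gz   -> /var/log/nginx/access.log
//	/var/log/nginx/access.2024-01-02 -> /var/log/nginx/access.log
//	/var/log/nginx/error.log         -> /var/log/nginx/error.log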

// GetAllLogsWithIndexGrouped returns logs grouped by their base name (e.g., access.log includes access.log.1, access.log.2.gz etc.)
func (lm *LogFileManager) GetAllLogsWithIndexGrouped(filters ...func(*NginxLogWithIndex) bool) []*NginxLogWithIndex {
	lm.cacheMutex.RLock()
	defer lm.cacheMutex.RUnlock()

	// Get all logs from both cache (config files) and persistence (indexed files)
	allLogsMap := make(map[string]*NginxLogWithIndex)

	// First, get logs from the cache (these are from nginx config)
	for _, cache := range lm.logCache {
		logWithIndex := &NginxLogWithIndex{
			Path:         cache.Path,
			Type:         cache.Type,
			Name:         cache.Name,
			ConfigFile:   cache.ConfigFile,
			IndexStatus:  string(IndexStatusNotIndexed),
			IsCompressed: false,
			HasTimeRange: false,
		}
		allLogsMap[cache.Path] = logWithIndex
	}

	// Get persistence indexes and update status
	persistenceIndexes, err := lm.persistence.GetAllLogIndexes()
	if err != nil {
		logger.Warnf("Failed to get persistence indexes: %v", err)
		persistenceIndexes = []*model.NginxLogIndex{}
	}

	// Add all indexed files from persistence (including rotated files)
	for _, idx := range persistenceIndexes {
		if _, exists := allLogsMap[idx.Path]; !exists {
			// This is a rotated file not in config cache, create entry for it
			logType := "access"
			if strings.Contains(idx.Path, "error") {
				logType = "error"
			}
			logWithIndex := &NginxLogWithIndex{
				Path:        idx.Path,
				Type:        logType,
				Name:        filepath.Base(idx.Path),
				ConfigFile:  "",
				IndexStatus: string(IndexStatusNotIndexed),
			}
			allLogsMap[idx.Path] = logWithIndex
		}

		// Update index status from persistence data
		logWithIndex := allLogsMap[idx.Path]
		logWithIndex.LastModified = idx.LastModified.Unix()
		logWithIndex.LastSize = idx.LastSize
		logWithIndex.LastIndexed = idx.LastIndexed.Unix()
		if idx.IndexStartTime != nil {
			logWithIndex.IndexStartTime = idx.IndexStartTime.Unix()
		}
		if idx.IndexDuration != nil {
			logWithIndex.IndexDuration = *idx.IndexDuration
		}
		logWithIndex.DocumentCount = idx.DocumentCount

		// Set queue position if available
		logWithIndex.QueuePosition = idx.QueuePosition

		// Set error message if available
		logWithIndex.ErrorMessage = idx.ErrorMessage
		if idx.ErrorTime != nil {
			logWithIndex.ErrorTime = idx.ErrorTime.Unix()
		}
		logWithIndex.RetryCount = idx.RetryCount

		// Use the index status from the database if it's set
		if idx.IndexStatus != "" {
			logWithIndex.IndexStatus = idx.IndexStatus
		} else {
			// Fallback to determining status if not set in DB
			lm.indexingMutex.RLock()
			isIndexing := lm.indexingStatus[idx.Path]
			lm.indexingMutex.RUnlock()
			if isIndexing {
				logWithIndex.IndexStatus = string(IndexStatusIndexing)
			} else if !idx.LastIndexed.IsZero() {
				// If file has been indexed (regardless of document count), it's indexed
				logWithIndex.IndexStatus = string(IndexStatusIndexed)
			}
		}

		// Set time range if available
		if idx.TimeRangeStart != nil && idx.TimeRangeEnd != nil && !idx.TimeRangeStart.IsZero() && !idx.TimeRangeEnd.IsZero() {
			logWithIndex.HasTimeRange = true
			logWithIndex.TimeRangeStart = idx.TimeRangeStart.Unix()
			logWithIndex.TimeRangeEnd = idx.TimeRangeEnd.Unix()
		}

		logWithIndex.IsCompressed = strings.HasSuffix(idx.Path, ".gz") || strings.HasSuffix(idx.Path, ".bz2")
	}

	// Convert to slice and apply filters
	var logs []*NginxLogWithIndex
	for _, log := range allLogsMap {
		// Apply all filters
		include := true
		for _, filter := range filters {
			if !filter(log) {
				include = false
				break
			}
		}
		if include {
			logs = append(logs, log)
		}
	}

	// Group by base log name with stable aggregation
	groupedMap := make(map[string]*NginxLogWithIndex)

	// Sort logs by path first to ensure consistent processing order
	sort.Slice(logs, func(i, j int) bool {
		return logs[i].Path < logs[j].Path
	})

	for _, log := range logs {
		baseLogName := getBaseLogName(log.Path)
		if existing, exists := groupedMap[baseLogName]; exists {
			// Check if current log is a main log path record (already aggregated)
			// or if existing record is a main log path record
			logIsMainPath := (log.Path == baseLogName)
			existingIsMainPath := (existing.Path == baseLogName)

			if logIsMainPath && !existingIsMainPath {
				// Current log is the main aggregated record, replace existing
				groupedLog := *log
				groupedLog.Path = baseLogName
				groupedLog.Name = filepath.Base(baseLogName)
				groupedMap[baseLogName] = &groupedLog
			} else if !logIsMainPath && existingIsMainPath {
				// Existing is main record, keep it, don't accumulate
				// Only update status if needed
				if log.IndexStatus == string(IndexStatusIndexing) {
					existing.IndexStatus = string(IndexStatusIndexing)
				}
			} else if !logIsMainPath && !existingIsMainPath {
				// Both are individual files, accumulate normally
				if log.LastIndexed > existing.LastIndexed {
					existing.LastModified = log.LastModified
					existing.LastIndexed = log.LastIndexed
					existing.IndexStartTime = log.IndexStartTime
					existing.IndexDuration = log.IndexDuration
				}
				existing.DocumentCount += log.DocumentCount
				existing.LastSize += log.LastSize

				// Update status with priority: indexing > queued > indexed > error > not_indexed
				if log.IndexStatus == string(IndexStatusIndexing) {
					existing.IndexStatus = string(IndexStatusIndexing)
				} else if log.IndexStatus == string(IndexStatusQueued) &&
					existing.IndexStatus != string(IndexStatusIndexing) {
					existing.IndexStatus = string(IndexStatusQueued)
					// Keep the queue position from the queued log
					if log.QueuePosition > 0 {
						existing.QueuePosition = log.QueuePosition
					}
				} else if log.IndexStatus == string(IndexStatusIndexed) &&
					existing.IndexStatus != string(IndexStatusIndexing) &&
					existing.IndexStatus != string(IndexStatusQueued) {
					existing.IndexStatus = string(IndexStatusIndexed)
				} else if log.IndexStatus == string(IndexStatusError) &&
					existing.IndexStatus != string(IndexStatusIndexing) &&
					existing.IndexStatus != string(IndexStatusQueued) &&
					existing.IndexStatus != string(IndexStatusIndexed) {
					existing.IndexStatus = string(IndexStatusError)
					existing.ErrorMessage = log.ErrorMessage
					existing.ErrorTime = log.ErrorTime
				}

				if log.HasTimeRange {
					if !existing.HasTimeRange {
						existing.HasTimeRange = true
						existing.TimeRangeStart = log.TimeRangeStart
						existing.TimeRangeEnd = log.TimeRangeEnd
					} else {
						if log.TimeRangeStart > 0 && (existing.TimeRangeStart == 0 || log.TimeRangeStart < existing.TimeRangeStart) {
							existing.TimeRangeStart = log.TimeRangeStart
						}
						if log.TimeRangeEnd > existing.TimeRangeEnd {
							existing.TimeRangeEnd = log.TimeRangeEnd
						}
					}
				}
			} else if logIsMainPath && existingIsMainPath {
				// If both are main paths, use the one with more recent LastIndexed
				if log.LastIndexed > existing.LastIndexed {
					groupedLog := *log
					groupedLog.Path = baseLogName
					groupedLog.Name = filepath.Base(baseLogName)
					groupedMap[baseLogName] = &groupedLog
				}
			}
		} else {
			// Create new entry with base log name as path for grouping
			groupedLog := *log
			groupedLog.Path = baseLogName
			groupedLog.Name = filepath.Base(baseLogName)
			// Preserve queue position and error info for the grouped log
			groupedLog.QueuePosition = log.QueuePosition
			groupedLog.ErrorMessage = log.ErrorMessage
			groupedLog.ErrorTime = log.ErrorTime
			groupedLog.RetryCount = log.RetryCount
			groupedMap[baseLogName] = &groupedLog
		}
	}

	// Convert map to slice with consistent ordering
	var result []*NginxLogWithIndex

	// Create a sorted list of keys to ensure consistent order
	var keys []string
	for key := range groupedMap {
		keys = append(keys, key)
	}
	sort.Strings(keys)

	// Build result in consistent order
	for _, key := range keys {
		result = append(result, groupedMap[key])
	}

	// --- START DIAGNOSTIC LOGGING ---
	logger.Debugf("===== FINAL GROUPED LIST =====")
	for _, fLog := range result {
		logger.Debugf("Final Group: Path=%s, DocCount=%d, Status=%s", fLog.Path, fLog.DocumentCount, fLog.IndexStatus)
	}
	logger.Debugf("===============================")
	// --- END DIAGNOSTIC LOGGING ---

	return result
}
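
// Illustrative usage (hypothetical): the variadic filters run on the
// pre-grouping entries, so a status filter here selects individual files
// before they are merged into their base-name group.
//
//	failed := lm.GetAllLogsWithIndexGrouped(func(l *NginxLogWithIndex) bool {
//		return l.IndexStatus == string(IndexStatusError)
//	})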

// SaveIndexMetadata saves the metadata for a log group after an indexing operation.
// It creates a new record for the base log path.
func (lm *LogFileManager) SaveIndexMetadata(basePath string, documentCount uint64, startTime time.Time, duration time.Duration, minTime *time.Time, maxTime *time.Time) error {
	// We want to save the metadata against the base path (the "log group").
	// We get or create a record for this specific path.
	logIndex, err := lm.persistence.GetLogIndex(basePath)
	if err != nil {
		// If the error is anything other than "not found", it's a real problem.
		// GetLogIndex is designed to return a new object if not found, so this should be rare.
		return fmt.Errorf("could not get or create log index for '%s': %w", basePath, err)
	}

	// Get file stats to update LastModified and LastSize
	if fileInfo, err := os.Stat(basePath); err == nil {
		logIndex.LastModified = fileInfo.ModTime()
		logIndex.LastSize = fileInfo.Size()
	}

	// Update the record with the new metadata
	logIndex.DocumentCount = documentCount
	logIndex.LastIndexed = time.Now()
	logIndex.IndexStartTime = &startTime
	durationMs := duration.Milliseconds()
	logIndex.IndexDuration = &durationMs

	// Set the time range from the parsed logs
	logIndex.TimeRangeStart = minTime
	logIndex.TimeRangeEnd = maxTime

	// Save the updated record to the database
	return lm.persistence.SaveLogIndex(logIndex)
}
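
// Illustrative call sketch (hypothetical values): recording the outcome of an
// indexing run for the access.log group, where docCount, minTime, and maxTime
// were collected while parsing.
//
//	start := time.Now()
//	// ... run the indexing pass ...
//	err := lm.SaveIndexMetadata("/var/log/nginx/access.log", docCount, start, time.Since(start), &minTime, &maxTime)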

// DeleteIndexMetadataByGroup deletes all database records for a given log group.
func (lm *LogFileManager) DeleteIndexMetadataByGroup(basePath string) error {
	// The basePath is the main log path for the group.
	return lm.persistence.DeleteLogIndexesByGroup(basePath)
}

// DeleteAllIndexMetadata deletes all index metadata from the database.
func (lm *LogFileManager) DeleteAllIndexMetadata() error {
	return lm.persistence.DeleteAllLogIndexes()
}

// GetLogByPath returns the full NginxLogWithIndex struct for a given base path.
func (lm *LogFileManager) GetLogByPath(basePath string) (*NginxLogWithIndex, error) {
	// This is not the most efficient way, but it's reliable.
	// It ensures we get the same grouped and aggregated data the UI sees.
	allLogs := lm.GetAllLogsWithIndexGrouped()
	for _, log := range allLogs {
		if log.Path == basePath {
			return log, nil
		}
	}
	return nil, fmt.Errorf("log group with base path not found: %s", basePath)
}

// GetFilePathsForGroup returns all physical file paths for a given log group base path.
func (lm *LogFileManager) GetFilePathsForGroup(basePath string) ([]string, error) {
	// Query the database for all log indexes with matching main_log_path
	logIndexes, err := lm.persistence.GetLogIndexesByGroup(basePath)
	if err != nil {
		return nil, fmt.Errorf("failed to get log indexes for group %s: %w", basePath, err)
	}

	// Extract file paths from the database records
	filePaths := make([]string, 0, len(logIndexes))
	for _, logIndex := range logIndexes {
		filePaths = append(filePaths, logIndex.Path)
	}
	return filePaths, nil
}

// GetPersistence returns the persistence manager for advanced operations
func (lm *LogFileManager) GetPersistence() *PersistenceManager {
	return lm.persistence
}

// maxInt64 returns the maximum of two int64 values
func maxInt64(a, b int64) int64 {
	if a > b {
		return a
	}
	return b
}

// GetAllLogsWithIndex returns all cached log paths with their index status (non-grouped)
func (lm *LogFileManager) GetAllLogsWithIndex(filters ...func(*NginxLogWithIndex) bool) []*NginxLogWithIndex {
	lm.cacheMutex.RLock()
	defer lm.cacheMutex.RUnlock()

	result := make([]*NginxLogWithIndex, 0, len(lm.logCache))

	// Get persistence indexes
	persistenceIndexes, err := lm.persistence.GetAllLogIndexes()
	if err != nil {
		logger.Warnf("Failed to get persistence indexes: %v", err)
		persistenceIndexes = []*model.NginxLogIndex{}
	}

	// Create a map of persistence indexes for quick lookup
	persistenceMap := make(map[string]*model.NginxLogIndex)
	for _, idx := range persistenceIndexes {
		persistenceMap[idx.Path] = idx
	}

	// Process cached logs (from nginx config)
	for _, cache := range lm.logCache {
		logWithIndex := &NginxLogWithIndex{
			Path:         cache.Path,
			Type:         cache.Type,
			Name:         cache.Name,
			ConfigFile:   cache.ConfigFile,
			IndexStatus:  string(IndexStatusNotIndexed),
			IsCompressed: strings.HasSuffix(cache.Path, ".gz") || strings.HasSuffix(cache.Path, ".bz2"),
		}

		// Update with persistence data if available
		if idx, exists := persistenceMap[cache.Path]; exists {
			logWithIndex.LastModified = idx.LastModified.Unix()
			logWithIndex.LastSize = idx.LastSize
			logWithIndex.LastIndexed = idx.LastIndexed.Unix()
			if idx.IndexStartTime != nil {
				logWithIndex.IndexStartTime = idx.IndexStartTime.Unix()
			}
			if idx.IndexDuration != nil {
				logWithIndex.IndexDuration = *idx.IndexDuration
			}
			logWithIndex.DocumentCount = idx.DocumentCount

			// Determine status
			lm.indexingMutex.RLock()
			isIndexing := lm.indexingStatus[cache.Path]
			lm.indexingMutex.RUnlock()
			if isIndexing {
				logWithIndex.IndexStatus = string(IndexStatusIndexing)
			} else if !idx.LastIndexed.IsZero() {
				// If file has been indexed (regardless of document count), it's indexed
				logWithIndex.IndexStatus = string(IndexStatusIndexed)
			}

			// Set time range if available
			if idx.TimeRangeStart != nil && idx.TimeRangeEnd != nil && !idx.TimeRangeStart.IsZero() && !idx.TimeRangeEnd.IsZero() {
				logWithIndex.HasTimeRange = true
				logWithIndex.TimeRangeStart = idx.TimeRangeStart.Unix()
				logWithIndex.TimeRangeEnd = idx.TimeRangeEnd.Unix()
			}
		}

		// Apply filters
		include := true
		for _, filter := range filters {
			if !filter(logWithIndex) {
				include = false
				break
			}
		}
		if include {
			result = append(result, logWithIndex)
		}
	}

	return result
}
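
// Illustrative contrast (hypothetical paths): GetAllLogsWithIndex reports one
// entry per physical file, while GetAllLogsWithIndexGrouped folds rotated
// files into a single base entry.
//
//	flat := lm.GetAllLogsWithIndex()           // access.log, access.log.1, access.log.2.gz, ...
//	grouped := lm.GetAllLogsWithIndexGrouped() // access.log (aggregated)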