persistence.go

package indexer

import (
	"context"
	"fmt"
	"path/filepath"
	"regexp"
	"strings"
	"time"

	"github.com/0xJacky/Nginx-UI/model"
	"github.com/0xJacky/Nginx-UI/query"
	"github.com/uozi-tech/cosy"
	"github.com/uozi-tech/cosy/logger"
	"gorm.io/gen/field"
)

// PersistenceManager handles database operations for log index positions.
// Enhanced for incremental indexing with position tracking.
type PersistenceManager struct {
	// Configuration for incremental indexing
	maxBatchSize  int
	flushInterval time.Duration
	enabledPaths  map[string]bool // Cache for enabled paths
	lastFlushTime time.Time
}

// LogFileInfo represents information about a log file for incremental indexing.
type LogFileInfo struct {
	Path         string
	LastModified int64 // Unix timestamp
	LastSize     int64 // File size at last index
	LastIndexed  int64 // Unix timestamp of last indexing
	LastPosition int64 // Byte position where indexing left off
}

// IncrementalIndexConfig is the configuration for incremental indexing.
type IncrementalIndexConfig struct {
	MaxBatchSize  int           `yaml:"max_batch_size" json:"max_batch_size"`
	FlushInterval time.Duration `yaml:"flush_interval" json:"flush_interval"`
	CheckInterval time.Duration `yaml:"check_interval" json:"check_interval"`
	MaxAge        time.Duration `yaml:"max_age" json:"max_age"`
}

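// An equivalent YAML fragment for the struct above (illustrative; keys follow
// the struct tags, the values are the defaults from DefaultIncrementalConfig
// below, and the exact duration syntax accepted depends on the YAML
// unmarshaler in use):
//
//	max_batch_size: 1000
//	flush_interval: 30s
//	check_interval: 5m
//	max_age: 720h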
// DefaultIncrementalConfig returns the default configuration for incremental indexing.
func DefaultIncrementalConfig() *IncrementalIndexConfig {
	return &IncrementalIndexConfig{
		MaxBatchSize:  1000,
		FlushInterval: 30 * time.Second,
		CheckInterval: 5 * time.Minute,
		MaxAge:        30 * 24 * time.Hour, // 30 days
	}
}

// NewPersistenceManager creates a new persistence manager with incremental indexing support.
func NewPersistenceManager(config *IncrementalIndexConfig) *PersistenceManager {
	if config == nil {
		config = DefaultIncrementalConfig()
	}
	return &PersistenceManager{
		maxBatchSize:  config.MaxBatchSize,
		flushInterval: config.FlushInterval,
		enabledPaths:  make(map[string]bool),
		lastFlushTime: time.Now(),
	}
}

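// Construction sketch (illustrative only; the log path is a placeholder):
//
//	pm := NewPersistenceManager(nil) // nil falls back to DefaultIncrementalConfig()
//	idx, err := pm.GetLogIndex("/var/log/nginx/access.log")
//	if err != nil {
//		// handle error
//	}
//	_ = idx.LastPosition // byte offset where the previous indexing run stopped
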
// GetLogIndex retrieves the index record for a log file path.
func (pm *PersistenceManager) GetLogIndex(path string) (*model.NginxLogIndex, error) {
	q := query.NginxLogIndex

	// Determine main log path for grouping
	mainLogPath := getMainLogPathFromFile(path)

	// Use FirstOrCreate to get existing record or create a new one
	logIndex, err := q.Where(q.Path.Eq(path)).
		Assign(field.Attrs(&model.NginxLogIndex{
			Path:        path,
			MainLogPath: mainLogPath,
			Enabled:     true,
		})).
		FirstOrCreate()
	if err != nil {
		return nil, fmt.Errorf("failed to get or create log index: %w", err)
	}
	return logIndex, nil
}

// SaveLogIndex saves or updates the index record with incremental indexing support.
func (pm *PersistenceManager) SaveLogIndex(logIndex *model.NginxLogIndex) error {
	logIndex.Enabled = true

	// Ensure MainLogPath is set
	if logIndex.MainLogPath == "" {
		logIndex.MainLogPath = getMainLogPathFromFile(logIndex.Path)
	}

	// Update last indexed time
	logIndex.LastIndexed = time.Now()

	// If we are saving metadata, it implies the task is complete.
	if logIndex.IndexDuration != nil && *logIndex.IndexDuration > 0 {
		logIndex.IndexStatus = string(IndexStatusIndexed)
	}

	q := query.NginxLogIndex
	savedRecord, err := q.Where(q.Path.Eq(logIndex.Path)).
		Assign(field.Attrs(logIndex)).
		FirstOrCreate()
	if err != nil {
		return fmt.Errorf("failed to save log index: %w", err)
	}

	// Update the passed object with the saved record data
	*logIndex = *savedRecord

	// Update cache
	pm.enabledPaths[logIndex.Path] = logIndex.Enabled
	return nil
}

// GetIncrementalInfo retrieves incremental indexing information for a log file.
func (pm *PersistenceManager) GetIncrementalInfo(path string) (*LogFileInfo, error) {
	logIndex, err := pm.GetLogIndex(path)
	if err != nil {
		return nil, err
	}
	return &LogFileInfo{
		Path:         logIndex.Path,
		LastModified: logIndex.LastModified.Unix(),
		LastSize:     logIndex.LastSize,
		LastIndexed:  logIndex.LastIndexed.Unix(),
		LastPosition: logIndex.LastPosition,
	}, nil
}

// UpdateIncrementalInfo updates incremental indexing information.
func (pm *PersistenceManager) UpdateIncrementalInfo(path string, info *LogFileInfo) error {
	logIndex, err := pm.GetLogIndex(path)
	if err != nil {
		return err
	}
	logIndex.LastModified = time.Unix(info.LastModified, 0)
	logIndex.LastSize = info.LastSize
	logIndex.LastIndexed = time.Unix(info.LastIndexed, 0)
	logIndex.LastPosition = info.LastPosition
	return pm.SaveLogIndex(logIndex)
}

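// Incremental indexing cycle (illustrative sketch; reading the log file from
// the stored offset happens outside this file):
//
//	info, _ := pm.GetIncrementalInfo(path) // where the last run stopped
//	// ... scan the file starting at info.LastPosition and index new lines ...
//	info.LastPosition = newPosition // byte offset reached by this run
//	info.LastSize = currentSize
//	info.LastModified = modTime.Unix()
//	info.LastIndexed = time.Now().Unix()
//	_ = pm.UpdateIncrementalInfo(path, info)
//
// MarkFileAsIndexed (below) is a shortcut that records the position and the
// document count in a single call.
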
// IsPathEnabled checks if indexing is enabled for a path (with caching).
func (pm *PersistenceManager) IsPathEnabled(path string) (bool, error) {
	// Check cache first
	if enabled, exists := pm.enabledPaths[path]; exists {
		return enabled, nil
	}

	// Query database
	logIndex, err := pm.GetLogIndex(path)
	if err != nil {
		return false, err
	}

	// Update cache
	pm.enabledPaths[path] = logIndex.Enabled
	return logIndex.Enabled, nil
}

// GetChangedFiles returns files that have been modified since last indexing.
func (pm *PersistenceManager) GetChangedFiles(mainLogPath string) ([]*model.NginxLogIndex, error) {
	q := query.NginxLogIndex
	indexes, err := q.Where(
		q.MainLogPath.Eq(mainLogPath),
		q.Enabled.Is(true),
	).Find()
	if err != nil {
		return nil, fmt.Errorf("failed to get changed files: %w", err)
	}
	return indexes, nil
}

// GetFilesForFullReindex returns files that need full reindexing.
func (pm *PersistenceManager) GetFilesForFullReindex(mainLogPath string, maxAge time.Duration) ([]*model.NginxLogIndex, error) {
	cutoff := time.Now().Add(-maxAge)
	q := query.NginxLogIndex
	indexes, err := q.Where(
		q.MainLogPath.Eq(mainLogPath),
		q.Enabled.Is(true),
		q.LastIndexed.Lt(cutoff),
	).Find()
	if err != nil {
		return nil, fmt.Errorf("failed to get files for full reindex: %w", err)
	}
	return indexes, nil
}

// MarkFileAsIndexed marks a file as successfully indexed with the current timestamp and position.
func (pm *PersistenceManager) MarkFileAsIndexed(path string, documentCount uint64, lastPosition int64) error {
	logIndex, err := pm.GetLogIndex(path)
	if err != nil {
		return err
	}
	now := time.Now()
	logIndex.LastIndexed = now
	logIndex.LastPosition = lastPosition
	logIndex.DocumentCount = documentCount
	return pm.SaveLogIndex(logIndex)
}

// GetAllLogIndexes retrieves all enabled log index records, ordered by path.
func (pm *PersistenceManager) GetAllLogIndexes() ([]*model.NginxLogIndex, error) {
	q := query.NginxLogIndex
	indexes, err := q.Where(q.Enabled.Is(true)).Order(q.Path).Find()
	if err != nil {
		return nil, fmt.Errorf("failed to get log indexes: %w", err)
	}
	return indexes, nil
}

// GetLogGroupIndexes retrieves all log index records for a specific log group.
func (pm *PersistenceManager) GetLogGroupIndexes(mainLogPath string) ([]*model.NginxLogIndex, error) {
	q := query.NginxLogIndex
	indexes, err := q.Where(
		q.MainLogPath.Eq(mainLogPath),
		q.Enabled.Is(true),
	).Order(q.Path).Find()
	if err != nil {
		return nil, fmt.Errorf("failed to get log group indexes: %w", err)
	}
	return indexes, nil
}

// DeleteLogIndex deletes a log index record (hard delete).
func (pm *PersistenceManager) DeleteLogIndex(path string) error {
	q := query.NginxLogIndex
	_, err := q.Unscoped().Where(q.Path.Eq(path)).Delete()
	if err != nil {
		return fmt.Errorf("failed to delete log index: %w", err)
	}

	// Remove from cache
	delete(pm.enabledPaths, path)

	logger.Infof("Hard deleted log index for path: %s", path)
	return nil
}

// DisableLogIndex disables indexing for a log file.
func (pm *PersistenceManager) DisableLogIndex(path string) error {
	q := query.NginxLogIndex
	_, err := q.Where(q.Path.Eq(path)).Update(q.Enabled, false)
	if err != nil {
		return fmt.Errorf("failed to disable log index: %w", err)
	}

	// Update cache
	pm.enabledPaths[path] = false

	logger.Infof("Disabled log index for path: %s", path)
	return nil
}

// EnableLogIndex enables indexing for a log file.
func (pm *PersistenceManager) EnableLogIndex(path string) error {
	q := query.NginxLogIndex
	_, err := q.Where(q.Path.Eq(path)).Update(q.Enabled, true)
	if err != nil {
		return fmt.Errorf("failed to enable log index: %w", err)
	}

	// Update cache
	pm.enabledPaths[path] = true

	logger.Infof("Enabled log index for path: %s", path)
	return nil
}

// CleanupOldIndexes removes index records for files that haven't been indexed in a long time.
func (pm *PersistenceManager) CleanupOldIndexes(maxAge time.Duration) error {
	cutoff := time.Now().Add(-maxAge)
	q := query.NginxLogIndex
	result, err := q.Unscoped().Where(q.LastIndexed.Lt(cutoff)).Delete()
	if err != nil {
		return fmt.Errorf("failed to cleanup old indexes: %w", err)
	}
	if result.RowsAffected > 0 {
		logger.Infof("Cleaned up %d old log index records", result.RowsAffected)
		// Clear cache for cleaned up entries
		pm.enabledPaths = make(map[string]bool)
	}
	return nil
}

// PersistenceStats represents statistics about stored index records.
type PersistenceStats struct {
	TotalFiles     int64  `json:"total_files"`
	EnabledFiles   int64  `json:"enabled_files"`
	TotalDocuments uint64 `json:"total_documents"`
	ChangedFiles   int64  `json:"changed_files"`
}

// GetPersistenceStats returns statistics about stored index records.
func (pm *PersistenceManager) GetPersistenceStats() (*PersistenceStats, error) {
	q := query.NginxLogIndex

	// Count total records
	totalCount, err := q.Count()
	if err != nil {
		return nil, fmt.Errorf("failed to count total indexes: %w", err)
	}

	// Count enabled records
	enabledCount, err := q.Where(q.Enabled.Is(true)).Count()
	if err != nil {
		return nil, fmt.Errorf("failed to count enabled indexes: %w", err)
	}

	// Sum document counts
	var result struct {
		Total uint64
	}
	if err := q.Select(q.DocumentCount.Sum().As("total")).Scan(&result); err != nil {
		return nil, fmt.Errorf("failed to sum document counts: %w", err)
	}

	// Count files needing incremental update
	cutoff := time.Now().Add(-time.Hour) // Files modified in the last hour
	changedCount, err := q.Where(
		q.Enabled.Is(true),
		q.LastModified.Gt(cutoff),
	).Count()
	if err != nil {
		return nil, fmt.Errorf("failed to count changed files: %w", err)
	}

	return &PersistenceStats{
		TotalFiles:     totalCount,
		EnabledFiles:   enabledCount,
		TotalDocuments: result.Total,
		ChangedFiles:   changedCount,
	}, nil
}

// GetLogFileInfo retrieves the log file info for a given path.
func (pm *PersistenceManager) GetLogFileInfo(path string) (*LogFileInfo, error) {
	return pm.GetIncrementalInfo(path)
}

// SaveLogFileInfo saves the log file info for a given path.
func (pm *PersistenceManager) SaveLogFileInfo(path string, info *LogFileInfo) error {
	return pm.UpdateIncrementalInfo(path, info)
}

// SetIndexStatus updates the index status for a specific file path with enhanced status support.
func (pm *PersistenceManager) SetIndexStatus(path, status string, queuePosition int, errorMessage string) error {
	logIndex, err := pm.GetLogIndex(path)
	if err != nil {
		return fmt.Errorf("failed to get log index for status update: %w", err)
	}

	// Update the record according to the new status
	switch status {
	case string(IndexStatusQueued):
		logIndex.SetQueuedStatus(queuePosition)
	case string(IndexStatusIndexing):
		logIndex.SetIndexingStatus(status)
	case string(IndexStatusIndexed):
		logIndex.SetCompletedStatus()
	case string(IndexStatusError):
		logIndex.SetErrorStatus(errorMessage)
	default:
		logIndex.IndexStatus = status
	}

	err = pm.SaveLogIndex(logIndex)
	if err != nil {
		return err
	}

	// Status changes are not pushed through progress events; for now the
	// frontend's auto-refresh mechanism picks them up instead.
	return nil
}

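// Typical status transitions driven by SetIndexStatus (illustrative sketch;
// the queue position and error text are placeholders):
//
//	_ = pm.SetIndexStatus(path, string(IndexStatusQueued), 3, "")            // enqueued at position 3
//	_ = pm.SetIndexStatus(path, string(IndexStatusIndexing), 0, "")          // worker picked it up
//	_ = pm.SetIndexStatus(path, string(IndexStatusIndexed), 0, "")           // completed
//	_ = pm.SetIndexStatus(path, string(IndexStatusError), 0, "read failed")  // failed with a message
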
// GetIncompleteIndexingTasks returns all files that have incomplete indexing tasks.
func (pm *PersistenceManager) GetIncompleteIndexingTasks() ([]*model.NginxLogIndex, error) {
	// Use a direct database query since the query fields are not generated yet
	db := cosy.UseDB(context.Background())

	var indexes []*model.NginxLogIndex
	err := db.Where("enabled = ? AND index_status IN ?", true, []string{
		string(IndexStatusIndexing),
		string(IndexStatusQueued),
	}).Order("queue_position").Find(&indexes).Error
	if err != nil {
		return nil, fmt.Errorf("failed to get incomplete indexing tasks: %w", err)
	}
	return indexes, nil
}

// GetQueuedTasks returns all queued indexing tasks ordered by queue position.
func (pm *PersistenceManager) GetQueuedTasks() ([]*model.NginxLogIndex, error) {
	// Use a direct database query since the query fields are not generated yet
	db := cosy.UseDB(context.Background())

	var indexes []*model.NginxLogIndex
	err := db.Where("enabled = ? AND index_status = ?", true, string(IndexStatusQueued)).Order("queue_position").Find(&indexes).Error
	if err != nil {
		return nil, fmt.Errorf("failed to get queued tasks: %w", err)
	}
	return indexes, nil
}

// ResetIndexingTasks resets all indexing and queued tasks to the not_indexed state.
// This is useful during startup to clear stale states.
func (pm *PersistenceManager) ResetIndexingTasks() error {
	// Use a direct database query
	db := cosy.UseDB(context.Background())

	err := db.Model(&model.NginxLogIndex{}).Where("index_status IN ?", []string{
		string(IndexStatusIndexing),
		string(IndexStatusQueued),
	}).Updates(map[string]interface{}{
		"index_status":     string(IndexStatusNotIndexed),
		"queue_position":   0,
		"error_message":    "",
		"error_time":       nil,
		"index_start_time": nil,
	}).Error
	if err != nil {
		return fmt.Errorf("failed to reset indexing tasks: %w", err)
	}

	// Clear cache
	pm.enabledPaths = make(map[string]bool)

	logger.Info("Reset all incomplete indexing tasks")
	return nil
}

// GetIndexingTaskStats returns statistics about indexing tasks.
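// The returned map has one entry per IndexStatus* value and counts only
// enabled files, e.g. stats[string(IndexStatusIndexed)] is the number of
// fully indexed files.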
func (pm *PersistenceManager) GetIndexingTaskStats() (map[string]int64, error) {
	// Use a direct database query
	db := cosy.UseDB(context.Background())

	stats := make(map[string]int64)

	// Count enabled files per status
	statuses := []string{
		string(IndexStatusNotIndexed),
		string(IndexStatusQueued),
		string(IndexStatusIndexing),
		string(IndexStatusIndexed),
		string(IndexStatusError),
	}
	for _, status := range statuses {
		var count int64
		err := db.Model(&model.NginxLogIndex{}).Where("enabled = ? AND index_status = ?", true, status).Count(&count).Error
		if err != nil {
			return nil, fmt.Errorf("failed to count status %s: %w", status, err)
		}
		stats[status] = count
	}

	return stats, nil
}

// Close flushes any pending operations and cleans up resources.
func (pm *PersistenceManager) Close() error {
	// Release the enabled-paths cache; nothing else is buffered here.
	pm.enabledPaths = nil
	return nil
}

// DeleteAllLogIndexes deletes all log index records.
func (pm *PersistenceManager) DeleteAllLogIndexes() error {
	// GORM's `Delete` requires a WHERE clause for safety. To delete all records,
	// we use a raw Exec call, which is the standard way to perform bulk operations.
	db := cosy.UseDB(context.Background())
	if err := db.Exec("DELETE FROM nginx_log_indices").Error; err != nil {
		return fmt.Errorf("failed to delete all log indexes: %w", err)
	}

	// Clear cache
	pm.enabledPaths = make(map[string]bool)

	logger.Infof("Hard deleted all log index records")
	return nil
}

// GetLogIndexesByGroup retrieves all log index records for a given main log path.
func (pm *PersistenceManager) GetLogIndexesByGroup(mainLogPath string) ([]*model.NginxLogIndex, error) {
	q := query.NginxLogIndex
	logIndexes, err := q.Where(q.MainLogPath.Eq(mainLogPath)).Find()
	if err != nil {
		return nil, fmt.Errorf("failed to get log indexes for group %s: %w", mainLogPath, err)
	}
	return logIndexes, nil
}

// DeleteLogIndexesByGroup deletes all log index records for a specific log group.
func (pm *PersistenceManager) DeleteLogIndexesByGroup(mainLogPath string) error {
	q := query.NginxLogIndex
	result, err := q.Unscoped().Where(q.MainLogPath.Eq(mainLogPath)).Delete()
	if err != nil {
		return fmt.Errorf("failed to delete log indexes for group %s: %w", mainLogPath, err)
	}
	logger.Infof("Deleted %d log index records for group: %s", result.RowsAffected, mainLogPath)
	return nil
}

// RefreshCache refreshes the enabled paths cache.
func (pm *PersistenceManager) RefreshCache() error {
	q := query.NginxLogIndex
	indexes, err := q.Select(q.Path, q.Enabled).Find()
	if err != nil {
		return fmt.Errorf("failed to refresh cache: %w", err)
	}

	// Rebuild cache
	pm.enabledPaths = make(map[string]bool)
	for _, index := range indexes {
		pm.enabledPaths[index.Path] = index.Enabled
	}
	return nil
}

// IncrementalIndexStats represents statistics specific to incremental indexing.
type IncrementalIndexStats struct {
	GroupFiles   int64 `json:"group_files"`
	ChangedFiles int   `json:"changed_files"`
	OldFiles     int   `json:"old_files"`
	NeedsReindex int   `json:"needs_reindex"`
}

// GetIncrementalIndexStats returns statistics specific to incremental indexing.
func (pm *PersistenceManager) GetIncrementalIndexStats(mainLogPath string) (*IncrementalIndexStats, error) {
	q := query.NginxLogIndex

	// Files in this log group
	groupCount, err := q.Where(q.MainLogPath.Eq(mainLogPath), q.Enabled.Is(true)).Count()
	if err != nil {
		return nil, fmt.Errorf("failed to count group files: %w", err)
	}

	// Files needing incremental update
	changedFiles, err := pm.GetChangedFiles(mainLogPath)
	if err != nil {
		return nil, fmt.Errorf("failed to get changed files: %w", err)
	}

	// Files needing full reindex (older than 7 days)
	oldFiles, err := pm.GetFilesForFullReindex(mainLogPath, 7*24*time.Hour)
	if err != nil {
		return nil, fmt.Errorf("failed to get old files: %w", err)
	}

	return &IncrementalIndexStats{
		GroupFiles:   groupCount,
		ChangedFiles: len(changedFiles),
		OldFiles:     len(oldFiles),
		NeedsReindex: len(changedFiles) + len(oldFiles),
	}, nil
}

// getMainLogPathFromFile extracts the main log path from a file (including rotated files).
// Enhanced for better rotation pattern detection.
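//
// Illustrative mappings derived from the rules below (paths are placeholders):
//
//	/var/log/nginx/access.log.1             -> /var/log/nginx/access.log  (numbered rotation)
//	/var/log/nginx/access.1.log             -> /var/log/nginx/access.log  (middle-numbered rotation)
//	/var/log/nginx/access.log.20231201      -> /var/log/nginx/access.log  (YYYYMMDD suffix)
//	/var/log/nginx/access.log.2023-12-01.gz -> /var/log/nginx/access.log  (dated + compressed)
//	/var/log/nginx/access.log.2023.12.01    -> /var/log/nginx/access.log  (dot-separated date)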
func getMainLogPathFromFile(filePath string) string {
	dir := filepath.Dir(filePath)
	filename := filepath.Base(filePath)

	// Remove compression extensions (.gz, .bz2, .xz, .lz4)
	for _, ext := range []string{".gz", ".bz2", ".xz", ".lz4"} {
		filename = strings.TrimSuffix(filename, ext)
	}

	// Check if it's a dot-separated date rotation FIRST (access.log.YYYYMMDD or access.log.YYYY.MM.DD).
	// This must come before the numbered rotation check to avoid false positives.
	parts := strings.Split(filename, ".")
	if len(parts) >= 3 {
		// First check for multi-part date patterns like YYYY.MM.DD (need at least 4 parts total)
		if len(parts) >= 4 {
			// Try to match the last 3 parts as a date
			lastThreeParts := strings.Join(parts[len(parts)-3:], ".")
			// Check if this looks like a YYYY.MM.DD pattern
			if matched, _ := regexp.MatchString(`^\d{4}\.\d{2}\.\d{2}$`, lastThreeParts); matched {
				// Remove the date parts (last 3 parts)
				basenameParts := parts[:len(parts)-3]
				baseFilename := strings.Join(basenameParts, ".")
				return filepath.Join(dir, baseFilename)
			}
		}

		// Then check for single-part date patterns in the last part
		lastPart := parts[len(parts)-1]
		if isFullDatePattern(lastPart) { // Only match full date patterns, not partial ones
			// Remove the date part
			basenameParts := parts[:len(parts)-1]
			baseFilename := strings.Join(basenameParts, ".")
			return filepath.Join(dir, baseFilename)
		}
	}

	// Handle numbered rotation (access.log.1, access.log.2, etc.).
	// This comes AFTER the date pattern checks to avoid matching date components as rotation numbers.
	if match := regexp.MustCompile(`^(.+)\.(\d{1,3})$`).FindStringSubmatch(filename); len(match) > 1 {
		baseFilename := match[1]
		return filepath.Join(dir, baseFilename)
	}

	// Handle middle-numbered rotation (access.1.log, access.2.log)
	if match := regexp.MustCompile(`^(.+)\.(\d{1,3})\.log$`).FindStringSubmatch(filename); len(match) > 1 {
		baseName := match[1]
		return filepath.Join(dir, baseName+".log")
	}

	// Handle date-based rotation (access.20231201, access.2023-12-01, etc.)
	if isDatePattern(filename) {
		// The whole filename is a date, so we can't determine the exact base name;
		// fall back to access.log in the same directory.
		return filepath.Join(dir, "access.log") // Default assumption
	}

	// If no rotation pattern is found, return the original path
	return filePath
}

// isDatePattern checks if a string looks like a date pattern (including multi-part dates).
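//
// Examples of strings it accepts: "20231201", "2023-12-01", "231201", and
// "2023.12.01"; isFullDatePattern below accepts the first three but not the
// dot-separated form.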
func isDatePattern(s string) bool {
	// Check for full date patterns first
	if isFullDatePattern(s) {
		return true
	}

	// Check for multi-part date patterns like YYYY.MM.DD
	if matched, _ := regexp.MatchString(`^2\d{3}\.\d{2}\.\d{2}$`, s); matched {
		return true
	}
	return false
}

// isFullDatePattern checks if a string is a complete date pattern (not a partial one).
func isFullDatePattern(s string) bool {
	// Complete date patterns for log rotation
	patterns := []string{
		`^\d{8}$`,             // YYYYMMDD
		`^\d{4}-\d{2}-\d{2}$`, // YYYY-MM-DD
		`^\d{6}$`,             // YYMMDD
	}
	for _, pattern := range patterns {
		if matched, _ := regexp.MatchString(pattern, s); matched {
			return true
		}
	}
	return false
}