nginx_log.go

package cache

import (
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"sync"
	"time"

	"github.com/0xJacky/Nginx-UI/internal/helper"
	"github.com/0xJacky/Nginx-UI/internal/nginx"
	"github.com/0xJacky/Nginx-UI/settings"
	"github.com/fsnotify/fsnotify"
	"github.com/uozi-tech/cosy/logger"
)

// NginxLogCache represents a cached log entry from nginx configuration
type NginxLogCache struct {
	Path string `json:"path"` // Path to the log file
	Type string `json:"type"` // Type of log: "access" or "error"
	Name string `json:"name"` // Name of the log file
}

// NginxLogScanner is responsible for scanning and watching nginx config files for log directives
type NginxLogScanner struct {
	logCache      map[string]*NginxLogCache // Map of log path to cache entry
	cacheMutex    sync.RWMutex              // Mutex for protecting the cache
	watcher       *fsnotify.Watcher         // File system watcher
	scanTicker    *time.Ticker              // Ticker for periodic scanning
	initialized   bool                      // Whether the scanner has been initialized
	scanning      bool                      // Whether a scan is currently in progress
	scanMutex     sync.RWMutex              // Mutex for protecting the scanning state
	statusChan    chan bool                 // Channel to broadcast scanning status changes
	subscribers   map[chan bool]struct{}    // Set of subscribers
	subscriberMux sync.RWMutex              // Mutex for protecting the subscribers map
}

// Package-level singleton state for the log scanner
var (
	// logScanner is the singleton instance of NginxLogScanner
	logScanner     *NginxLogScanner
	scannerInitMux sync.Mutex
)

// Compile the regular expression for matching log directives
var (
	// This regex matches access_log or error_log, followed by a path and optional parameters, ending with a semicolon
	logDirectiveRegex = regexp.MustCompile(`(?m)(access_log|error_log)\s+([^\s;]+)(?:\s+[^;]+)?;`)
)
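
// Illustrative directives this regex is expected to capture (not an exhaustive list):
//
//	access_log /var/log/nginx/access.log main;
//	error_log  /var/log/nginx/error.log warn;
//
// Submatch 1 is the directive name and submatch 2 is the log path.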

// InitNginxLogScanner initializes the nginx log scanner
func InitNginxLogScanner() {
	scanner := GetNginxLogScanner()
	err := scanner.Initialize()
	if err != nil {
		logger.Error("Failed to initialize nginx log scanner:", err)
	}
}

// GetNginxLogScanner returns the singleton instance of NginxLogScanner
func GetNginxLogScanner() *NginxLogScanner {
	scannerInitMux.Lock()
	defer scannerInitMux.Unlock()

	if logScanner == nil {
		logScanner = &NginxLogScanner{
			logCache:    make(map[string]*NginxLogCache),
			statusChan:  make(chan bool, 10), // Buffer to prevent blocking
			subscribers: make(map[chan bool]struct{}),
		}

		// Start broadcaster goroutine
		go logScanner.broadcastStatus()
	}
	return logScanner
}

// broadcastStatus listens for status changes and broadcasts to all subscribers
func (s *NginxLogScanner) broadcastStatus() {
	for status := range s.statusChan {
		s.subscriberMux.RLock()
		for ch := range s.subscribers {
			// Non-blocking send to prevent slow subscribers from blocking others
			select {
			case ch <- status:
			default:
				// Skip if the channel buffer is full
			}
		}
		s.subscriberMux.RUnlock()
	}
}

// SubscribeStatusChanges allows a client to subscribe to scanning status changes
func SubscribeStatusChanges() chan bool {
	s := GetNginxLogScanner()
	ch := make(chan bool, 5) // Buffer to prevent blocking

	// Add to subscribers
	s.subscriberMux.Lock()
	s.subscribers[ch] = struct{}{}
	s.subscriberMux.Unlock()

	// Send current status immediately
	s.scanMutex.RLock()
	currentStatus := s.scanning
	s.scanMutex.RUnlock()

	// Non-blocking send
	select {
	case ch <- currentStatus:
	default:
	}

	return ch
}

// UnsubscribeStatusChanges removes a subscriber from receiving status updates
func UnsubscribeStatusChanges(ch chan bool) {
	s := GetNginxLogScanner()
	s.subscriberMux.Lock()
	delete(s.subscribers, ch)
	s.subscriberMux.Unlock()

	// Close the channel so the client knows it's unsubscribed
	close(ch)
}
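
// A minimal usage sketch for a caller outside this package (illustrative only;
// the loop ends when UnsubscribeStatusChanges closes the channel):
//
//	ch := cache.SubscribeStatusChanges()
//	defer cache.UnsubscribeStatusChanges(ch)
//	for scanning := range ch {
//		// React to scanning status changes, e.g. forward them to a UI client
//		_ = scanning
//	}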

// Initialize sets up the log scanner and starts watching for file changes
func (s *NginxLogScanner) Initialize() error {
	if s.initialized {
		return nil
	}

	// Create a new watcher
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	s.watcher = watcher

	// Scan for the first time
	err = s.ScanAllConfigs()
	if err != nil {
		return err
	}

	// Set up the watcher for the config directories
	configDir := filepath.Dir(nginx.GetConfPath("", ""))
	availableDir := nginx.GetConfPath("sites-available", "")
	enabledDir := nginx.GetConfPath("sites-enabled", "")
	streamAvailableDir := nginx.GetConfPath("stream-available", "")
	streamEnabledDir := nginx.GetConfPath("stream-enabled", "")

	// Watch the main config directory
	err = s.watcher.Add(configDir)
	if err != nil {
		logger.Error("Failed to watch config directory:", err)
	}

	// Watch sites-available and sites-enabled if they exist
	if _, err := os.Stat(availableDir); err == nil {
		err = s.watcher.Add(availableDir)
		if err != nil {
			logger.Error("Failed to watch sites-available directory:", err)
		}
	}
	if _, err := os.Stat(enabledDir); err == nil {
		err = s.watcher.Add(enabledDir)
		if err != nil {
			logger.Error("Failed to watch sites-enabled directory:", err)
		}
	}

	// Watch stream-available and stream-enabled if they exist
	if _, err := os.Stat(streamAvailableDir); err == nil {
		err = s.watcher.Add(streamAvailableDir)
		if err != nil {
			logger.Error("Failed to watch stream-available directory:", err)
		}
	}
	if _, err := os.Stat(streamEnabledDir); err == nil {
		err = s.watcher.Add(streamEnabledDir)
		if err != nil {
			logger.Error("Failed to watch stream-enabled directory:", err)
		}
	}

	// Start the watcher goroutine
	go s.watchForChanges()

	// Set up a ticker for periodic scanning (every 5 minutes)
	s.scanTicker = time.NewTicker(5 * time.Minute)
	go func() {
		for range s.scanTicker.C {
			err := s.ScanAllConfigs()
			if err != nil {
				logger.Error("Periodic config scan failed:", err)
			}
		}
	}()

	s.initialized = true
	return nil
}

// watchForChanges handles the fsnotify events and triggers rescans when necessary
func (s *NginxLogScanner) watchForChanges() {
	for {
		select {
		case event, ok := <-s.watcher.Events:
			if !ok {
				return
			}
			// Check if this is a relevant event (create, write, rename, remove)
			if event.Has(fsnotify.Create) || event.Has(fsnotify.Write) ||
				event.Has(fsnotify.Rename) || event.Has(fsnotify.Remove) {
				// If it's a directory, add it to the watch list
				if event.Has(fsnotify.Create) {
					fi, err := os.Stat(event.Name)
					if err == nil && fi.IsDir() {
						_ = s.watcher.Add(event.Name)
					}
				}

				// Process file changes - no .conf restriction anymore
				if !event.Has(fsnotify.Remove) {
					logger.Debug("Config file changed:", event.Name)
					// Give the system a moment to finish writing the file
					time.Sleep(100 * time.Millisecond)
					// Only scan the changed file instead of all configs
					err := s.scanSingleFile(event.Name)
					if err != nil {
						logger.Error("Failed to scan changed file:", err)
					}
				} else {
					// For removed files, we need to clean up any log entries that came from this file.
					// That would require tracking which logs came from which config files,
					// so for now we do a full rescan, which is simpler but less efficient.
					err := s.ScanAllConfigs()
					if err != nil {
						logger.Error("Failed to rescan configs after file removal:", err)
					}
				}
			}
		case err, ok := <-s.watcher.Errors:
			if !ok {
				return
			}
			logger.Error("Watcher error:", err)
		}
	}
}

// scanSingleFile scans a single file and updates the log cache accordingly
func (s *NginxLogScanner) scanSingleFile(filePath string) error {
	// Set scanning state to true
	s.scanMutex.Lock()
	wasScanning := s.scanning
	s.scanning = true
	if !wasScanning {
		// Only broadcast if status changed from not scanning to scanning
		s.statusChan <- true
	}
	s.scanMutex.Unlock()

	// Ensure we reset the scanning state when done
	defer func() {
		s.scanMutex.Lock()
		s.scanning = false
		// Broadcast the completion
		s.statusChan <- false
		s.scanMutex.Unlock()
	}()

	// Create a temporary cache for new entries from this file
	newEntries := make(map[string]*NginxLogCache)

	// Scan the file
	err := s.scanConfigFile(filePath, newEntries)
	if err != nil {
		return err
	}

	// Update the main cache with the new entries
	s.cacheMutex.Lock()
	for path, entry := range newEntries {
		s.logCache[path] = entry
	}
	s.cacheMutex.Unlock()

	return nil
}

// ScanAllConfigs scans all nginx config files for log directives
func (s *NginxLogScanner) ScanAllConfigs() error {
	// Set scanning state to true
	s.scanMutex.Lock()
	wasScanning := s.scanning
	s.scanning = true
	if !wasScanning {
		// Only broadcast if status changed from not scanning to scanning
		s.statusChan <- true
	}
	s.scanMutex.Unlock()

	// Ensure we reset the scanning state when done
	defer func() {
		s.scanMutex.Lock()
		s.scanning = false
		// Broadcast the completion
		s.statusChan <- false
		s.scanMutex.Unlock()
	}()

	// Initialize a new cache to replace the old one
	newCache := make(map[string]*NginxLogCache)

	// Get the main config file
	mainConfigPath := nginx.GetConfPath("", "nginx.conf")
	err := s.scanConfigFile(mainConfigPath, newCache)
	if err != nil {
		logger.Error("Failed to scan main config:", err)
	}

	// Scan the sites-available directory - no .conf restriction anymore
	sitesAvailablePath := nginx.GetConfPath("sites-available", "")
	sitesAvailableFiles, err := os.ReadDir(sitesAvailablePath)
	if err == nil {
		for _, file := range sitesAvailableFiles {
			if !file.IsDir() {
				configPath := filepath.Join(sitesAvailablePath, file.Name())
				err := s.scanConfigFile(configPath, newCache)
				if err != nil {
					logger.Error("Failed to scan config:", configPath, err)
				}
			}
		}
	}

	// Scan the stream-available directory if it exists
	streamAvailablePath := nginx.GetConfPath("stream-available", "")
	streamAvailableFiles, err := os.ReadDir(streamAvailablePath)
	if err == nil {
		for _, file := range streamAvailableFiles {
			if !file.IsDir() {
				configPath := filepath.Join(streamAvailablePath, file.Name())
				err := s.scanConfigFile(configPath, newCache)
				if err != nil {
					logger.Error("Failed to scan stream config:", configPath, err)
				}
			}
		}
	}

	// Replace the old cache with the new one
	s.cacheMutex.Lock()
	s.logCache = newCache
	s.cacheMutex.Unlock()

	return nil
}

// scanConfigFile scans a single config file for log directives using regex
func (s *NginxLogScanner) scanConfigFile(configPath string, cache map[string]*NginxLogCache) error {
	// Read the entire file content (the previous separate os.Open was redundant)
	content, err := os.ReadFile(configPath)
	if err != nil {
		return err
	}

	// Find all matches of log directives
	matches := logDirectiveRegex.FindAllSubmatch(content, -1)
	for _, match := range matches {
		if len(match) >= 3 {
			directiveType := string(match[1]) // "access_log" or "error_log"
			logPath := string(match[2])       // The log file path

			// Validate the log path
			if isValidLogPath(logPath) {
				logType := "access"
				if directiveType == "error_log" {
					logType = "error"
				}

				cache[logPath] = &NginxLogCache{
					Path: logPath,
					Type: logType,
					Name: filepath.Base(logPath),
				}
			}
		}
	}

	// Look for include directives to process included files
	includeRegex := regexp.MustCompile(`include\s+([^;]+);`)
	includeMatches := includeRegex.FindAllSubmatch(content, -1)

	for _, match := range includeMatches {
		if len(match) >= 2 {
			includePath := string(match[1])

			// Handle glob patterns in include directives
			if strings.Contains(includePath, "*") {
				// If it's a relative path, make it absolute based on the nginx config dir
				if !filepath.IsAbs(includePath) {
					configDir := filepath.Dir(nginx.GetConfPath("", ""))
					includePath = filepath.Join(configDir, includePath)
				}

				// Expand the glob pattern
				matchedFiles, err := filepath.Glob(includePath)
				if err != nil {
					logger.Error("Error expanding glob pattern:", includePath, err)
					continue
				}

				// Process each matched file
				for _, matchedFile := range matchedFiles {
					fileInfo, err := os.Stat(matchedFile)
					if err == nil && !fileInfo.IsDir() {
						err = s.scanConfigFile(matchedFile, cache)
						if err != nil {
							logger.Error("Failed to scan included file:", matchedFile, err)
						}
					}
				}
			} else {
				// Handle a single file include
				// If it's a relative path, make it absolute based on the nginx config dir
				if !filepath.IsAbs(includePath) {
					configDir := filepath.Dir(nginx.GetConfPath("", ""))
					includePath = filepath.Join(configDir, includePath)
				}

				fileInfo, err := os.Stat(includePath)
				if err == nil && !fileInfo.IsDir() {
					err = s.scanConfigFile(includePath, cache)
					if err != nil {
						logger.Error("Failed to scan included file:", includePath, err)
					}
				}
			}
		}
	}

	return nil
}

// isLogPathUnderWhiteList checks if the log path is under one of the paths in LogDirWhiteList.
// This is a duplicate of the function in the nginx_log package to avoid an import cycle.
func isLogPathUnderWhiteList(path string) bool {
	// Copy the whitelist so the appends below don't mutate the settings slice
	logDirWhiteList := append([]string{}, settings.NginxSettings.LogDirWhiteList...)

	accessLogPath := nginx.GetAccessLogPath()
	errorLogPath := nginx.GetErrorLogPath()
	if accessLogPath != "" {
		logDirWhiteList = append(logDirWhiteList, filepath.Dir(accessLogPath))
	}
	if errorLogPath != "" {
		logDirWhiteList = append(logDirWhiteList, filepath.Dir(errorLogPath))
	}

	for _, whitePath := range logDirWhiteList {
		if helper.IsUnderDirectory(path, whitePath) {
			return true
		}
	}
	return false
}

// isValidLogPath checks if a log path is valid:
// 1. It must be a regular file or a symlink to a regular file
// 2. It must not point to a console or special device
// 3. It must be under the whitelist directories
func isValidLogPath(logPath string) bool {
	// First check if the path is under the whitelist
	if !isLogPathUnderWhiteList(logPath) {
		logger.Warn("Log path is not under whitelist:", logPath)
		return false
	}

	// Check if the path exists
	fileInfo, err := os.Lstat(logPath)
	if err != nil {
		// If the file doesn't exist, it might be created later,
		// so we assume it's valid for now
		return true
	}

	// If it's a symlink, follow it
	if fileInfo.Mode()&os.ModeSymlink != 0 {
		linkTarget, err := os.Readlink(logPath)
		if err != nil {
			return false
		}

		// Make the path absolute if the link target is relative
		if !filepath.IsAbs(linkTarget) {
			linkTarget = filepath.Join(filepath.Dir(logPath), linkTarget)
		}

		// Check the target file
		targetInfo, err := os.Stat(linkTarget)
		if err != nil {
			return false
		}

		// Only accept regular files as targets
		return targetInfo.Mode().IsRegular()
	}

	// For non-symlinks, just check if it's a regular file
	return fileInfo.Mode().IsRegular()
}
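
// Illustrative outcomes of the checks above, assuming both paths are whitelisted:
//
//	/var/log/nginx/access.log -> accepted (regular file, or not yet created)
//	/dev/stdout               -> rejected when it resolves to a console or other non-regular file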

// Shutdown cleans up resources used by the scanner
func (s *NginxLogScanner) Shutdown() {
	if s.watcher != nil {
		s.watcher.Close()
	}
	if s.scanTicker != nil {
		s.scanTicker.Stop()
	}

	// Clean up subscriber resources
	s.subscriberMux.Lock()
	// Close all subscriber channels
	for ch := range s.subscribers {
		close(ch)
	}
	// Clear the map
	s.subscribers = make(map[chan bool]struct{})
	s.subscriberMux.Unlock()

	// Close the status channel
	close(s.statusChan)
}

// GetAllLogPaths returns all cached log paths, keeping only entries that satisfy every given filter
func GetAllLogPaths(filters ...func(*NginxLogCache) bool) []*NginxLogCache {
	s := GetNginxLogScanner()
	s.cacheMutex.RLock()
	defer s.cacheMutex.RUnlock()

	result := make([]*NginxLogCache, 0, len(s.logCache))
	for _, cache := range s.logCache {
		flag := true
		if len(filters) > 0 {
			for _, filter := range filters {
				if !filter(cache) {
					flag = false
					break
				}
			}
		}
		if flag {
			result = append(result, cache)
		}
	}
	return result
}
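
// A hypothetical filter usage from an importing package, for illustration only:
//
//	accessLogs := cache.GetAllLogPaths(func(c *cache.NginxLogCache) bool {
//		return c.Type == "access"
//	})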

// IsScanning returns whether a scan is currently in progress
func IsScanning() bool {
	s := GetNginxLogScanner()
	s.scanMutex.RLock()
	defer s.scanMutex.RUnlock()
	return s.scanning
}