Browse Source

refactor: add bleve search integration and enhance config list #1207

Jacky 1 day ago
parent
commit
544d2badec

+ 98 - 5
api/config/list.go

@@ -3,18 +3,45 @@ package config
 import (
 	"net/http"
 	"os"
+	"path/filepath"
+	"strconv"
 	"strings"
 
 	"github.com/0xJacky/Nginx-UI/internal/config"
 	"github.com/0xJacky/Nginx-UI/internal/helper"
+	"github.com/0xJacky/Nginx-UI/internal/nginx"
+	"github.com/0xJacky/Nginx-UI/model"
 	"github.com/gin-gonic/gin"
 	"github.com/uozi-tech/cosy"
 )
 
+// ConfigFileEntity represents a generic configuration file entity
+type ConfigFileEntity struct {
+	path       string
+	envGroupID uint64
+	envGroup   *model.EnvGroup
+}
+
+// GetPath implements ConfigEntity interface
+func (c *ConfigFileEntity) GetPath() string {
+	return c.path
+}
+
+// GetEnvGroupID implements ConfigEntity interface
+func (c *ConfigFileEntity) GetEnvGroupID() uint64 {
+	return c.envGroupID
+}
+
+// GetEnvGroup implements ConfigEntity interface
+func (c *ConfigFileEntity) GetEnvGroup() *model.EnvGroup {
+	return c.envGroup
+}
+
 func GetConfigs(c *gin.Context) {
-	name := c.Query("name")
+	search := c.Query("search")
 	sortBy := c.Query("sort_by")
 	order := c.DefaultQuery("order", "desc")
+	envGroupIDStr := c.Query("env_group_id")
 
 	// Get directory parameter
 	encodedDir := c.DefaultQuery("dir", "/")
@@ -28,17 +55,83 @@ func GetConfigs(c *gin.Context) {
 		dir = strings.TrimSuffix(dir, "/")
 	}
 
-	configs, err := config.GetConfigList(dir, func(file os.FileInfo) bool {
-		return name == "" || strings.Contains(file.Name(), name)
-	})
+	// Parse env_group_id
+	var envGroupID uint64
+	if envGroupIDStr != "" {
+		if id, err := strconv.ParseUint(envGroupIDStr, 10, 64); err == nil {
+			envGroupID = id
+		}
+	}
+
+	// Create options
+	options := &config.GenericListOptions{
+		Search:      search,
+		OrderBy:     sortBy,
+		Sort:        order,
+		EnvGroupID:  envGroupID,
+		IncludeDirs: true, // Keep directories for the list.go endpoint
+	}
+
+	// Get config files from directory and create entities
+	configFiles, err := os.ReadDir(nginx.GetConfPath(dir))
 	if err != nil {
 		cosy.ErrHandler(c, err)
 		return
 	}
 
-	configs = config.Sort(sortBy, order, configs)
+	// Create entities for each config file
+	var entities []*ConfigFileEntity
+	for _, file := range configFiles {
+		// Skip directories only if IncludeDirs is false
+		if file.IsDir() && !options.IncludeDirs {
+			continue
+		}
+
+		// For generic config files, we don't have database records
+		// so envGroupID and envGroup will be 0 and nil
+		entity := &ConfigFileEntity{
+			path:       filepath.Join(nginx.GetConfPath(dir), file.Name()),
+			envGroupID: 0,
+			envGroup:   nil,
+		}
+		entities = append(entities, entity)
+	}
+
+	// Create processor for generic config files
+	processor := &config.GenericConfigProcessor{
+		Paths: config.ConfigPaths{
+			AvailableDir: dir,
+			EnabledDir:   dir, // For generic configs, available and enabled are the same
+		},
+		StatusMapBuilder: config.DefaultStatusMapBuilder,
+		ConfigBuilder:    createConfigBuilder(dir),
+		FilterMatcher:    config.DefaultFilterMatcher,
+	}
+
+	// Get configurations using the generic processor
+	configs, err := config.GetGenericConfigs(c, options, entities, processor)
+	if err != nil {
+		cosy.ErrHandler(c, err)
+		return
+	}
 
 	c.JSON(http.StatusOK, gin.H{
 		"data": configs,
 	})
 }
+
+// createConfigBuilder creates a custom config builder for generic config files
+func createConfigBuilder(dir string) config.ConfigBuilder {
+	return func(fileName string, fileInfo os.FileInfo, status config.ConfigStatus, envGroupID uint64, envGroup *model.EnvGroup) config.Config {
+		return config.Config{
+			Name:       fileName,
+			ModifiedAt: fileInfo.ModTime(),
+			Size:       fileInfo.Size(),
+			IsDir:      fileInfo.IsDir(),
+			Status:     status,
+			EnvGroupID: envGroupID,
+			EnvGroup:   envGroup,
+			Dir:        dir,
+		}
+	}
+}

+ 16 - 104
api/sites/list.go

@@ -2,132 +2,44 @@ package sites
 
 import (
 	"net/http"
-	"os"
-	"path/filepath"
-	"strings"
 
-	"github.com/0xJacky/Nginx-UI/internal/config"
-	"github.com/0xJacky/Nginx-UI/internal/nginx"
 	"github.com/0xJacky/Nginx-UI/internal/site"
-	"github.com/0xJacky/Nginx-UI/model"
 	"github.com/0xJacky/Nginx-UI/query"
 	"github.com/gin-gonic/gin"
-	"github.com/samber/lo"
 	"github.com/spf13/cast"
 	"github.com/uozi-tech/cosy"
 )
 
 func GetSiteList(c *gin.Context) {
-	name := c.Query("name")
-	status := c.Query("status")
-	orderBy := c.Query("sort_by")
-	sort := c.DefaultQuery("order", "desc")
-	queryEnvGroupId := cast.ToUint64(c.Query("env_group_id"))
-
-	configFiles, err := os.ReadDir(nginx.GetConfPath("sites-available"))
-	if err != nil {
-		cosy.ErrHandler(c, cosy.WrapErrorWithParams(site.ErrReadDirFailed, err.Error()))
-		return
-	}
-
-	enabledConfig, err := os.ReadDir(nginx.GetConfPath("sites-enabled"))
-	if err != nil {
-		cosy.ErrHandler(c, cosy.WrapErrorWithParams(site.ErrReadDirFailed, err.Error()))
-		return
+	// Parse query parameters
+	options := &site.ListOptions{
+		Search:     c.Query("search"),
+		Status:     c.Query("status"),
+		OrderBy:    c.Query("sort_by"),
+		Sort:       c.DefaultQuery("order", "desc"),
+		EnvGroupID: cast.ToUint64(c.Query("env_group_id")),
 	}
 
+	// Get sites from database
 	s := query.Site
 	sTx := s.Preload(s.EnvGroup)
-	if queryEnvGroupId != 0 {
-		sTx.Where(s.EnvGroupID.Eq(queryEnvGroupId))
+	if options.EnvGroupID != 0 {
+		sTx = sTx.Where(s.EnvGroupID.Eq(options.EnvGroupID))
 	}
+
 	sites, err := sTx.Find()
 	if err != nil {
 		cosy.ErrHandler(c, err)
 		return
 	}
-	sitesMap := lo.SliceToMap(sites, func(item *model.Site) (string, *model.Site) {
-		return filepath.Base(item.Path), item
-	})
-
-	configStatusMap := make(map[string]config.ConfigStatus)
-	for _, site := range configFiles {
-		configStatusMap[site.Name()] = config.StatusDisabled
-	}
-
-	// Check for enabled sites and maintenance mode sites
-	for _, enabledSite := range enabledConfig {
-		name := enabledSite.Name()
 
-		// Check if this is a maintenance mode configuration
-		if strings.HasSuffix(name, site.MaintenanceSuffix) {
-			// Extract the original site name by removing maintenance suffix
-			originalName := strings.TrimSuffix(name, site.MaintenanceSuffix)
-			configStatusMap[originalName] = config.StatusMaintenance
-		} else {
-			configStatusMap[nginx.GetConfNameBySymlinkName(name)] = config.StatusEnabled
-		}
-	}
-
-	var configs []config.Config
-
-	for i := range configFiles {
-		file := configFiles[i]
-		fileInfo, _ := file.Info()
-		if file.IsDir() {
-			continue
-		}
-		// name filter
-		if name != "" && !strings.Contains(file.Name(), name) {
-			continue
-		}
-		// status filter
-		if status != "" && configStatusMap[file.Name()] != config.ConfigStatus(status) {
-			continue
-		}
-
-		var (
-			envGroupId uint64
-			envGroup   *model.EnvGroup
-		)
-
-		if site, ok := sitesMap[file.Name()]; ok {
-			envGroupId = site.EnvGroupID
-			envGroup = site.EnvGroup
-		}
-
-		// env group filter
-		if queryEnvGroupId != 0 && envGroupId != queryEnvGroupId {
-			continue
-		}
-
-		indexedSite := site.GetIndexedSite(file.Name())
-
-		// Convert site.ProxyTarget to config.ProxyTarget
-		var proxyTargets []config.ProxyTarget
-		for _, target := range indexedSite.ProxyTargets {
-			proxyTargets = append(proxyTargets, config.ProxyTarget{
-				Host: target.Host,
-				Port: target.Port,
-				Type: target.Type,
-			})
-		}
-
-		configs = append(configs, config.Config{
-			Name:         file.Name(),
-			ModifiedAt:   fileInfo.ModTime(),
-			Size:         fileInfo.Size(),
-			IsDir:        fileInfo.IsDir(),
-			Status:       configStatusMap[file.Name()],
-			EnvGroupID:   envGroupId,
-			EnvGroup:     envGroup,
-			Urls:         indexedSite.Urls,
-			ProxyTargets: proxyTargets,
-		})
+	// Get site configurations using the internal logic
+	configs, err := site.GetSiteConfigs(c, options, sites)
+	if err != nil {
+		cosy.ErrHandler(c, err)
+		return
 	}
 
-	configs = config.Sort(orderBy, sort, configs)
-
 	c.JSON(http.StatusOK, gin.H{
 		"data": configs,
 	})

+ 50 - 190
api/streams/streams.go

@@ -2,9 +2,6 @@ package streams
 
 import (
 	"net/http"
-	"os"
-	"path/filepath"
-	"strings"
 	"time"
 
 	"github.com/0xJacky/Nginx-UI/internal/config"
@@ -35,39 +32,35 @@ type Stream struct {
 }
 
 func GetStreams(c *gin.Context) {
-	name := c.Query("name")
-	status := c.Query("status")
-	orderBy := c.Query("order_by")
-	sort := c.DefaultQuery("sort", "desc")
-	queryEnvGroupId := cast.ToUint64(c.Query("env_group_id"))
-
-	configFiles, err := os.ReadDir(nginx.GetConfPath("streams-available"))
-	if err != nil {
-		cosy.ErrHandler(c, cosy.WrapErrorWithParams(stream.ErrReadDirFailed, err.Error()))
-		return
+	// Parse query parameters
+	options := &stream.ListOptions{
+		Search:     c.Query("search"),
+		Status:     c.Query("status"),
+		OrderBy:    c.Query("order_by"),
+		Sort:       c.DefaultQuery("sort", "desc"),
+		EnvGroupID: cast.ToUint64(c.Query("env_group_id")),
 	}
 
-	enabledConfig, err := os.ReadDir(nginx.GetConfPath("streams-enabled"))
+	// Get streams from database
+	s := query.Stream
+	eg := query.EnvGroup
+
+	// Get environment groups for association
+	envGroups, err := eg.Find()
 	if err != nil {
-		cosy.ErrHandler(c, cosy.WrapErrorWithParams(stream.ErrReadDirFailed, err.Error()))
+		cosy.ErrHandler(c, err)
 		return
 	}
 
-	enabledConfigMap := make(map[string]config.ConfigStatus)
-	for _, file := range configFiles {
-		enabledConfigMap[file.Name()] = config.StatusDisabled
-	}
-	for i := range enabledConfig {
-		enabledConfigMap[nginx.GetConfNameBySymlinkName(enabledConfig[i].Name())] = config.StatusEnabled
-	}
-
-	var configs []config.Config
+	// Create environment group map for quick lookup
+	envGroupMap := lo.SliceToMap(envGroups, func(item *model.EnvGroup) (uint64, *model.EnvGroup) {
+		return item.ID, item
+	})
 
-	// Get all streams map for Node Group lookup
-	s := query.Stream
+	// Get streams with optional filtering
 	var streams []*model.Stream
-	if queryEnvGroupId != 0 {
-		streams, err = s.Where(s.EnvGroupID.Eq(queryEnvGroupId)).Find()
+	if options.EnvGroupID != 0 {
+		streams, err = s.Where(s.EnvGroupID.Eq(options.EnvGroupID)).Find()
 	} else {
 		streams, err = s.Find()
 	}
@@ -76,88 +69,19 @@ func GetStreams(c *gin.Context) {
 		return
 	}
 
-	// Retrieve Node Groups data
-	eg := query.EnvGroup
-	envGroups, err := eg.Find()
+	// Associate streams with their environment groups
+	for _, stream := range streams {
+		if stream.EnvGroupID > 0 {
+			stream.EnvGroup = envGroupMap[stream.EnvGroupID]
+		}
+	}
+
+	// Get stream configurations using the internal logic
+	configs, err := stream.GetStreamConfigs(c, options, streams)
 	if err != nil {
 		cosy.ErrHandler(c, err)
 		return
 	}
-	// Create a map of Node Groups for quick lookup by ID
-	envGroupMap := lo.SliceToMap(envGroups, func(item *model.EnvGroup) (uint64, *model.EnvGroup) {
-		return item.ID, item
-	})
-
-	// Convert streams slice to map for efficient lookups
-	streamsMap := lo.SliceToMap(streams, func(item *model.Stream) (string, *model.Stream) {
-		// Associate each stream with its corresponding Node Group
-		if item.EnvGroupID > 0 {
-			item.EnvGroup = envGroupMap[item.EnvGroupID]
-		}
-		return filepath.Base(item.Path), item
-	})
-
-	for i := range configFiles {
-		file := configFiles[i]
-		fileInfo, _ := file.Info()
-		if file.IsDir() {
-			continue
-		}
-
-		// Apply name filter if specified
-		if name != "" && !strings.Contains(file.Name(), name) {
-			continue
-		}
-
-		// Apply enabled status filter if specified
-		if status != "" && enabledConfigMap[file.Name()] != config.ConfigStatus(status) {
-			continue
-		}
-
-		var (
-			envGroupId uint64
-			envGroup   *model.EnvGroup
-		)
-
-		// Lookup stream in the streams map to get Node Group info
-		if stream, ok := streamsMap[file.Name()]; ok {
-			envGroupId = stream.EnvGroupID
-			envGroup = stream.EnvGroup
-		}
-
-		// Apply Node Group filter if specified
-		if queryEnvGroupId != 0 && envGroupId != queryEnvGroupId {
-			continue
-		}
-
-		// Get indexed stream for proxy targets
-		indexedStream := stream.GetIndexedStream(file.Name())
-
-		// Convert stream.ProxyTarget to config.ProxyTarget
-		var proxyTargets []config.ProxyTarget
-		for _, target := range indexedStream.ProxyTargets {
-			proxyTargets = append(proxyTargets, config.ProxyTarget{
-				Host: target.Host,
-				Port: target.Port,
-				Type: target.Type,
-			})
-		}
-
-		// Add the config to the result list after passing all filters
-		configs = append(configs, config.Config{
-			Name:         file.Name(),
-			ModifiedAt:   fileInfo.ModTime(),
-			Size:         fileInfo.Size(),
-			IsDir:        fileInfo.IsDir(),
-			Status:       enabledConfigMap[file.Name()],
-			EnvGroupID:   envGroupId,
-			EnvGroup:     envGroup,
-			ProxyTargets: proxyTargets,
-		})
-	}
-
-	// Sort the configs based on the provided sort parameters
-	configs = config.Sort(orderBy, sort, configs)
 
 	c.JSON(http.StatusOK, gin.H{
 		"data": configs,
@@ -167,71 +91,33 @@ func GetStreams(c *gin.Context) {
 func GetStream(c *gin.Context) {
 	name := helper.UnescapeURL(c.Param("name"))
 
-	// Get the absolute path to the stream configuration file
-	path := nginx.GetConfPath("streams-available", name)
-	file, err := os.Stat(path)
-	if os.IsNotExist(err) {
-		c.JSON(http.StatusNotFound, gin.H{
-			"message": "file not found",
-		})
-		return
-	}
-
-	// Check if the stream is enabled
-	status := config.StatusEnabled
-	if _, err := os.Stat(nginx.GetConfPath("streams-enabled", name)); os.IsNotExist(err) {
-		status = config.StatusDisabled
-	}
-
-	// Retrieve or create stream model from database
-	s := query.Stream
-	streamModel, err := s.Where(s.Path.Eq(path)).FirstOrCreate()
+	// Get stream information using internal logic
+	info, err := stream.GetStreamInfo(name)
 	if err != nil {
 		cosy.ErrHandler(c, err)
 		return
 	}
 
-	// For advanced mode, return the raw content
-	if streamModel.Advanced {
-		origContent, err := os.ReadFile(path)
-		if err != nil {
-			cosy.ErrHandler(c, err)
-			return
-		}
-
-		c.JSON(http.StatusOK, Stream{
-			ModifiedAt:  file.ModTime(),
-			Advanced:    streamModel.Advanced,
-			Status:      status,
-			Name:        name,
-			Config:      string(origContent),
-			Filepath:    path,
-			EnvGroupID:  streamModel.EnvGroupID,
-			EnvGroup:    streamModel.EnvGroup,
-			SyncNodeIDs: streamModel.SyncNodeIDs,
-		})
-		return
+	// Build response based on advanced mode
+	response := Stream{
+		ModifiedAt:  info.FileInfo.ModTime(),
+		Advanced:    info.Model.Advanced,
+		Status:      info.Status,
+		Name:        name,
+		Filepath:    info.Path,
+		EnvGroupID:  info.Model.EnvGroupID,
+		EnvGroup:    info.Model.EnvGroup,
+		SyncNodeIDs: info.Model.SyncNodeIDs,
 	}
 
-	// For normal mode, parse and tokenize the configuration
-	nginxConfig, err := nginx.ParseNgxConfig(path)
-	if err != nil {
-		cosy.ErrHandler(c, err)
-		return
+	if info.Model.Advanced {
+		response.Config = info.RawContent
+	} else {
+		response.Config = info.NgxConfig.FmtCode()
+		response.Tokenized = info.NgxConfig
 	}
 
-	c.JSON(http.StatusOK, Stream{
-		ModifiedAt:  file.ModTime(),
-		Advanced:    streamModel.Advanced,
-		Status:      status,
-		Name:        name,
-		Config:      nginxConfig.FmtCode(),
-		Tokenized:   nginxConfig,
-		Filepath:    path,
-		EnvGroupID:  streamModel.EnvGroupID,
-		EnvGroup:    streamModel.EnvGroup,
-		SyncNodeIDs: streamModel.SyncNodeIDs,
-	})
+	c.JSON(http.StatusOK, response)
 }
 
 func SaveStream(c *gin.Context) {
@@ -250,34 +136,8 @@ func SaveStream(c *gin.Context) {
 		return
 	}
 
-	// Get stream from database or create if not exists
-	path := nginx.GetConfPath("streams-available", name)
-	s := query.Stream
-	streamModel, err := s.Where(s.Path.Eq(path)).FirstOrCreate()
-	if err != nil {
-		cosy.ErrHandler(c, err)
-		return
-	}
-
-	// Update Node Group ID if provided
-	if json.EnvGroupID > 0 {
-		streamModel.EnvGroupID = json.EnvGroupID
-	}
-
-	// Update synchronization node IDs if provided
-	if json.SyncNodeIDs != nil {
-		streamModel.SyncNodeIDs = json.SyncNodeIDs
-	}
-
-	// Save the updated stream model to database
-	_, err = s.Where(s.ID.Eq(streamModel.ID)).Updates(streamModel)
-	if err != nil {
-		cosy.ErrHandler(c, err)
-		return
-	}
-
-	// Save the stream configuration file
-	err = stream.Save(name, json.Content, json.Overwrite, json.SyncNodeIDs, json.PostAction)
+	// Save stream configuration using internal logic
+	err := stream.SaveStreamConfig(name, json.Content, json.EnvGroupID, json.SyncNodeIDs, json.Overwrite, json.PostAction)
 	if err != nil {
 		cosy.ErrHandler(c, err)
 		return

+ 1 - 1
app/src/constants/errors/config.ts

@@ -3,7 +3,7 @@ export default {
   50007: () => $gettext('Destination file: {0} already exists'),
   50008: () => $gettext('Nginx test failed: {0}'),
   50009: () => $gettext('Nginx reload failed: {0}'),
-  50010: () => $gettext('Cannot delete protected path: {0}'),
+  50010: () => $gettext('Cannot delete protected path'),
   50011: () => $gettext('File or directory not found: {0}'),
   50012: () => $gettext('You are not allowed to delete a file outside of the nginx config path'),
 }

+ 12 - 3
app/src/views/config/configColumns.tsx

@@ -2,13 +2,22 @@ import type { CustomRenderArgs, StdTableColumn } from '@uozi-admin/curd'
 import { datetimeRender } from '@uozi-admin/curd'
 
 const configColumns: StdTableColumn[] = [{
+  title: () => $gettext('Search'),
+  dataIndex: 'search',
+  search: {
+    type: 'input',
+    input: {
+      placeholder: $gettext('Name or content'),
+    },
+  },
+  hiddenInEdit: true,
+  hiddenInTable: true,
+  hiddenInDetail: true,
+}, {
   title: () => $gettext('Name'),
   dataIndex: 'name',
   sorter: true,
   pure: true,
-  search: {
-    type: 'input',
-  },
   customRender: ({ text, record }: CustomRenderArgs) => {
     function renderIcon(isDir: boolean) {
       return (

+ 13 - 1
app/src/views/site/site_list/columns.tsx

@@ -13,6 +13,19 @@ import envGroupColumns from '@/views/environments/group/columns'
 import SiteStatusSelect from '@/views/site/components/SiteStatusSelect.vue'
 
 const columns: StdTableColumn[] = [{
+  title: () => $gettext('Search'),
+  dataIndex: 'search',
+  search: {
+    type: 'input',
+    input: {
+      placeholder: $gettext('Name or content'),
+    },
+  },
+  width: 150,
+  hiddenInEdit: true,
+  hiddenInTable: true,
+  hiddenInDetail: true,
+}, {
   title: () => $gettext('Name'),
   dataIndex: 'name',
   sorter: true,
@@ -20,7 +33,6 @@ const columns: StdTableColumn[] = [{
   edit: {
     type: 'input',
   },
-  search: true,
   width: 150,
   customRender: ({ text, record }: CustomRenderArgs) => {
     const template: JSXElements = []

+ 13 - 1
app/src/views/stream/columns.tsx

@@ -9,6 +9,19 @@ import envGroupColumns from '@/views/environments/group/columns'
 import StreamStatusSelect from '@/views/stream/components/StreamStatusSelect.vue'
 
 const columns: StdTableColumn[] = [{
+  title: () => $gettext('Search'),
+  dataIndex: 'search',
+  search: {
+    type: 'input',
+    input: {
+      placeholder: $gettext('Name or content'),
+    },
+  },
+  width: 150,
+  hiddenInEdit: true,
+  hiddenInTable: true,
+  hiddenInDetail: true,
+}, {
   title: () => $gettext('Name'),
   dataIndex: 'name',
   sorter: true,
@@ -16,7 +29,6 @@ const columns: StdTableColumn[] = [{
   edit: {
     type: 'input',
   },
-  search: true,
   width: 150,
   customRender: ({ text }: CustomRenderArgs<Stream>) => {
     const template: JSXElements = []

+ 1 - 1
go.mod

@@ -10,6 +10,7 @@ require (
 	github.com/aws/aws-sdk-go-v2/config v1.29.17
 	github.com/aws/aws-sdk-go-v2/credentials v1.17.70
 	github.com/aws/aws-sdk-go-v2/service/s3 v1.83.0
+	github.com/blevesearch/bleve/v2 v2.5.2
 	github.com/caarlos0/env/v11 v11.3.1
 	github.com/casdoor/casdoor-go-sdk v1.7.0
 	github.com/creack/pty v1.1.24
@@ -106,7 +107,6 @@ require (
 	github.com/baidubce/bce-sdk-go v0.9.233 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/bits-and-blooms/bitset v1.22.0 // indirect
-	github.com/blevesearch/bleve/v2 v2.5.2 // indirect
 	github.com/blevesearch/bleve_index_api v1.2.8 // indirect
 	github.com/blevesearch/geo v0.2.3 // indirect
 	github.com/blevesearch/go-faiss v1.0.25 // indirect

+ 17 - 7
internal/cache/cache.go

@@ -8,33 +8,43 @@ import (
 	"github.com/uozi-tech/cosy/logger"
 )
 
+// Global cache instance
 var cache *ristretto.Cache[string, any]
 
+// Init initializes the cache system with search indexing and config scanning
 func Init(ctx context.Context) {
+	// Initialize the main cache
 	var err error
 	cache, err = ristretto.NewCache(&ristretto.Config[string, any]{
-		NumCounters: 1e7,     // number of keys to track frequency of (10M).
-		MaxCost:     1 << 30, // maximum cost of cache (1GB).
-		BufferItems: 64,      // number of keys per Get buffer.
+		NumCounters: 1e7,     // Track frequency of 10M keys
+		MaxCost:     1 << 30, // Maximum cache size: 1GB
+		BufferItems: 64,      // Keys per Get buffer
 	})
-
 	if err != nil {
-		logger.Fatal("initializing local cache err", err)
+		logger.Fatal("Failed to initialize cache:", err)
+	}
+
+	// Initialize search index
+	if err = InitSearchIndex(ctx); err != nil {
+		logger.Error("Failed to initialize search index:", err)
 	}
 
-	// Initialize the config scanner
+	// Initialize config file scanner
 	InitScanner(ctx)
 }
 
+// Set stores a value in the cache with a TTL
 func Set(key string, value interface{}, ttl time.Duration) {
 	cache.SetWithTTL(key, value, 0, ttl)
 	cache.Wait()
 }
 
-func Get(key string) (value interface{}, ok bool) {
+// Get retrieves a value from the cache
+func Get(key string) (interface{}, bool) {
 	return cache.Get(key)
 }
 
+// Del removes a value from cache
 func Del(key string) {
 	cache.Del(key)
 }

+ 158 - 321
internal/cache/index.go

@@ -2,9 +2,9 @@ package cache
 
 import (
 	"context"
+	"io/fs"
 	"os"
 	"path/filepath"
-	"regexp"
 	"strings"
 	"sync"
 	"time"
@@ -15,30 +15,21 @@ import (
 	"github.com/uozi-tech/cosy/logger"
 )
 
-// ScanCallback is a function that gets called during config scanning
-// It receives the config file path and contents
+// ScanCallback is called during config scanning with file path and content
 type ScanCallback func(configPath string, content []byte) error
 
-// Scanner is responsible for scanning and watching nginx config files
+// Scanner watches and scans nginx config files
 type Scanner struct {
-	ctx         context.Context   // Context for the scanner
-	watcher     *fsnotify.Watcher // File system watcher
-	scanTicker  *time.Ticker      // Ticker for periodic scanning
-	initialized bool              // Whether the scanner has been initialized
-	scanning    bool              // Whether a scan is currently in progress
-	scanMutex   sync.RWMutex      // Mutex for protecting the scanning state
+	ctx        context.Context
+	watcher    *fsnotify.Watcher
+	scanTicker *time.Ticker
+	scanning   bool
+	scanMutex  sync.RWMutex
 }
 
-// Global variables
 var (
-	// scanner is the singleton instance of Scanner
-	scanner              *Scanner
-	configScannerInitMux sync.Mutex
-
-	// This regex matches: include directives in nginx config files
-	includeRegex = regexp.MustCompile(`include\s+([^;]+);`)
-
-	// Global callbacks that will be executed during config file scanning
+	scanner            *Scanner
+	scannerInitMutex   sync.Mutex
 	scanCallbacks      = make([]ScanCallback, 0)
 	scanCallbacksMutex sync.RWMutex
 )
@@ -50,17 +41,16 @@ func InitScanner(ctx context.Context) {
 		return
 	}
 
-	s := GetScanner()
-	err := s.Initialize(ctx)
-	if err != nil {
+	scanner := GetScanner()
+	if err := scanner.Initialize(ctx); err != nil {
 		logger.Error("Failed to initialize config scanner:", err)
 	}
 }
 
-// GetScanner returns the singleton instance of Scanner
+// GetScanner returns the singleton scanner instance
 func GetScanner() *Scanner {
-	configScannerInitMux.Lock()
-	defer configScannerInitMux.Unlock()
+	scannerInitMutex.Lock()
+	defer scannerInitMutex.Unlock()
 
 	if scanner == nil {
 		scanner = &Scanner{}
@@ -68,29 +58,15 @@ func GetScanner() *Scanner {
 	return scanner
 }
 
-// RegisterCallback adds a callback function to be executed during scans
-// This function can be called before Scanner is initialized
+// RegisterCallback adds a callback to be executed during scans
 func RegisterCallback(callback ScanCallback) {
 	scanCallbacksMutex.Lock()
 	defer scanCallbacksMutex.Unlock()
 	scanCallbacks = append(scanCallbacks, callback)
 }
 
-// publishScanningStatus publishes the scanning status to the event bus
-func (s *Scanner) publishScanningStatus(scanning bool) {
-	event.Publish(event.Event{
-		Type: event.EventTypeIndexScanning,
-		Data: scanning,
-	})
-}
-
-// Initialize sets up the scanner and starts watching for file changes
+// Initialize sets up the scanner and starts watching
 func (s *Scanner) Initialize(ctx context.Context) error {
-	if s.initialized {
-		return nil
-	}
-
-	// Create a new watcher
 	watcher, err := fsnotify.NewWatcher()
 	if err != nil {
 		return err
@@ -98,86 +74,75 @@ func (s *Scanner) Initialize(ctx context.Context) error {
 	s.watcher = watcher
 	s.ctx = ctx
 
-	// Scan for the first time
-	err = s.ScanAllConfigs()
-	if err != nil {
+	// Initial scan
+	if err := s.ScanAllConfigs(); err != nil {
 		return err
 	}
 
-	// Setup watcher for config directory
-	configDir := filepath.Dir(nginx.GetConfPath())
-	availableDir := nginx.GetConfPath("sites-available")
-	enabledDir := nginx.GetConfPath("sites-enabled")
-	streamAvailableDir := nginx.GetConfPath("streams-available")
-	streamEnabledDir := nginx.GetConfPath("streams-enabled")
-
-	// Watch the main directories
-	err = s.watcher.Add(configDir)
-	if err != nil {
-		logger.Error("Failed to watch config directory:", err)
+	// Watch all directories recursively
+	if err := s.watchAllDirectories(); err != nil {
+		return err
 	}
 
-	// Watch sites-available and sites-enabled if they exist
-	if _, err := os.Stat(availableDir); err == nil {
-		err = s.watcher.Add(availableDir)
-		if err != nil {
-			logger.Error("Failed to watch sites-available directory:", err)
-		}
-	}
+	// Start background processes
+	go s.watchForChanges()
+	go s.periodicScan()
+	go s.handleShutdown()
 
-	if _, err := os.Stat(enabledDir); err == nil {
-		err = s.watcher.Add(enabledDir)
-		if err != nil {
-			logger.Error("Failed to watch sites-enabled directory:", err)
-		}
-	}
+	return nil
+}
 
-	// Watch streams-available and streams-enabled if they exist
-	if _, err := os.Stat(streamAvailableDir); err == nil {
-		err = s.watcher.Add(streamAvailableDir)
-		if err != nil {
-			logger.Error("Failed to watch streams-available directory:", err)
-		}
-	}
+// watchAllDirectories recursively adds all directories under the nginx config path to the watcher
+func (s *Scanner) watchAllDirectories() error {
+	root := nginx.GetConfPath()
+	sslDir := nginx.GetConfPath("ssl")
 
-	if _, err := os.Stat(streamEnabledDir); err == nil {
-		err = s.watcher.Add(streamEnabledDir)
+	return filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
 		if err != nil {
-			logger.Error("Failed to watch streams-enabled directory:", err)
+			return err
 		}
-	}
 
-	// Start the watcher goroutine
-	go s.watchForChanges()
+		if d.IsDir() {
+			// Skip ssl directory
+			if path == sslDir {
+				return filepath.SkipDir
+			}
 
-	// Setup a ticker for periodic scanning (every 5 minutes)
-	s.scanTicker = time.NewTicker(5 * time.Minute)
-	go func() {
-		for {
-			select {
-			case <-s.ctx.Done():
-				return
-			case <-s.scanTicker.C:
-				err := s.ScanAllConfigs()
-				if err != nil {
-					logger.Error("Periodic config scan failed:", err)
-				}
+			if err := s.watcher.Add(path); err != nil {
+				logger.Error("Failed to watch directory:", path, err)
+				return err
 			}
+			logger.Debug("Watching directory:", path)
 		}
-	}()
+		return nil
+	})
+}
 
-	// Start a goroutine to listen for context cancellation
-	go func() {
-		<-s.ctx.Done()
-		logger.Debug("Context cancelled, shutting down scanner")
-		s.Shutdown()
-	}()
+// periodicScan runs periodic scans every 5 minutes
+func (s *Scanner) periodicScan() {
+	s.scanTicker = time.NewTicker(5 * time.Minute)
+	defer s.scanTicker.Stop()
 
-	s.initialized = true
-	return nil
+	for {
+		select {
+		case <-s.ctx.Done():
+			return
+		case <-s.scanTicker.C:
+			if err := s.ScanAllConfigs(); err != nil {
+				logger.Error("Periodic scan failed:", err)
+			}
+		}
+	}
+}
+
+// handleShutdown listens for context cancellation and shuts down gracefully
+func (s *Scanner) handleShutdown() {
+	<-s.ctx.Done()
+	logger.Debug("Shutting down scanner")
+	s.Shutdown()
 }
 
-// watchForChanges handles the fsnotify events and triggers rescans when necessary
+// watchForChanges handles file system events
 func (s *Scanner) watchForChanges() {
 	for {
 		select {
@@ -187,52 +152,7 @@ func (s *Scanner) watchForChanges() {
 			if !ok {
 				return
 			}
-
-			// Skip irrelevant events
-			if !event.Has(fsnotify.Create) && !event.Has(fsnotify.Write) &&
-				!event.Has(fsnotify.Rename) && !event.Has(fsnotify.Remove) {
-				continue
-			}
-
-			// Add newly created directories to the watch list
-			if event.Has(fsnotify.Create) {
-				if fi, err := os.Stat(event.Name); err == nil && fi.IsDir() {
-					_ = s.watcher.Add(event.Name)
-				}
-			}
-
-			// For remove events, perform a full scan
-			if event.Has(fsnotify.Remove) {
-				logger.Debug("Config item removed:", event.Name)
-				if err := s.ScanAllConfigs(); err != nil {
-					logger.Error("Failed to rescan configs after removal:", err)
-				}
-				continue
-			}
-
-			// Handle non-remove events
-			fi, err := os.Stat(event.Name)
-			if err != nil {
-				logger.Error("Failed to stat changed path:", err)
-				continue
-			}
-
-			if fi.IsDir() {
-				// Directory change, perform full scan
-				logger.Debug("Config directory changed:", event.Name)
-				if err := s.ScanAllConfigs(); err != nil {
-					logger.Error("Failed to rescan configs after directory change:", err)
-				}
-			} else {
-				// File change, scan only the single file
-				logger.Debug("Config file changed:", event.Name)
-				// Give the system a moment to finish writing the file
-				time.Sleep(100 * time.Millisecond)
-				if err := s.scanSingleFile(event.Name); err != nil {
-					logger.Error("Failed to scan changed file:", err)
-				}
-			}
-
+			s.handleFileEvent(event)
 		case err, ok := <-s.watcher.Errors:
 			if !ok {
 				return
@@ -242,221 +162,138 @@ func (s *Scanner) watchForChanges() {
 	}
 }
 
-// scanSingleFile scans a single file and executes all registered callbacks
-func (s *Scanner) scanSingleFile(filePath string) error {
-	visited := make(map[string]bool)
-	return s.scanSingleFileWithDepth(filePath, visited, 0)
-}
+// handleFileEvent processes individual file system events
+func (s *Scanner) handleFileEvent(event fsnotify.Event) {
+	// Only handle relevant events
+	if !event.Has(fsnotify.Create) && !event.Has(fsnotify.Write) &&
+		!event.Has(fsnotify.Rename) && !event.Has(fsnotify.Remove) {
+		return
+	}
 
-// scanSingleFileWithDepth scans a single file with recursion protection
-func (s *Scanner) scanSingleFileWithDepth(filePath string, visited map[string]bool, depth int) error {
-	// Maximum recursion depth to prevent infinite recursion
-	const maxDepth = 5
+	// Skip ssl directory
+	sslDir := nginx.GetConfPath("ssl")
+	if strings.HasPrefix(event.Name, sslDir) {
+		return
+	}
 
-	if depth > maxDepth {
-		logger.Warn("Maximum recursion depth reached for file:", filePath)
-		return nil
+	// Add new directories to watch
+	if event.Has(fsnotify.Create) {
+		if fi, err := os.Stat(event.Name); err == nil && fi.IsDir() {
+			if err := s.watcher.Add(event.Name); err != nil {
+				logger.Error("Failed to add new directory to watcher:", event.Name, err)
+			} else {
+				logger.Debug("Added new directory to watcher:", event.Name)
+			}
+		}
 	}
 
-	// Resolve the absolute path to handle symlinks properly
-	absPath, err := filepath.Abs(filePath)
-	if err != nil {
-		logger.Error("Failed to resolve absolute path for:", filePath, err)
-		return err
+	// Handle file changes
+	if event.Has(fsnotify.Remove) {
+		logger.Debug("Config removed:", event.Name)
+		return
 	}
 
-	// Check for circular includes
-	if visited[absPath] {
-		// Circular include detected, skip this file
-		return nil
+	fi, err := os.Stat(event.Name)
+	if err != nil {
+		return
 	}
 
-	// Mark this file as visited
-	visited[absPath] = true
-
-	// Set scanning state to true only for the root call (depth 0)
-	var wasScanning bool
-	if depth == 0 {
-		s.scanMutex.Lock()
-		wasScanning = s.scanning
-		s.scanning = true
-		if !wasScanning {
-			// Only publish if status changed from not scanning to scanning
-			s.publishScanningStatus(true)
-		}
-		s.scanMutex.Unlock()
-
-		// Ensure we reset scanning state when done (only for root call)
-		defer func() {
-			s.scanMutex.Lock()
-			s.scanning = false
-			// Publish the completion
-			s.publishScanningStatus(false)
-			s.scanMutex.Unlock()
-		}()
+	if fi.IsDir() {
+		logger.Debug("Directory changed:", event.Name)
+	} else {
+		logger.Debug("File changed:", event.Name)
+		time.Sleep(100 * time.Millisecond) // Allow file write to complete
+		s.scanSingleFile(event.Name)
 	}
+}
+
+// scanSingleFile scans a single config file without recursion
+func (s *Scanner) scanSingleFile(filePath string) error {
+	s.setScanningState(true)
+	defer s.setScanningState(false)
 
-	// Open the file
-	file, err := os.Open(absPath)
+	// Read file content
+	content, err := os.ReadFile(filePath)
 	if err != nil {
 		return err
 	}
-	defer file.Close()
 
-	// Read the entire file content
-	content, err := os.ReadFile(absPath)
-	if err != nil {
-		return err
+	// Execute callbacks
+	s.executeCallbacks(filePath, content)
+
+	return nil
+}
+
+// setScanningState flips the shared scanning flag under scanMutex and, only
+// when the value actually changes, publishes an EventTypeIndexScanning event
+// so subscribers are not notified redundantly on nested/repeated calls.
+func (s *Scanner) setScanningState(scanning bool) {
+	s.scanMutex.Lock()
+	defer s.scanMutex.Unlock()
+
+	if s.scanning != scanning {
+		s.scanning = scanning
+		event.Publish(event.Event{
+			Type: event.EventTypeIndexScanning,
+			Data: scanning,
+		})
 	}
+}
 
-	// Execute all registered callbacks
+// executeCallbacks runs all registered callbacks
+func (s *Scanner) executeCallbacks(filePath string, content []byte) {
 	scanCallbacksMutex.RLock()
+	defer scanCallbacksMutex.RUnlock()
+
 	for _, callback := range scanCallbacks {
-		err := callback(absPath, content)
-		if err != nil {
-			logger.Error("Callback error for file", absPath, ":", err)
-		}
-	}
-	scanCallbacksMutex.RUnlock()
-
-	// Look for include directives to process included files
-	includeMatches := includeRegex.FindAllSubmatch(content, -1)
-
-	for _, match := range includeMatches {
-		if len(match) >= 2 {
-			includePath := string(match[1])
-			// Handle glob patterns in include directives
-			if strings.Contains(includePath, "*") {
-				// If it's a relative path, make it absolute based on nginx config dir
-				if !filepath.IsAbs(includePath) {
-					configDir := filepath.Dir(nginx.GetConfPath())
-					includePath = filepath.Join(configDir, includePath)
-				}
-
-				// Expand the glob pattern
-				matchedFiles, err := filepath.Glob(includePath)
-				if err != nil {
-					logger.Error("Error expanding glob pattern:", includePath, err)
-					continue
-				}
-
-				// Process each matched file
-				for _, matchedFile := range matchedFiles {
-					fileInfo, err := os.Stat(matchedFile)
-					if err == nil && !fileInfo.IsDir() {
-						err = s.scanSingleFileWithDepth(matchedFile, visited, depth+1)
-						if err != nil {
-							logger.Error("Failed to scan included file:", matchedFile, err)
-						}
-					}
-				}
-			} else {
-				// Handle single file include
-				// If it's a relative path, make it absolute based on nginx config dir
-				if !filepath.IsAbs(includePath) {
-					configDir := filepath.Dir(nginx.GetConfPath())
-					includePath = filepath.Join(configDir, includePath)
-				}
-
-				fileInfo, err := os.Stat(includePath)
-				if err == nil && !fileInfo.IsDir() {
-					err = s.scanSingleFileWithDepth(includePath, visited, depth+1)
-					if err != nil {
-						logger.Error("Failed to scan included file:", includePath, err)
-					}
-				}
-			}
+		if err := callback(filePath, content); err != nil {
+			logger.Error("Callback error for", filePath, ":", err)
 		}
 	}
-
-	return nil
 }
 
-// ScanAllConfigs scans all nginx config files and executes all registered callbacks
+// ScanAllConfigs scans all nginx configuration files
 func (s *Scanner) ScanAllConfigs() error {
-	// Set scanning state to true
-	s.scanMutex.Lock()
-	wasScanning := s.scanning
-	s.scanning = true
-	if !wasScanning {
-		// Only publish if status changed from not scanning to scanning
-		s.publishScanningStatus(true)
-	}
-	s.scanMutex.Unlock()
-
-	// Ensure we reset scanning state when done
-	defer func() {
-		s.scanMutex.Lock()
-		s.scanning = false
-		// Publish the completion
-		s.publishScanningStatus(false)
-		s.scanMutex.Unlock()
-	}()
-
-	// Get the main config file
-	mainConfigPath := nginx.GetConfEntryPath()
-	err := s.scanSingleFile(mainConfigPath)
-	if err != nil {
-		logger.Error("Failed to scan main config:", err)
-	}
+	s.setScanningState(true)
+	defer s.setScanningState(false)
 
-	// Scan sites-available directory
-	sitesAvailablePath := nginx.GetConfPath("sites-available", "")
-	sitesAvailableFiles, err := os.ReadDir(sitesAvailablePath)
-	if err == nil {
-		for _, file := range sitesAvailableFiles {
-			if !file.IsDir() {
-				configPath := filepath.Join(sitesAvailablePath, file.Name())
-				err := s.scanSingleFile(configPath)
-				if err != nil {
-					logger.Error("Failed to scan config:", configPath, err)
-				}
-			}
+	root := nginx.GetConfPath()
+	sslDir := nginx.GetConfPath("ssl")
+
+	// Scan all files in the config directory and subdirectories
+	return filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
+		if err != nil {
+			return err
 		}
-	}
 
-	// Scan streams-available directory if it exists
-	streamAvailablePath := nginx.GetConfPath("streams-available", "")
-	streamAvailableFiles, err := os.ReadDir(streamAvailablePath)
-	if err == nil {
-		for _, file := range streamAvailableFiles {
-			if !file.IsDir() {
-				configPath := filepath.Join(streamAvailablePath, file.Name())
-				err := s.scanSingleFile(configPath)
-				if err != nil {
-					logger.Error("Failed to scan stream config:", configPath, err)
-				}
+		// Skip ssl directory
+		if d.IsDir() && path == sslDir {
+			return filepath.SkipDir
+		}
+
+		// Only process regular files
+		if !d.IsDir() {
+			if err := s.scanSingleFile(path); err != nil {
+				logger.Error("Failed to scan config:", path, err)
 			}
 		}
-	}
 
-	return nil
+		return nil
+	})
 }
 
-// Shutdown cleans up resources used by the scanner
+// Shutdown cleans up scanner resources
 func (s *Scanner) Shutdown() {
 	if s.watcher != nil {
 		s.watcher.Close()
 	}
-
 	if s.scanTicker != nil {
 		s.scanTicker.Stop()
 	}
 }
 
-// IsScanningInProgress returns whether a scan is currently in progress
+// IsScanningInProgress returns whether a scan is currently running
 func IsScanningInProgress() bool {
 	s := GetScanner()
 	s.scanMutex.RLock()
 	defer s.scanMutex.RUnlock()
 	return s.scanning
 }
-
-// WithContext sets a context for the scanner that will be used to control its lifecycle
-func (s *Scanner) WithContext(ctx context.Context) *Scanner {
-	// Create a context with cancel if not already done in Initialize
-	if s.ctx == nil {
-		s.ctx = ctx
-	}
-	return s
-}

+ 499 - 0
internal/cache/search.go

@@ -0,0 +1,499 @@
+package cache
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/blevesearch/bleve/v2"
+	"github.com/blevesearch/bleve/v2/analysis/lang/en"
+	"github.com/blevesearch/bleve/v2/mapping"
+	"github.com/blevesearch/bleve/v2/search/query"
+	"github.com/uozi-tech/cosy/logger"
+)
+
+// SearchDocument represents a document in the search index.
+// One document is created per scanned config file; ID and Path both
+// hold the file path (see handleConfigScan).
+type SearchDocument struct {
+	ID        string    `json:"id"`
+	Type      string    `json:"type"`    // "site", "stream", or "config"
+	Name      string    `json:"name"`    // extracted from filename
+	Path      string    `json:"path"`    // file path
+	Content   string    `json:"content"` // file content
+	UpdatedAt time.Time `json:"updated_at"`
+}
+
+// SearchResult represents a search result: a matched document and its
+// Bleve relevance score.
+type SearchResult struct {
+	Document SearchDocument `json:"document"`
+	Score    float64        `json:"score"`
+}
+
+// SearchIndexer manages the Bleve search index.
+// indexMutex guards index against concurrent Close/Rebuild; indexPath is a
+// per-process temporary directory (see GetSearchIndexer) removed on cleanup.
+type SearchIndexer struct {
+	index       bleve.Index
+	indexPath   string
+	indexMutex  sync.RWMutex
+	ctx         context.Context
+	cancel      context.CancelFunc
+	cleanupOnce sync.Once
+}
+
+// Package-level singleton, created lazily by GetSearchIndexer.
+var (
+	searchIndexer     *SearchIndexer
+	searchIndexerOnce sync.Once
+)
+
+// GetSearchIndexer returns the singleton search indexer instance.
+// The index lives in a freshly created temp directory, so it is rebuilt from
+// scratch on every process start and never persists across restarts.
+// NOTE(review): logger.Fatalf inside the once-func terminates the process if
+// the temp directory cannot be created.
+func GetSearchIndexer() *SearchIndexer {
+	searchIndexerOnce.Do(func() {
+		// Create a temporary directory for the index
+		tempDir, err := os.MkdirTemp("", "nginx-ui-search-index-*")
+		if err != nil {
+			logger.Fatalf("Failed to create temp directory for search index: %v", err)
+		}
+
+		searchIndexer = &SearchIndexer{
+			indexPath: tempDir,
+		}
+	})
+	return searchIndexer
+}
+
+// InitSearchIndex initializes the singleton search index with the given
+// lifecycle context (see SearchIndexer.Initialize).
+func InitSearchIndex(ctx context.Context) error {
+	indexer := GetSearchIndexer()
+	return indexer.Initialize(ctx)
+}
+
+// Initialize sets up the Bleve search index under the indexer's temp
+// directory, registers the scan callback that feeds documents into the index,
+// and starts a goroutine that cleans everything up when ctx is cancelled.
+func (si *SearchIndexer) Initialize(ctx context.Context) error {
+	si.indexMutex.Lock()
+	defer si.indexMutex.Unlock()
+
+	// Create a derived context for cleanup
+	si.ctx, si.cancel = context.WithCancel(ctx)
+
+	// Check if context is cancelled
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+
+	// Try to open existing index, create new if it fails.
+	// Since indexPath is a brand-new temp directory, Open normally fails on
+	// first use and a fresh index is created below.
+	var err error
+	si.index, err = bleve.Open(si.indexPath)
+	if err != nil {
+		// Check context again before creating new index
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		logger.Info("Creating new search index at:", si.indexPath)
+		si.index, err = bleve.New(si.indexPath, si.createIndexMapping())
+		if err != nil {
+			return fmt.Errorf("failed to create search index: %w", err)
+		}
+	}
+
+	// Register callback for config scanning so every scanned file is indexed
+	RegisterCallback(si.handleConfigScan)
+
+	// Start cleanup goroutine
+	go si.watchContext()
+
+	logger.Info("Search index initialized successfully")
+	return nil
+}
+
+// watchContext monitors the context and cleans up when it's cancelled.
+// Runs in its own goroutine (started by Initialize).
+func (si *SearchIndexer) watchContext() {
+	<-si.ctx.Done()
+	si.cleanup()
+}
+
+// cleanup closes the index and removes the temporary directory.
+// Guarded by cleanupOnce so it is idempotent: it may be reached from both
+// watchContext (context cancellation) and Close without double-freeing.
+func (si *SearchIndexer) cleanup() {
+	si.cleanupOnce.Do(func() {
+		logger.Info("Cleaning up search index...")
+
+		si.indexMutex.Lock()
+		defer si.indexMutex.Unlock()
+
+		if si.index != nil {
+			si.index.Close()
+			si.index = nil
+		}
+
+		// Remove the temporary directory
+		if err := os.RemoveAll(si.indexPath); err != nil {
+			logger.Error("Failed to remove search index directory:", err)
+		} else {
+			logger.Info("Search index directory removed successfully")
+		}
+	})
+}
+
+// createIndexMapping creates the mapping for the search index:
+// exact-match keyword fields for id/type/path, English-analyzed text fields
+// for name/content, and a date field for updated_at. All fields are both
+// stored (retrievable from hits) and indexed (searchable).
+func (si *SearchIndexer) createIndexMapping() mapping.IndexMapping {
+	docMapping := bleve.NewDocumentMapping()
+
+	// Text fields with standard analyzer
+	textField := bleve.NewTextFieldMapping()
+	textField.Analyzer = en.AnalyzerName
+	textField.Store = true
+	textField.Index = true
+
+	// Keyword fields for exact match
+	keywordField := bleve.NewKeywordFieldMapping()
+	keywordField.Store = true
+	keywordField.Index = true
+
+	// Date field
+	dateField := bleve.NewDateTimeFieldMapping()
+	dateField.Store = true
+	dateField.Index = true
+
+	// Map fields to types
+	fieldMappings := map[string]*mapping.FieldMapping{
+		"id":         keywordField,
+		"type":       keywordField,
+		"path":       keywordField,
+		"name":       textField,
+		"content":    textField,
+		"updated_at": dateField,
+	}
+
+	for field, fieldMapping := range fieldMappings {
+		docMapping.AddFieldMappingsAt(field, fieldMapping)
+	}
+
+	indexMapping := bleve.NewIndexMapping()
+	indexMapping.DefaultMapping = docMapping
+	indexMapping.DefaultAnalyzer = en.AnalyzerName
+
+	return indexMapping
+}
+
+// handleConfigScan processes scanned config files and indexes them.
+// Registered as a scan callback in Initialize; called once per scanned file
+// with its full content.
+func (si *SearchIndexer) handleConfigScan(configPath string, content []byte) error {
+	docType := si.determineConfigType(configPath)
+	// NOTE(review): determineConfigType never returns "" (its default case
+	// returns "config"), so this skip branch is currently unreachable.
+	if docType == "" {
+		return nil // Skip unsupported file types
+	}
+
+	doc := SearchDocument{
+		ID:        configPath,
+		Type:      docType,
+		Name:      filepath.Base(configPath),
+		Path:      configPath,
+		Content:   string(content),
+		UpdatedAt: time.Now(),
+	}
+	return si.IndexDocument(doc)
+}
+
+// determineConfigType determines the type of config file based on path:
+// "site" for sites-available/sites-enabled, "stream" for
+// streams-available/streams-enabled, and "config" for everything else.
+func (si *SearchIndexer) determineConfigType(configPath string) string {
+	normalizedPath := filepath.ToSlash(configPath)
+
+	switch {
+	case strings.Contains(normalizedPath, "sites-available") || strings.Contains(normalizedPath, "sites-enabled"):
+		return "site"
+	case strings.Contains(normalizedPath, "streams-available") || strings.Contains(normalizedPath, "streams-enabled"):
+		return "stream"
+	default:
+		return "config"
+	}
+}
+
+// IndexDocument indexes a single document, upserting it under doc.ID.
+// Only RLock is taken: the read lock protects si.index from being closed or
+// swapped concurrently (cleanup/RebuildIndex take the write lock), while
+// bleve is assumed to handle its own write concurrency — TODO confirm.
+func (si *SearchIndexer) IndexDocument(doc SearchDocument) error {
+	si.indexMutex.RLock()
+	defer si.indexMutex.RUnlock()
+
+	if si.index == nil {
+		return fmt.Errorf("search index not initialized")
+	}
+
+	logger.Debugf("Indexing document: ID=%s, Type=%s, Name=%s, Path=%s",
+		doc.ID, doc.Type, doc.Name, doc.Path)
+
+	return si.index.Index(doc.ID, doc)
+}
+
+// Search performs a search query across all document types.
+func (si *SearchIndexer) Search(ctx context.Context, queryStr string, limit int) ([]SearchResult, error) {
+	return si.searchWithType(ctx, queryStr, "", limit)
+}
+
+// SearchByType performs a search filtered by document type
+// ("site", "stream", or "config").
+func (si *SearchIndexer) SearchByType(ctx context.Context, queryStr string, docType string, limit int) ([]SearchResult, error) {
+	return si.searchWithType(ctx, queryStr, docType, limit)
+}
+
+// searchWithType performs the actual search with optional type filtering.
+// A non-positive limit falls back to 500. The Bleve search runs in its own
+// goroutine so the caller can be unblocked by ctx cancellation.
+func (si *SearchIndexer) searchWithType(ctx context.Context, queryStr string, docType string, limit int) ([]SearchResult, error) {
+	si.indexMutex.RLock()
+	defer si.indexMutex.RUnlock()
+
+	// Check if context is cancelled
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	default:
+	}
+
+	if si.index == nil {
+		return nil, fmt.Errorf("search index not initialized")
+	}
+
+	if limit <= 0 {
+		limit = 500 // Increase default limit to handle more results
+	}
+
+	// Note: this local shadows the imported bleve "query" package in this scope
+	query := si.buildQuery(queryStr, docType)
+	searchRequest := bleve.NewSearchRequest(query)
+	searchRequest.Size = limit
+	searchRequest.Fields = []string{"*"}
+
+	// Use a channel to handle search with context cancellation
+	type searchResult struct {
+		result *bleve.SearchResult
+		err    error
+	}
+
+	// Buffer of 1 lets the search goroutine complete and exit even if the
+	// caller has already returned on ctx.Done() — no goroutine leak.
+	resultChan := make(chan searchResult, 1)
+	go func() {
+		result, err := si.index.Search(searchRequest)
+		resultChan <- searchResult{result: result, err: err}
+	}()
+
+	// Wait for search result or context cancellation
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case res := <-resultChan:
+		if res.err != nil {
+			return nil, fmt.Errorf("search execution failed: %w", res.err)
+		}
+		results := si.convertResults(res.result)
+
+		// Debug log the search execution
+		logger.Debugf("Search index query '%s' (type: %s, limit: %d) returned %d results",
+			queryStr, docType, limit, len(results))
+
+		return results, nil
+	}
+}
+
+// buildQuery builds a search query with optional type filtering.
+// Per searched field (name, content) it OR-combines four sub-queries with
+// descending boosts: exact match (3.0), prefix (2.0), wildcard (1.5) and
+// fuzzy within edit distance 1 (1.0). When docType is empty the text query
+// is returned directly and the boolean wrapper built here is discarded.
+func (si *SearchIndexer) buildQuery(queryStr string, docType string) query.Query {
+	mainQuery := bleve.NewBooleanQuery()
+
+	// Add type filter if specified
+	if docType != "" {
+		typeQuery := bleve.NewTermQuery(docType)
+		typeQuery.SetField("type")
+		mainQuery.AddMust(typeQuery)
+	}
+
+	// Add text search across name and content fields only
+	textQuery := bleve.NewBooleanQuery()
+	searchFields := []string{"name", "content"}
+
+	for _, field := range searchFields {
+		// Create a boolean query for this field to combine multiple query types
+		fieldQuery := bleve.NewBooleanQuery()
+
+		// 1. Exact match query (highest priority)
+		matchQuery := bleve.NewMatchQuery(queryStr)
+		matchQuery.SetField(field)
+		matchQuery.SetBoost(3.0) // Higher boost for exact matches
+		fieldQuery.AddShould(matchQuery)
+
+		// 2. Prefix query for partial matches (e.g., "access" matches "access_log")
+		prefixQuery := bleve.NewPrefixQuery(queryStr)
+		prefixQuery.SetField(field)
+		prefixQuery.SetBoost(2.0) // Medium boost for prefix matches
+		fieldQuery.AddShould(prefixQuery)
+
+		// 3. Wildcard query for more flexible matching
+		wildcardQuery := bleve.NewWildcardQuery("*" + queryStr + "*")
+		wildcardQuery.SetField(field)
+		wildcardQuery.SetBoost(1.5) // Lower boost for wildcard matches
+		fieldQuery.AddShould(wildcardQuery)
+
+		// 4. Fuzzy match query (allows 1 character difference)
+		fuzzyQuery := bleve.NewFuzzyQuery(queryStr)
+		fuzzyQuery.SetField(field)
+		fuzzyQuery.SetFuzziness(1)
+		fuzzyQuery.SetBoost(1.0) // Lowest boost for fuzzy matches
+		fieldQuery.AddShould(fuzzyQuery)
+
+		textQuery.AddShould(fieldQuery)
+	}
+
+	// With a type filter, require both the filter and the text match;
+	// without one, the bare text query suffices.
+	if docType != "" {
+		mainQuery.AddMust(textQuery)
+	} else {
+		return textQuery
+	}
+
+	return mainQuery
+}
+
+// convertResults converts Bleve search results to our SearchResult format,
+// pulling the stored fields back out of each hit.
+func (si *SearchIndexer) convertResults(searchResult *bleve.SearchResult) []SearchResult {
+	results := make([]SearchResult, 0, len(searchResult.Hits))
+
+	for _, hit := range searchResult.Hits {
+		doc := SearchDocument{
+			ID:      si.getStringField(hit.Fields, "id"),
+			Type:    si.getStringField(hit.Fields, "type"),
+			Name:    si.getStringField(hit.Fields, "name"),
+			Path:    si.getStringField(hit.Fields, "path"),
+			Content: si.getStringField(hit.Fields, "content"),
+		}
+
+		// Parse updated_at if present; assumes the stored date comes back in
+		// RFC3339 form — TODO confirm against bleve's date field storage.
+		// Parse failures silently leave UpdatedAt at its zero value.
+		if updatedAtStr := si.getStringField(hit.Fields, "updated_at"); updatedAtStr != "" {
+			if updatedAt, err := time.Parse(time.RFC3339, updatedAtStr); err == nil {
+				doc.UpdatedAt = updatedAt
+			}
+		}
+
+		results = append(results, SearchResult{
+			Document: doc,
+			Score:    hit.Score,
+		})
+	}
+
+	return results
+}
+
+// getStringField safely gets a string field from search results, returning
+// "" when the field is absent or not a string.
+func (si *SearchIndexer) getStringField(fields map[string]interface{}, fieldName string) string {
+	if value, ok := fields[fieldName]; ok {
+		if str, ok := value.(string); ok {
+			return str
+		}
+	}
+	return ""
+}
+
+// DeleteDocument removes a document from the index by its ID (the file
+// path, as used by handleConfigScan). Takes the read lock for the same
+// reason as IndexDocument: it only guards against concurrent Close/rebuild.
+func (si *SearchIndexer) DeleteDocument(docID string) error {
+	si.indexMutex.RLock()
+	defer si.indexMutex.RUnlock()
+
+	if si.index == nil {
+		return fmt.Errorf("search index not initialized")
+	}
+
+	return si.index.Delete(docID)
+}
+
+// RebuildIndex rebuilds the entire search index: closes and deletes the old
+// on-disk index, then creates a fresh empty one with the same mapping.
+// NOTE(review): no documents are re-indexed here, so the index stays empty
+// until the next config scan feeds it via the registered callback.
+func (si *SearchIndexer) RebuildIndex(ctx context.Context) error {
+	si.indexMutex.Lock()
+	defer si.indexMutex.Unlock()
+
+	// Check if context is cancelled
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+
+	// Close error is intentionally ignored; the directory is removed next.
+	if si.index != nil {
+		si.index.Close()
+	}
+
+	// Check context before removing old index
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+
+	// Remove old index
+	if err := os.RemoveAll(si.indexPath); err != nil {
+		logger.Error("Failed to remove old index:", err)
+	}
+
+	// Check context before creating new index
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+
+	// Create new index
+	var err error
+	si.index, err = bleve.New(si.indexPath, si.createIndexMapping())
+	if err != nil {
+		return fmt.Errorf("failed to create new index: %w", err)
+	}
+
+	logger.Info("Search index rebuilt successfully")
+	return nil
+}
+
+// GetIndexStats returns statistics about the search index: the indexed
+// document count and the on-disk index path.
+func (si *SearchIndexer) GetIndexStats() (map[string]interface{}, error) {
+	si.indexMutex.RLock()
+	defer si.indexMutex.RUnlock()
+
+	if si.index == nil {
+		return nil, fmt.Errorf("search index not initialized")
+	}
+
+	docCount, err := si.index.DocCount()
+	if err != nil {
+		return nil, err
+	}
+
+	return map[string]interface{}{
+		"document_count": docCount,
+		"index_path":     si.indexPath,
+	}, nil
+}
+
+// Close closes the search index and triggers cleanup. Cancelling the context
+// also wakes watchContext; cleanup itself is idempotent via cleanupOnce, so
+// the direct call here is safe even if both paths race.
+func (si *SearchIndexer) Close() error {
+	if si.cancel != nil {
+		si.cancel()
+	}
+
+	si.cleanup()
+	return nil
+}
+
+// Convenience functions for different search types, all delegating to the
+// singleton indexer.
+
+// SearchSites searches only site configurations (type "site").
+func SearchSites(ctx context.Context, query string, limit int) ([]SearchResult, error) {
+	return GetSearchIndexer().SearchByType(ctx, query, "site", limit)
+}
+
+// SearchStreams searches only stream configurations (type "stream").
+func SearchStreams(ctx context.Context, query string, limit int) ([]SearchResult, error) {
+	return GetSearchIndexer().SearchByType(ctx, query, "stream", limit)
+}
+
+// SearchConfigs searches only general configurations (type "config").
+func SearchConfigs(ctx context.Context, query string, limit int) ([]SearchResult, error) {
+	return GetSearchIndexer().SearchByType(ctx, query, "config", limit)
+}
+
+// SearchAll searches across all configuration types without a type filter.
+func SearchAll(ctx context.Context, query string, limit int) ([]SearchResult, error) {
+	return GetSearchIndexer().Search(ctx, query, limit)
+}

+ 1 - 1
internal/config/errors.go

@@ -8,7 +8,7 @@ var (
 	ErrDstFileExists                  = e.New(50007, "destination file: {0} already exists")
 	ErrNginxTestFailed                = e.New(50008, "nginx test failed: {0}")
 	ErrNginxReloadFailed              = e.New(50009, "nginx reload failed: {0}")
-	ErrCannotDeleteProtectedPath      = e.New(50010, "cannot delete protected path: {0}")
+	ErrCannotDeleteProtectedPath      = e.New(50010, "cannot delete protected path")
 	ErrFileNotFound                   = e.New(50011, "file or directory not found: {0}")
 	ErrDeletePathNotUnderNginxConfDir = e.New(50012, "you are not allowed to delete a file outside of the nginx config path")
 )

+ 307 - 0
internal/config/generic_list.go

@@ -0,0 +1,307 @@
+package config
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/0xJacky/Nginx-UI/internal/cache"
+	"github.com/0xJacky/Nginx-UI/internal/nginx"
+	"github.com/0xJacky/Nginx-UI/model"
+	"github.com/samber/lo"
+	"github.com/uozi-tech/cosy/logger"
+)
+
+// GenericListOptions represents the options for listing configurations.
+type GenericListOptions struct {
+	Search      string // fuzzy-search query; empty means no search filter
+	Status      string // filter by ConfigStatus; empty means all statuses
+	OrderBy     string
+	Sort        string
+	EnvGroupID  uint64 // filter by environment group; 0 means all groups
+	IncludeDirs bool   // Whether to include directories in the results, default is false (filter out directories)
+}
+
+// ConfigEntity represents a generic configuration entity interface,
+// implemented by database-backed records (e.g. sites) so the generic list
+// code can read their path and environment-group association.
+type ConfigEntity interface {
+	GetPath() string
+	GetEnvGroupID() uint64
+	GetEnvGroup() *model.EnvGroup
+}
+
+// ConfigPaths holds the directory paths for available and enabled
+// configurations (relative to the nginx config root).
+type ConfigPaths struct {
+	AvailableDir string
+	EnabledDir   string
+}
+
+// StatusMapBuilder is a function type for building status maps with custom logic.
+// It maps each available config filename to its ConfigStatus.
+type StatusMapBuilder func(configFiles, enabledConfig []os.DirEntry) map[string]ConfigStatus
+
+// ConfigBuilder is a function type for building Config objects with custom logic.
+type ConfigBuilder func(fileName string, fileInfo os.FileInfo, status ConfigStatus, envGroupID uint64, envGroup *model.EnvGroup) Config
+
+// FilterMatcher is a function type for custom filtering logic; returning
+// false excludes the file from the listing.
+type FilterMatcher func(fileName string, status ConfigStatus, envGroupID uint64, options *GenericListOptions) bool
+
+// GenericConfigProcessor holds all the custom functions for processing
+// configurations; each config kind (site, stream, plain config) supplies
+// its own set.
+type GenericConfigProcessor struct {
+	Paths            ConfigPaths
+	StatusMapBuilder StatusMapBuilder
+	ConfigBuilder    ConfigBuilder
+	FilterMatcher    FilterMatcher
+}
+
+// GetGenericConfigs is a unified function for retrieving and processing
+// configurations. It lists the available directory, derives enabled/disabled
+// status from the enabled directory, joins in env-group data from the given
+// entities (matched by base filename), applies the processor's filters plus
+// an optional fuzzy-search filter, and returns the sorted Config list.
+func GetGenericConfigs[T ConfigEntity](
+	ctx context.Context,
+	options *GenericListOptions,
+	entities []T,
+	processor *GenericConfigProcessor,
+) ([]Config, error) {
+	// Read configuration directories
+	configFiles, err := os.ReadDir(nginx.GetConfPath(processor.Paths.AvailableDir))
+	if err != nil {
+		return nil, err
+	}
+
+	enabledConfig, err := os.ReadDir(nginx.GetConfPath(processor.Paths.EnabledDir))
+	if err != nil {
+		return nil, err
+	}
+
+	// Build configuration status map using custom logic
+	statusMap := processor.StatusMapBuilder(configFiles, enabledConfig)
+
+	// Create entities map for quick lookup, keyed by base filename
+	entitiesMap := lo.SliceToMap(entities, func(item T) (string, T) {
+		return filepath.Base(item.GetPath()), item
+	})
+
+	// If fuzzy search is enabled, use search index to filter files.
+	// hasSearchResults distinguishes "index answered" (use its file list)
+	// from "index failed" (fall back to substring matching below).
+	var searchFilteredFiles []string
+	var hasSearchResults bool
+	if options.Search != "" {
+		logger.Debugf("Starting fuzzy search for query '%s' in directory '%s'", options.Search, processor.Paths.AvailableDir)
+		searchFilteredFiles, err = performFuzzySearch(ctx, options.Search, processor.Paths.AvailableDir)
+		if err != nil {
+			// Fallback to original behavior if search fails
+			logger.Debugf("Fuzzy search failed, falling back to simple string matching: %v", err)
+			searchFilteredFiles = nil
+			hasSearchResults = false
+		} else {
+			hasSearchResults = true
+			logger.Debugf("Fuzzy search completed, found %d matching files", len(searchFilteredFiles))
+		}
+	}
+
+	// Process and filter configurations
+	var configs []Config
+	for _, file := range configFiles {
+		if file.IsDir() && !options.IncludeDirs {
+			continue
+		}
+
+		// Unreadable entries are silently skipped
+		fileInfo, err := file.Info()
+		if err != nil {
+			continue
+		}
+
+		fileName := file.Name()
+		status := statusMap[fileName]
+
+		// Get environment group info from database
+		var envGroupID uint64
+		var envGroup *model.EnvGroup
+		if entity, ok := entitiesMap[fileName]; ok {
+			envGroupID = entity.GetEnvGroupID()
+			envGroup = entity.GetEnvGroup()
+		}
+
+		// Apply filters using custom logic
+		if !processor.FilterMatcher(fileName, status, envGroupID, options) {
+			continue
+		}
+
+		// Apply fuzzy search filter if enabled
+		if hasSearchResults {
+			// Check if the file is in the search results
+			if !contains(searchFilteredFiles, fileName) {
+				// For directories, perform simple string matching since they are not indexed
+				if fileInfo.IsDir() {
+					// Only include directories if IncludeDirs is true and they match the search
+					if options.IncludeDirs {
+						// Perform case-insensitive substring matching for directories
+						if !strings.Contains(strings.ToLower(fileName), strings.ToLower(options.Search)) {
+							continue
+						}
+					} else {
+						// Directories should have been filtered out earlier, but skip just in case
+						continue
+					}
+				} else {
+					// For regular files, if they're not in the search results, skip them
+					continue
+				}
+			}
+		} else if options.Search != "" {
+			// Fallback to simple string matching if search index failed or returned no results
+			if !strings.Contains(strings.ToLower(fileName), strings.ToLower(options.Search)) {
+				continue
+			}
+		}
+
+		// Build configuration using custom logic
+		configs = append(configs, processor.ConfigBuilder(fileName, fileInfo, status, envGroupID, envGroup))
+	}
+
+	// Sort and return
+	sortedConfigs := Sort(options.OrderBy, options.Sort, configs)
+
+	// Debug log the final results
+	if options.Search != "" {
+		logger.Debugf("Final search results for query '%s': returning %d configs out of %d total files",
+			options.Search, len(sortedConfigs), len(configFiles))
+	}
+
+	return sortedConfigs, nil
+}
+
+// performFuzzySearch performs fuzzy search using the search index and
+// returns the base filenames of all matching documents. The document type
+// filter is derived from the directory being listed.
+func performFuzzySearch(ctx context.Context, query, availableDir string) ([]string, error) {
+	// Determine search type based on directory
+	var searchType string
+	switch {
+	case strings.Contains(availableDir, "sites"):
+		searchType = "site"
+	case strings.Contains(availableDir, "streams"):
+		searchType = "stream"
+	default:
+		searchType = "config"
+	}
+
+	// Use a larger limit to ensure we get all matching results
+	// Since we're filtering by filename, we want to get all possible matches
+	// Set a reasonable upper limit to prevent performance issues
+	searchLimit := 5000
+
+	// searchType is always non-empty (the switch defaults to "config"), so
+	// the type-filtered search can be used unconditionally.
+	results, err := cache.GetSearchIndexer().SearchByType(ctx, query, searchType, searchLimit)
+	if err != nil {
+		return nil, err
+	}
+
+	// Extract filenames from search results
+	filenames := make([]string, 0, len(results))
+	for _, result := range results {
+		filenames = append(filenames, filepath.Base(result.Document.Path))
+	}
+
+	// Debug log the search results
+	logger.Debugf("Search engine returned files for query '%s' in dir '%s': %v (total: %d)",
+		query, availableDir, filenames, len(filenames))
+
+	return filenames, nil
+}
+
+// contains checks if a string slice contains a specific string.
+// NOTE(review): slices.Contains (Go 1.21+) could replace this helper if the
+// module's Go version permits.
+func contains(slice []string, item string) bool {
+	for _, s := range slice {
+		if s == item {
+			return true
+		}
+	}
+	return false
+}
+
+// DefaultStatusMapBuilder provides the basic status map building logic:
+// every available file starts as disabled, then entries whose symlink
+// appears in the enabled directory are marked enabled.
+func DefaultStatusMapBuilder(configFiles, enabledConfig []os.DirEntry) map[string]ConfigStatus {
+	statusMap := make(map[string]ConfigStatus)
+
+	// Initialize all as disabled
+	for _, file := range configFiles {
+		statusMap[file.Name()] = StatusDisabled
+	}
+
+	// Update enabled status, resolving the symlink name back to the
+	// available-config name
+	for _, enabledFile := range enabledConfig {
+		name := nginx.GetConfNameBySymlinkName(enabledFile.Name())
+		statusMap[name] = StatusEnabled
+	}
+
+	return statusMap
+}
+
+// SiteStatusMapBuilder provides status map building logic with maintenance
+// support: enabled entries carrying maintenanceSuffix mark the original site
+// as StatusMaintenance instead of StatusEnabled.
+func SiteStatusMapBuilder(maintenanceSuffix string) StatusMapBuilder {
+	return func(configFiles, enabledConfig []os.DirEntry) map[string]ConfigStatus {
+		statusMap := make(map[string]ConfigStatus)
+
+		// Initialize all as disabled
+		for _, file := range configFiles {
+			statusMap[file.Name()] = StatusDisabled
+		}
+
+		// Update enabled and maintenance status
+		for _, enabledSite := range enabledConfig {
+			name := enabledSite.Name()
+			if strings.HasSuffix(name, maintenanceSuffix) {
+				originalName := strings.TrimSuffix(name, maintenanceSuffix)
+				statusMap[originalName] = StatusMaintenance
+			} else {
+				statusMap[nginx.GetConfNameBySymlinkName(name)] = StatusEnabled
+			}
+		}
+
+		return statusMap
+	}
+}
+
+// DefaultFilterMatcher provides the standard filtering logic without name
+// search. Name/search filtering is handled by the fuzzy search in
+// GetGenericConfigs; this matcher only applies the status and env-group
+// filters (empty/zero option values disable the corresponding filter).
+func DefaultFilterMatcher(fileName string, status ConfigStatus, envGroupID uint64, options *GenericListOptions) bool {
+	if options.Status != "" && status != ConfigStatus(options.Status) {
+		return false
+	}
+	if options.EnvGroupID != 0 && envGroupID != options.EnvGroupID {
+		return false
+	}
+	return true
+}
+
+// FuzzyFilterMatcher provides filtering logic with fuzzy search support.
+// It applies exactly the same status and env-group filters as
+// DefaultFilterMatcher (name filtering happens in GetGenericConfigs), so it
+// delegates to it rather than duplicating the logic.
+func FuzzyFilterMatcher(fileName string, status ConfigStatus, envGroupID uint64, options *GenericListOptions) bool {
+	return DefaultFilterMatcher(fileName, status, envGroupID, options)
+}
+
+// DefaultConfigBuilder provides basic config building logic, copying file
+// metadata, status, and env-group association into a Config value.
+func DefaultConfigBuilder(fileName string, fileInfo os.FileInfo, status ConfigStatus, envGroupID uint64, envGroup *model.EnvGroup) Config {
+	return Config{
+		Name:       fileName,
+		ModifiedAt: fileInfo.ModTime(),
+		Size:       fileInfo.Size(),
+		IsDir:      fileInfo.IsDir(),
+		Status:     status,
+		EnvGroupID: envGroupID,
+		EnvGroup:   envGroup,
+	}
+}

+ 71 - 0
internal/site/list.go

@@ -0,0 +1,71 @@
+package site
+
+import (
+	"context"
+	"os"
+
+	"github.com/0xJacky/Nginx-UI/internal/config"
+	"github.com/0xJacky/Nginx-UI/model"
+)
+
+// ListOptions represents the options for listing sites.
+type ListOptions struct {
+	Search     string // search keyword; empty means no name filtering
+	Status     string // filter by config status; empty means all statuses
+	OrderBy    string // field to sort by
+	Sort       string // sort direction, e.g. "asc" or "desc"
+	EnvGroupID uint64 // filter by environment group; 0 means all groups
+}
+
+// GetSiteConfigs retrieves and processes site configurations with database integration
+func GetSiteConfigs(ctx context.Context, options *ListOptions, sites []*model.Site) ([]config.Config, error) {
+	// Map the site-specific list options onto the generic config options.
+	// Directories are excluded: a site configuration is always a single file.
+	opts := &config.GenericListOptions{
+		Search:      options.Search,
+		Status:      options.Status,
+		OrderBy:     options.OrderBy,
+		Sort:        options.Sort,
+		EnvGroupID:  options.EnvGroupID,
+		IncludeDirs: false,
+	}
+
+	// Wire up a processor with site-specific behaviour: the
+	// sites-available/sites-enabled directory pair, a status mapper aware of
+	// the maintenance suffix, and the site-aware config builder.
+	proc := &config.GenericConfigProcessor{
+		Paths: config.ConfigPaths{
+			AvailableDir: "sites-available",
+			EnabledDir:   "sites-enabled",
+		},
+		StatusMapBuilder: config.SiteStatusMapBuilder(MaintenanceSuffix),
+		ConfigBuilder:    buildConfig,
+		FilterMatcher:    config.DefaultFilterMatcher,
+	}
+
+	return config.GetGenericConfigs(ctx, opts, sites, proc)
+}
+
+// buildConfig creates a config.Config from file information with site-specific data
+func buildConfig(fileName string, fileInfo os.FileInfo, status config.ConfigStatus, envGroupID uint64, envGroup *model.EnvGroup) config.Config {
+	indexedSite := GetIndexedSite(fileName)
+
+	// Translate the indexer's proxy targets into the config package's type.
+	targets := make([]config.ProxyTarget, 0, len(indexedSite.ProxyTargets))
+	for _, t := range indexedSite.ProxyTargets {
+		targets = append(targets, config.ProxyTarget{
+			Host: t.Host,
+			Port: t.Port,
+			Type: t.Type,
+		})
+	}
+
+	return config.Config{
+		Name:         fileName,
+		ModifiedAt:   fileInfo.ModTime(),
+		Size:         fileInfo.Size(),
+		IsDir:        fileInfo.IsDir(),
+		Status:       status,
+		EnvGroupID:   envGroupID,
+		EnvGroup:     envGroup,
+		Urls:         indexedSite.Urls,
+		ProxyTargets: targets,
+	}
+}

+ 101 - 0
internal/stream/get.go

@@ -0,0 +1,101 @@
+package stream
+
+import (
+	"os"
+
+	"github.com/0xJacky/Nginx-UI/internal/config"
+	"github.com/0xJacky/Nginx-UI/internal/nginx"
+	"github.com/0xJacky/Nginx-UI/model"
+	"github.com/0xJacky/Nginx-UI/query"
+)
+
+// StreamInfo represents stream information aggregated from the filesystem,
+// the database, and (optionally) the parsed nginx configuration.
+type StreamInfo struct {
+	Path       string              // absolute path under streams-available
+	Status     config.ConfigStatus // enabled if a matching streams-enabled entry exists, otherwise disabled
+	Model      *model.Stream       // database record for this stream (created on first access)
+	FileInfo   os.FileInfo         // stat result for the configuration file
+	RawContent string              // raw file contents
+	NgxConfig  *nginx.NgxConfig    // parsed configuration; nil when the model is in advanced mode
+}
+
+// GetStreamInfo retrieves comprehensive information about a stream: file
+// metadata, enabled/disabled status, the database record, the raw file
+// contents, and — unless the stream is in advanced (raw-edit) mode — the
+// parsed nginx configuration.
+//
+// Returns ErrStreamNotFound when no file exists under streams-available.
+func GetStreamInfo(name string) (*StreamInfo, error) {
+	// Get the absolute path to the stream configuration file
+	path := nginx.GetConfPath("streams-available", name)
+	fileInfo, err := os.Stat(path)
+	if os.IsNotExist(err) {
+		return nil, ErrStreamNotFound
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	// Check if the stream is enabled: presence of the streams-enabled entry
+	// (presumably a symlink; not verified here) marks it enabled.
+	status := config.StatusEnabled
+	if _, err := os.Stat(nginx.GetConfPath("streams-enabled", name)); os.IsNotExist(err) {
+		status = config.StatusDisabled
+	}
+
+	// Retrieve or create stream model from database, keyed by absolute path.
+	s := query.Stream
+	streamModel, err := s.Where(s.Path.Eq(path)).FirstOrCreate()
+	if err != nil {
+		return nil, err
+	}
+
+	// Read raw content
+	rawContent, err := os.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+
+	info := &StreamInfo{
+		Path:       path,
+		Status:     status,
+		Model:      streamModel,
+		FileInfo:   fileInfo,
+		RawContent: string(rawContent),
+	}
+
+	// Parse configuration if not in advanced mode; in advanced mode callers
+	// are expected to work with RawContent directly.
+	if !streamModel.Advanced {
+		nginxConfig, err := nginx.ParseNgxConfig(path)
+		if err != nil {
+			return nil, err
+		}
+		info.NgxConfig = nginxConfig
+	}
+
+	return info, nil
+}
+
+// SaveStreamConfig saves a stream configuration file and keeps the database
+// record in sync. The model's env group and sync-node IDs are updated only
+// when the corresponding argument is provided (envGroupID > 0, syncNodeIDs
+// non-nil); the file itself is then written via Save, which also handles
+// node synchronization and the post action.
+func SaveStreamConfig(name, content string, envGroupID uint64, syncNodeIDs []uint64, overwrite bool, postAction string) error {
+	// Get stream from database or create if not exists
+	path := nginx.GetConfPath("streams-available", name)
+	s := query.Stream
+	streamModel, err := s.Where(s.Path.Eq(path)).FirstOrCreate()
+	if err != nil {
+		return err
+	}
+
+	// Update Node Group ID if provided (0 is treated as "leave unchanged")
+	if envGroupID > 0 {
+		streamModel.EnvGroupID = envGroupID
+	}
+
+	// Update synchronization node IDs if provided; a non-nil empty slice
+	// deliberately clears the list.
+	if syncNodeIDs != nil {
+		streamModel.SyncNodeIDs = syncNodeIDs
+	}
+
+	// Save the updated stream model to database.
+	// NOTE(review): Updates with a struct presumably skips zero-value fields
+	// (GORM semantics) — confirm this is the intended behaviour for clearing
+	// fields such as EnvGroupID.
+	_, err = s.Where(s.ID.Eq(streamModel.ID)).Updates(streamModel)
+	if err != nil {
+		return err
+	}
+
+	// Save the stream configuration file
+	return Save(name, content, overwrite, syncNodeIDs, postAction)
+}

+ 70 - 0
internal/stream/list.go

@@ -0,0 +1,70 @@
+package stream
+
+import (
+	"context"
+	"os"
+
+	"github.com/0xJacky/Nginx-UI/internal/config"
+	"github.com/0xJacky/Nginx-UI/model"
+)
+
+// ListOptions represents the options for listing streams.
+type ListOptions struct {
+	Search     string // search keyword; empty means no name filtering
+	Status     string // filter by config status; empty means all statuses
+	OrderBy    string // field to sort by
+	Sort       string // sort direction, e.g. "asc" or "desc"
+	EnvGroupID uint64 // filter by environment group; 0 means all groups
+}
+
+// GetStreamConfigs retrieves and processes stream configurations with database integration
+func GetStreamConfigs(ctx context.Context, options *ListOptions, streams []*model.Stream) ([]config.Config, error) {
+	// Map the stream-specific list options onto the generic config options.
+	// Directories are excluded: a stream configuration is always a single file.
+	opts := &config.GenericListOptions{
+		Search:      options.Search,
+		Status:      options.Status,
+		OrderBy:     options.OrderBy,
+		Sort:        options.Sort,
+		EnvGroupID:  options.EnvGroupID,
+		IncludeDirs: false,
+	}
+
+	// Wire up a processor with stream-specific behaviour: the
+	// streams-available/streams-enabled directory pair, the default status
+	// mapper (streams have no maintenance mode), and the stream-aware builder.
+	proc := &config.GenericConfigProcessor{
+		Paths: config.ConfigPaths{
+			AvailableDir: "streams-available",
+			EnabledDir:   "streams-enabled",
+		},
+		StatusMapBuilder: config.DefaultStatusMapBuilder,
+		ConfigBuilder:    buildConfig,
+		FilterMatcher:    config.DefaultFilterMatcher,
+	}
+
+	return config.GetGenericConfigs(ctx, opts, streams, proc)
+}
+
+// buildConfig creates a config.Config from file information with stream-specific data
+func buildConfig(fileName string, fileInfo os.FileInfo, status config.ConfigStatus, envGroupID uint64, envGroup *model.EnvGroup) config.Config {
+	indexedStream := GetIndexedStream(fileName)
+
+	// Translate the indexer's proxy targets into the config package's type.
+	targets := make([]config.ProxyTarget, 0, len(indexedStream.ProxyTargets))
+	for _, t := range indexedStream.ProxyTargets {
+		targets = append(targets, config.ProxyTarget{
+			Host: t.Host,
+			Port: t.Port,
+			Type: t.Type,
+		})
+	}
+
+	return config.Config{
+		Name:         fileName,
+		ModifiedAt:   fileInfo.ModTime(),
+		Size:         fileInfo.Size(),
+		IsDir:        fileInfo.IsDir(),
+		Status:       status,
+		EnvGroupID:   envGroupID,
+		EnvGroup:     envGroup,
+		ProxyTargets: targets,
+	}
+}

+ 15 - 0
model/site.go

@@ -8,3 +8,18 @@ type Site struct {
 	EnvGroup    *EnvGroup `json:"env_group,omitempty"`
 	SyncNodeIDs []uint64  `json:"sync_node_ids" gorm:"serializer:json"`
 }
+
+// GetPath implements the ConfigEntity interface by returning the site's
+// configuration file path.
+func (s *Site) GetPath() string {
+	return s.Path
+}
+
+// GetEnvGroupID implements the ConfigEntity interface by returning the ID of
+// the environment group this site belongs to (0 when unassigned).
+func (s *Site) GetEnvGroupID() uint64 {
+	return s.EnvGroupID
+}
+
+// GetEnvGroup implements the ConfigEntity interface by returning the
+// associated environment group; may be nil when the relation is not loaded.
+func (s *Site) GetEnvGroup() *EnvGroup {
+	return s.EnvGroup
+}

+ 15 - 0
model/stream.go

@@ -8,3 +8,18 @@ type Stream struct {
 	EnvGroup    *EnvGroup `json:"env_group,omitempty"`
 	SyncNodeIDs []uint64  `json:"sync_node_ids" gorm:"serializer:json"`
 }
+
+// GetPath implements the ConfigEntity interface by returning the stream's
+// configuration file path.
+func (s *Stream) GetPath() string {
+	return s.Path
+}
+
+// GetEnvGroupID implements the ConfigEntity interface by returning the ID of
+// the environment group this stream belongs to (0 when unassigned).
+func (s *Stream) GetEnvGroupID() uint64 {
+	return s.EnvGroupID
+}
+
+// GetEnvGroup implements the ConfigEntity interface by returning the
+// associated environment group; may be nil when the relation is not loaded.
+func (s *Stream) GetEnvGroup() *EnvGroup {
+	return s.EnvGroup
+}