shard_manager.go 9.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373
  1. package indexer
import (
	"crypto/md5"
	"errors"
	"fmt"
	"hash/fnv"
	"os"
	"path/filepath"
	"sync"

	"github.com/0xJacky/Nginx-UI/internal/nginx_log/utils"
	"github.com/blevesearch/bleve/v2"
)
// DefaultShardManager implements sharding logic for distributed indexing.
// It owns a fixed set of bleve indexes ("shards") keyed by shard ID and
// routes documents to shards via a pluggable hash function.
type DefaultShardManager struct {
	config     *Config             // indexing configuration (shard count, base index path)
	shards     map[int]bleve.Index // open shard indexes, keyed by shard ID
	shardPaths map[int]string      // on-disk directory of each shard, keyed by shard ID
	mu         sync.RWMutex        // guards shards, shardPaths and hashFunc
	hashFunc   ShardHashFunc       // maps a document key to a shard ID
}

// ShardHashFunc defines how to determine which shard a document belongs to.
// Implementations are expected to return a value in [0, shardCount).
type ShardHashFunc func(key string, shardCount int) int
  22. // NewDefaultShardManager creates a new shard manager
  23. func NewDefaultShardManager(config *Config) *DefaultShardManager {
  24. return &DefaultShardManager{
  25. config: config,
  26. shards: make(map[int]bleve.Index),
  27. shardPaths: make(map[int]string),
  28. hashFunc: DefaultHashFunc,
  29. }
  30. }
  31. // Initialize sets up all shards
  32. func (sm *DefaultShardManager) Initialize() error {
  33. sm.mu.Lock()
  34. defer sm.mu.Unlock()
  35. for i := 0; i < sm.config.ShardCount; i++ {
  36. if err := sm.createShardLocked(i); err != nil {
  37. return fmt.Errorf("failed to create shard %d: %w", i, err)
  38. }
  39. }
  40. return nil
  41. }
  42. // GetShard returns the appropriate shard for a given key
  43. func (sm *DefaultShardManager) GetShard(key string) (bleve.Index, int, error) {
  44. shardID := sm.hashFunc(key, sm.config.ShardCount)
  45. index, err := sm.GetShardByID(shardID)
  46. return index, shardID, err
  47. }
  48. // GetShardByID returns the shard with the given ID
  49. func (sm *DefaultShardManager) GetShardByID(id int) (bleve.Index, error) {
  50. sm.mu.RLock()
  51. shard, exists := sm.shards[id]
  52. sm.mu.RUnlock()
  53. if !exists {
  54. return nil, fmt.Errorf("%s: %d", ErrShardNotFound, id)
  55. }
  56. return shard, nil
  57. }
  58. // GetAllShards returns all active shards
  59. func (sm *DefaultShardManager) GetAllShards() []bleve.Index {
  60. sm.mu.RLock()
  61. defer sm.mu.RUnlock()
  62. shards := make([]bleve.Index, 0, len(sm.shards))
  63. for i := 0; i < sm.config.ShardCount; i++ {
  64. if shard, exists := sm.shards[i]; exists {
  65. shards = append(shards, shard)
  66. }
  67. }
  68. return shards
  69. }
// GetShardStats returns statistics for all shards currently managed.
// LastUpdated is not tracked yet and is always reported as 0.
func (sm *DefaultShardManager) GetShardStats() []*ShardInfo {
	sm.mu.RLock()
	defer sm.mu.RUnlock()
	stats := make([]*ShardInfo, 0, len(sm.shards))
	for id, shard := range sm.shards {
		if shard == nil {
			continue
		}
		// DocCount error is deliberately ignored; a failing shard reports 0 docs.
		docCount, _ := shard.DocCount()
		var size int64
		if path, exists := sm.shardPaths[id]; exists {
			// NOTE(review): os.Stat on a directory returns the size of the
			// directory entry itself, not the total size of its contents, so
			// this likely under-reports shard size — confirm whether a
			// recursive walk was intended here.
			if stat, err := os.Stat(path); err == nil {
				size = stat.Size()
			}
		}
		stats = append(stats, &ShardInfo{
			ID:            id,
			Path:          sm.shardPaths[id],
			DocumentCount: docCount,
			Size:          size,
			LastUpdated:   0, // TODO: Track last update time
		})
	}
	return stats
}
// CreateShard creates a new shard with the given ID.
//
// NOTE(review): the path parameter is currently ignored — createShardLocked
// derives the shard path from sm.config.IndexPath instead. Confirm whether
// callers expect the supplied path to be honored.
func (sm *DefaultShardManager) CreateShard(id int, path string) error {
	sm.mu.Lock()
	defer sm.mu.Unlock()
	return sm.createShardLocked(id)
}
  102. // createShardLocked creates a shard while holding the lock
  103. func (sm *DefaultShardManager) createShardLocked(id int) error {
  104. // Use efficient string building for shard path
  105. shardNameBuf := make([]byte, 0, 16)
  106. shardNameBuf = append(shardNameBuf, "shard_"...)
  107. shardNameBuf = utils.AppendInt(shardNameBuf, id)
  108. shardName := utils.BytesToStringUnsafe(shardNameBuf)
  109. shardPath := filepath.Join(sm.config.IndexPath, shardName)
  110. // Ensure directory exists
  111. if err := os.MkdirAll(shardPath, 0755); err != nil {
  112. return fmt.Errorf("failed to create shard directory: %w", err)
  113. }
  114. // Create or open the shard index
  115. var shard bleve.Index
  116. var err error
  117. if _, statErr := os.Stat(filepath.Join(shardPath, "index_meta.json")); os.IsNotExist(statErr) {
  118. // Create new index with optimized disk space configuration
  119. mapping := CreateLogIndexMapping()
  120. // Optimize FloorSegmentFileSize for better disk space usage
  121. // FloorSegmentFileSize controls the minimum size of segment files.
  122. // Larger values reduce file fragmentation and improve I/O efficiency,
  123. // which can save disk space by reducing metadata overhead.
  124. // 5MB provides a good balance between space efficiency and performance.
  125. kvConfig := map[string]interface{}{
  126. "scorchMergePlanOptions": map[string]interface{}{
  127. "FloorSegmentFileSize": 5000000, // 5MB minimum segment file size
  128. },
  129. }
  130. shard, err = bleve.NewUsing(shardPath, mapping, bleve.Config.DefaultIndexType, bleve.Config.DefaultMemKVStore, kvConfig)
  131. if err != nil {
  132. return fmt.Errorf("failed to create new shard index: %w", err)
  133. }
  134. } else {
  135. // Open existing index
  136. shard, err = bleve.Open(shardPath)
  137. if err != nil {
  138. return fmt.Errorf("failed to open existing shard index: %w", err)
  139. }
  140. }
  141. sm.shards[id] = shard
  142. sm.shardPaths[id] = shardPath
  143. return nil
  144. }
// CloseShard closes a shard and removes it from the manager.
// It takes the write lock; use closeShardLocked when the lock is already held.
func (sm *DefaultShardManager) CloseShard(id int) error {
	sm.mu.Lock()
	defer sm.mu.Unlock()
	return sm.closeShardLocked(id)
}
  151. // closeShardLocked closes a shard while already holding the lock
  152. func (sm *DefaultShardManager) closeShardLocked(id int) error {
  153. shard, exists := sm.shards[id]
  154. if !exists {
  155. return fmt.Errorf("%s: %d", ErrShardNotFound, id)
  156. }
  157. if err := shard.Close(); err != nil {
  158. return fmt.Errorf("failed to close shard %d: %w", id, err)
  159. }
  160. delete(sm.shards, id)
  161. delete(sm.shardPaths, id)
  162. return nil
  163. }
// OptimizeShard optimizes a specific shard.
//
// NOTE(review): bleve exposes no explicit optimize/merge API; this writes an
// internal KV pair ("_optimize" -> "trigger") via SetInternal. It is not
// evident from this code alone that the write actually triggers a segment
// merge — verify against the bleve documentation.
func (sm *DefaultShardManager) OptimizeShard(id int) error {
	shard, err := sm.GetShardByID(id)
	if err != nil {
		return err
	}
	// Bleve doesn't have a direct optimize method, but we can trigger
	// internal optimizations by forcing a merge
	return shard.SetInternal([]byte("_optimize"), []byte("trigger"))
}
  174. // Close closes all shards
  175. func (sm *DefaultShardManager) Close() error {
  176. sm.mu.Lock()
  177. defer sm.mu.Unlock()
  178. var errs []error
  179. for id, shard := range sm.shards {
  180. if err := shard.Close(); err != nil {
  181. errs = append(errs, fmt.Errorf("failed to close shard %d: %w", id, err))
  182. }
  183. }
  184. sm.shards = make(map[int]bleve.Index)
  185. sm.shardPaths = make(map[int]string)
  186. if len(errs) > 0 {
  187. return fmt.Errorf("errors closing shards: %v", errs)
  188. }
  189. return nil
  190. }
  191. // Hash functions for shard selection
  192. // DefaultHashFunc uses FNV-1a hash for shard distribution
  193. func DefaultHashFunc(key string, shardCount int) int {
  194. h := fnv.New32a()
  195. h.Write([]byte(key))
  196. return int(h.Sum32()) % shardCount
  197. }
  198. // MD5HashFunc uses MD5 hash for shard distribution
  199. func MD5HashFunc(key string, shardCount int) int {
  200. h := md5.Sum([]byte(key))
  201. // Use first 4 bytes as uint32
  202. val := uint32(h[0])<<24 | uint32(h[1])<<16 | uint32(h[2])<<8 | uint32(h[3])
  203. return int(val) % shardCount
  204. }
  205. // IPHashFunc optimized for IP address distribution
  206. func IPHashFunc(key string, shardCount int) int {
  207. // For IP addresses, use the last octet for better distribution
  208. h := fnv.New32a()
  209. // If key looks like an IP, hash the last part more heavily
  210. if len(key) > 7 && key[len(key)-4:] != key[:4] {
  211. // Weight the end of the string more (likely the varying part of IP)
  212. for i, b := range []byte(key) {
  213. if i >= len(key)/2 {
  214. h.Write([]byte{b, b}) // Double weight for later characters
  215. } else {
  216. h.Write([]byte{b})
  217. }
  218. }
  219. } else {
  220. h.Write([]byte(key))
  221. }
  222. return int(h.Sum32()) % shardCount
  223. }
  224. // TimestampHashFunc distributes based on timestamp ranges
  225. func TimestampHashFunc(timestamp int64, shardCount int) int {
  226. // Distribute by hour to keep related time periods together
  227. hourBucket := timestamp / 3600 // Unix timestamp to hour bucket
  228. result := int(hourBucket) % shardCount
  229. if result < 0 {
  230. result = -result
  231. }
  232. return result
  233. }
  234. // ConsistentHashFunc provides consistent hashing for better distribution
  235. func ConsistentHashFunc(key string, shardCount int) int {
  236. // Simple consistent hashing - can be enhanced with hash ring
  237. h1 := fnv.New64a()
  238. h1.Write([]byte(key))
  239. hash1 := h1.Sum64()
  240. h2 := fnv.New64()
  241. h2.Write([]byte(key + "_salt"))
  242. hash2 := h2.Sum64()
  243. // Combine hashes for better distribution
  244. combined := hash1 ^ hash2
  245. result := int(combined) % shardCount
  246. if result < 0 {
  247. result = -result
  248. }
  249. return result
  250. }
  251. // SetHashFunc allows changing the hash function
  252. func (sm *DefaultShardManager) SetHashFunc(fn ShardHashFunc) {
  253. sm.mu.Lock()
  254. defer sm.mu.Unlock()
  255. sm.hashFunc = fn
  256. }
  257. // GetShardDistribution returns the current distribution of documents across shards
  258. func (sm *DefaultShardManager) GetShardDistribution() map[int]uint64 {
  259. stats := sm.GetShardStats()
  260. distribution := make(map[int]uint64)
  261. for _, stat := range stats {
  262. distribution[stat.ID] = stat.DocumentCount
  263. }
  264. return distribution
  265. }
  266. // HealthCheck verifies all shards are accessible
  267. func (sm *DefaultShardManager) HealthCheck() error {
  268. sm.mu.RLock()
  269. defer sm.mu.RUnlock()
  270. for id, shard := range sm.shards {
  271. if shard == nil {
  272. return fmt.Errorf("shard %d is nil", id)
  273. }
  274. // Try a simple operation to verify accessibility
  275. if _, err := shard.DocCount(); err != nil {
  276. return fmt.Errorf("shard %d health check failed: %w", id, err)
  277. }
  278. }
  279. return nil
  280. }
  281. // Destroy closes all shards and deletes their data from disk.
  282. func (sm *DefaultShardManager) Destroy() error {
  283. sm.mu.Lock()
  284. defer sm.mu.Unlock()
  285. // First, close all shards
  286. var errs []error
  287. for id, shard := range sm.shards {
  288. if err := shard.Close(); err != nil {
  289. errs = append(errs, fmt.Errorf("failed to close shard %d for deletion: %w", id, err))
  290. }
  291. }
  292. // Then, delete all shard directories
  293. for _, path := range sm.shardPaths {
  294. if err := os.RemoveAll(path); err != nil {
  295. errs = append(errs, fmt.Errorf("failed to delete shard directory %s: %w", path, err))
  296. }
  297. }
  298. // Reset internal state
  299. sm.shards = make(map[int]bleve.Index)
  300. sm.shardPaths = make(map[int]string)
  301. if len(errs) > 0 {
  302. return fmt.Errorf("errors occurred while destroying shards: %v", errs)
  303. }
  304. return nil
  305. }