shard_manager.go 9.4 KB

package indexer

import (
	"crypto/md5"
	"fmt"
	"hash/fnv"
	"os"
	"path/filepath"
	"sync"

	"github.com/blevesearch/bleve/v2"

	"github.com/0xJacky/Nginx-UI/internal/nginx_log/utils"
)
// DefaultShardManager implements sharding logic for distributed indexing.
type DefaultShardManager struct {
	config     *Config
	shards     map[int]bleve.Index
	shardPaths map[int]string
	mu         sync.RWMutex
	hashFunc   ShardHashFunc
}

// ShardHashFunc determines which shard a given document key belongs to.
type ShardHashFunc func(key string, shardCount int) int

// NewDefaultShardManager creates a new shard manager.
func NewDefaultShardManager(config *Config) *DefaultShardManager {
	return &DefaultShardManager{
		config:     config,
		shards:     make(map[int]bleve.Index),
		shardPaths: make(map[int]string),
		hashFunc:   DefaultHashFunc,
	}
}
// Initialize sets up all shards.
func (sm *DefaultShardManager) Initialize() error {
	sm.mu.Lock()
	defer sm.mu.Unlock()

	for i := 0; i < sm.config.ShardCount; i++ {
		if err := sm.createShardLocked(i); err != nil {
			return fmt.Errorf("failed to create shard %d: %w", i, err)
		}
	}
	return nil
}
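
// exampleInitialize is an illustrative sketch, not part of the original file.
// It assumes Config exposes the IndexPath and ShardCount fields that
// Initialize and createShardLocked read; the path value is hypothetical.
func exampleInitialize() error {
	sm := NewDefaultShardManager(&Config{
		IndexPath:  "/var/lib/nginx-ui/log-index", // hypothetical location
		ShardCount: 4,
	})
	if err := sm.Initialize(); err != nil {
		return err
	}
	defer sm.Close()
	return nil
}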
// GetShard returns the appropriate shard for a given key.
func (sm *DefaultShardManager) GetShard(key string) (bleve.Index, int, error) {
	shardID := sm.hashFunc(key, sm.config.ShardCount)
	index, err := sm.GetShardByID(shardID)
	return index, shardID, err
}

// GetShardByID returns the shard with the given ID.
func (sm *DefaultShardManager) GetShardByID(id int) (bleve.Index, error) {
	sm.mu.RLock()
	shard, exists := sm.shards[id]
	sm.mu.RUnlock()

	if !exists {
		return nil, fmt.Errorf("%s: %d", ErrShardNotFound, id)
	}
	return shard, nil
}
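
// exampleRouteDocument is an illustrative sketch, not part of the original
// file, showing how a caller might route a log entry to its shard. The key,
// document ID, and field name are hypothetical placeholders.
func exampleRouteDocument(sm *DefaultShardManager) error {
	shard, shardID, err := sm.GetShard("203.0.113.7") // e.g. keyed by client IP
	if err != nil {
		return err
	}
	doc := map[string]interface{}{"remote_addr": "203.0.113.7"}
	return shard.Index(fmt.Sprintf("doc-in-shard-%d", shardID), doc)
}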
// GetAllShards returns all active shards.
func (sm *DefaultShardManager) GetAllShards() []bleve.Index {
	sm.mu.RLock()
	defer sm.mu.RUnlock()

	shards := make([]bleve.Index, 0, len(sm.shards))
	for i := 0; i < sm.config.ShardCount; i++ {
		if shard, exists := sm.shards[i]; exists {
			shards = append(shards, shard)
		}
	}
	return shards
}

// GetShardStats returns statistics for all shards.
func (sm *DefaultShardManager) GetShardStats() []*ShardInfo {
	sm.mu.RLock()
	defer sm.mu.RUnlock()

	stats := make([]*ShardInfo, 0, len(sm.shards))
	for id, shard := range sm.shards {
		if shard == nil {
			continue
		}
		docCount, _ := shard.DocCount()

		var size int64
		if path, exists := sm.shardPaths[id]; exists {
			if stat, err := os.Stat(path); err == nil {
				size = stat.Size()
			}
		}

		stats = append(stats, &ShardInfo{
			ID:            id,
			Path:          sm.shardPaths[id],
			DocumentCount: docCount,
			Size:          size,
			LastUpdated:   0, // TODO: track last update time
		})
	}
	return stats
}
// CreateShard creates a new shard with the given ID. The path argument is
// currently unused; the shard path is derived from the configured index path
// and the shard ID.
func (sm *DefaultShardManager) CreateShard(id int, path string) error {
	sm.mu.Lock()
	defer sm.mu.Unlock()
	return sm.createShardLocked(id)
}

// createShardLocked creates a shard while holding the lock.
func (sm *DefaultShardManager) createShardLocked(id int) error {
	// Use efficient string building for the shard path.
	shardNameBuf := make([]byte, 0, 16)
	shardNameBuf = append(shardNameBuf, "shard_"...)
	shardNameBuf = utils.AppendInt(shardNameBuf, id)
	shardName := utils.BytesToStringUnsafe(shardNameBuf)
	shardPath := filepath.Join(sm.config.IndexPath, shardName)

	// Ensure the shard directory exists.
	if err := os.MkdirAll(shardPath, 0755); err != nil {
		return fmt.Errorf("failed to create shard directory: %w", err)
	}

	// Create or open the shard index.
	var shard bleve.Index
	var err error
	if _, statErr := os.Stat(filepath.Join(shardPath, "index_meta.json")); os.IsNotExist(statErr) {
		// Create a new index with a disk-space-oriented configuration.
		mapping := CreateLogIndexMapping()

		// FloorSegmentFileSize controls the minimum size of segment files.
		// Larger values reduce file fragmentation and improve I/O efficiency,
		// which can save disk space by reducing metadata overhead. 5 MB is a
		// reasonable balance between space efficiency and performance.
		kvConfig := map[string]interface{}{
			"scorchMergePlanOptions": map[string]interface{}{
				"FloorSegmentFileSize": 5000000, // 5 MB minimum segment file size
			},
		}

		shard, err = bleve.NewUsing(shardPath, mapping, bleve.Config.DefaultIndexType, bleve.Config.DefaultMemKVStore, kvConfig)
		if err != nil {
			return fmt.Errorf("failed to create new shard index: %w", err)
		}
	} else {
		// Open the existing index.
		shard, err = bleve.Open(shardPath)
		if err != nil {
			return fmt.Errorf("failed to open existing shard index: %w", err)
		}
	}

	sm.shards[id] = shard
	sm.shardPaths[id] = shardPath
	return nil
}
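
// For orientation (an illustrative note, not part of the original file):
// with ShardCount = 4, createShardLocked lays shards out on disk roughly as
//
//	<IndexPath>/shard_0/index_meta.json
//	<IndexPath>/shard_1/index_meta.json
//	<IndexPath>/shard_2/index_meta.json
//	<IndexPath>/shard_3/index_meta.json
//
// where index_meta.json is the bleve metadata file whose presence is used
// above to decide between creating a new index and opening an existing one.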
// CloseShard closes a shard and removes it from the manager.
func (sm *DefaultShardManager) CloseShard(id int) error {
	sm.mu.Lock()
	defer sm.mu.Unlock()

	shard, exists := sm.shards[id]
	if !exists {
		return fmt.Errorf("%s: %d", ErrShardNotFound, id)
	}
	if err := shard.Close(); err != nil {
		return fmt.Errorf("failed to close shard %d: %w", id, err)
	}

	delete(sm.shards, id)
	delete(sm.shardPaths, id)
	return nil
}

// OptimizeShard optimizes a specific shard.
func (sm *DefaultShardManager) OptimizeShard(id int) error {
	shard, err := sm.GetShardByID(id)
	if err != nil {
		return err
	}
	// Bleve doesn't have a direct optimize method, but we can trigger
	// internal optimizations by forcing a merge.
	return shard.SetInternal([]byte("_optimize"), []byte("trigger"))
}
// Close closes all shards.
func (sm *DefaultShardManager) Close() error {
	sm.mu.Lock()
	defer sm.mu.Unlock()

	var errs []error
	for id, shard := range sm.shards {
		if err := shard.Close(); err != nil {
			errs = append(errs, fmt.Errorf("failed to close shard %d: %w", id, err))
		}
	}

	sm.shards = make(map[int]bleve.Index)
	sm.shardPaths = make(map[int]string)

	if len(errs) > 0 {
		return fmt.Errorf("errors closing shards: %v", errs)
	}
	return nil
}
// Hash functions for shard selection.

// DefaultHashFunc uses the FNV-1a hash for shard distribution.
func DefaultHashFunc(key string, shardCount int) int {
	h := fnv.New32a()
	h.Write([]byte(key))
	return int(h.Sum32()) % shardCount
}

// MD5HashFunc uses the MD5 hash for shard distribution.
func MD5HashFunc(key string, shardCount int) int {
	h := md5.Sum([]byte(key))
	// Use the first 4 bytes as a big-endian uint32.
	val := uint32(h[0])<<24 | uint32(h[1])<<16 | uint32(h[2])<<8 | uint32(h[3])
	return int(val) % shardCount
}
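
// exampleKeyDistribution is an illustrative sketch, not part of the original
// file. It shows that DefaultHashFunc is deterministic (the same key always
// lands on the same shard) and how a caller could eyeball the spread of a few
// hypothetical keys across four shards.
func exampleKeyDistribution() map[int]int {
	keys := []string{"192.0.2.1", "192.0.2.2", "198.51.100.9", "203.0.113.42"}
	counts := make(map[int]int)
	for _, key := range keys {
		counts[DefaultHashFunc(key, 4)]++
	}
	return counts
}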
// IPHashFunc is optimized for IP address distribution.
func IPHashFunc(key string, shardCount int) int {
	h := fnv.New32a()
	// If the key looks like an IP address, hash the last part more heavily,
	// since the tail is usually the varying portion.
	if len(key) > 7 && key[len(key)-4:] != key[:4] {
		for i, b := range []byte(key) {
			if i >= len(key)/2 {
				h.Write([]byte{b, b}) // double weight for later characters
			} else {
				h.Write([]byte{b})
			}
		}
	} else {
		h.Write([]byte(key))
	}
	return int(h.Sum32()) % shardCount
}
// TimestampHashFunc distributes documents based on timestamp ranges.
func TimestampHashFunc(timestamp int64, shardCount int) int {
	// Distribute by hour so that related time periods stay together.
	hourBucket := timestamp / 3600 // Unix timestamp to hour bucket
	result := int(hourBucket) % shardCount
	if result < 0 {
		result = -result
	}
	return result
}
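
// exampleHourBucketing is an illustrative sketch, not part of the original
// file. Two timestamps that fall in the same hour bucket map to the same
// shard, so queries over a narrow time range tend to touch fewer shards.
func exampleHourBucketing() bool {
	const shardCount = 4
	a := TimestampHashFunc(1700000000, shardCount)      // a Unix timestamp
	b := TimestampHashFunc(1700000000+1800, shardCount) // 30 minutes later, same hour bucket
	return a == b // true: both timestamps land on the same shard
}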
// ConsistentHashFunc provides consistent hashing for better distribution.
func ConsistentHashFunc(key string, shardCount int) int {
	// Simple consistent hashing; could be enhanced with a hash ring.
	h1 := fnv.New64a()
	h1.Write([]byte(key))
	hash1 := h1.Sum64()

	h2 := fnv.New64()
	h2.Write([]byte(key + "_salt"))
	hash2 := h2.Sum64()

	// Combine the hashes for better distribution.
	combined := hash1 ^ hash2
	result := int(combined) % shardCount
	if result < 0 {
		result = -result
	}
	return result
}
// SetHashFunc allows changing the hash function.
func (sm *DefaultShardManager) SetHashFunc(fn ShardHashFunc) {
	sm.mu.Lock()
	defer sm.mu.Unlock()
	sm.hashFunc = fn
}

// GetShardDistribution returns the current distribution of documents across shards.
func (sm *DefaultShardManager) GetShardDistribution() map[int]uint64 {
	stats := sm.GetShardStats()
	distribution := make(map[int]uint64)
	for _, stat := range stats {
		distribution[stat.ID] = stat.DocumentCount
	}
	return distribution
}
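
// exampleReportSkew is an illustrative sketch, not part of the original file.
// It derives the least and most loaded shards from GetShardDistribution,
// which a caller might log to spot an unbalanced hash function.
func exampleReportSkew(sm *DefaultShardManager) (minDocs, maxDocs uint64) {
	first := true
	for _, count := range sm.GetShardDistribution() {
		if first {
			minDocs, maxDocs = count, count
			first = false
			continue
		}
		if count < minDocs {
			minDocs = count
		}
		if count > maxDocs {
			maxDocs = count
		}
	}
	return minDocs, maxDocs
}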
// HealthCheck verifies that all shards are accessible.
func (sm *DefaultShardManager) HealthCheck() error {
	sm.mu.RLock()
	defer sm.mu.RUnlock()

	for id, shard := range sm.shards {
		if shard == nil {
			return fmt.Errorf("shard %d is nil", id)
		}
		// Try a simple operation to verify accessibility.
		if _, err := shard.DocCount(); err != nil {
			return fmt.Errorf("shard %d health check failed: %w", id, err)
		}
	}
	return nil
}
// Destroy closes all shards and deletes their data from disk.
func (sm *DefaultShardManager) Destroy() error {
	sm.mu.Lock()
	defer sm.mu.Unlock()

	// First, close all shards.
	var errs []error
	for id, shard := range sm.shards {
		if err := shard.Close(); err != nil {
			errs = append(errs, fmt.Errorf("failed to close shard %d for deletion: %w", id, err))
		}
	}

	// Then delete all shard directories.
	for _, path := range sm.shardPaths {
		if err := os.RemoveAll(path); err != nil {
			errs = append(errs, fmt.Errorf("failed to delete shard directory %s: %w", path, err))
		}
	}

	// Reset internal state.
	sm.shards = make(map[int]bleve.Index)
	sm.shardPaths = make(map[int]string)

	if len(errs) > 0 {
		return fmt.Errorf("errors occurred while destroying shards: %v", errs)
	}
	return nil
}