parallel_indexer_worker_test.go

package indexer

import (
	"context"
	"sync"
	"testing"
	"time"

	"github.com/blevesearch/bleve/v2"
)

// mockShardManagerForWorkerTest is a mock shard manager for the parallel
// indexer worker tests; every method is a no-op.
type mockShardManagerForWorkerTest struct{}

func (m *mockShardManagerForWorkerTest) GetShard(key string) (bleve.Index, int, error) {
	return nil, 0, nil
}

func (m *mockShardManagerForWorkerTest) GetShardForDocument(mainLogPath string, key string) (bleve.Index, int, error) {
	return m.GetShard(key)
}

func (m *mockShardManagerForWorkerTest) GetShardByID(id int) (bleve.Index, error) {
	return nil, nil
}

func (m *mockShardManagerForWorkerTest) GetAllShards() []bleve.Index {
	return []bleve.Index{}
}

func (m *mockShardManagerForWorkerTest) GetShardCount() int {
	return 1
}

func (m *mockShardManagerForWorkerTest) Initialize() error {
	return nil
}

func (m *mockShardManagerForWorkerTest) GetShardStats() []*ShardInfo {
	return []*ShardInfo{}
}

func (m *mockShardManagerForWorkerTest) CreateShard(id int, path string) error {
	return nil
}

func (m *mockShardManagerForWorkerTest) Close() error {
	return nil
}

func (m *mockShardManagerForWorkerTest) CloseShard(id int) error {
	return nil
}

func (m *mockShardManagerForWorkerTest) HealthCheck() error {
	return nil
}

func (m *mockShardManagerForWorkerTest) OptimizeShard(id int) error {
	return nil
}
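
// Compile-time check that the mock satisfies the shard manager interface the
// indexer consumes, so interface drift is caught at build time rather than in
// a test run. The interface name ShardManager is an assumption for
// illustration; substitute the actual interface name used by this package.
var _ ShardManager = (*mockShardManagerForWorkerTest)(nil)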

// createTestParallelIndexer is a test helper that creates a parallel indexer
// backed by the mock shard manager, for use in the worker tests.
func createTestParallelIndexer(workerCount int) *ParallelIndexer {
	config := &Config{
		WorkerCount:  workerCount,
		BatchSize:    100,
		MaxQueueSize: 1000,
	}
	shardManager := &mockShardManagerForWorkerTest{}
	return NewParallelIndexer(config, shardManager)
}

func TestParallelIndexer_handleWorkerCountChange_Increase(t *testing.T) {
	pi := createTestParallelIndexer(4)

	// Start the indexer
	ctx := context.Background()
	err := pi.Start(ctx)
	if err != nil {
		t.Fatalf("Failed to start parallel indexer: %v", err)
	}
	defer pi.Stop()

	// Allow time for initialization
	time.Sleep(100 * time.Millisecond)

	initialWorkerCount := len(pi.workers)
	if initialWorkerCount != 4 {
		t.Fatalf("Expected 4 initial workers, got %d", initialWorkerCount)
	}

	// Test increasing the worker count
	pi.handleWorkerCountChange(4, 6)

	// Verify the worker count increased
	newWorkerCount := len(pi.workers)
	if newWorkerCount != 6 {
		t.Errorf("Expected 6 workers after increase, got %d", newWorkerCount)
	}

	// Verify the config was updated
	if pi.config.WorkerCount != 6 {
		t.Errorf("Expected config worker count to be 6, got %d", pi.config.WorkerCount)
	}

	// Verify stats were updated
	if len(pi.stats.WorkerStats) != 6 {
		t.Errorf("Expected 6 worker stats, got %d", len(pi.stats.WorkerStats))
	}
}

func TestParallelIndexer_handleWorkerCountChange_Decrease(t *testing.T) {
	pi := createTestParallelIndexer(6)

	// Start the indexer
	ctx := context.Background()
	err := pi.Start(ctx)
	if err != nil {
		t.Fatalf("Failed to start parallel indexer: %v", err)
	}
	defer pi.Stop()

	// Allow time for initialization
	time.Sleep(100 * time.Millisecond)

	initialWorkerCount := len(pi.workers)
	if initialWorkerCount != 6 {
		t.Fatalf("Expected 6 initial workers, got %d", initialWorkerCount)
	}

	// Test decreasing the worker count
	pi.handleWorkerCountChange(6, 4)

	// Verify the worker count decreased
	newWorkerCount := len(pi.workers)
	if newWorkerCount != 4 {
		t.Errorf("Expected 4 workers after decrease, got %d", newWorkerCount)
	}

	// Verify the config was updated
	if pi.config.WorkerCount != 4 {
		t.Errorf("Expected config worker count to be 4, got %d", pi.config.WorkerCount)
	}

	// Verify stats were updated
	if len(pi.stats.WorkerStats) != 4 {
		t.Errorf("Expected 4 worker stats, got %d", len(pi.stats.WorkerStats))
	}
}

func TestParallelIndexer_handleWorkerCountChange_NoChange(t *testing.T) {
	pi := createTestParallelIndexer(4)

	// Start the indexer
	ctx := context.Background()
	err := pi.Start(ctx)
	if err != nil {
		t.Fatalf("Failed to start parallel indexer: %v", err)
	}
	defer pi.Stop()

	// Allow time for initialization
	time.Sleep(100 * time.Millisecond)

	initialWorkerCount := len(pi.workers)

	// Test the no-change scenario
	pi.handleWorkerCountChange(4, 4)

	// Verify the worker count didn't change
	newWorkerCount := len(pi.workers)
	if newWorkerCount != initialWorkerCount {
		t.Errorf("Expected worker count to remain %d, got %d", initialWorkerCount, newWorkerCount)
	}
}

func TestParallelIndexer_handleWorkerCountChange_NotRunning(t *testing.T) {
	pi := createTestParallelIndexer(4)

	// Don't start the indexer; it should be in the stopped state
	initialWorkerCount := len(pi.workers)

	// Attempt a worker count change while not running
	pi.handleWorkerCountChange(4, 6)

	// Verify no change occurred
	newWorkerCount := len(pi.workers)
	if newWorkerCount != initialWorkerCount {
		t.Errorf("Expected no worker change when not running, initial: %d, new: %d",
			initialWorkerCount, newWorkerCount)
	}

	// Verify the config wasn't updated
	if pi.config.WorkerCount != 4 {
		t.Errorf("Expected config worker count to remain 4, got %d", pi.config.WorkerCount)
	}
}

func TestParallelIndexer_addWorkers(t *testing.T) {
	pi := createTestParallelIndexer(2)

	// Start the indexer
	ctx := context.Background()
	err := pi.Start(ctx)
	if err != nil {
		t.Fatalf("Failed to start parallel indexer: %v", err)
	}
	defer pi.Stop()

	// Allow time for initialization
	time.Sleep(100 * time.Millisecond)

	initialCount := len(pi.workers)
	if initialCount != 2 {
		t.Fatalf("Expected 2 initial workers, got %d", initialCount)
	}

	// Add 3 workers
	pi.addWorkers(3)

	// Verify workers were added
	newCount := len(pi.workers)
	if newCount != 5 {
		t.Errorf("Expected 5 workers after adding 3, got %d", newCount)
	}

	// Verify worker IDs are sequential
	for i, worker := range pi.workers {
		if worker.id != i {
			t.Errorf("Expected worker %d to have ID %d, got %d", i, i, worker.id)
		}
	}

	// Verify stats were updated
	if len(pi.stats.WorkerStats) != 5 {
		t.Errorf("Expected 5 worker stats, got %d", len(pi.stats.WorkerStats))
	}
}

func TestParallelIndexer_removeWorkers(t *testing.T) {
	pi := createTestParallelIndexer(5)

	// Start the indexer
	ctx := context.Background()
	err := pi.Start(ctx)
	if err != nil {
		t.Fatalf("Failed to start parallel indexer: %v", err)
	}
	defer pi.Stop()

	// Allow time for initialization
	time.Sleep(100 * time.Millisecond)

	initialCount := len(pi.workers)
	if initialCount != 5 {
		t.Fatalf("Expected 5 initial workers, got %d", initialCount)
	}

	// Remove 2 workers
	pi.removeWorkers(2)

	// Verify workers were removed
	newCount := len(pi.workers)
	if newCount != 3 {
		t.Errorf("Expected 3 workers after removing 2, got %d", newCount)
	}

	// Verify stats were updated
	if len(pi.stats.WorkerStats) != 3 {
		t.Errorf("Expected 3 worker stats, got %d", len(pi.stats.WorkerStats))
	}
}

func TestParallelIndexer_removeWorkers_KeepMinimum(t *testing.T) {
	pi := createTestParallelIndexer(2)

	// Start the indexer
	ctx := context.Background()
	err := pi.Start(ctx)
	if err != nil {
		t.Fatalf("Failed to start parallel indexer: %v", err)
	}
	defer pi.Stop()

	// Allow time for initialization
	time.Sleep(100 * time.Millisecond)

	initialCount := len(pi.workers)
	if initialCount != 2 {
		t.Fatalf("Expected 2 initial workers, got %d", initialCount)
	}

	// Try to remove all workers (the indexer should keep at least one)
	pi.removeWorkers(2)

	// Verify at least one worker remains
	newCount := len(pi.workers)
	if newCount != 1 {
		t.Errorf("Expected 1 worker to remain after trying to remove all, got %d", newCount)
	}

	// Verify stats were updated
	if len(pi.stats.WorkerStats) != 1 {
		t.Errorf("Expected 1 worker stat, got %d", len(pi.stats.WorkerStats))
	}
}

func TestParallelIndexer_AdaptiveOptimizerIntegration(t *testing.T) {
	pi := createTestParallelIndexer(4)

	// Enable optimization
	pi.optimizationEnabled = true
	pi.adaptiveOptimizer = NewAdaptiveOptimizer(pi.config)

	// Start the indexer
	ctx := context.Background()
	err := pi.Start(ctx)
	if err != nil {
		t.Fatalf("Failed to start parallel indexer: %v", err)
	}
	defer pi.Stop()

	// Allow time for initialization
	time.Sleep(100 * time.Millisecond)

	// Verify the adaptive optimizer callback was set
	if pi.adaptiveOptimizer.onWorkerCountChange == nil {
		t.Error("Expected adaptive optimizer callback to be set")
	}

	// Simulate a worker count change coming from the adaptive optimizer
	initialWorkerCount := len(pi.workers)

	// Trigger the callback (simulating an adaptive optimizer decision)
	if pi.adaptiveOptimizer.onWorkerCountChange != nil {
		pi.adaptiveOptimizer.onWorkerCountChange(4, 6)
	}

	// Verify the worker count changed
	newWorkerCount := len(pi.workers)
	if newWorkerCount == initialWorkerCount {
		t.Error("Expected worker count to change from adaptive optimizer callback")
	}
}

func TestParallelIndexer_ConcurrentWorkerAdjustments(t *testing.T) {
	pi := createTestParallelIndexer(4)

	// Start the indexer
	ctx := context.Background()
	err := pi.Start(ctx)
	if err != nil {
		t.Fatalf("Failed to start parallel indexer: %v", err)
	}
	defer pi.Stop()

	// Allow time for initialization
	time.Sleep(100 * time.Millisecond)

	var wg sync.WaitGroup

	// Simulate concurrent worker adjustments. The goroutines read
	// pi.config.WorkerCount while other goroutines may be changing it, so
	// this test relies on handleWorkerCountChange serializing those updates
	// internally.
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(iteration int) {
			defer wg.Done()
			// Alternate between increasing and decreasing
			if iteration%2 == 0 {
				pi.handleWorkerCountChange(pi.config.WorkerCount, pi.config.WorkerCount+1)
			} else if pi.config.WorkerCount > 2 {
				pi.handleWorkerCountChange(pi.config.WorkerCount, pi.config.WorkerCount-1)
			}
		}(i)
	}
	wg.Wait()

	// Verify the final state is consistent
	workerCount := len(pi.workers)
	configCount := pi.config.WorkerCount
	statsCount := len(pi.stats.WorkerStats)
	if workerCount != configCount {
		t.Errorf("Worker count (%d) doesn't match config count (%d)", workerCount, configCount)
	}
	if workerCount != statsCount {
		t.Errorf("Worker count (%d) doesn't match stats count (%d)", workerCount, statsCount)
	}

	// Verify worker IDs are sequential and unique
	workerIDs := make(map[int]bool)
	for i, worker := range pi.workers {
		if worker.id != i {
			t.Errorf("Expected worker at index %d to have ID %d, got %d", i, i, worker.id)
		}
		if workerIDs[worker.id] {
			t.Errorf("Duplicate worker ID found: %d", worker.id)
		}
		workerIDs[worker.id] = true
	}
}

func TestParallelIndexer_WorkerStatsConsistency(t *testing.T) {
	pi := createTestParallelIndexer(3)

	// Start the indexer
	ctx := context.Background()
	err := pi.Start(ctx)
	if err != nil {
		t.Fatalf("Failed to start parallel indexer: %v", err)
	}
	defer pi.Stop()

	// Allow time for initialization
	time.Sleep(100 * time.Millisecond)

	// Test adding workers
	pi.addWorkers(2)

	// Verify stats consistency
	workerCount := len(pi.workers)
	statsCount := len(pi.stats.WorkerStats)
	if workerCount != statsCount {
		t.Errorf("Worker count (%d) doesn't match stats count (%d)", workerCount, statsCount)
	}

	// Verify each worker has corresponding stats
	for i, worker := range pi.workers {
		if pi.stats.WorkerStats[i].ID != worker.id {
			t.Errorf("Worker %d ID (%d) doesn't match stats ID (%d)",
				i, worker.id, pi.stats.WorkerStats[i].ID)
		}
		if worker.stats != pi.stats.WorkerStats[i] {
			t.Errorf("Worker %d stats pointer doesn't match global stats", i)
		}
	}

	// Test removing workers
	pi.removeWorkers(1)

	// Verify stats consistency after removal
	workerCount = len(pi.workers)
	statsCount = len(pi.stats.WorkerStats)
	if workerCount != statsCount {
		t.Errorf("After removal, worker count (%d) doesn't match stats count (%d)",
			workerCount, statsCount)
	}
}
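
// waitForWorkerCount is a sketch of a polling helper that could replace the
// fixed time.Sleep(100 * time.Millisecond) pauses in the tests above, making
// them less timing-sensitive. It assumes len(pi.workers) is safe to read from
// the test goroutine while the indexer is running; if the workers slice is
// guarded by a lock, acquire it here instead. This helper is illustrative and
// is not wired into the tests above.
func waitForWorkerCount(t *testing.T, pi *ParallelIndexer, want int, timeout time.Duration) {
	t.Helper()
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if len(pi.workers) == want {
			return
		}
		// Poll at a short interval rather than sleeping for a fixed time
		time.Sleep(10 * time.Millisecond)
	}
	t.Fatalf("Timed out waiting for %d workers, have %d", want, len(pi.workers))
}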