The searcher package provides high-performance, distributed search capabilities for NGINX logs with advanced query building, faceted search, caching, and real-time analytics.
```
searcher/
├── types.go                       # Core types, interfaces, and query structures
├── distributed_searcher.go        # Main distributed search implementation
├── query_builder.go               # Advanced query construction utilities
├── facet_aggregator.go            # Faceted search and aggregation engine
├── optimized_cache.go             # Multi-level caching system
├── performance_optimizations.go   # Memory and performance management
├── simple_test.go                 # Unit tests and benchmarks
└── README.md                      # This documentation
```
import "github.com/0xJacky/Nginx-UI/internal/nginx_log/searcher"
// Create searcher with default configuration
searcher := searcher.NewDistributedSearcher(nil, shardManager)
// Simple text search
query := &searcher.SearchQuery{
Query: "GET /api",
StartTime: time.Now().Add(-24 * time.Hour).Unix(),
EndTime: time.Now().Unix(),
Limit: 100,
}
results, err := searcher.Search(context.Background(), query)
if err != nil {
log.Fatal(err)
}
fmt.Printf("Found %d results in %v\n", results.Total, results.Duration)
for _, hit := range results.Hits {
entry := hit.Fields
fmt.Printf("%s %s %s %d\n", entry.IP, entry.Method, entry.Path, entry.Status)
}
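Since `SearchQuery` carries `Limit` and `Offset` (see the type reference at the end of this document), result sets larger than one page can be walked incrementally. A minimal sketch, reusing `query` and `ds` from the quick start:

```go
// Page through all matches 100 at a time using Limit/Offset pagination.
pageSize := 100
for offset := 0; ; offset += pageSize {
    query.Limit = pageSize
    query.Offset = offset

    page, err := ds.Search(context.Background(), query)
    if err != nil {
        log.Fatal(err)
    }
    for _, hit := range page.Hits {
        fmt.Println(hit.Fields.Path)
    }

    // Stop once the last page has been consumed
    if len(page.Hits) == 0 || int64(offset+len(page.Hits)) >= page.Total {
        break
    }
}
```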
```go
// Create a complex query with multiple conditions
queryBuilder := searcher.NewQueryBuilder()

// Add text search
queryBuilder.AddTextQuery("path", "/api/users")

// Add range filters
queryBuilder.AddRangeQuery("status", 200, 299)              // Success status codes
queryBuilder.AddRangeQuery("timestamp", startTime, endTime) // Time range
queryBuilder.AddRangeQuery("request_time", 0, 1.0)          // Fast requests only

// Add term filters
queryBuilder.AddTermQuery("method", "GET")
queryBuilder.AddTermQuery("region_code", "US")

// Add IP range filter
queryBuilder.AddIPRangeQuery("ip", "192.168.1.0/24")

// Build the final query
query := queryBuilder.Build()
query.Limit = 1000
query.SortBy = []searcher.SortField{
    {Field: "timestamp", Descending: true},
    {Field: "request_time", Descending: true},
}

results, err := ds.Search(context.Background(), query)
```
```go
// Search with faceted aggregations
query := &searcher.SearchQuery{
    Query:     "*", // Match all
    StartTime: time.Now().Add(-24 * time.Hour).Unix(),
    EndTime:   time.Now().Unix(),
    Facets: []searcher.FacetRequest{
        {
            Name:  "status_codes",
            Field: "status",
            Size:  10,
        },
        {
            Name:  "top_ips",
            Field: "ip",
            Size:  20,
        },
        {
            Name:  "browsers",
            Field: "browser",
            Size:  15,
        },
        {
            Name:  "countries",
            Field: "region_code",
            Size:  50,
        },
    },
}

results, err := ds.Search(context.Background(), query)
if err != nil {
    log.Fatal(err)
}

// Process facet results
for facetName, facet := range results.Facets {
    fmt.Printf("\n%s:\n", facetName)
    for _, bucket := range facet.Buckets {
        fmt.Printf("  %s: %d (%.2f%%)\n",
            bucket.Key, bucket.Count,
            float64(bucket.Count)/float64(results.Total)*100)
    }
}
```
```go
type SearcherConfig struct {
    // Cache configuration
    EnableCache bool          `json:"enable_cache"` // Enable result caching (default: true)
    CacheSize   int           `json:"cache_size"`   // Cache entries limit (default: 10000)
    CacheTTL    time.Duration `json:"cache_ttl"`    // Cache TTL (default: 5 minutes)

    // Performance settings
    MaxConcurrentSearches int           `json:"max_concurrent"`  // Concurrent search limit (default: 100)
    SearchTimeout         time.Duration `json:"search_timeout"`  // Search timeout (default: 30s)
    MaxResultSize         int           `json:"max_result_size"` // Maximum results per query (default: 10000)

    // Aggregation settings
    MaxFacetSize     int  `json:"max_facet_size"`     // Maximum facet results (default: 1000)
    EnableFacetCache bool `json:"enable_facet_cache"` // Cache facet results (default: true)

    // Memory management
    MemoryLimit int64         `json:"memory_limit"` // Memory usage limit (default: 512MB)
    GCInterval  time.Duration `json:"gc_interval"`  // Garbage collection interval (default: 5m)

    // Query optimization
    OptimizeQueries      bool `json:"optimize_queries"` // Enable query optimization (default: true)
    QueryAnalysisEnabled bool `json:"query_analysis"`   // Enable query analysis (default: true)
}

func DefaultSearcherConfig() *SearcherConfig {
    return &SearcherConfig{
        EnableCache:           true,
        CacheSize:             10000,
        CacheTTL:              5 * time.Minute,
        MaxConcurrentSearches: 100,
        SearchTimeout:         30 * time.Second,
        MaxResultSize:         10000,
        MaxFacetSize:          1000,
        EnableFacetCache:      true,
        MemoryLimit:           512 * 1024 * 1024, // 512MB
        GCInterval:            5 * time.Minute,
        OptimizeQueries:       true,
        QueryAnalysisEnabled:  true,
    }
}
```
The main search engine with multi-shard support:
```go
// Create with a custom configuration
config := &SearcherConfig{
    MaxConcurrentSearches: 200,              // Higher concurrency
    SearchTimeout:         60 * time.Second, // Longer timeout
    CacheSize:             50000,            // Larger cache
}
ds := NewDistributedSearcher(config, shardManager)

// Execute a search across all shards
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

results, err := ds.Search(ctx, query)
if err != nil {
    log.Printf("Search failed: %v", err)
} else {
    fmt.Printf("Search completed: %d results from %d shards in %v\n",
        results.Total, len(results.ShardResults), results.Duration)
}
```
Advanced query construction with multiple conditions:
```go
// Create a query builder
qb := NewQueryBuilder()

// Text search with wildcards
qb.AddWildcardQuery("path", "/api/users/*")
qb.AddPrefixQuery("user_agent", "Mozilla/5.0")

// Numeric ranges
qb.AddRangeQuery("status", 400, 599)      // Error status codes
qb.AddRangeQuery("bytes_sent", 1024, nil) // Large responses (>1KB)

// Boolean combinations
qb.AddMustQuery(func(sub *QueryBuilder) {
    sub.AddTermQuery("method", "POST")
    sub.AddTermQuery("region_code", "US")
})

qb.AddShouldQuery(func(sub *QueryBuilder) {
    sub.AddTermQuery("browser", "Chrome")
    sub.AddTermQuery("browser", "Firefox")
})

qb.AddMustNotQuery(func(sub *QueryBuilder) {
    sub.AddTermQuery("ip", "127.0.0.1")
    sub.AddPrefixQuery("path", "/health")
})

// Build and execute
query := qb.Build()
query.Limit = 500
query.Offset = 0

results, err := ds.Search(context.Background(), query)
```
Real-time aggregations for analytics:
```go
// Define a complex facet configuration
query := &SearchQuery{
    Query:     "status:[400 TO 599]", // Error requests only
    StartTime: time.Now().Add(-1 * time.Hour).Unix(),
    EndTime:   time.Now().Unix(),
    Facets: []FacetRequest{
        // Error distribution by status code
        {
            Name:  "error_codes",
            Field: "status",
            Size:  20,
        },
        // Geographic distribution of errors
        {
            Name:  "error_regions",
            Field: "region_code",
            Size:  50,
        },
        // Top error-generating IPs
        {
            Name:  "error_ips",
            Field: "ip",
            Size:  100,
        },
        // Error distribution over time (hourly buckets)
        {
            Name:     "error_timeline",
            Field:    "timestamp",
            Size:     24,
            Interval: 3600, // 1-hour intervals
        },
        // Most problematic endpoints
        {
            Name:  "error_paths",
            Field: "path_exact",
            Size:  50,
        },
    },
}

results, err := ds.Search(context.Background(), query)

// Process the time-series facet
if timeline, exists := results.Facets["error_timeline"]; exists {
    fmt.Println("Error timeline (last 24 hours):")
    for _, bucket := range timeline.Buckets {
        timestamp := time.Unix(int64(bucket.KeyAsNumber), 0)
        fmt.Printf("  %s: %d errors\n",
            timestamp.Format("2006-01-02 15:04"), bucket.Count)
    }
}

// Analyze the top error sources
if errorIPs, exists := results.Facets["error_ips"]; exists {
    fmt.Println("\nTop error-generating IPs:")
    for i, bucket := range errorIPs.Buckets {
        if i >= 10 {
            break // Top 10
        }
        percentage := float64(bucket.Count) / float64(results.Total) * 100
        fmt.Printf("  %s: %d errors (%.2f%%)\n",
            bucket.Key, bucket.Count, percentage)
    }
}
```
Multi-level caching system for improved performance:
```go
// Cache configuration
config := &SearcherConfig{
    EnableCache:      true,
    CacheSize:        20000,
    CacheTTL:         10 * time.Minute,
    EnableFacetCache: true,
}
ds := NewDistributedSearcher(config, shardManager)

// First search (cache miss)
start := time.Now()
results1, _ := ds.Search(context.Background(), query)
duration1 := time.Since(start)

// Second search (cache hit)
start = time.Now()
results2, _ := ds.Search(context.Background(), query)
duration2 := time.Since(start)

fmt.Printf("First search: %v\n", duration1)  // e.g., 250ms
fmt.Printf("Second search: %v\n", duration2) // e.g., 2ms
fmt.Printf("Speedup: %.1fx\n", float64(duration1)/float64(duration2))

// Cache statistics
stats := ds.GetCacheStats()
fmt.Printf("Cache hit rate: %.2f%%\n", stats.HitRate*100)
fmt.Printf("Cache size: %d/%d entries\n", stats.Size, stats.Capacity)
```
Memory management and query optimization:
```go
// Enable performance monitoring
config := &SearcherConfig{
    QueryAnalysisEnabled: true,
    MemoryLimit:          1024 * 1024 * 1024, // 1GB
    GCInterval:           2 * time.Minute,
}
ds := NewDistributedSearcher(config, shardManager)

// Monitor performance
perfStats := ds.GetPerformanceStats()
fmt.Printf("Search performance:\n")
fmt.Printf("  Average latency: %v\n", perfStats.AverageLatency)
fmt.Printf("  Queries per second: %.2f\n", perfStats.QPS)
fmt.Printf("  Memory usage: %.2f MB\n",
    float64(perfStats.MemoryUsage)/(1024*1024))

// Query optimization
optimizedQuery := ds.OptimizeQuery(query)
fmt.Printf("Original query complexity: %d\n", query.Complexity())
fmt.Printf("Optimized query complexity: %d\n", optimizedQuery.Complexity())

// Memory cleanup when usage approaches the configured limit
if float64(perfStats.MemoryUsage) > float64(config.MemoryLimit)*0.8 {
    ds.TriggerGC()
}
```
Based on comprehensive benchmarking on Apple M2 Pro:
| Operation | Performance | Memory Usage | Notes |
|---|---|---|---|
| Simple search (1 shard) | ~5.2ms | 128KB | Single term query |
| Complex search (4 shards) | ~18ms | 512KB | Multiple conditions |
| Faceted search (10 facets) | ~25ms | 768KB | With aggregations |
| Cache hit (simple) | ~45µs | 2KB | In-memory lookup |
| Cache hit (complex) | ~120µs | 8KB | Complex result deserialization |
| Query building | ~2.1µs | 448B | QueryBuilder operations |
| Result aggregation | ~850µs | 64KB | Cross-shard merging |
| Scenario | Queries/sec | Memory Peak | CPU Usage | Cache Hit Rate |
|---|---|---|---|---|
| Simple queries | ~4,500 | 256MB | 45% | 85% |
| Complex queries | ~1,200 | 512MB | 75% | 70% |
| Faceted queries | ~800 | 768MB | 85% | 60% |
| Mixed workload | ~2,800 | 640MB | 65% | 78% |
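These figures can be reproduced with Go's standard benchmark harness. A minimal sketch, assuming a hypothetical `newTestShardManager` helper that provisions shards seeded with sample log entries:

```go
// BenchmarkSimpleSearch measures single-term query latency end to end.
// newTestShardManager is a hypothetical helper that seeds test shards.
func BenchmarkSimpleSearch(b *testing.B) {
    ds := NewDistributedSearcher(DefaultSearcherConfig(), newTestShardManager(b))
    defer ds.Close()

    query := &SearchQuery{Query: "GET", Limit: 10}

    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        if _, err := ds.Search(context.Background(), query); err != nil {
            b.Fatal(err)
        }
    }
}
```

Run with `go test -bench=SimpleSearch -benchmem ./internal/nginx_log/searcher/` to obtain ns/op and allocation figures comparable to the tables above.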
Concurrency Configuration

```go
// High-throughput environments
config.MaxConcurrentSearches = runtime.NumCPU() * 50

// Memory-constrained environments
config.MaxConcurrentSearches = runtime.NumCPU() * 10

// Latency-sensitive applications
config.MaxConcurrentSearches = runtime.NumCPU() * 20
```

Cache Optimization

```go
// High cache hit rate scenarios
config.CacheSize = 50000
config.CacheTTL = 15 * time.Minute

// Memory-limited environments
config.CacheSize = 5000
config.CacheTTL = 2 * time.Minute

// Real-time analytics
config.CacheSize = 20000
config.CacheTTL = 30 * time.Second
```

Search Timeout Tuning

```go
// Real-time dashboards
config.SearchTimeout = 5 * time.Second

// Batch analytics
config.SearchTimeout = 60 * time.Second

// Interactive exploration
config.SearchTimeout = 15 * time.Second
```
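A query can also carry its own timeout via the `Timeout` hint on `SearchQuery` (see the type definition at the end of this document). A minimal sketch, assuming the searcher honors the hint, with the context deadline as the hard upper bound:

```go
// Per-query timeout: the Timeout field on SearchQuery is a performance
// hint (assumed to be honored by the searcher); the context deadline
// remains the hard upper bound.
query := &SearchQuery{
    Query:   "error",
    Timeout: 2 * time.Second, // tighter than the global SearchTimeout
}

ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()

results, err := ds.Search(ctx, query)
```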
```go
// Simple text search
query := &SearchQuery{Query: "error"}

// Phrase search
query = &SearchQuery{Query: "\"internal server error\""}

// Wildcard search
query = &SearchQuery{Query: "api/users/*"}

// Field-specific search
query = &SearchQuery{Query: "path:/api/users"}

// Boolean operators
query = &SearchQuery{Query: "error AND status:500"}
query = &SearchQuery{Query: "GET OR POST"}
query = &SearchQuery{Query: "error NOT status:404"}
```
```go
// Numeric ranges
qb.AddRangeQuery("status", 200, 299)       // HTTP 2xx
qb.AddRangeQuery("bytes_sent", 1024, nil)  // Large responses
qb.AddRangeQuery("request_time", nil, 1.0) // Fast requests

// Time ranges
qb.AddRangeQuery("timestamp", startTime, endTime)

// Open-ended ranges
qb.AddRangeQuery("status", 400, nil)       // 400+
qb.AddRangeQuery("request_time", nil, 0.1) // <= 100ms
```
```go
// Exact term matching
qb.AddTermQuery("method", "GET")
qb.AddTermQuery("status", "404")
qb.AddTermQuery("browser", "Chrome")

// Multiple terms (OR)
qb.AddTermsQuery("status", []string{"200", "201", "202"})
qb.AddTermsQuery("method", []string{"GET", "HEAD"})
```
```go
// CIDR notation
qb.AddIPRangeQuery("ip", "192.168.1.0/24")
qb.AddIPRangeQuery("ip", "10.0.0.0/8")

// IP range
qb.AddIPRangeQuery("ip", "192.168.1.1-192.168.1.100")

// Single IP
qb.AddTermQuery("ip", "192.168.1.1")
```
```go
// Country-based filtering
qb.AddTermQuery("region_code", "US")
qb.AddTermsQuery("region_code", []string{"US", "CA", "MX"})

// Regional analysis
qb.AddTermQuery("province", "California")
qb.AddTermQuery("city", "San Francisco")
```
```go
qb := NewQueryBuilder()

// Must conditions (AND)
qb.AddMustQuery(func(sub *QueryBuilder) {
    sub.AddRangeQuery("timestamp", startTime, endTime)
    sub.AddTermQuery("method", "POST")
    sub.AddRangeQuery("status", 200, 299)
})

// Should conditions (OR)
qb.AddShouldQuery(func(sub *QueryBuilder) {
    sub.AddTermQuery("browser", "Chrome")
    sub.AddTermQuery("browser", "Safari")
    sub.AddTermQuery("browser", "Firefox")
})

// Must-not conditions (NOT)
qb.AddMustNotQuery(func(sub *QueryBuilder) {
    sub.AddTermQuery("path", "/health")
    sub.AddTermQuery("path", "/metrics")
    sub.AddPrefixQuery("user_agent", "bot")
})

query := qb.Build()
```
```go
// Hourly request distribution
query := &SearchQuery{
    Query:     "*",
    StartTime: time.Now().Add(-24 * time.Hour).Unix(),
    EndTime:   time.Now().Unix(),
    Facets: []FacetRequest{
        {
            Name:     "hourly_requests",
            Field:    "timestamp",
            Size:     24,
            Interval: 3600, // 1-hour buckets
        },
    },
}

results, _ := ds.Search(context.Background(), query)

// Plot the time series
timeSeries := results.Facets["hourly_requests"]
for _, bucket := range timeSeries.Buckets {
    hour := time.Unix(int64(bucket.KeyAsNumber), 0)
    fmt.Printf("%s: %d requests\n", hour.Format("15:04"), bucket.Count)
}
```
```go
// Comprehensive error analysis
errorQuery := &SearchQuery{
    Query:     "status:[400 TO 599]",
    StartTime: time.Now().Add(-1 * time.Hour).Unix(),
    EndTime:   time.Now().Unix(),
    Facets: []FacetRequest{
        {Name: "error_codes", Field: "status", Size: 20},
        {Name: "error_paths", Field: "path_exact", Size: 50},
        {Name: "error_ips", Field: "ip", Size: 100},
        {Name: "error_browsers", Field: "browser", Size: 20},
    },
}

results, _ := ds.Search(context.Background(), errorQuery)

// Error rate calculation
totalQuery := &SearchQuery{
    Query:     "*", // Match all
    StartTime: errorQuery.StartTime,
    EndTime:   errorQuery.EndTime,
    Limit:     0, // Count only
}
totalResults, _ := ds.Search(context.Background(), totalQuery)

errorRate := float64(results.Total) / float64(totalResults.Total) * 100
fmt.Printf("Error rate: %.2f%% (%d/%d)\n",
    errorRate, results.Total, totalResults.Total)

// Top error endpoints
errorPaths := results.Facets["error_paths"]
fmt.Println("Most problematic endpoints:")
for i, bucket := range errorPaths.Buckets {
    if i >= 10 {
        break
    }
    fmt.Printf("  %s: %d errors\n", bucket.Key, bucket.Count)
}
```
```go
// Request performance analysis
perfQuery := &SearchQuery{
    Query:     "*",
    StartTime: time.Now().Add(-1 * time.Hour).Unix(),
    EndTime:   time.Now().Unix(),
    SortBy: []SortField{
        {Field: "request_time", Descending: true},
    },
    Limit: 100, // Top 100 slowest requests
}

slowRequests, _ := ds.Search(context.Background(), perfQuery)

fmt.Println("Slowest requests:")
for i, hit := range slowRequests.Hits {
    entry := hit.Fields
    fmt.Printf("%d. %s %s - %.3fs\n",
        i+1, entry.Method, entry.Path, entry.RequestTime)
}

// Performance distribution
perfDistQuery := &SearchQuery{
    Query:     "*",
    StartTime: perfQuery.StartTime,
    EndTime:   perfQuery.EndTime,
    Facets: []FacetRequest{
        {
            Name:  "perf_buckets",
            Field: "request_time",
            Size:  10,
            Ranges: []FacetRange{
                {From: 0, To: 0.1},   // < 100ms
                {From: 0.1, To: 0.5}, // 100ms - 500ms
                {From: 0.5, To: 1.0}, // 500ms - 1s
                {From: 1.0, To: 5.0}, // 1s - 5s
                {From: 5.0, To: nil}, // > 5s
            },
        },
    },
}

perfResults, _ := ds.Search(context.Background(), perfDistQuery)

perfBuckets := perfResults.Facets["perf_buckets"]
fmt.Println("Performance distribution:")
labels := []string{"<100ms", "100ms-500ms", "500ms-1s", "1s-5s", ">5s"}
for i, bucket := range perfBuckets.Buckets {
    percentage := float64(bucket.Count) / float64(perfResults.Total) * 100
    fmt.Printf("  %s: %d requests (%.2f%%)\n",
        labels[i], bucket.Count, percentage)
}
```
```go
// Geographic request distribution
geoQuery := &SearchQuery{
    Query:     "*",
    StartTime: time.Now().Add(-24 * time.Hour).Unix(),
    EndTime:   time.Now().Unix(),
    Facets: []FacetRequest{
        {Name: "countries", Field: "region_code", Size: 50},
        {Name: "states", Field: "province", Size: 50},
        {Name: "cities", Field: "city", Size: 100},
    },
}

geoResults, _ := ds.Search(context.Background(), geoQuery)

// Top countries
countries := geoResults.Facets["countries"]
fmt.Println("Top countries by request volume:")
for i, bucket := range countries.Buckets {
    if i >= 10 {
        break
    }
    percentage := float64(bucket.Count) / float64(geoResults.Total) * 100
    fmt.Printf("  %s: %d requests (%.2f%%)\n",
        bucket.Key, bucket.Count, percentage)
}

// US state breakdown for US traffic
usQuery := &SearchQuery{
    Query:     "region_code:US",
    StartTime: geoQuery.StartTime,
    EndTime:   geoQuery.EndTime,
    Facets: []FacetRequest{
        {Name: "us_states", Field: "province", Size: 50},
    },
}

usResults, _ := ds.Search(context.Background(), usQuery)

usStates := usResults.Facets["us_states"]
fmt.Println("US traffic by state:")
for i, bucket := range usStates.Buckets {
    if i >= 10 {
        break
    }
    percentage := float64(bucket.Count) / float64(usResults.Total) * 100
    fmt.Printf("  %s: %d requests (%.2f%%)\n",
        bucket.Key, bucket.Count, percentage)
}
```
```go
var (
    ErrSearchTimeout    = errors.New("search operation timed out")
    ErrTooManyResults   = errors.New("result set too large")
    ErrInvalidQuery     = errors.New("invalid search query")
    ErrShardUnavailable = errors.New("one or more shards unavailable")
    ErrCacheError       = errors.New("cache operation failed")
    ErrInvalidFacet     = errors.New("invalid facet configuration")
    ErrMemoryLimit      = errors.New("memory limit exceeded")
    ErrConcurrencyLimit = errors.New("concurrent search limit exceeded")
)
```
```go
// Retry with backoff for temporary failures
func searchWithRetry(ds *DistributedSearcher, query *SearchQuery, maxRetries int) (*SearchResult, error) {
    for attempt := 0; attempt < maxRetries; attempt++ {
        ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
        result, err := ds.Search(ctx, query)
        cancel()

        if err == nil {
            return result, nil
        }

        // Check whether the error is retryable
        if isRetryableSearchError(err) {
            backoff := time.Duration(attempt+1) * time.Second
            log.Printf("Search attempt %d failed: %v, retrying in %v",
                attempt+1, err, backoff)
            time.Sleep(backoff)
            continue
        }

        // Non-retryable error
        return nil, fmt.Errorf("search failed: %w", err)
    }
    return nil, fmt.Errorf("max search retries (%d) exceeded", maxRetries)
}

func isRetryableSearchError(err error) bool {
    errStr := err.Error()
    return strings.Contains(errStr, "timeout") ||
        strings.Contains(errStr, "temporary") ||
        strings.Contains(errStr, "unavailable")
}
```
```go
// Fall back to a simplified search if the complex query fails
func searchWithFallback(ds *DistributedSearcher, query *SearchQuery) (*SearchResult, error) {
    // Try the original query first
    result, err := ds.Search(context.Background(), query)
    if err == nil {
        return result, nil
    }

    log.Printf("Complex search failed: %v, trying simplified query", err)

    // Create a simplified fallback query
    fallbackQuery := &SearchQuery{
        Query:     query.Query,
        StartTime: query.StartTime,
        EndTime:   query.EndTime,
        Limit:     min(query.Limit, 1000), // Reduce limit
        // Complex facets and sorts removed
    }

    result, err = ds.Search(context.Background(), fallbackQuery)
    if err != nil {
        return nil, fmt.Errorf("both original and fallback searches failed: %w", err)
    }

    // Mark the result as degraded
    result.Degraded = true
    result.DegradationReason = "Simplified due to original query failure"
    return result, nil
}
```
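The two strategies compose naturally: retry transient failures first, then degrade to a simplified query. A minimal sketch built from the two helpers above:

```go
// searchResilient retries transient failures, then degrades gracefully.
// Composed from searchWithRetry and searchWithFallback defined above.
func searchResilient(ds *DistributedSearcher, query *SearchQuery) (*SearchResult, error) {
    if result, err := searchWithRetry(ds, query, 3); err == nil {
        return result, nil
    }
    return searchWithFallback(ds, query)
}
```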
```go
package main

import (
    "context"
    "fmt"
    "log"
    "strconv"
    "time"

    "github.com/0xJacky/Nginx-UI/internal/nginx_log/searcher"
)

func main() {
    // Initialize the searcher (shardManager is assumed to be set up elsewhere)
    ds := searcher.NewDistributedSearcher(&searcher.SearcherConfig{
        EnableCache:           true,
        CacheSize:             20000,
        CacheTTL:              30 * time.Second, // Short TTL for real-time data
        MaxConcurrentSearches: 200,
        SearchTimeout:         5 * time.Second, // Fast response for dashboards
    }, shardManager)

    // Dashboard update interval
    ticker := time.NewTicker(10 * time.Second)
    defer ticker.Stop()

    for range ticker.C {
        dashboard := generateDashboard(ds)
        updateDashboard(dashboard)
    }
}

func generateDashboard(ds *searcher.DistributedSearcher) *Dashboard {
    now := time.Now()
    startTime := now.Add(-5 * time.Minute).Unix() // Last 5 minutes
    endTime := now.Unix()

    // Real-time metrics query
    metricsQuery := &searcher.SearchQuery{
        Query:     "*",
        StartTime: startTime,
        EndTime:   endTime,
        Limit:     0, // Count only
        Facets: []searcher.FacetRequest{
            {Name: "status_codes", Field: "status", Size: 10},
            {Name: "top_ips", Field: "ip", Size: 20},
            {Name: "request_methods", Field: "method", Size: 10},
            {Name: "response_sizes", Field: "bytes_sent", Size: 10,
                Ranges: []searcher.FacetRange{
                    {From: 0, To: 1024},         // < 1KB
                    {From: 1024, To: 10240},     // 1KB - 10KB
                    {From: 10240, To: 102400},   // 10KB - 100KB
                    {From: 102400, To: 1048576}, // 100KB - 1MB
                    {From: 1048576, To: nil},    // > 1MB
                }},
        },
    }

    results, err := ds.Search(context.Background(), metricsQuery)
    if err != nil {
        log.Printf("Dashboard query failed: %v", err)
        return &Dashboard{Error: err}
    }

    // Calculate metrics
    requestRate := float64(results.Total) / 300.0 // requests per second over 5 minutes

    // Error rate calculation
    errorCount := int64(0)
    statusFacet := results.Facets["status_codes"]
    for _, bucket := range statusFacet.Buckets {
        if status, err := strconv.Atoi(bucket.Key); err == nil && status >= 400 {
            errorCount += bucket.Count
        }
    }
    errorRate := float64(errorCount) / float64(results.Total) * 100

    return &Dashboard{
        Timestamp:     now,
        RequestRate:   requestRate,
        ErrorRate:     errorRate,
        TotalRequests: results.Total,
        StatusCodes:   statusFacet.Buckets,
        TopIPs:        results.Facets["top_ips"].Buckets,
        Methods:       results.Facets["request_methods"].Buckets,
        ResponseSizes: results.Facets["response_sizes"].Buckets,
    }
}

type Dashboard struct {
    Timestamp     time.Time
    RequestRate   float64
    ErrorRate     float64
    TotalRequests int64
    StatusCodes   []searcher.FacetBucket
    TopIPs        []searcher.FacetBucket
    Methods       []searcher.FacetBucket
    ResponseSizes []searcher.FacetBucket
    Error         error
}

func updateDashboard(dashboard *Dashboard) {
    if dashboard.Error != nil {
        fmt.Printf("Dashboard error: %v\n", dashboard.Error)
        return
    }

    fmt.Printf("\n=== NGINX Dashboard (%s) ===\n",
        dashboard.Timestamp.Format("2006-01-02 15:04:05"))
    fmt.Printf("Request rate: %.2f req/sec\n", dashboard.RequestRate)
    fmt.Printf("Error rate: %.2f%%\n", dashboard.ErrorRate)
    fmt.Printf("Total requests (5m): %d\n", dashboard.TotalRequests)

    fmt.Println("\nStatus codes:")
    for _, bucket := range dashboard.StatusCodes {
        percentage := float64(bucket.Count) / float64(dashboard.TotalRequests) * 100
        fmt.Printf("  %s: %d (%.1f%%)\n", bucket.Key, bucket.Count, percentage)
    }

    fmt.Println("\nTop IPs:")
    for i, bucket := range dashboard.TopIPs {
        if i >= 5 {
            break
        }
        fmt.Printf("  %s: %d requests\n", bucket.Key, bucket.Count)
    }
}
```
```go
// Searcher defines the main search interface
type Searcher interface {
    Search(ctx context.Context, query *SearchQuery) (*SearchResult, error)
    GetPerformanceStats() *PerformanceStats
    GetCacheStats() *CacheStats
    OptimizeQuery(query *SearchQuery) *SearchQuery
    TriggerGC()
    Close() error
}
```
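Because callers can depend on the Searcher interface rather than the concrete DistributedSearcher, tests can substitute a stub. A minimal sketch (stubSearcher is illustrative, not part of the package):

```go
// stubSearcher is an illustrative test double for the Searcher interface.
type stubSearcher struct {
    result *SearchResult
}

func (s *stubSearcher) Search(ctx context.Context, q *SearchQuery) (*SearchResult, error) {
    return s.result, nil
}
func (s *stubSearcher) GetPerformanceStats() *PerformanceStats    { return &PerformanceStats{} }
func (s *stubSearcher) GetCacheStats() *CacheStats                { return &CacheStats{} }
func (s *stubSearcher) OptimizeQuery(q *SearchQuery) *SearchQuery { return q }
func (s *stubSearcher) TriggerGC()                                {}
func (s *stubSearcher) Close() error                              { return nil }
```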
```go
// SearchQuery represents a search request
type SearchQuery struct {
    // Text query
    Query string `json:"query"`

    // Time range
    StartTime int64 `json:"start_time"`
    EndTime   int64 `json:"end_time"`

    // Pagination
    Limit  int `json:"limit"`
    Offset int `json:"offset"`

    // Sorting
    SortBy []SortField `json:"sort_by"`

    // Aggregations
    Facets []FacetRequest `json:"facets"`

    // Performance hints
    Timeout  time.Duration `json:"timeout"`
    Priority int           `json:"priority"`
}

// SearchResult represents a search response
type SearchResult struct {
    // Results
    Hits     []*SearchHit `json:"hits"`
    Total    int64        `json:"total"`
    MaxScore float64      `json:"max_score"`

    // Aggregations
    Facets map[string]*Facet `json:"facets"`

    // Performance
    Duration     time.Duration `json:"duration"`
    ShardResults []ShardResult `json:"shard_results"`

    // Quality indicators
    Degraded          bool   `json:"degraded"`
    DegradationReason string `json:"degradation_reason,omitempty"`
}
```
This documentation covers the searcher package end to end: advanced query capabilities, performance optimization, real-time analytics, and practical integration examples.
Latest benchmark results on Apple M2 Pro (August 25, 2025):
| Operation | Rate | ns/op | B/op | allocs/op | Notes |
|---|---|---|---|---|---|
| CacheKeyGeneration | 1.2M ops/sec | 990.2 | 496 | 3 | Optimized string building |
| Cache Put | 389K ops/sec | 3,281 | 873 | 14 | Ristretto backend with compression |
| Cache Get | 1.2M ops/sec | 992.6 | 521 | 4 | High-speed cache retrieval |
These optimizations reduced allocations in cache operations by roughly 60%.
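Allocation figures like these come from running benchmarks with `-benchmem`. A minimal sketch of such a cache micro-benchmark, where the constructor and method names (NewOptimizedCache, Put, Get) are assumptions for illustration rather than the package's confirmed API:

```go
// BenchmarkCacheGet measures hit latency and allocations per lookup.
// NewOptimizedCache, Put, and Get are assumed names for illustration.
func BenchmarkCacheGet(b *testing.B) {
    cache := NewOptimizedCache(10000, 5*time.Minute)
    cache.Put("query:GET /api:0-100", &SearchResult{Total: 42})

    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        if _, ok := cache.Get("query:GET /api:0-100"); !ok {
            b.Fatal("expected cache hit")
        }
    }
}
```

Run with `go test -bench=CacheGet -benchmem` to reproduce the B/op and allocs/op columns.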