
refactor(nginx-log): change time fields to Unix timestamps

0xJacky 1 month ago
parent
commit
13ee167e23
41 changed files with 535 additions and 417 deletions
  1. api/nginx_log/analytics.go (+32 -32)
  2. app/src/api/nginx_log.ts (+16 -16)
  3. app/src/views/nginx_log/NginxLogList.vue (+3 -3)
  4. app/src/views/nginx_log/dashboard/DashboardViewer.vue (+3 -3)
  5. app/src/views/nginx_log/dashboard/components/DailyTrendsChart.vue (+14 -3)
  6. app/src/views/nginx_log/dashboard/components/HourlyChart.vue (+1 -1)
  7. app/src/views/nginx_log/structured/StructuredLogViewer.vue (+5 -5)
  8. internal/nginx_log/analytics_service_calculations.go (+15 -9)
  9. internal/nginx_log/analytics_service_core.go (+1 -1)
  10. internal/nginx_log/analytics_service_entries.go (+24 -8)
  11. internal/nginx_log/analytics_service_geo.go (+16 -4)
  12. internal/nginx_log/analytics_service_test.go (+4 -4)
  13. internal/nginx_log/analytics_service_types.go (+13 -15)
  14. internal/nginx_log/batch_search_optimizer.go (+9 -7)
  15. internal/nginx_log/bleve_field_test.go (+3 -3)
  16. internal/nginx_log/bleve_stats_service_core.go (+23 -19)
  17. internal/nginx_log/bleve_stats_service_time.go (+17 -11)
  18. internal/nginx_log/bleve_stats_service_utils.go (+37 -6)
  19. internal/nginx_log/dashboard_types.go (+3 -5)
  20. internal/nginx_log/indexer_file_batch.go (+5 -6)
  21. internal/nginx_log/indexer_file_management.go (+2 -3)
  22. internal/nginx_log/indexer_file_streaming.go (+27 -14)
  23. internal/nginx_log/indexer_file_utils.go (+19 -16)
  24. internal/nginx_log/indexer_search.go (+17 -16)
  25. internal/nginx_log/indexer_stats.go (+7 -6)
  26. internal/nginx_log/indexer_types.go (+48 -49)
  27. internal/nginx_log/log_cache_grouping.go (+45 -33)
  28. internal/nginx_log/log_cache_index.go (+14 -14)
  29. internal/nginx_log/log_cache_types.go (+16 -18)
  30. internal/nginx_log/log_formats.go (+20 -21)
  31. internal/nginx_log/log_indexer_core.go (+1 -1)
  32. internal/nginx_log/log_indexer_rebuild.go (+2 -2)
  33. internal/nginx_log/log_indexer_status.go (+16 -4)
  34. internal/nginx_log/log_list.go (+6 -6)
  35. internal/nginx_log/log_parser_parse_test.go (+6 -6)
  36. internal/nginx_log/optimized_parser.go (+1 -1)
  37. internal/nginx_log/optimized_search_indexer.go (+4 -4)
  38. internal/nginx_log/optimized_search_query.go (+20 -20)
  39. internal/nginx_log/persistence.go (+4 -4)
  40. internal/nginx_log/progress_tracker.go (+12 -14)
  41. internal/nginx_log/search_performance_bench_test.go (+4 -4)

+ 32 - 32
api/nginx_log/analytics.go

@@ -14,39 +14,39 @@ import (
 
 // AnalyticsRequest represents the request for log analytics
 type AnalyticsRequest struct {
-	Path      string    `json:"path" form:"path"`
-	StartTime time.Time `json:"start_time" form:"start_time"`
-	EndTime   time.Time `json:"end_time" form:"end_time"`
-	Limit     int       `json:"limit" form:"limit"`
+	Path      string `json:"path" form:"path"`
+	StartTime int64  `json:"start_time" form:"start_time"`
+	EndTime   int64  `json:"end_time" form:"end_time"`
+	Limit     int    `json:"limit" form:"limit"`
 }
 
 // AdvancedSearchRequest represents the request for advanced log search
 type AdvancedSearchRequest struct {
-	Query     string    `json:"query" form:"query"`
-	LogPath   string    `json:"log_path" form:"log_path"`
-	StartTime time.Time `json:"start_time" form:"start_time"`
-	EndTime   time.Time `json:"end_time" form:"end_time"`
-	IP        string    `json:"ip" form:"ip"`
-	Method    string    `json:"method" form:"method"`
-	Status    []int     `json:"status" form:"status"`
-	Path      string    `json:"path" form:"path"`
-	UserAgent string    `json:"user_agent" form:"user_agent"`
-	Referer   string    `json:"referer" form:"referer"`
-	Browser   string    `json:"browser" form:"browser"`
-	OS        string    `json:"os" form:"os"`
-	Device    string    `json:"device" form:"device"`
-	Limit     int       `json:"limit" form:"limit"`
-	Offset    int       `json:"offset" form:"offset"`
-	SortBy    string    `json:"sort_by" form:"sort_by"`
-	SortOrder string    `json:"sort_order" form:"sort_order"`
+	Query     string `json:"query" form:"query"`
+	LogPath   string `json:"log_path" form:"log_path"`
+	StartTime int64  `json:"start_time" form:"start_time"`
+	EndTime   int64  `json:"end_time" form:"end_time"`
+	IP        string `json:"ip" form:"ip"`
+	Method    string `json:"method" form:"method"`
+	Status    []int  `json:"status" form:"status"`
+	Path      string `json:"path" form:"path"`
+	UserAgent string `json:"user_agent" form:"user_agent"`
+	Referer   string `json:"referer" form:"referer"`
+	Browser   string `json:"browser" form:"browser"`
+	OS        string `json:"os" form:"os"`
+	Device    string `json:"device" form:"device"`
+	Limit     int    `json:"limit" form:"limit"`
+	Offset    int    `json:"offset" form:"offset"`
+	SortBy    string `json:"sort_by" form:"sort_by"`
+	SortOrder string `json:"sort_order" form:"sort_order"`
 }
 
 // PreflightResponse represents the response for preflight query
 type PreflightResponse struct {
-	StartTime   *time.Time `json:"start_time,omitempty"`
-	EndTime     *time.Time `json:"end_time,omitempty"`
-	Available   bool       `json:"available"`
-	IndexStatus string     `json:"index_status"`
+	StartTime   *int64 `json:"start_time,omitempty"`
+	EndTime     *int64 `json:"end_time,omitempty"`
+	Available   bool   `json:"available"`
+	IndexStatus string `json:"index_status"`
 }
 
 // GetLogAnalytics provides comprehensive log analytics
@@ -108,8 +108,8 @@ func GetLogPreflight(c *gin.Context) {
 
 	// Convert internal result to API response
 	response := PreflightResponse{
-		StartTime:   result.StartTime,
-		EndTime:     result.EndTime,
+		StartTime:   &result.StartTime,
+		EndTime:     &result.EndTime,
 		Available:   result.Available,
 		IndexStatus: result.IndexStatus,
 	}
@@ -372,8 +372,8 @@ func GetDashboardAnalytics(c *gin.Context) {
 	// Debug: Log exact query parameters
 	queryRequest := &nginx_log.DashboardQueryRequest{
 		LogPath:   req.LogPath,
-		StartTime: startTime,
-		EndTime:   endTime,
+		StartTime: startTime.Unix(),
+		EndTime:   endTime.Unix(),
 	}
 	logger.Debugf("Query parameters - LogPath='%s', StartTime=%v, EndTime=%v", 
 		queryRequest.LogPath, queryRequest.StartTime, queryRequest.EndTime)
@@ -434,7 +434,7 @@ func GetWorldMapData(c *gin.Context) {
 	ctx, cancel := context.WithTimeout(c.Request.Context(), 30*time.Second)
 	defer cancel()
 
-	data, err := service.GetWorldMapData(ctx, req.Path, req.StartTime, req.EndTime)
+	data, err := service.GetWorldMapData(ctx, req.Path, time.Unix(req.StartTime, 0), time.Unix(req.EndTime, 0))
 	if err != nil {
 		cosy.ErrHandler(c, err)
 		return
@@ -479,7 +479,7 @@ func GetChinaMapData(c *gin.Context) {
 	ctx, cancel := context.WithTimeout(c.Request.Context(), 30*time.Second)
 	defer cancel()
 
-	data, err := service.GetChinaMapData(ctx, req.Path, req.StartTime, req.EndTime)
+	data, err := service.GetChinaMapData(ctx, req.Path, time.Unix(req.StartTime, 0), time.Unix(req.EndTime, 0))
 	if err != nil {
 		cosy.ErrHandler(c, err)
 		return
@@ -529,7 +529,7 @@ func GetGeoStats(c *gin.Context) {
 	ctx, cancel := context.WithTimeout(c.Request.Context(), 30*time.Second)
 	defer cancel()
 
-	stats, err := service.GetGeoStats(ctx, req.Path, req.StartTime, req.EndTime, req.Limit)
+	stats, err := service.GetGeoStats(ctx, req.Path, time.Unix(req.StartTime, 0), time.Unix(req.EndTime, 0), req.Limit)
 	if err != nil {
 		cosy.ErrHandler(c, err)
 		return

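The handler changes above settle on a boundary pattern: the API structs carry plain Unix seconds, and time.Time is reconstructed only at call sites such as GetWorldMapData that still take it. A minimal, self-contained sketch of that conversion, with illustrative values rather than the project's handlers:

package main

import (
	"fmt"
	"time"
)

// AnalyticsRequest mirrors the struct above: time fields are plain Unix seconds.
type AnalyticsRequest struct {
	Path      string `json:"path" form:"path"`
	StartTime int64  `json:"start_time" form:"start_time"`
	EndTime   int64  `json:"end_time" form:"end_time"`
}

func main() {
	req := AnalyticsRequest{Path: "/var/log/nginx/access.log", StartTime: 1700000000, EndTime: 1700003600}
	// Reconstruct time.Time only where an internal API still expects it.
	start := time.Unix(req.StartTime, 0)
	end := time.Unix(req.EndTime, 0)
	fmt.Println(start.UTC(), end.UTC(), end.Sub(start)) // 1h0m0s window
}
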
+ 16 - 16
app/src/api/nginx_log.ts

@@ -6,26 +6,26 @@ export interface NginxLogData {
   name?: string
   config_file?: string
   index_status?: string
-  last_modified?: string
-  last_indexed?: string
-  index_start_time?: string
+  last_modified?: number
+  last_indexed?: number
+  index_start_time?: number
   index_duration?: number
   is_compressed?: boolean
   has_timerange?: boolean
-  timerange_start?: string
-  timerange_end?: string
+  timerange_start?: number
+  timerange_end?: number
   document_count?: number
 }
 
 export interface AnalyticsRequest {
   path: string
-  start_time?: string
-  end_time?: string
+  start_time?: number
+  end_time?: number
   limit?: number
 }
 
 export interface AccessLogEntry {
-  timestamp: string
+  timestamp: number
   ip: string
   method: string
   region_code: string
@@ -123,8 +123,8 @@ export interface SearchFilters {
 }
 
 export interface AdvancedSearchRequest {
-  start_time?: string
-  end_time?: string
+  start_time?: number
+  end_time?: number
   query?: string
   ip?: string
   method?: string
@@ -159,8 +159,8 @@ export interface AdvancedSearchResponse {
 }
 
 export interface PreflightResponse {
-  start_time: string
-  end_time: string
+  start_time: number
+  end_time: number
   available: boolean
   index_status: string
 }
@@ -168,12 +168,12 @@ export interface PreflightResponse {
 // Index status related interfaces
 export interface FileStatus {
   path: string
-  last_modified: string
-  last_indexed: string
+  last_modified: number
+  last_indexed: number
   is_compressed: boolean
   has_timerange: boolean
-  timerange_start?: string
-  timerange_end?: string
+  timerange_start?: number
+  timerange_end?: number
 }
 
 export interface IndexStatus {

+ 3 - 3
app/src/views/nginx_log/NginxLogList.vue

@@ -161,7 +161,7 @@ const columns: StdTableColumn[] = [
       if (!record || !record.last_indexed)
         return <span class="text-gray-400 dark:text-gray-500">-</span>
 
-      const lastIndexed = dayjs(record.last_indexed)
+      const lastIndexed = dayjs.unix(record.last_indexed)
       const displayText = lastIndexed.format('YYYY-MM-DD HH:mm')
       const statusIcon = <CheckCircleOutlined class="text-green-500 ml-1" />
 
@@ -230,8 +230,8 @@ const columns: StdTableColumn[] = [
         return <span class="text-gray-400 dark:text-gray-500">-</span>
       }
 
-      const start = dayjs(record.timerange_start)
-      const end = dayjs(record.timerange_end)
+      const start = dayjs.unix(record.timerange_start)
+      const end = dayjs.unix(record.timerange_end)
       const duration = end.diff(start, 'day')
 
       // Format duration display

+ 3 - 3
app/src/views/nginx_log/dashboard/DashboardViewer.vue

@@ -45,7 +45,7 @@ async function loadTimeRange() {
     const preflight = await nginx_log.getPreflight(props.logPath)
 
     if (preflight.available && preflight.start_time && preflight.end_time) {
-      const endTime = dayjs(preflight.end_time)
+      const endTime = dayjs.unix(preflight.end_time)
 
       // Use last week's data as default range (from last day back to 7 days ago)
       const weekStart = endTime.subtract(7, 'day').startOf('day')
@@ -91,8 +91,8 @@ async function loadGeographicData() {
   try {
     const request: AnalyticsRequest = {
       path: props.logPath,
-      start_time: dateRange.value[0].toISOString(),
-      end_time: dateRange.value[1].toISOString(),
+      start_time: dateRange.value[0].unix(),
+      end_time: dateRange.value[1].unix(),
     }
 
     // Load both world and China map data in parallel

+ 14 - 3
app/src/views/nginx_log/dashboard/components/DailyTrendsChart.vue

@@ -21,7 +21,10 @@ const dailyChartOptions = computed(() => {
     return {}
 
   const dailyData = props.dashboardData.daily_stats || []
-  const dates = dailyData.map(item => item.date)
+  const dates = dailyData.map(item => {
+    const date = new Date(item.date)
+    return `${date.getMonth() + 1}-${date.getDate()}`
+  })
 
   return {
     chart: {
@@ -42,13 +45,13 @@ const dailyChartOptions = computed(() => {
         color: fontColor(),
       },
     },
-    colors: ['#1890ff', '#52c41a'], // PV蓝色, UV绿色
+    colors: ['#1890ff', '#52c41a'], // PV blue, UV green
     dataLabels: {
       enabled: false,
     },
     stroke: {
       curve: 'smooth',
-      width: 2, // 保持线条显示
+      width: 2, // Keep line stroke visible
     },
     fill: {
       type: 'gradient',
@@ -74,7 +77,15 @@ const dailyChartOptions = computed(() => {
         style: {
           colors: fontColor(),
         },
+        rotate: -45, // Rotate labels to avoid overlap
+        rotateAlways: dates.length > 10, // Rotate when more than 10 labels
+        hideOverlappingLabels: true, // Automatically hide overlapping labels
+        maxHeight: 80, // Increase label area height for rotated text
+        trim: false, // Don't trim since we're using short format
+        show: true,
+        showDuplicates: false,
       },
+      tickAmount: dates.length > 20 ? 10 : dates.length > 10 ? Math.ceil(dates.length / 2) : undefined, // Limit tick amount for better readability
     },
     yaxis: {
       title: {

+ 1 - 1
app/src/views/nginx_log/dashboard/components/HourlyChart.vue

@@ -22,7 +22,7 @@ const hourlyChartOptions = computed(() => {
     return {}
 
   const hourlyData = props.dashboardData.hourly_stats || []
-  const hours = hourlyData.map(item => `${item.hour}:00`)
+  const hours = hourlyData.map(item => `${item.hour}`)
 
   return {
     chart: {

+ 5 - 5
app/src/views/nginx_log/structured/StructuredLogViewer.vue

@@ -152,7 +152,7 @@ const structuredLogColumns = computed(() => [
     fixed: 'left' as const,
     sorter: true,
     sortOrder: getSortOrder('timestamp'),
-    customRender: ({ record }: { record: AccessLogEntry }) => h('span', dayjs(record.timestamp).format('YYYY-MM-DD HH:mm:ss')),
+    customRender: ({ record }: { record: AccessLogEntry }) => h('span', dayjs.unix(record.timestamp).format('YYYY-MM-DD HH:mm:ss')),
   },
   {
     title: $gettext('IP'),
@@ -297,8 +297,8 @@ async function performAdvancedSearch() {
   searchLoading.value = true
   try {
     const searchRequest: AdvancedSearchRequest = {
-      start_time: timeRange.value.start.toISOString(),
-      end_time: timeRange.value.end.toISOString(),
+      start_time: timeRange.value.start.unix(),
+      end_time: timeRange.value.end.unix(),
       query: searchFilters.value.query || undefined,
       ip: searchFilters.value.ip || undefined,
       method: searchFilters.value.method || undefined,
@@ -355,8 +355,8 @@ async function loadPreflight(): Promise<boolean> {
     if (preflightResponse.value.available) {
       // Cache this path as valid and set time range
       pathValidationCache.value.set(currentPath, true)
-      timeRange.value.start = dayjs(preflightResponse.value.start_time)
-      timeRange.value.end = dayjs(preflightResponse.value.end_time)
+      timeRange.value.start = dayjs.unix(preflightResponse.value.start_time)
+      timeRange.value.end = dayjs.unix(preflightResponse.value.end_time)
       return true // Index is ready
     }
     else {

+ 15 - 9
internal/nginx_log/analytics_service_calculations.go

@@ -6,7 +6,7 @@ import (
 )
 
 // calculateHourlyStats calculates UV/PV statistics for each hour of the day
-func (s *AnalyticsService) calculateHourlyStats(entries []*AccessLogEntry, startTime, endTime time.Time) []HourlyAccessStats {
+func (s *AnalyticsService) calculateHourlyStats(entries []*AccessLogEntry, startTime, endTime int64) []HourlyAccessStats {
 	// Create map to aggregate stats by hour (0-23)
 	hourStats := make(map[int]map[string]bool) // hour -> set of unique IPs
 	hourPV := make(map[int]int)                // hour -> page view count
@@ -19,7 +19,8 @@ func (s *AnalyticsService) calculateHourlyStats(entries []*AccessLogEntry, start
 
 	// Process entries
 	for _, entry := range entries {
-		hour := entry.Timestamp.Hour()
+		entryTime := time.Unix(entry.Timestamp, 0)
+		hour := entryTime.Hour()
 
 		// Count unique visitors (UV)
 		hourStats[hour][entry.IP] = true
@@ -47,14 +48,15 @@ func (s *AnalyticsService) calculateHourlyStats(entries []*AccessLogEntry, start
 }
 
 // calculateDailyStats calculates daily UV/PV statistics for the time range with padding
-func (s *AnalyticsService) calculateDailyStats(entries []*AccessLogEntry, startTime, endTime time.Time) []DailyAccessStats {
+func (s *AnalyticsService) calculateDailyStats(entries []*AccessLogEntry, startTime, endTime int64) []DailyAccessStats {
 	// Create map to aggregate stats by date
 	dailyStats := make(map[string]map[string]bool) // date -> set of unique IPs
 	dailyPV := make(map[string]int)                // date -> page view count
 
 	// Process entries
 	for _, entry := range entries {
-		date := entry.Timestamp.Format("2006-01-02")
+		entryTime := time.Unix(entry.Timestamp, 0)
+		date := entryTime.Format("2006-01-02")
 
 		if dailyStats[date] == nil {
 			dailyStats[date] = make(map[string]bool)
@@ -71,13 +73,17 @@ func (s *AnalyticsService) calculateDailyStats(entries []*AccessLogEntry, startT
 	result := make([]DailyAccessStats, 0)
 
 	// Use default time range if not provided
-	if startTime.IsZero() || endTime.IsZero() {
-		endTime = time.Now()
-		startTime = endTime.AddDate(0, 0, -30) // 30 days ago
+	var startDateTime, endDateTime time.Time
+	if startTime == 0 || endTime == 0 {
+		endDateTime = time.Now()
+		startDateTime = endDateTime.AddDate(0, 0, -30) // 30 days ago
+	} else {
+		startDateTime = time.Unix(startTime, 0)
+		endDateTime = time.Unix(endTime, 0)
 	}
 
-	currentDate := startTime.Truncate(24 * time.Hour)
-	for currentDate.Before(endTime) || currentDate.Equal(endTime.Truncate(24*time.Hour)) {
+	currentDate := startDateTime.Truncate(24 * time.Hour)
+	for currentDate.Before(endDateTime) || currentDate.Equal(endDateTime.Truncate(24*time.Hour)) {
 		dateKey := currentDate.Format("2006-01-02")
 
 		if ips, exists := dailyStats[dateKey]; exists {

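With int64 timestamps, the hourly and daily bucketing above calls time.Unix only at the point of hour or date extraction. A standalone sketch of the idiom, using assumed sample data rather than the service's types:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed sample data: three entries as Unix seconds.
	timestamps := []int64{1700000000, 1700003600, 1700086400}
	hourPV := make(map[int]int)     // hour of day -> page views
	dailyPV := make(map[string]int) // "2006-01-02" date -> page views
	for _, ts := range timestamps {
		t := time.Unix(ts, 0) // convert back only for bucketing
		hourPV[t.Hour()]++
		dailyPV[t.Format("2006-01-02")]++
	}
	fmt.Println(hourPV, dailyPV)
}
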
+ 1 - 1
internal/nginx_log/analytics_service_core.go

@@ -95,7 +95,7 @@ func (s *AnalyticsService) validateAndNormalizeSearchRequest(req *QueryRequest)
 	}
 
 	// Validate time range
-	if !req.StartTime.IsZero() && !req.EndTime.IsZero() && req.StartTime.After(req.EndTime) {
+	if req.StartTime != 0 && req.EndTime != 0 && req.StartTime > req.EndTime {
 		return fmt.Errorf("start time cannot be after end time")
 	}
 

+ 24 - 8
internal/nginx_log/analytics_service_entries.go

@@ -75,9 +75,25 @@ func (s *AnalyticsService) GetPreflightStatus(logPath string) (*PreflightResult,
 	var start, end time.Time
 	var indexStatus string
 
+	logger.Infof("GetPreflightStatus called with logPath='%s'", logPath)
+	
+	// Check if analytics service has an indexer
+	if s.indexer == nil {
+		logger.Error("GetPreflightStatus: Analytics service has no indexer")
+		return &PreflightResult{
+			StartTime:   0,
+			EndTime:     0,
+			Available:   false,
+			IndexStatus: IndexStatusNotIndexed,
+		}, nil
+	}
+	
+	logger.Infof("GetPreflightStatus: Analytics service has indexer")
+
 	// Check real indexing status using IndexingStatusManager
 	statusManager := GetIndexingStatusManager()
 	isCurrentlyIndexing := statusManager.IsIndexing()
+	logger.Infof("GetPreflightStatus: Is currently indexing: %v", isCurrentlyIndexing)
 
 	if logPath != "" {
 		// Validate log path exists
@@ -119,10 +135,10 @@ func (s *AnalyticsService) GetPreflightStatus(logPath string) (*PreflightResult,
 					if file.Path == logPath {
 						found = true
 						logger.Debugf("Found matching path %s, HasTimeRange=%v", logPath, file.HasTimeRange)
-						if file.HasTimeRange && !file.TimeRangeStart.IsZero() && !file.TimeRangeEnd.IsZero() {
+						if file.HasTimeRange && file.TimeRangeStart != 0 && file.TimeRangeEnd != 0 {
 							// File is indexed with time range data
-							start = file.TimeRangeStart
-							end = file.TimeRangeEnd
+							start = time.Unix(file.TimeRangeStart, 0)
+							end = time.Unix(file.TimeRangeEnd, 0)
 							indexStatus = IndexStatusReady
 							logger.Debugf("File %s found in index status with time range %v to %v", logPath, start, end)
 							goto statusDetermined
@@ -150,20 +166,20 @@ func (s *AnalyticsService) GetPreflightStatus(logPath string) (*PreflightResult,
 		}
 	}
 
-	var startPtr, endPtr *time.Time
+	var startUnix, endUnix int64
 	if !start.IsZero() {
-		startPtr = &start
+		startUnix = start.Unix()
 	}
 	if !end.IsZero() {
-		endPtr = &end
+		endUnix = end.Unix()
 	}
 
 	// Data is available if we have time range data from Bleve or if currently indexing
 	dataAvailable := (!start.IsZero() && !end.IsZero()) || indexStatus == IndexStatusIndexing
 
 	result := &PreflightResult{
-		StartTime:   startPtr,
-		EndTime:     endPtr,
+		StartTime:   startUnix,
+		EndTime:     endUnix,
 		Available:   dataAvailable,
 		IndexStatus: indexStatus,
 	}

+ 16 - 4
internal/nginx_log/analytics_service_geo.go

@@ -95,11 +95,23 @@ func (s *AnalyticsService) buildTimeRangeQuery(logPath string, startTime, endTim
 
 	// Add time range filter if specified
 	if !startTime.IsZero() || !endTime.IsZero() {
-		dateQuery := bleve.NewDateRangeQuery(startTime, endTime)
-		dateQuery.SetField("timestamp")
-		queries = append(queries, dateQuery)
+		var start, end *float64
 		
-		logger.Debugf("Time range query: start=%v, end=%v", startTime, endTime)
+		if !startTime.IsZero() {
+			startFloat := float64(startTime.Unix())
+			start = &startFloat
+		}
+		
+		if !endTime.IsZero() {
+			endFloat := float64(endTime.Unix())
+			end = &endFloat
+		}
+		
+		numericQuery := bleve.NewNumericRangeQuery(start, end)
+		numericQuery.SetField("timestamp")
+		queries = append(queries, numericQuery)
+		
+		logger.Debugf("Time range query: start=%v (%v), end=%v (%v)", startTime, start, endTime, end)
 	}
 
 	// Combine queries
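The geo query above swaps NewDateRangeQuery for NewNumericRangeQuery, which takes optional *float64 bounds. A self-contained sketch against an in-memory index, assuming timestamps are indexed as plain numbers:

package main

import (
	"fmt"

	"github.com/blevesearch/bleve/v2"
)

func main() {
	idx, err := bleve.NewMemOnly(bleve.NewIndexMapping())
	if err != nil {
		panic(err)
	}
	defer idx.Close()

	// Index a document whose timestamp is plain Unix seconds.
	if err := idx.Index("doc1", map[string]interface{}{"timestamp": float64(1700000000)}); err != nil {
		panic(err)
	}

	start, end := float64(1699999999), float64(1700000001)
	q := bleve.NewNumericRangeQuery(&start, &end) // min inclusive, max exclusive
	q.SetField("timestamp")

	res, err := idx.Search(bleve.NewSearchRequest(q))
	if err != nil {
		panic(err)
	}
	fmt.Println("hits:", res.Total) // hits: 1
}
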

+ 4 - 4
internal/nginx_log/analytics_service_test.go

@@ -79,8 +79,8 @@ func TestAnalyticsService_ValidateAndNormalizeSearchRequest(t *testing.T) {
 		{
 			name: "Invalid time range should return error",
 			req: &QueryRequest{
-				StartTime: time.Now(),
-				EndTime:   time.Now().Add(-1 * time.Hour),
+				StartTime: time.Now().Unix(),
+				EndTime:   time.Now().Add(-1 * time.Hour).Unix(),
 				Limit:     100,
 			},
 			wantErr: true,
@@ -161,8 +161,8 @@ func TestAnalyticsService_GetLogEntries(t *testing.T) {
 func BenchmarkAnalyticsService_ValidateAndNormalizeSearchRequest(b *testing.B) {
 	service := NewAnalyticsService()
 	req := &QueryRequest{
-		StartTime: time.Now().Add(-1 * time.Hour),
-		EndTime:   time.Now(),
+		StartTime: time.Now().Add(-1 * time.Hour).Unix(),
+		EndTime:   time.Now().Unix(),
 		Query:     "test query",
 		IP:        "192.168.1.1",
 		Method:    "GET",

+ 13 - 15
internal/nginx_log/analytics_service_types.go

@@ -1,8 +1,6 @@
 package nginx_log
 
-import (
-	"time"
-)
+import ()
 
 // KeyValue represents a key-value pair for analytics
 type KeyValue struct {
@@ -12,14 +10,14 @@ type KeyValue struct {
 
 // FileStatus represents the status of a log file
 type FileStatus struct {
-	Path           string    `json:"path"`
-	LastModified   time.Time `json:"last_modified"`
-	LastSize       int64     `json:"last_size"`
-	LastIndexed    time.Time `json:"last_indexed"`
-	IsCompressed   bool      `json:"is_compressed"`
-	HasTimeRange   bool      `json:"has_timerange"`
-	TimeRangeStart time.Time `json:"timerange_start,omitzero"`
-	TimeRangeEnd   time.Time `json:"timerange_end,omitzero"`
+	Path           string `json:"path"`
+	LastModified   int64  `json:"last_modified"`   // Unix timestamp
+	LastSize       int64  `json:"last_size"`
+	LastIndexed    int64  `json:"last_indexed"`    // Unix timestamp
+	IsCompressed   bool   `json:"is_compressed"`
+	HasTimeRange   bool   `json:"has_timerange"`
+	TimeRangeStart int64  `json:"timerange_start,omitzero"` // Unix timestamp
+	TimeRangeEnd   int64  `json:"timerange_end,omitzero"`   // Unix timestamp
 }
 
 // IndexStatus represents comprehensive index status and statistics
@@ -60,8 +58,8 @@ const (
 
 // PreflightResult represents the result of a preflight check
 type PreflightResult struct {
-	StartTime   *time.Time `json:"start_time,omitempty"`
-	EndTime     *time.Time `json:"end_time,omitempty"`
-	Available   bool       `json:"available"`
-	IndexStatus string     `json:"index_status"`
+	StartTime   int64  `json:"start_time,omitempty"` // Unix timestamp
+	EndTime     int64  `json:"end_time,omitempty"`   // Unix timestamp
+	Available   bool   `json:"available"`
+	IndexStatus string `json:"index_status"`
 }

+ 9 - 7
internal/nginx_log/batch_search_optimizer.go

@@ -313,17 +313,19 @@ func (bso *BatchSearchOptimizer) findCommonTimeRange(requests []*BatchSearchRequ
 	hasTimeRange := false
 	
 	for _, req := range requests {
-		if !req.Request.StartTime.IsZero() && !req.Request.EndTime.IsZero() {
+		if req.Request.StartTime != 0 && req.Request.EndTime != 0 {
 			if !hasTimeRange {
-				minStart = req.Request.StartTime
-				maxEnd = req.Request.EndTime
+				minStart = time.Unix(req.Request.StartTime, 0)
+				maxEnd = time.Unix(req.Request.EndTime, 0)
 				hasTimeRange = true
 			} else {
-				if req.Request.StartTime.Before(minStart) {
-					minStart = req.Request.StartTime
+				reqStartTime := time.Unix(req.Request.StartTime, 0)
+				if reqStartTime.Before(minStart) {
+					minStart = reqStartTime
 				}
-				if req.Request.EndTime.After(maxEnd) {
-					maxEnd = req.Request.EndTime
+				reqEndTime := time.Unix(req.Request.EndTime, 0)
+				if reqEndTime.After(maxEnd) {
+					maxEnd = reqEndTime
 				}
 			}
 		}

+ 3 - 3
internal/nginx_log/bleve_field_test.go

@@ -21,7 +21,7 @@ func TestBleveFieldMapping(t *testing.T) {
 	testEntry := &IndexedLogEntry{
 		ID:        "test_1",
 		FilePath:  "/var/log/nginx/access.log",
-		Timestamp: time.Now(),
+		Timestamp: time.Now().Unix(),
 		IP:        "135.220.172.38",
 		Method:    "GET",
 		Path:      "/test",
@@ -190,7 +190,7 @@ func createTestIndexMapping() mapping.IndexMapping {
 	logMapping := bleve.NewDocumentMapping()
 
 	// Timestamp
-	timestampMapping := bleve.NewDateTimeFieldMapping()
+	timestampMapping := bleve.NewNumericFieldMapping()
 	logMapping.AddFieldMappingsAt("timestamp", timestampMapping)
 
 	// File path with TextFieldMapping + keyword analyzer (current approach)
@@ -227,7 +227,7 @@ func createAlternativeIndexMapping() mapping.IndexMapping {
 	logMapping := bleve.NewDocumentMapping()
 
 	// Timestamp
-	timestampMapping := bleve.NewDateTimeFieldMapping()
+	timestampMapping := bleve.NewNumericFieldMapping()
 	logMapping.AddFieldMappingsAt("timestamp", timestampMapping)
 
 	// File path with TextFieldMapping instead of KeywordFieldMapping

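The mapping switch in this test, NewDateTimeFieldMapping to NewNumericFieldMapping, is the index-side core of the refactor. A minimal sketch of building such a mapping, independent of the test's fixtures:

package main

import (
	"fmt"

	"github.com/blevesearch/bleve/v2"
)

func main() {
	logMapping := bleve.NewDocumentMapping()
	// Unix seconds are indexed directly as a numeric field.
	logMapping.AddFieldMappingsAt("timestamp", bleve.NewNumericFieldMapping())

	indexMapping := bleve.NewIndexMapping()
	indexMapping.DefaultMapping = logMapping

	idx, err := bleve.NewMemOnly(indexMapping)
	if err != nil {
		panic(err)
	}
	defer idx.Close()
	fmt.Println("index created with numeric timestamp mapping")
}
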
+ 23 - 19
internal/nginx_log/bleve_stats_service_core.go

@@ -3,7 +3,6 @@ package nginx_log
 import (
 	"context"
 	"fmt"
-	"time"
 
 	"github.com/blevesearch/bleve/v2"
 	"github.com/blevesearch/bleve/v2/search/query"
@@ -138,37 +137,42 @@ func (s *BleveStatsService) GetDashboardAnalytics(ctx context.Context, req *Dash
 }
 
 // buildTimeRangeQuery builds a time range query for Bleve
-func (s *BleveStatsService) buildTimeRangeQuery(startTime, endTime time.Time) query.Query {
+func (s *BleveStatsService) buildTimeRangeQuery(startTime, endTime int64) query.Query {
 	// If both times are zero or the range is too wide, return match all query
-	if startTime.IsZero() && endTime.IsZero() {
+	if startTime == 0 && endTime == 0 {
 		return bleve.NewMatchAllQuery()
 	}
 
 	// Check if the time range is reasonable (same as search interface)
-	if !startTime.IsZero() && !endTime.IsZero() {
-		if endTime.Sub(startTime) >= 400*24*time.Hour { // More than ~400 days
+	if startTime != 0 && endTime != 0 {
+		if endTime-startTime >= 400*24*3600 { // More than ~400 days in seconds
 			return bleve.NewMatchAllQuery()
 		}
 	}
 
 	// Build proper time range query
 	var timeQuery query.Query
-	if !startTime.IsZero() && !endTime.IsZero() {
-		// Add 1 millisecond to endTime to ensure boundary values are included
-		inclusiveEndTime := endTime.Add(1 * time.Millisecond)
-		timeQuery = bleve.NewDateRangeQuery(startTime, inclusiveEndTime)
-		timeQuery.(*query.DateRangeQuery).SetField("timestamp")
-	} else if !startTime.IsZero() {
-		timeQuery = bleve.NewDateRangeQuery(startTime, time.Time{})
-		timeQuery.(*query.DateRangeQuery).SetField("timestamp")
-	} else if !endTime.IsZero() {
-		// Add 1 millisecond to endTime to ensure boundary values are included
-		inclusiveEndTime := endTime.Add(1 * time.Millisecond)
-		timeQuery = bleve.NewDateRangeQuery(time.Time{}, inclusiveEndTime)
-		timeQuery.(*query.DateRangeQuery).SetField("timestamp")
+	if startTime != 0 && endTime != 0 {
+		// Add 1 second to endTime to ensure boundary values are included
+		inclusiveEndTime := endTime + 1
+		startFloat := float64(startTime)
+		endFloat := float64(inclusiveEndTime)
+		timeQuery = bleve.NewNumericRangeQuery(&startFloat, &endFloat)
+		timeQuery.(*query.NumericRangeQuery).SetField("timestamp")
+	} else if startTime != 0 {
+		startFloat := float64(startTime)
+		timeQuery = bleve.NewNumericRangeQuery(&startFloat, nil)
+		timeQuery.(*query.NumericRangeQuery).SetField("timestamp")
+	} else if endTime != 0 {
+		// Add 1 second to endTime to ensure boundary values are included
+		inclusiveEndTime := endTime + 1
+		endFloat := float64(inclusiveEndTime)
+		timeQuery = bleve.NewNumericRangeQuery(nil, &endFloat)
+		timeQuery.(*query.NumericRangeQuery).SetField("timestamp")
 	} else {
 		return bleve.NewMatchAllQuery()
 	}
 
 	return timeQuery
-}
+}
+

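The +1-second adjustment above compensates for bleve's numeric range upper bound being exclusive by default. An alternative, not what this commit does, is the inclusive constructor; a sketch:

package main

import (
	"fmt"

	"github.com/blevesearch/bleve/v2"
)

func main() {
	start, end := float64(1700000000), float64(1700003600)
	inclusive := true
	// Both bounds inclusive, so no +1 adjustment is needed.
	q := bleve.NewNumericRangeInclusiveQuery(&start, &end, &inclusive, &inclusive)
	q.SetField("timestamp")
	fmt.Printf("range [%.0f, %.0f], both ends inclusive\n", start, end)
}
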
+ 17 - 11
internal/nginx_log/bleve_stats_service_time.go

@@ -12,7 +12,7 @@ import (
 
 // calculateHourlyStatsFromBleve calculates 24-hour UV/PV statistics using Bleve aggregations
 // Shows stats for the End Date (target day) only
-func (s *BleveStatsService) calculateHourlyStatsFromBleve(ctx context.Context, baseQuery query.Query, startTime, endTime time.Time) ([]HourlyAccessStats, error) {
+func (s *BleveStatsService) calculateHourlyStatsFromBleve(ctx context.Context, baseQuery query.Query, startTime, endTime int64) ([]HourlyAccessStats, error) {
 	logger.Info("BleveStatsService: Starting hourly stats calculation")
 
 	hourStats := make(map[int]map[string]bool) // hour -> unique IPs
@@ -54,8 +54,9 @@ func (s *BleveStatsService) calculateHourlyStatsFromBleve(ctx context.Context, b
 
 			if timestamp != nil && ip != "" {
 				// For hourly stats, only process entries from the target date (endTime)
-				if !endTime.IsZero() {
-					targetDate := endTime.Truncate(24 * time.Hour)
+				if endTime != 0 {
+					targetTime := time.Unix(endTime, 0)
+					targetDate := targetTime.Truncate(24 * time.Hour)
 					entryDate := timestamp.Truncate(24 * time.Hour)
 					if !entryDate.Equal(targetDate) {
 						continue // Skip entries not from the target date
@@ -84,8 +85,9 @@ func (s *BleveStatsService) calculateHourlyStatsFromBleve(ctx context.Context, b
 
 	// Use endTime (target date) for hour timestamps, or current date if not specified
 	var targetDate time.Time
-	if !endTime.IsZero() {
-		targetDate = endTime.Truncate(24 * time.Hour)
+	if endTime != 0 {
+		endDateTime := time.Unix(endTime, 0)
+		targetDate = endDateTime.Truncate(24 * time.Hour)
 	} else {
 		now := time.Now()
 		targetDate = time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location())
@@ -106,7 +108,7 @@ func (s *BleveStatsService) calculateHourlyStatsFromBleve(ctx context.Context, b
 }
 
 // calculateDailyStatsFromBleve calculates daily UV/PV statistics using Bleve
-func (s *BleveStatsService) calculateDailyStatsFromBleve(ctx context.Context, baseQuery query.Query, startTime, endTime time.Time) ([]DailyAccessStats, error) {
+func (s *BleveStatsService) calculateDailyStatsFromBleve(ctx context.Context, baseQuery query.Query, startTime, endTime int64) ([]DailyAccessStats, error) {
 	dailyStats := make(map[string]map[string]bool) // date -> unique IPs
 	dailyPV := make(map[string]int)                // date -> page views
 
@@ -152,13 +154,17 @@ func (s *BleveStatsService) calculateDailyStatsFromBleve(ctx context.Context, ba
 	result := make([]DailyAccessStats, 0)
 
 	// Use default time range if not provided
-	if startTime.IsZero() || endTime.IsZero() {
-		endTime = time.Now()
-		startTime = endTime.AddDate(0, 0, -30) // 30 days ago
+	var startDateTime, endDateTime time.Time
+	if startTime == 0 || endTime == 0 {
+		endDateTime = time.Now()
+		startDateTime = endDateTime.AddDate(0, 0, -30) // 30 days ago
+	} else {
+		startDateTime = time.Unix(startTime, 0)
+		endDateTime = time.Unix(endTime, 0)
 	}
 
-	currentDate := startTime.Truncate(24 * time.Hour)
-	for currentDate.Before(endTime) || currentDate.Equal(endTime.Truncate(24*time.Hour)) {
+	currentDate := startDateTime.Truncate(24 * time.Hour)
+	for currentDate.Before(endDateTime) || currentDate.Equal(endDateTime.Truncate(24*time.Hour)) {
 		dateKey := currentDate.Format("2006-01-02")
 
 		if ips, exists := dailyStats[dateKey]; exists {

+ 37 - 6
internal/nginx_log/bleve_stats_service_utils.go

@@ -78,7 +78,11 @@ func (s *BleveStatsService) extractTimestampIPAndPath(hit *search.DocumentMatch)
 	var filePath string
 
 	if timestampField, ok := hit.Fields["timestamp"]; ok {
-		if timestampStr, ok := timestampField.(string); ok {
+		if timestampFloat, ok := timestampField.(float64); ok {
+			t := time.Unix(int64(timestampFloat), 0)
+			timestamp = &t
+		} else if timestampStr, ok := timestampField.(string); ok {
+			// Fallback for old RFC3339 format
 			if t, err := time.Parse(time.RFC3339, timestampStr); err == nil {
 				timestamp = &t
 			}
@@ -103,16 +107,24 @@ func (s *BleveStatsService) extractTimestampIPAndPath(hit *search.DocumentMatch)
 // GetTimeRangeFromBleve returns the available time range from Bleve index
 func (s *BleveStatsService) GetTimeRangeFromBleve(logPath string) (start, end time.Time) {
 	if s.indexer == nil {
-		logger.Warn("BleveStatsService.GetTimeRangeFromBleve: indexer is nil")
+		logger.Error("BleveStatsService.GetTimeRangeFromBleve: indexer is nil")
 		return time.Time{}, time.Time{}
 	}
 
 	if s.indexer.index == nil {
-		logger.Warn("BleveStatsService.GetTimeRangeFromBleve: index is nil")
+		logger.Error("BleveStatsService.GetTimeRangeFromBleve: index is nil")
 		return time.Time{}, time.Time{}
 	}
 
 	logger.Infof("BleveStatsService.GetTimeRangeFromBleve: Getting time range for log_path='%s'", logPath)
+	
+	// First, let's check if the index has any documents at all
+	docCount, err := s.indexer.index.DocCount()
+	if err != nil {
+		logger.Errorf("BleveStatsService.GetTimeRangeFromBleve: Failed to get doc count: %v", err)
+	} else {
+		logger.Infof("BleveStatsService.GetTimeRangeFromBleve: Total documents in index: %d", docCount)
+	}
 
 	var searchQuery query.Query = bleve.NewMatchAllQuery()
 
@@ -152,13 +164,29 @@ func (s *BleveStatsService) GetTimeRangeFromBleve(logPath string) (start, end ti
 		return time.Time{}, time.Time{}
 	}
 
-	logger.Debugf("BleveStatsService.GetTimeRangeFromBleve: Found %d entries (total=%d)", len(searchResult.Hits), searchResult.Total)
+	logger.Infof("BleveStatsService.GetTimeRangeFromBleve: Found %d entries (total=%d)", len(searchResult.Hits), searchResult.Total)
 
 	if timestampField, ok := searchResult.Hits[0].Fields["timestamp"]; ok {
-		if timestampStr, ok := timestampField.(string); ok {
+		logger.Infof("BleveStatsService.GetTimeRangeFromBleve: timestamp field exists, type=%T, value=%v", timestampField, timestampField)
+		if timestampFloat, ok := timestampField.(float64); ok {
+			start = time.Unix(int64(timestampFloat), 0)
+			logger.Infof("BleveStatsService.GetTimeRangeFromBleve: Parsed start time from float64: %v", start)
+		} else if timestampStr, ok := timestampField.(string); ok {
+			// Fallback for old RFC3339 format (backward compatibility)
 			if t, err := time.Parse(time.RFC3339, timestampStr); err == nil {
 				start = t
+				logger.Infof("BleveStatsService.GetTimeRangeFromBleve: Parsed start time from string: %v", start)
+			} else {
+				logger.Errorf("BleveStatsService.GetTimeRangeFromBleve: Failed to parse RFC3339 string: %v, error: %v", timestampStr, err)
 			}
+		} else {
+			logger.Errorf("BleveStatsService.GetTimeRangeFromBleve: timestamp field has unexpected type: %T", timestampField)
+		}
+	} else {
+		logger.Error("BleveStatsService.GetTimeRangeFromBleve: timestamp field not found in search result")
+		// Let's see what fields are actually available
+		for key, value := range searchResult.Hits[0].Fields {
+			logger.Infof("BleveStatsService.GetTimeRangeFromBleve: Available field: %s = %v (type: %T)", key, value, value)
 		}
 	}
 
@@ -170,7 +198,10 @@ func (s *BleveStatsService) GetTimeRangeFromBleve(logPath string) (start, end ti
 	}
 
 	if timestampField, ok := searchResult.Hits[0].Fields["timestamp"]; ok {
-		if timestampStr, ok := timestampField.(string); ok {
+		if timestampFloat, ok := timestampField.(float64); ok {
+			end = time.Unix(int64(timestampFloat), 0)
+		} else if timestampStr, ok := timestampField.(string); ok {
+			// Fallback for old RFC3339 format (backward compatibility)
 			if t, err := time.Parse(time.RFC3339, timestampStr); err == nil {
 				end = t
 			}

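The extraction fallback above handles two on-disk shapes: bleve returns stored numeric fields as float64, while documents indexed before this change stored RFC3339 strings. A standalone sketch of that fallback, with parseTimestampField as a hypothetical helper name:

package main

import (
	"fmt"
	"time"
)

// parseTimestampField is a hypothetical helper: stored numeric fields come
// back from bleve as float64; pre-refactor documents stored RFC3339 strings.
func parseTimestampField(v interface{}) (time.Time, bool) {
	switch ts := v.(type) {
	case float64:
		return time.Unix(int64(ts), 0), true
	case string:
		if t, err := time.Parse(time.RFC3339, ts); err == nil {
			return t, true
		}
	}
	return time.Time{}, false
}

func main() {
	fmt.Println(parseTimestampField(float64(1700000000)))    // new numeric form
	fmt.Println(parseTimestampField("2023-11-14T22:13:20Z")) // legacy string form
}
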
+ 3 - 5
internal/nginx_log/dashboard_types.go

@@ -1,12 +1,10 @@
 package nginx_log
 
-import "time"
-
 // DashboardQueryRequest represents a request for dashboard analytics
 type DashboardQueryRequest struct {
-	StartTime time.Time `json:"start_time"`
-	EndTime   time.Time `json:"end_time"`
-	LogPath   string    `json:"log_path,omitempty"`
+	StartTime int64  `json:"start_time"` // Unix timestamp
+	EndTime   int64  `json:"end_time"`   // Unix timestamp
+	LogPath   string `json:"log_path,omitempty"`
 }
 
 // DashboardAnalytics represents comprehensive dashboard analytics data

+ 5 - 6
internal/nginx_log/indexer_file_batch.go

@@ -2,13 +2,12 @@ package nginx_log
 
 import (
 	"fmt"
-	"time"
 
 	"github.com/blevesearch/bleve/v2"
 )
 
 // processBatchStreaming processes a batch of lines using parallel parsing
-func (li *LogIndexer) processBatchStreaming(lines []string, filePath string, mainLogPath string, startPosition int64, batch **bleve.Batch, entryCount *int, newTimeStart, newTimeEnd **time.Time) error {
+func (li *LogIndexer) processBatchStreaming(lines []string, filePath string, mainLogPath string, startPosition int64, batch **bleve.Batch, entryCount *int, newTimeStart, newTimeEnd *int64) error {
 	if len(lines) == 0 {
 		return nil
 	}
@@ -23,11 +22,11 @@ func (li *LogIndexer) processBatchStreaming(lines []string, filePath string, mai
 	// Index entries
 	for i, entry := range entries {
 		// Track time range for new entries
-		if *newTimeStart == nil || entry.Timestamp.Before(**newTimeStart) {
-			*newTimeStart = &entry.Timestamp
+		if *newTimeStart == 0 || entry.Timestamp < *newTimeStart {
+			*newTimeStart = entry.Timestamp
 		}
-		if *newTimeEnd == nil || entry.Timestamp.After(**newTimeEnd) {
-			*newTimeEnd = &entry.Timestamp
+		if *newTimeEnd == 0 || entry.Timestamp > *newTimeEnd {
+			*newTimeEnd = entry.Timestamp
 		}
 
 		// Create indexed entry with unique ID

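The pointer-based min/max tracking becomes plain int64 comparisons, with zero as the "unset" sentinel that nil pointers used to provide. A sketch of the idiom; note it assumes no genuine entry sits exactly at the Unix epoch:

package main

import "fmt"

func main() {
	var newTimeStart, newTimeEnd int64 // zero means "not yet set"
	for _, ts := range []int64{1700003600, 1700000000, 1700007200} {
		if newTimeStart == 0 || ts < newTimeStart {
			newTimeStart = ts
		}
		if newTimeEnd == 0 || ts > newTimeEnd {
			newTimeEnd = ts
		}
	}
	fmt.Println(newTimeStart, newTimeEnd) // 1700000000 1700007200
}
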
+ 2 - 3
internal/nginx_log/indexer_file_management.go

@@ -5,7 +5,6 @@ import (
 	"path/filepath"
 	"strings"
 	"sync"
-	"time"
 
 	"github.com/fsnotify/fsnotify"
 	"github.com/uozi-tech/cosy/logger"
@@ -34,8 +33,8 @@ func (li *LogIndexer) AddLogPath(logPath string) error {
 		// Add new log path with zero time to trigger initial indexing check
 		li.logPaths[logPath] = &LogFileInfo{
 			Path:         logPath,
-			LastModified: time.Time{}, // Will trigger indexing check on first scan
-			LastSize:     0,           // Will trigger indexing check on first scan
+			LastModified: 0, // Will trigger indexing check on first scan
+			LastSize:     0, // Will trigger indexing check on first scan
 			IsCompressed: isCompressed,
 		}
 		logger.Infof("Added new log path %s (compressed=%v)", logPath, isCompressed)

+ 27 - 14
internal/nginx_log/indexer_file_streaming.go

@@ -123,7 +123,7 @@ func (li *LogIndexer) indexFileFromPositionStreamingWithMainLogPath(filePath, ma
 	lineCount := 0
 	entryCount := 0
 	batch := li.index.NewBatch()
-	var newTimeStart, newTimeEnd *time.Time
+	var newTimeStart, newTimeEnd int64
 
 	logger.Infof("Starting index for file %s -> %s (size: %d bytes)", filePath, mainLogPath, fileInfo.Size())
 
@@ -197,7 +197,16 @@ func (li *LogIndexer) indexFileFromPositionStreamingWithMainLogPath(filePath, ma
 	}
 
 	// Update persistence with final status
-	logIndex.UpdateProgress(fileInfo.ModTime(), fileInfo.Size(), currentPosition, uint64(entryCount), newTimeStart, newTimeEnd)
+	var newTimeStartPtr, newTimeEndPtr *time.Time
+	if newTimeStart != 0 {
+		t := time.Unix(newTimeStart, 0)
+		newTimeStartPtr = &t
+	}
+	if newTimeEnd != 0 {
+		t := time.Unix(newTimeEnd, 0)
+		newTimeEndPtr = &t
+	}
+	logIndex.UpdateProgress(fileInfo.ModTime(), fileInfo.Size(), currentPosition, uint64(entryCount), newTimeStartPtr, newTimeEndPtr)
 	logIndex.SetIndexDuration(startTime)
 
 	// Save the updated log index
@@ -229,7 +238,7 @@ func (li *LogIndexer) indexFileFromPositionStreaming(filePath string, startPosit
 	lineCount := 0
 	entryCount := 0
 	batch := li.index.NewBatch()
-	var newTimeStart, newTimeEnd *time.Time
+	var newTimeStart, newTimeEnd int64
 
 	// Get main log path first (for statistics grouping)
 	mainLogPath := li.getMainLogPath(filePath)
@@ -355,21 +364,25 @@ func (li *LogIndexer) indexFileFromPositionStreaming(filePath string, startPosit
 	var timeRangeStart, timeRangeEnd *time.Time
 	if logIndex.TimeRangeStart != nil {
 		timeRangeStart = logIndex.TimeRangeStart
-	} else {
-		timeRangeStart = newTimeStart
+	} else if newTimeStart != 0 {
+		t := time.Unix(newTimeStart, 0)
+		timeRangeStart = &t
 	}
 	if logIndex.TimeRangeEnd != nil {
 		timeRangeEnd = logIndex.TimeRangeEnd
-	} else {
-		timeRangeEnd = newTimeEnd
+	} else if newTimeEnd != 0 {
+		t := time.Unix(newTimeEnd, 0)
+		timeRangeEnd = &t
 	}
 
 	// Expand time range if needed
-	if newTimeStart != nil && (timeRangeStart == nil || newTimeStart.Before(*timeRangeStart)) {
-		timeRangeStart = newTimeStart
+	if newTimeStart != 0 && (timeRangeStart == nil || time.Unix(newTimeStart, 0).Before(*timeRangeStart)) {
+		t := time.Unix(newTimeStart, 0)
+		timeRangeStart = &t
 	}
-	if newTimeEnd != nil && (timeRangeEnd == nil || newTimeEnd.After(*timeRangeEnd)) {
-		timeRangeEnd = newTimeEnd
+	if newTimeEnd != 0 && (timeRangeEnd == nil || time.Unix(newTimeEnd, 0).After(*timeRangeEnd)) {
+		t := time.Unix(newTimeEnd, 0)
+		timeRangeEnd = &t
 	}
 
 	// Calculate total index size of related log files for this log group
@@ -387,11 +400,11 @@ func (li *LogIndexer) indexFileFromPositionStreaming(filePath string, startPosit
 	// Update in-memory file info for compatibility
 	li.mu.Lock()
 	if fileInfo, exists := li.logPaths[filePath]; exists {
-		fileInfo.LastModified = logIndex.LastModified
+		fileInfo.LastModified = logIndex.LastModified.Unix()
 		fileInfo.LastSize = logIndex.LastSize
-		fileInfo.LastIndexed = logIndex.LastIndexed
+		fileInfo.LastIndexed = logIndex.LastIndexed.Unix()
 		if timeRangeStart != nil && timeRangeEnd != nil {
-			fileInfo.TimeRange = &TimeRange{Start: *timeRangeStart, End: *timeRangeEnd}
+			fileInfo.TimeRange = &TimeRange{Start: timeRangeStart.Unix(), End: timeRangeEnd.Unix()}
 		}
 	}
 	li.mu.Unlock()

+ 19 - 16
internal/nginx_log/indexer_file_utils.go

@@ -77,30 +77,33 @@ func (li *LogIndexer) RepairFileMetadata() error {
 		var timeRange *TimeRange
 		for _, hit := range searchResult.Hits {
 			if timestampField, ok := hit.Fields["timestamp"]; ok {
-				if timestampStr, ok := timestampField.(string); ok {
-					timestamp, err := time.Parse(time.RFC3339, timestampStr)
-					if err != nil {
-						continue
-					}
+				var timestamp int64
+				switch v := timestampField.(type) {
+				case float64:
+					timestamp = int64(v)
+				case int64:
+					timestamp = v
+				default:
+					continue
+				}
 
-					if timeRange == nil {
-						timeRange = &TimeRange{Start: timestamp, End: timestamp}
-					} else {
-						if timestamp.Before(timeRange.Start) {
-							timeRange.Start = timestamp
-						}
-						if timestamp.After(timeRange.End) {
-							timeRange.End = timestamp
-						}
+				if timeRange == nil {
+					timeRange = &TimeRange{Start: timestamp, End: timestamp}
+				} else {
+					if timestamp < timeRange.Start {
+						timeRange.Start = timestamp
+					}
+					if timestamp > timeRange.End {
+						timeRange.End = timestamp
 					}
 				}
 			}
 		}
 
 		// Update file info
-		fileInfo.LastModified = currentInfo.ModTime()
+		fileInfo.LastModified = currentInfo.ModTime().Unix()
 		fileInfo.LastSize = currentInfo.Size()
-		fileInfo.LastIndexed = time.Now()
+		fileInfo.LastIndexed = time.Now().Unix()
 		fileInfo.TimeRange = timeRange
 
 		if timeRange != nil {

+ 17 - 16
internal/nginx_log/indexer_search.go

@@ -32,7 +32,7 @@ func (li *LogIndexer) SearchLogs(ctx context.Context, req *QueryRequest) (*Query
 		return &QueryResult{
 			Entries: cached.Entries,
 			Total:   cached.Total,
-			Took:    time.Since(start),
+			Took:    time.Since(start).Milliseconds(),
 			Summary: summaryStats,
 		}, nil
 	}
@@ -119,7 +119,7 @@ func (li *LogIndexer) SearchLogs(ctx context.Context, req *QueryRequest) (*Query
 	result := &QueryResult{
 		Entries: entries,
 		Total:   int(searchResult.Total),
-		Took:    time.Since(start),
+		Took:    time.Since(start).Milliseconds(),
 		Summary: summaryStats,
 	}
 
@@ -131,18 +131,21 @@ func (li *LogIndexer) buildSearchQuery(req *QueryRequest) query.Query {
 	var queries []query.Query
 
 	// Time range query - only add if we have meaningful time constraints
-	if !req.StartTime.IsZero() && !req.EndTime.IsZero() {
+	if req.StartTime != 0 && req.EndTime != 0 {
 		// Check if the time range is reasonable (not too wide)
-		if req.EndTime.Sub(req.StartTime) < 400*24*time.Hour { // Less than ~400 days
-			// Add 1 millisecond to endTime to ensure boundary values are included
+		duration := req.EndTime - req.StartTime
+		if duration < 400*24*3600 { // Less than ~400 days in seconds
+			// Add 1 second to endTime to ensure boundary values are included
 			// This fixes the issue where records with exact endTime are excluded due to exclusive upper bound
-			inclusiveEndTime := req.EndTime.Add(1 * time.Millisecond)
-			logger.Infof("Using time range filter: %s to %s (inclusive)", req.StartTime.Format(time.RFC3339), inclusiveEndTime.Format(time.RFC3339))
-			timeQuery := bleve.NewDateRangeQuery(req.StartTime, inclusiveEndTime)
+			inclusiveEndTime := req.EndTime + 1
+			logger.Infof("Using time range filter: %d to %d (inclusive)", req.StartTime, inclusiveEndTime)
+			startFloat := float64(req.StartTime)
+			endFloat := float64(inclusiveEndTime)
+			timeQuery := bleve.NewNumericRangeQuery(&startFloat, &endFloat)
 			timeQuery.SetField("timestamp")
 			queries = append(queries, timeQuery)
 		} else {
-			logger.Infof("Time range too wide (%v), ignoring time filter to search all data", req.EndTime.Sub(req.StartTime))
+			logger.Infof("Time range too wide (%d seconds), ignoring time filter to search all data", duration)
 		}
 	} else {
 		logger.Infof("No meaningful time range specified, searching all data")
@@ -345,10 +348,8 @@ func (li *LogIndexer) convertHitToEntry(hit interface{}) *AccessLogEntry {
 			entry.RequestTime = li.getFloatField(fields, "request_time")
 
 			// Handle timestamp
-			if timestampStr := li.getStringField(fields, "timestamp"); timestampStr != "" {
-				if ts, err := time.Parse(time.RFC3339, timestampStr); err == nil {
-					entry.Timestamp = ts
-				}
+			if timestampField := li.getFloatField(fields, "timestamp"); timestampField != 0 {
+				entry.Timestamp = int64(timestampField)
 			}
 
 		} else {
@@ -371,9 +372,9 @@ func (li *LogIndexer) createCacheKey(req *QueryRequest) string {
 		statusStr = fmt.Sprintf("%v", req.Status)
 	}
 
-	return fmt.Sprintf("search_%s_%s_%s_%s_%s_%s_%s_%s_%s_%s_%s_%s_%s_%d_%d_%s_%s",
-		req.StartTime.Format("20060102150405"),
-		req.EndTime.Format("20060102150405"),
+	return fmt.Sprintf("search_%d_%d_%s_%s_%s_%s_%s_%s_%s_%s_%s_%s_%s_%d_%d_%s_%s",
+		req.StartTime,
+		req.EndTime,
 		req.Query,
 		req.IP,
 		req.Method,

+ 7 - 6
internal/nginx_log/indexer_stats.go

@@ -25,8 +25,9 @@ func (li *LogIndexer) getLatestFilesModTime() time.Time {
 
 	var latest time.Time
 	for _, fileInfo := range li.logPaths {
-		if fileInfo.LastModified.After(latest) {
-			latest = fileInfo.LastModified
+		modTime := time.Unix(fileInfo.LastModified, 0)
+		if modTime.After(latest) {
+			latest = modTime
 		}
 	}
 	return latest
@@ -49,8 +50,8 @@ func (li *LogIndexer) isCacheValid(cached *CachedStatsResult) bool {
 	// 2. No files have been modified since cache was created
 	// 3. Cache is not older than 5 minutes (safety fallback)
 	isValid := cached.DocCount == docCount &&
-		!latestModTime.After(cached.FilesModTime) &&
-		time.Since(cached.LastCalculated) < 5*time.Minute
+		!latestModTime.After(time.Unix(cached.FilesModTime, 0)) &&
+		time.Since(time.Unix(cached.LastCalculated, 0)) < 5*time.Minute
 
 
 	return isValid
@@ -164,8 +165,8 @@ func (li *LogIndexer) calculateSummaryStatsFromQuery(ctx context.Context, query
 	cachedResult := &CachedStatsResult{
 		Stats:          stats,
 		QueryHash:      cacheKey,
-		LastCalculated: time.Now(),
-		FilesModTime:   li.getLatestFilesModTime(),
+		LastCalculated: time.Now().Unix(),
+		FilesModTime:   li.getLatestFilesModTime().Unix(),
 		DocCount:       docCount,
 	}
 

+ 48 - 49
internal/nginx_log/indexer_types.go

@@ -2,23 +2,22 @@ package nginx_log
 
 import (
 	"sync"
-	"time"
 )
 
 // LogFileInfo holds metadata about a log file
 type LogFileInfo struct {
 	Path         string
-	LastModified time.Time
+	LastModified int64 // Unix timestamp
 	LastSize     int64
-	LastIndexed  time.Time
+	LastIndexed  int64 // Unix timestamp
 	IsCompressed bool
 	TimeRange    *TimeRange
 }
 
 // TimeRange represents a time range for log entries
 type TimeRange struct {
-	Start time.Time
-	End   time.Time
+	Start int64 // Unix timestamp
+	End   int64 // Unix timestamp
 }
 
 // CachedSearchResult represents a cached search result with total count
@@ -31,8 +30,8 @@ type CachedSearchResult struct {
 type CachedStatsResult struct {
 	Stats          *SummaryStats `json:"stats"`
 	QueryHash      string        `json:"query_hash"`      // Hash of the query parameters
-	LastCalculated time.Time     `json:"last_calculated"` // When stats were calculated
-	FilesModTime   time.Time     `json:"files_mod_time"`  // Latest modification time of all log files
+	LastCalculated int64         `json:"last_calculated"` // Unix timestamp when stats were calculated
+	FilesModTime   int64         `json:"files_mod_time"`  // Unix timestamp of latest modification time of all log files
 	DocCount       uint64        `json:"doc_count"`       // Document count when stats were calculated
 }
 
@@ -46,51 +45,51 @@ type IndexTask struct {
 
 // IndexedLogEntry represents a log entry stored in the index
 type IndexedLogEntry struct {
-	ID           string    `json:"id"`
-	FilePath     string    `json:"file_path"`
-	Timestamp    time.Time `json:"timestamp"`
-	IP           string    `json:"ip"`
-	RegionCode   string    `json:"region_code"`
-	Province     string    `json:"province"`
-	City         string    `json:"city"`
-	ISP          string    `json:"isp"`
-	Method       string    `json:"method"`
-	Path         string    `json:"path"`
-	Protocol     string    `json:"protocol"`
-	Status       int       `json:"status"`
-	BytesSent    int64     `json:"bytes_sent"`
-	Referer      string    `json:"referer"`
-	UserAgent    string    `json:"user_agent"`
-	Browser      string    `json:"browser"`
-	BrowserVer   string    `json:"browser_version"`
-	OS           string    `json:"os"`
-	OSVersion    string    `json:"os_version"`
-	DeviceType   string    `json:"device_type"`
-	RequestTime  float64   `json:"request_time"`
-	UpstreamTime *float64  `json:"upstream_time,omitempty"`
-	Raw          string    `json:"raw"`
+	ID           string   `json:"id"`
+	FilePath     string   `json:"file_path"`
+	Timestamp    int64    `json:"timestamp"` // Unix timestamp
+	IP           string   `json:"ip"`
+	RegionCode   string   `json:"region_code"`
+	Province     string   `json:"province"`
+	City         string   `json:"city"`
+	ISP          string   `json:"isp"`
+	Method       string   `json:"method"`
+	Path         string   `json:"path"`
+	Protocol     string   `json:"protocol"`
+	Status       int      `json:"status"`
+	BytesSent    int64    `json:"bytes_sent"`
+	Referer      string   `json:"referer"`
+	UserAgent    string   `json:"user_agent"`
+	Browser      string   `json:"browser"`
+	BrowserVer   string   `json:"browser_version"`
+	OS           string   `json:"os"`
+	OSVersion    string   `json:"os_version"`
+	DeviceType   string   `json:"device_type"`
+	RequestTime  float64  `json:"request_time"`
+	UpstreamTime *float64 `json:"upstream_time,omitempty"`
+	Raw          string   `json:"raw"`
 }
 
 // QueryRequest represents a search query for logs
 type QueryRequest struct {
-	StartTime      time.Time `json:"start_time"`
-	EndTime        time.Time `json:"end_time"`
-	Query          string    `json:"query,omitempty"`
-	IP             string    `json:"ip,omitempty"`
-	Method         string    `json:"method,omitempty"`
-	Status         []int     `json:"status,omitempty"`
-	Path           string    `json:"path,omitempty"`
-	UserAgent      string    `json:"user_agent,omitempty"`
-	Referer        string    `json:"referer,omitempty"`
-	Browser        string    `json:"browser,omitempty"`
-	OS             string    `json:"os,omitempty"`
-	Device         string    `json:"device,omitempty"`
-	Limit          int       `json:"limit"`
-	Offset         int       `json:"offset"`
-	SortBy         string    `json:"sort_by"`
-	SortOrder      string    `json:"sort_order"`
-	LogPath        string    `json:"log_path,omitempty"`
-	IncludeSummary bool      `json:"include_summary,omitempty"`
+	StartTime      int64  `json:"start_time"` // Unix timestamp
+	EndTime        int64  `json:"end_time"`   // Unix timestamp
+	Query          string `json:"query,omitempty"`
+	IP             string `json:"ip,omitempty"`
+	Method         string `json:"method,omitempty"`
+	Status         []int  `json:"status,omitempty"`
+	Path           string `json:"path,omitempty"`
+	UserAgent      string `json:"user_agent,omitempty"`
+	Referer        string `json:"referer,omitempty"`
+	Browser        string `json:"browser,omitempty"`
+	OS             string `json:"os,omitempty"`
+	Device         string `json:"device,omitempty"`
+	Limit          int    `json:"limit"`
+	Offset         int    `json:"offset"`
+	SortBy         string `json:"sort_by"`
+	SortOrder      string `json:"sort_order"`
+	LogPath        string `json:"log_path,omitempty"`
+	IncludeSummary bool   `json:"include_summary,omitempty"`
 }
 
 // SummaryStats represents the summary statistics for log entries
@@ -106,7 +105,7 @@ type SummaryStats struct {
 type QueryResult struct {
 	Entries      []*AccessLogEntry `json:"entries"`
 	Total        int               `json:"total"`
-	Took         time.Duration     `json:"took"`
+	Took         int64             `json:"took"` // Duration in milliseconds
 	Aggregations map[string]int    `json:"aggregations,omitempty"`
 	Summary      *SummaryStats     `json:"summary,omitempty"`
 	FromCache    bool              `json:"from_cache,omitempty"`

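After these struct changes, every time field marshals as a JSON number, matching the number types in the TypeScript interfaces earlier in the diff. A sketch of the resulting wire format, using a hypothetical struct that combines the two new numeric shapes (Took in milliseconds from QueryResult, StartTime as Unix seconds from QueryRequest):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Result is illustrative, not one of the project's types: int64 time fields
// marshal as plain JSON numbers.
type Result struct {
	Total     int   `json:"total"`
	Took      int64 `json:"took"`       // duration in milliseconds
	StartTime int64 `json:"start_time"` // Unix timestamp
}

func main() {
	start := time.Now()
	res := Result{Total: 42, Took: time.Since(start).Milliseconds(), StartTime: 1700000000}
	b, _ := json.Marshal(res)
	fmt.Println(string(b)) // {"total":42,"took":0,"start_time":1700000000}
}
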
+ 45 - 33
internal/nginx_log/log_cache_grouping.go

@@ -105,24 +105,24 @@ func GetAllLogsWithIndexGrouped(filters ...func(*NginxLogWithIndex) bool) []*Ngi
 			
 			// Use persistence data
 			if !persistenceIndex.LastModified.IsZero() {
-				log.LastModified = &persistenceIndex.LastModified
+				log.LastModified = persistenceIndex.LastModified.Unix()
 			}
 			log.LastSize = persistenceIndex.LastSize
 			if !persistenceIndex.LastIndexed.IsZero() {
-				log.LastIndexed = &persistenceIndex.LastIndexed
+				log.LastIndexed = persistenceIndex.LastIndexed.Unix()
 			}
 			if persistenceIndex.IndexStartTime != nil {
-				log.IndexStartTime = persistenceIndex.IndexStartTime
+				log.IndexStartTime = persistenceIndex.IndexStartTime.Unix()
 			}
 			if persistenceIndex.IndexDuration != nil {
-				log.IndexDuration = persistenceIndex.IndexDuration
+				log.IndexDuration = *persistenceIndex.IndexDuration
 			}
 			if persistenceIndex.TimeRangeStart != nil {
-				log.TimeRangeStart = persistenceIndex.TimeRangeStart
+				log.TimeRangeStart = persistenceIndex.TimeRangeStart.Unix()
 				log.HasTimeRange = true
 			}
 			if persistenceIndex.TimeRangeEnd != nil {
-				log.TimeRangeEnd = persistenceIndex.TimeRangeEnd
+				log.TimeRangeEnd = persistenceIndex.TimeRangeEnd.Unix()
 				log.HasTimeRange = true
 			}
 			log.DocumentCount = persistenceIndex.DocumentCount
@@ -131,20 +131,20 @@ func GetAllLogsWithIndexGrouped(filters ...func(*NginxLogWithIndex) bool) []*Ngi
 			if log.IndexStatus != IndexStatusIndexing {
 				log.IndexStatus = IndexStatusIndexed
 			}
-			if !fileStatus.LastModified.IsZero() {
-				log.LastModified = &fileStatus.LastModified
+			if fileStatus.LastModified != 0 {
+				log.LastModified = fileStatus.LastModified
 			}
 			log.LastSize = fileStatus.LastSize
-			if !fileStatus.LastIndexed.IsZero() {
-				log.LastIndexed = &fileStatus.LastIndexed
+			if fileStatus.LastIndexed != 0 {
+				log.LastIndexed = fileStatus.LastIndexed
 			}
 			log.IsCompressed = fileStatus.IsCompressed
 			log.HasTimeRange = fileStatus.HasTimeRange
-			if !fileStatus.TimeRangeStart.IsZero() {
-				log.TimeRangeStart = &fileStatus.TimeRangeStart
+			if fileStatus.TimeRangeStart != 0 {
+				log.TimeRangeStart = fileStatus.TimeRangeStart
 			}
-			if !fileStatus.TimeRangeEnd.IsZero() {
-				log.TimeRangeEnd = &fileStatus.TimeRangeEnd
+			if fileStatus.TimeRangeEnd != 0 {
+				log.TimeRangeEnd = fileStatus.TimeRangeEnd
 			}
 		}
 	}
@@ -287,50 +287,62 @@ func aggregateLogGroupStats(aggregatedLog *NginxLogWithIndex, group []*NginxLogW
 		}
 		
 		// Find the most recent indexed time
-		if log.LastIndexed != nil {
-			if mostRecentIndexed == nil || log.LastIndexed.After(*mostRecentIndexed) {
-				mostRecentIndexed = log.LastIndexed
+		if log.LastIndexed != 0 {
+			indexedTime := time.Unix(log.LastIndexed, 0)
+			if mostRecentIndexed == nil || indexedTime.After(*mostRecentIndexed) {
+				mostRecentIndexed = &indexedTime
 			}
 		}
 		
 		// Aggregate time ranges
-		if log.TimeRangeStart != nil {
-			if earliestTimeStart == nil || log.TimeRangeStart.Before(*earliestTimeStart) {
-				earliestTimeStart = log.TimeRangeStart
+		if log.TimeRangeStart != 0 {
+			startTime := time.Unix(log.TimeRangeStart, 0)
+			if earliestTimeStart == nil || startTime.Before(*earliestTimeStart) {
+				earliestTimeStart = &startTime
 			}
 		}
 		
-		if log.TimeRangeEnd != nil {
-			if latestTimeEnd == nil || log.TimeRangeEnd.After(*latestTimeEnd) {
-				latestTimeEnd = log.TimeRangeEnd
+		if log.TimeRangeEnd != 0 {
+			endTime := time.Unix(log.TimeRangeEnd, 0)
+			if latestTimeEnd == nil || endTime.After(*latestTimeEnd) {
+				latestTimeEnd = &endTime
 			}
 		}
 		
 		// Use properties from the most recent file
-		if log.LastModified != nil && (aggregatedLog.LastModified == nil || log.LastModified.After(*aggregatedLog.LastModified)) {
+		if log.LastModified != 0 && (aggregatedLog.LastModified == 0 || log.LastModified > aggregatedLog.LastModified) {
 			aggregatedLog.LastModified = log.LastModified
 		}
 		
 		// Find the EARLIEST IndexStartTime for the log group (when the group indexing started)
-		if log.IndexStartTime != nil && (earliestIndexStartTime == nil || log.IndexStartTime.Before(*earliestIndexStartTime)) {
-			earliestIndexStartTime = log.IndexStartTime
+		if log.IndexStartTime != 0 {
+			startTime := time.Unix(log.IndexStartTime, 0)
+			if earliestIndexStartTime == nil || startTime.Before(*earliestIndexStartTime) {
+				earliestIndexStartTime = &startTime
+			}
 		}
 		
 		// Sum up individual file durations to get total group duration
-		if log.IndexDuration != nil {
+		if log.IndexDuration != 0 {
 			if totalIndexDuration == nil {
 				totalIndexDuration = new(int64)
 			}
-			*totalIndexDuration += *log.IndexDuration
+			*totalIndexDuration += log.IndexDuration
 		}
 	}
 	
 	// Set aggregated values
-	aggregatedLog.IndexStartTime = earliestIndexStartTime
+	if earliestIndexStartTime != nil {
+		aggregatedLog.IndexStartTime = earliestIndexStartTime.Unix()
+	}
 	aggregatedLog.LastSize = totalSize
 	aggregatedLog.DocumentCount = totalDocuments
-	aggregatedLog.LastIndexed = mostRecentIndexed
-	aggregatedLog.IndexDuration = totalIndexDuration  // Sum of all individual file durations
+	if mostRecentIndexed != nil {
+		aggregatedLog.LastIndexed = mostRecentIndexed.Unix()
+	}
+	if totalIndexDuration != nil {
+		aggregatedLog.IndexDuration = *totalIndexDuration
+	}
 	
 	// Set index status based on group status
 	if indexingInProgress {
@@ -343,8 +355,8 @@ func aggregateLogGroupStats(aggregatedLog *NginxLogWithIndex, group []*NginxLogW
 	
 	// Set time range
 	if earliestTimeStart != nil && latestTimeEnd != nil {
-		aggregatedLog.TimeRangeStart = earliestTimeStart
-		aggregatedLog.TimeRangeEnd = latestTimeEnd
+		aggregatedLog.TimeRangeStart = earliestTimeStart.Unix()
+		aggregatedLog.TimeRangeEnd = latestTimeEnd.Unix()
 		aggregatedLog.HasTimeRange = true
 	}
 }
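
Because the aggregated fields are now raw Unix seconds, the min/max bookkeeping above could also compare int64 values directly instead of round-tripping through time.Unix. A condensed sketch with hypothetical helpers (not part of this commit), treating 0 as "unset" to match the sentinel convention:

    // earliest returns the smaller of two Unix timestamps, ignoring the
    // 0 sentinel that stands for "no value".
    func earliest(cur, candidate int64) int64 {
        if candidate == 0 {
            return cur
        }
        if cur == 0 || candidate < cur {
            return candidate
        }
        return cur
    }

    // latest returns the larger of two Unix timestamps, ignoring 0.
    func latest(cur, candidate int64) int64 {
        if candidate != 0 && candidate > cur {
            return candidate
        }
        return cur
    }

    // Usage: aggregatedLog.TimeRangeStart = earliest(aggregatedLog.TimeRangeStart, log.TimeRangeStart)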

+ 14 - 14
internal/nginx_log/log_cache_index.go

@@ -73,24 +73,24 @@ func GetAllLogsWithIndex(filters ...func(*NginxLogWithIndex) bool) []*NginxLogWi
 			
 			// Use persistence data
 			if !persistenceIndex.LastModified.IsZero() {
-				logWithIndex.LastModified = &persistenceIndex.LastModified
+				logWithIndex.LastModified = persistenceIndex.LastModified.Unix()
 			}
 			logWithIndex.LastSize = persistenceIndex.LastSize
 			if !persistenceIndex.LastIndexed.IsZero() {
-				logWithIndex.LastIndexed = &persistenceIndex.LastIndexed
+				logWithIndex.LastIndexed = persistenceIndex.LastIndexed.Unix()
 			}
 			if persistenceIndex.IndexStartTime != nil {
-				logWithIndex.IndexStartTime = persistenceIndex.IndexStartTime
+				logWithIndex.IndexStartTime = persistenceIndex.IndexStartTime.Unix()
 			}
 			if persistenceIndex.IndexDuration != nil {
-				logWithIndex.IndexDuration = persistenceIndex.IndexDuration
+				logWithIndex.IndexDuration = *persistenceIndex.IndexDuration
 			}
 			if persistenceIndex.TimeRangeStart != nil {
-				logWithIndex.TimeRangeStart = persistenceIndex.TimeRangeStart
+				logWithIndex.TimeRangeStart = persistenceIndex.TimeRangeStart.Unix()
 				logWithIndex.HasTimeRange = true
 			}
 			if persistenceIndex.TimeRangeEnd != nil {
-				logWithIndex.TimeRangeEnd = persistenceIndex.TimeRangeEnd
+				logWithIndex.TimeRangeEnd = persistenceIndex.TimeRangeEnd.Unix()
 				logWithIndex.HasTimeRange = true
 			}
 			logWithIndex.DocumentCount = persistenceIndex.DocumentCount
@@ -99,20 +99,20 @@ func GetAllLogsWithIndex(filters ...func(*NginxLogWithIndex) bool) []*NginxLogWi
 			if logWithIndex.IndexStatus != IndexStatusIndexing {
 				logWithIndex.IndexStatus = IndexStatusIndexed
 			}
-			if !fileStatus.LastModified.IsZero() {
-				logWithIndex.LastModified = &fileStatus.LastModified
+			if fileStatus.LastModified != 0 {
+				logWithIndex.LastModified = fileStatus.LastModified
 			}
 			logWithIndex.LastSize = fileStatus.LastSize
-			if !fileStatus.LastIndexed.IsZero() {
-				logWithIndex.LastIndexed = &fileStatus.LastIndexed
+			if fileStatus.LastIndexed != 0 {
+				logWithIndex.LastIndexed = fileStatus.LastIndexed
 			}
 			logWithIndex.IsCompressed = fileStatus.IsCompressed
 			logWithIndex.HasTimeRange = fileStatus.HasTimeRange
-			if !fileStatus.TimeRangeStart.IsZero() {
-				logWithIndex.TimeRangeStart = &fileStatus.TimeRangeStart
+			if fileStatus.TimeRangeStart != 0 {
+				logWithIndex.TimeRangeStart = fileStatus.TimeRangeStart
 			}
-			if !fileStatus.TimeRangeEnd.IsZero() {
-				logWithIndex.TimeRangeEnd = &fileStatus.TimeRangeEnd
+			if fileStatus.TimeRangeEnd != 0 {
+				logWithIndex.TimeRangeEnd = fileStatus.TimeRangeEnd
 			}
 		}
 

+ 15 - 19
internal/nginx_log/log_cache_types.go

@@ -1,8 +1,4 @@
 package nginx_log
 
-import (
-	"time"
-)
-
 // IndexStatus constants
 const (
@@ -21,19 +19,19 @@ type NginxLogCache struct {
 
 // NginxLogWithIndex represents a log file with its index status information
 type NginxLogWithIndex struct {
-	Path           string     `json:"path"`                      // Path to the log file
-	Type           string     `json:"type"`                      // Type of log: "access" or "error"
-	Name           string     `json:"name"`                      // Name of the log file
-	ConfigFile     string     `json:"config_file"`               // Path to the configuration file
-	IndexStatus    string     `json:"index_status"`              // Index status: indexed, indexing, not_indexed
-	LastModified   *time.Time `json:"last_modified,omitempty"`   // Last modification time of the file
-	LastSize       int64      `json:"last_size,omitempty"`       // Last known size of the file
-	LastIndexed    *time.Time `json:"last_indexed,omitempty"`    // When the file was last indexed
-	IndexStartTime *time.Time `json:"index_start_time,omitempty"` // When the last indexing operation started
-	IndexDuration  *int64     `json:"index_duration,omitempty"`  // Duration of last indexing operation in milliseconds
-	IsCompressed   bool       `json:"is_compressed"`             // Whether the file is compressed
-	HasTimeRange   bool       `json:"has_timerange"`             // Whether time range is available
-	TimeRangeStart *time.Time `json:"timerange_start,omitempty"` // Start of time range in the log
-	TimeRangeEnd   *time.Time `json:"timerange_end,omitempty"`   // End of time range in the log
-	DocumentCount  uint64     `json:"document_count,omitempty"`  // Number of indexed documents from this file
+	Path           string `json:"path"`                      // Path to the log file
+	Type           string `json:"type"`                      // Type of log: "access" or "error"
+	Name           string `json:"name"`                      // Name of the log file
+	ConfigFile     string `json:"config_file"`               // Path to the configuration file
+	IndexStatus    string `json:"index_status"`              // Index status: indexed, indexing, not_indexed
+	LastModified   int64  `json:"last_modified,omitempty"`   // Unix timestamp of last modification time
+	LastSize       int64  `json:"last_size,omitempty"`       // Last known size of the file
+	LastIndexed    int64  `json:"last_indexed,omitempty"`    // Unix timestamp when the file was last indexed
+	IndexStartTime int64  `json:"index_start_time,omitempty"` // Unix timestamp when the last indexing operation started
+	IndexDuration  int64  `json:"index_duration,omitempty"`  // Duration of last indexing operation in milliseconds
+	IsCompressed   bool   `json:"is_compressed"`             // Whether the file is compressed
+	HasTimeRange   bool   `json:"has_timerange"`             // Whether time range is available
+	TimeRangeStart int64  `json:"timerange_start,omitempty"` // Unix timestamp of start of time range in the log
+	TimeRangeEnd   int64  `json:"timerange_end,omitempty"`   // Unix timestamp of end of time range in the log
+	DocumentCount  uint64 `json:"document_count,omitempty"`  // Number of indexed documents from this file
 }
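
A struct in this shape marshals its time fields as JSON numbers, so API clients stop parsing RFC 3339 strings. A runnable sketch with a trimmed copy of the struct:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Trimmed copy of NginxLogWithIndex, just for illustration.
    type logWithIndex struct {
        Path         string `json:"path"`
        LastModified int64  `json:"last_modified,omitempty"`
        LastIndexed  int64  `json:"last_indexed,omitempty"`
    }

    func main() {
        b, _ := json.Marshal(logWithIndex{
            Path:         "/var/log/nginx/access.log",
            LastModified: 1735120800,
        })
        fmt.Println(string(b))
        // {"path":"/var/log/nginx/access.log","last_modified":1735120800}
        // last_indexed is dropped by omitempty, which matches the 0 sentinel.
    }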

+ 20 - 21
internal/nginx_log/log_formats.go

@@ -2,31 +2,30 @@ package nginx_log
 
 import (
 	"regexp"
-	"time"
 )
 
 // AccessLogEntry represents a parsed access log entry
 type AccessLogEntry struct {
-	Timestamp    time.Time `json:"timestamp"`
-	IP           string    `json:"ip"`
-	RegionCode   string    `json:"region_code"`
-	Province     string    `json:"province"`
-	City         string    `json:"city"`
-	Method       string    `json:"method"`
-	Path         string    `json:"path"`
-	Protocol     string    `json:"protocol"`
-	Status       int       `json:"status"`
-	BytesSent    int64     `json:"bytes_sent"`
-	Referer      string    `json:"referer"`
-	UserAgent    string    `json:"user_agent"`
-	Browser      string    `json:"browser"`
-	BrowserVer   string    `json:"browser_version"`
-	OS           string    `json:"os"`
-	OSVersion    string    `json:"os_version"`
-	DeviceType   string    `json:"device_type"`
-	RequestTime  float64   `json:"request_time,omitempty"`
-	UpstreamTime *float64  `json:"upstream_time,omitempty"`
-	Raw          string    `json:"raw"`
+	Timestamp    int64    `json:"timestamp"` // Unix timestamp
+	IP           string   `json:"ip"`
+	RegionCode   string   `json:"region_code"`
+	Province     string   `json:"province"`
+	City         string   `json:"city"`
+	Method       string   `json:"method"`
+	Path         string   `json:"path"`
+	Protocol     string   `json:"protocol"`
+	Status       int      `json:"status"`
+	BytesSent    int64    `json:"bytes_sent"`
+	Referer      string   `json:"referer"`
+	UserAgent    string   `json:"user_agent"`
+	Browser      string   `json:"browser"`
+	BrowserVer   string   `json:"browser_version"`
+	OS           string   `json:"os"`
+	OSVersion    string   `json:"os_version"`
+	DeviceType   string   `json:"device_type"`
+	RequestTime  float64  `json:"request_time,omitempty"`
+	UpstreamTime *float64 `json:"upstream_time,omitempty"`
+	Raw          string   `json:"raw"`
 }
 
 // LogFormat represents different nginx log format patterns
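
Consumers that still need wall-clock strings convert at the display edge. A minimal runnable sketch, assuming UTC output is acceptable:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        var ts int64 = 1703498400 // AccessLogEntry.Timestamp, Unix seconds
        fmt.Println(time.Unix(ts, 0).UTC().Format(time.RFC3339))
        // 2023-12-25T10:00:00Z
    }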

+ 1 - 1
internal/nginx_log/log_indexer_core.go

@@ -196,7 +196,7 @@ func createIndexMapping() mapping.IndexMapping {
 
 	// Map fields to their types
 	logMapping.AddFieldMappingsAt("file_path", filePathFieldMapping) // Use keyword analyzer for exact matching
-	logMapping.AddFieldMappingsAt("timestamp", dateFieldMapping)
+	logMapping.AddFieldMappingsAt("timestamp", numericFieldMapping) // Use numeric mapping for Unix timestamps
 	logMapping.AddFieldMappingsAt("ip", textFieldMapping)
 	logMapping.AddFieldMappingsAt("location", textFieldMapping)
 	logMapping.AddFieldMappingsAt("region_code", textFieldMapping)
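
The numericFieldMapping referenced here is defined outside this hunk; with Bleve it would presumably be built along these lines (a sketch of the conventional setup, import github.com/blevesearch/bleve/v2):

    numericFieldMapping := bleve.NewNumericFieldMapping()
    numericFieldMapping.Index = true // enables NumericRangeQuery over Unix seconds
    numericFieldMapping.Store = true // lets hits return the raw timestamp value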

+ 2 - 2
internal/nginx_log/log_indexer_rebuild.go

@@ -71,9 +71,9 @@ func (li *LogIndexer) RebuildIndex() error {
 	// Reset file tracking
 	li.mu.Lock()
 	for path := range li.logPaths {
-		li.logPaths[path].LastModified = time.Time{}
+		li.logPaths[path].LastModified = 0
 		li.logPaths[path].LastSize = 0
-		li.logPaths[path].LastIndexed = time.Time{}
+		li.logPaths[path].LastIndexed = 0
 		li.logPaths[path].TimeRange = nil // Clear in-memory time range
 	}
 	li.mu.Unlock()

+ 16 - 4
internal/nginx_log/log_indexer_status.go

@@ -26,7 +26,10 @@ func (li *LogIndexer) GetTimeRange() (start, end time.Time) {
 	}
 
 	if searchResultMin.Total > 0 && len(searchResultMin.Hits) > 0 {
-		if tsVal, ok := searchResultMin.Hits[0].Fields["timestamp"].(string); ok {
+		if tsFloat, ok := searchResultMin.Hits[0].Fields["timestamp"].(float64); ok {
+			start = time.Unix(int64(tsFloat), 0)
+		} else if tsVal, ok := searchResultMin.Hits[0].Fields["timestamp"].(string); ok {
+			// Fallback for old RFC3339 format
 			start, _ = time.Parse(time.RFC3339, tsVal)
 		}
 	}
@@ -45,7 +48,10 @@ func (li *LogIndexer) GetTimeRange() (start, end time.Time) {
 	}
 
 	if searchResultMax.Total > 0 && len(searchResultMax.Hits) > 0 {
-		if tsVal, ok := searchResultMax.Hits[0].Fields["timestamp"].(string); ok {
+		if tsFloat, ok := searchResultMax.Hits[0].Fields["timestamp"].(float64); ok {
+			end = time.Unix(int64(tsFloat), 0)
+		} else if tsVal, ok := searchResultMax.Hits[0].Fields["timestamp"].(string); ok {
+			// Fallback for old RFC3339 format
 			end, _ = time.Parse(time.RFC3339, tsVal)
 		}
 	}
@@ -79,7 +85,10 @@ func (li *LogIndexer) GetTimeRangeForPath(logPath string) (start, end time.Time)
 	}
 
 	if searchResultMin.Total > 0 && len(searchResultMin.Hits) > 0 {
-		if tsVal, ok := searchResultMin.Hits[0].Fields["timestamp"].(string); ok {
+		if tsFloat, ok := searchResultMin.Hits[0].Fields["timestamp"].(float64); ok {
+			start = time.Unix(int64(tsFloat), 0)
+		} else if tsVal, ok := searchResultMin.Hits[0].Fields["timestamp"].(string); ok {
+			// Fallback for old RFC3339 format
 			start, _ = time.Parse(time.RFC3339, tsVal)
 		}
 	}
@@ -97,7 +106,10 @@ func (li *LogIndexer) GetTimeRangeForPath(logPath string) (start, end time.Time)
 	}
 
 	if searchResultMax.Total > 0 && len(searchResultMax.Hits) > 0 {
-		if tsVal, ok := searchResultMax.Hits[0].Fields["timestamp"].(string); ok {
+		if tsFloat, ok := searchResultMax.Hits[0].Fields["timestamp"].(float64); ok {
+			end = time.Unix(int64(tsFloat), 0)
+		} else if tsVal, ok := searchResultMax.Hits[0].Fields["timestamp"].(string); ok {
+			// Fallback for old RFC3339 format
 			end, _ = time.Parse(time.RFC3339, tsVal)
 		}
 	}
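
The float64-then-string fallback now appears four times in this file and could be factored into one decoder. A sketch with a hypothetical helper (import "time"):

    // timestampFromField decodes a Bleve hit field that is either a numeric
    // Unix timestamp (new index) or an RFC 3339 string (pre-migration index).
    func timestampFromField(v interface{}) (time.Time, bool) {
        switch ts := v.(type) {
        case float64:
            return time.Unix(int64(ts), 0), true
        case string:
            if t, err := time.Parse(time.RFC3339, ts); err == nil {
                return t, true
            }
        }
        return time.Time{}, false
    }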

+ 6 - 6
internal/nginx_log/log_list.go

@@ -67,12 +67,12 @@ func sortCompareWithIndex(i, j *NginxLogWithIndex, key string, order string) boo
 		flag = iOrder < jOrder
 	case "last_indexed":
 		// Sort by last indexed time (more recent first)
-		if i.LastIndexed != nil && j.LastIndexed != nil {
-			flag = i.LastIndexed.After(*j.LastIndexed)
-		} else if i.LastIndexed == nil && j.LastIndexed != nil {
-			flag = true // nil comes after non-nil
-		} else if i.LastIndexed != nil && j.LastIndexed == nil {
-			flag = false // non-nil comes before nil
+		if i.LastIndexed != 0 && j.LastIndexed != 0 {
+			flag = i.LastIndexed > j.LastIndexed
+		} else if i.LastIndexed == 0 && j.LastIndexed != 0 {
+			flag = true // 0 comes after non-zero
+		} else if i.LastIndexed != 0 && j.LastIndexed == 0 {
+			flag = false // non-zero comes before 0
 		}
 	case "last_size":
 		// Sort by file size
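
Independent of the flag/order plumbing above, "most recent first, never-indexed last" can also be stated as one self-contained comparator; a sketch with a hypothetical helper:

    // lessLastIndexedDesc reports whether i should precede j when sorting by
    // LastIndexed descending, keeping the 0 ("never indexed") sentinel last.
    func lessLastIndexedDesc(i, j *NginxLogWithIndex) bool {
        if (i.LastIndexed == 0) != (j.LastIndexed == 0) {
            return i.LastIndexed != 0
        }
        return i.LastIndexed > j.LastIndexed
    }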

+ 6 - 6
internal/nginx_log/log_parser_parse_test.go

@@ -31,7 +31,7 @@ func TestLogParser_ParseLine(t *testing.T) {
 			logLine: `192.168.1.1 - - [25/Dec/2023:10:00:00 +0000] "GET /api/test HTTP/1.1" 200 1024 "https://example.com" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" 0.123 0.050`,
 			expected: &AccessLogEntry{
 				IP:           "192.168.1.1",
-				Timestamp:    time.Date(2023, 12, 25, 10, 0, 0, 0, time.UTC),
+				Timestamp:    time.Date(2023, 12, 25, 10, 0, 0, 0, time.UTC).Unix(),
 				Method:       "GET",
 				Path:         "/api/test",
 				Protocol:     "HTTP/1.1",
@@ -54,7 +54,7 @@ func TestLogParser_ParseLine(t *testing.T) {
 			logLine: `10.0.0.1 - - [01/Jan/2023:12:00:00 +0000] "POST /submit HTTP/1.1" 201 512`,
 			expected: &AccessLogEntry{
 				IP:        "10.0.0.1",
-				Timestamp: time.Date(2023, 1, 1, 12, 0, 0, 0, time.UTC),
+				Timestamp: time.Date(2023, 1, 1, 12, 0, 0, 0, time.UTC).Unix(),
 				Method:    "POST",
 				Path:      "/submit",
 				Protocol:  "HTTP/1.1",
@@ -83,7 +83,7 @@ func TestLogParser_ParseLine(t *testing.T) {
 			logLine: `127.0.0.1 - - [01/Jan/2023:00:00:00 +0000] "GET /path%20with%20spaces?param=value HTTP/1.1" 200 0 "-" "-"`,
 			expected: &AccessLogEntry{
 				IP:        "127.0.0.1",
-				Timestamp: time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC),
+				Timestamp: time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC).Unix(),
 				Method:    "GET",
 				Path:      "/path%20with%20spaces?param=value",
 				Protocol:  "HTTP/1.1",
@@ -99,7 +99,7 @@ func TestLogParser_ParseLine(t *testing.T) {
 			logLine: `2001:db8::1 - - [01/Jan/2023:00:00:00 +0000] "GET /ipv6 HTTP/1.1" 200 100 "-" "-"`,
 			expected: &AccessLogEntry{
 				IP:        "2001:db8::1",
-				Timestamp: time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC),
+				Timestamp: time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC).Unix(),
 				Method:    "GET",
 				Path:      "/ipv6",
 				Protocol:  "HTTP/1.1",
@@ -115,7 +115,7 @@ func TestLogParser_ParseLine(t *testing.T) {
 			logLine: `192.168.1.1 - - [01/Jan/2023:00:00:00 +0000] "GET /error HTTP/1.1" 500 0 "-" "-"`,
 			expected: &AccessLogEntry{
 				IP:        "192.168.1.1",
-				Timestamp: time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC),
+				Timestamp: time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC).Unix(),
 				Method:    "GET",
 				Path:      "/error",
 				Protocol:  "HTTP/1.1",
@@ -153,7 +153,7 @@ func TestLogParser_ParseLine(t *testing.T) {
 			if result.IP != tc.expected.IP {
 				t.Errorf("IP mismatch. Expected: %s, Got: %s", tc.expected.IP, result.IP)
 			}
-			if !result.Timestamp.Equal(tc.expected.Timestamp) {
+			if result.Timestamp != tc.expected.Timestamp {
 				t.Errorf("Timestamp mismatch. Expected: %v, Got: %v", tc.expected.Timestamp, result.Timestamp)
 			}
 			if result.Method != tc.expected.Method {

+ 1 - 1
internal/nginx_log/optimized_parser.go

@@ -175,7 +175,7 @@ func (p *OptimizedLogParser) parseTimestamp(line []byte, pos int, entry *AccessL
 	if pos > start {
 		timeStr := bytesToString(line[start:pos])
 		if t, err := time.Parse("02/Jan/2006:15:04:05 -0700", timeStr); err == nil {
-			entry.Timestamp = t
+			entry.Timestamp = t.Unix()
 		}
 	}
 	

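Unix() is offset-independent, so log lines written with different timezone offsets index to the same instant. A tiny runnable check:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        a, _ := time.Parse("02/Jan/2006:15:04:05 -0700", "25/Dec/2023:10:00:00 +0000")
        b, _ := time.Parse("02/Jan/2006:15:04:05 -0700", "25/Dec/2023:18:00:00 +0800")
        fmt.Println(a.Unix() == b.Unix()) // true: same instant, same Unix second
    }
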
+ 4 - 4
internal/nginx_log/optimized_search_indexer.go

@@ -118,8 +118,8 @@ func createOptimizedIndexMapping() mapping.IndexMapping {
 	docMapping := bleve.NewDocumentMapping()
 	
 	// Optimize field mappings for better search performance
-	timestampMapping := bleve.NewDateTimeFieldMapping()
-	timestampMapping.Store = false // Don't store, only index for searching
+	timestampMapping := bleve.NewNumericFieldMapping()
+	timestampMapping.Store = true // Store for time range queries
 	timestampMapping.Index = true
 	docMapping.AddFieldMappingsAt("timestamp", timestampMapping)
 	
@@ -339,7 +339,7 @@ func (osi *OptimizedSearchIndexer) indexBatch(entries []*AccessLogEntry) error {
 	for _, entry := range entries {
 		doc := osi.createIndexDocument(entry)
 		docID := fmt.Sprintf("%d_%s_%s", 
-			entry.Timestamp.Unix(), 
+			entry.Timestamp, 
 			entry.IP, 
 			entry.Path)
 		
@@ -360,7 +360,7 @@ func (osi *OptimizedSearchIndexer) indexBatch(entries []*AccessLogEntry) error {
 // createIndexDocument creates an optimized document for indexing
 func (osi *OptimizedSearchIndexer) createIndexDocument(entry *AccessLogEntry) map[string]interface{} {
 	doc := map[string]interface{}{
-		"timestamp":    entry.Timestamp.Format(time.RFC3339),
+		"timestamp":    entry.Timestamp,
 		"ip":           entry.IP,
 		"method":       entry.Method,
 		"path":         entry.Path,

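One caveat with second-resolution timestamps: the docID format above can collide when the same client requests the same path twice within one second, in which case the later document overwrites the earlier one in the index. If every entry must survive, a process-wide sequence number disambiguates (a sketch, not part of this commit; imports "fmt" and "sync/atomic"):

    var docSeq uint64 // package-level counter

    func uniqueDocID(entry *AccessLogEntry) string {
        n := atomic.AddUint64(&docSeq, 1)
        return fmt.Sprintf("%d_%s_%s_%d", entry.Timestamp, entry.IP, entry.Path, n)
    }
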
+ 20 - 20
internal/nginx_log/optimized_search_query.go

@@ -106,7 +106,7 @@ func (osq *OptimizedSearchQuery) SearchLogsOptimized(ctx context.Context, req *Q
 		
 		// Clone cached result to avoid mutation
 		result := osq.cloneCachedResult(cached)
-		result.Took = time.Since(start)
+		result.Took = time.Since(start).Milliseconds()
 		result.FromCache = true
 		
 		return result, nil
@@ -125,10 +125,10 @@ func (osq *OptimizedSearchQuery) SearchLogsOptimized(ctx context.Context, req *Q
 		return nil, err
 	}
 	
-	result.Took = time.Since(start)
+	result.Took = time.Since(start).Milliseconds()
 	
 	// Update average query time
-	osq.updateQueryTime(result.Took)
+	osq.updateQueryTime(time.Since(start))
 	
 	// Cache the result
 	osq.cacheResult(cacheKey, result)
@@ -146,15 +146,15 @@ func (osq *OptimizedSearchQuery) optimizeRequest(req *QueryRequest) *QueryReques
 	}
 	
 	// Optimize time range queries
-	if !optimized.StartTime.IsZero() && !optimized.EndTime.IsZero() {
-		duration := optimized.EndTime.Sub(optimized.StartTime)
+	if optimized.StartTime != 0 && optimized.EndTime != 0 {
+		duration := optimized.EndTime - optimized.StartTime
 		
 		// If time range is too wide, use index optimization
-		if duration > 365*24*time.Hour {
+		if duration > 365*24*3600 { // 365 days in seconds
 			// For very wide ranges, don't use time filter to avoid poor performance
-			logger.Debugf("Time range too wide (%v), removing time filter for optimization", duration)
-			optimized.StartTime = time.Time{}
-			optimized.EndTime = time.Time{}
+			logger.Debugf("Time range too wide (%d seconds), removing time filter for optimization", duration)
+			optimized.StartTime = 0
+			optimized.EndTime = 0
 		}
 	}
 	
@@ -229,10 +229,12 @@ func (osq *OptimizedSearchQuery) buildOptimizedQuery(req *QueryRequest) query.Qu
 	}
 	
 	// 3. Time range queries (if not too wide)
-	if !req.StartTime.IsZero() && !req.EndTime.IsZero() {
+	if req.StartTime != 0 && req.EndTime != 0 {
 		// Add small buffer to end time for inclusive search
-		inclusiveEndTime := req.EndTime.Add(1 * time.Millisecond)
-		timeQuery := bleve.NewDateRangeQuery(req.StartTime, inclusiveEndTime)
+		inclusiveEndTime := req.EndTime + 1
+		startFloat := float64(req.StartTime)
+		endFloat := float64(inclusiveEndTime)
+		timeQuery := bleve.NewNumericRangeQuery(&startFloat, &endFloat)
 		timeQuery.SetField("timestamp")
 		queries = append(queries, timeQuery)
 	}
@@ -532,10 +534,8 @@ func (osq *OptimizedSearchQuery) convertSearchResults(hits []*search.DocumentMat
 		}
 		
 		// Parse timestamp
-		if timestampStr := osq.getStringField(hit.Fields, "timestamp"); timestampStr != "" {
-			if ts, err := time.Parse(time.RFC3339, timestampStr); err == nil {
-				entry.Timestamp = ts
-			}
+		if timestampField := osq.getFloatField(hit.Fields, "timestamp"); timestampField != 0 {
+			entry.Timestamp = int64(timestampField)
 		}
 		
 		entries = append(entries, entry)
@@ -598,11 +598,11 @@ func (osq *OptimizedSearchQuery) createOptimizedCacheKey(req *QueryRequest) stri
 	// Create a more efficient cache key
 	var keyParts []string
 	
-	if !req.StartTime.IsZero() {
-		keyParts = append(keyParts, req.StartTime.Format("20060102150405"))
+	if req.StartTime != 0 {
+		keyParts = append(keyParts, fmt.Sprintf("%d", req.StartTime))
 	}
-	if !req.EndTime.IsZero() {
-		keyParts = append(keyParts, req.EndTime.Format("20060102150405"))
+	if req.EndTime != 0 {
+		keyParts = append(keyParts, fmt.Sprintf("%d", req.EndTime))
 	}
 	if req.Query != "" {
 		keyParts = append(keyParts, req.Query)
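
The +1 buffer above compensates for NewNumericRangeQuery treating the maximum as exclusive by default (the minimum is inclusive). If that is the behavior being worked around, Bleve can state the intent directly instead (sketch):

    startF, endF := float64(req.StartTime), float64(req.EndTime)
    incl := true
    // Inclusive on both bounds, so no +1 end-time buffer is needed.
    timeQuery := bleve.NewNumericRangeQueryInclusive(&startF, &endF, &incl, &incl)
    timeQuery.SetField("timestamp")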

+ 4 - 4
internal/nginx_log/persistence.go

@@ -168,9 +168,9 @@ func (pm *PersistenceManager) GetLogFileInfo(path string) (*LogFileInfo, error)
 	}
 	return &LogFileInfo{
 		Path:         logIndex.Path,
-		LastModified: logIndex.LastModified,
+		LastModified: logIndex.LastModified.Unix(),
 		LastSize:     logIndex.LastSize,
-		LastIndexed:  logIndex.LastIndexed,
+		LastIndexed:  logIndex.LastIndexed.Unix(),
 	}, nil
 }
 
@@ -180,9 +180,9 @@ func (pm *PersistenceManager) SaveLogFileInfo(path string, info *LogFileInfo) er
 	if err != nil {
 		return err
 	}
-	logIndex.LastModified = info.LastModified
+	logIndex.LastModified = time.Unix(info.LastModified, 0)
 	logIndex.LastSize = info.LastSize
-	logIndex.LastIndexed = info.LastIndexed
+	logIndex.LastIndexed = time.Unix(info.LastIndexed, 0)
 	return pm.SaveLogIndex(logIndex)
 }
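
One caveat worth guarding here: time.Time{}.Unix() returns -62135596800 rather than 0, so a record whose LastIndexed was never set would round-trip as a large negative timestamp. A minimal guard, assuming 0 should remain the "unset" sentinel on both sides of the conversion:

    func toUnix(t time.Time) int64 {
        if t.IsZero() {
            return 0
        }
        return t.Unix()
    }

    func fromUnix(ts int64) time.Time {
        if ts == 0 {
            return time.Time{} // zero value, IsZero() reports true
        }
        return time.Unix(ts, 0)
    }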
 

+ 12 - 14
internal/nginx_log/progress_tracker.go

@@ -12,13 +12,13 @@ import (
 type ProgressTracker struct {
 	mu                 sync.RWMutex
 	logGroupPath       string
-	startTime          time.Time
+	startTime          int64 // Unix timestamp
 	files              map[string]*FileProgress
 	totalEstimate      int64 // Total estimated lines across all files
 	totalActual        int64 // Total actual lines processed
 	isCompleted        bool
 	completionNotified bool // Flag to prevent duplicate completion notifications
-	lastNotify         time.Time
+	lastNotify         int64 // Unix timestamp
 }
 
 // FileProgress tracks progress for individual files
@@ -32,8 +32,8 @@ type FileProgress struct {
 	AvgLineSize    int64 // Dynamic average line size in bytes (for compressed files)
 	SampleCount    int64 // Number of lines sampled for average calculation
 	IsCompressed   bool
-	StartTime      *time.Time
-	CompletedTime  *time.Time
+	StartTime      int64 // Unix timestamp
+	CompletedTime  int64 // Unix timestamp
 }
 
 // FileState represents the current state of file processing
@@ -62,7 +62,7 @@ func (fs FileState) String() string {
 func NewProgressTracker(logGroupPath string) *ProgressTracker {
 	return &ProgressTracker{
 		logGroupPath:       logGroupPath,
-		startTime:          time.Now(),
+		startTime:          time.Now().Unix(),
 		files:              make(map[string]*FileProgress),
 		completionNotified: false,
 	}
@@ -154,8 +154,7 @@ func (pt *ProgressTracker) StartFile(filePath string) {
 
 	if progress, exists := pt.files[filePath]; exists {
 		progress.State = FileStateProcessing
-		now := time.Now()
-		progress.StartTime = &now
+		progress.StartTime = time.Now().Unix()
 
 		logger.Debugf("Started processing file: %s", filePath)
 		pt.notifyProgressLocked()
@@ -188,8 +187,7 @@ func (pt *ProgressTracker) CompleteFile(filePath string, finalProcessedLines int
 		oldProcessed := progress.ProcessedLines
 		progress.ProcessedLines = finalProcessedLines
 		progress.State = FileStateCompleted
-		now := time.Now()
-		progress.CompletedTime = &now
+		progress.CompletedTime = time.Now().Unix()
 
 		// Update total actual processed
 		pt.totalActual = pt.totalActual - oldProcessed + finalProcessedLines
@@ -265,22 +263,22 @@ type ProgressStats struct {
 	ProcessingFiles int
 	ProcessedLines  int64
 	EstimatedLines  int64
-	StartTime       time.Time
+	StartTime       int64 // Unix timestamp
 	IsCompleted     bool
 }
 
 // notifyProgressLocked sends progress notification (must be called with lock held)
 func (pt *ProgressTracker) notifyProgressLocked() {
 	// Throttle notifications to avoid spam
-	now := time.Now()
-	if now.Sub(pt.lastNotify) < 2*time.Second {
+	now := time.Now().Unix()
+	if now-pt.lastNotify < 2 {
 		return
 	}
 	pt.lastNotify = now
 
 	percentage, stats := pt.getProgressLocked()
 
-	elapsed := time.Since(pt.startTime).Milliseconds()
+	elapsed := (time.Now().Unix() - pt.startTime) * 1000 // Convert to milliseconds
 	var estimatedRemain int64
 
 	if percentage > 0 && percentage < 100 {
@@ -310,7 +308,7 @@ func (pt *ProgressTracker) notifyProgressLocked() {
 
 // notifyCompletionLocked sends completion notification (must be called with lock held)
 func (pt *ProgressTracker) notifyCompletionLocked() {
-	elapsed := time.Since(pt.startTime).Milliseconds()
+	elapsed := (time.Now().Unix() - pt.startTime) * 1000 // Convert to milliseconds
 
 	// Calculate total size processed using improved estimation
 	var totalSize int64
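
Second-resolution clocks cap the throttling and ETA math above at whole-second granularity. If finer elapsed times ever matter, Unix milliseconds keep the same plain-int64 shape (sketch, Go 1.17+):

    startTime := time.Now().UnixMilli() // int64 milliseconds since epoch
    // ...
    elapsed := time.Now().UnixMilli() - startTime // already in ms; no *1000 step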

+ 4 - 4
internal/nginx_log/search_performance_bench_test.go

@@ -174,7 +174,7 @@ func setupBenchmarkIndexer(b *testing.B, entryCount int) (*LogIndexer, string, f
 	for i, entry := range entries {
 		docID := fmt.Sprintf("doc_%d", i)
 		doc := map[string]interface{}{
-			"timestamp":    entry.Timestamp.Format(time.RFC3339),
+			"timestamp":    entry.Timestamp,
 			"ip":           entry.IP,
 			"method":       entry.Method,
 			"path":         entry.Path,
@@ -346,8 +346,8 @@ func BenchmarkSearchLogs_TimeRange(b *testing.B) {
 	endTime := startTime.Add(24 * time.Hour)
 	
 	req := &QueryRequest{
-		StartTime: startTime,
-		EndTime:   endTime,
+		StartTime: startTime.Unix(),
+		EndTime:   endTime.Unix(),
 		Limit:     100,
 	}
 	
@@ -613,7 +613,7 @@ func BenchmarkSearchLogs_Comprehensive(b *testing.B) {
 				if i == 0 {
 					b.ReportMetric(float64(result.Total), "total_results")
 					b.ReportMetric(float64(len(result.Entries)), "returned_results")
-					b.ReportMetric(float64(result.Took.Nanoseconds()), "search_time_ns")
+					b.ReportMetric(float64(result.Took*1000000), "search_time_ns")
 				}
 			}
 		})
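
With Took now in milliseconds, the metric conversion is a plain multiply; time.Duration can make the same conversion self-documenting (equivalent sketch):

    nanos := float64(time.Duration(result.Took) * time.Millisecond) // Duration's base unit is ns
    b.ReportMetric(nanos, "search_time_ns")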