
Migrate from logrus to log/slog

DarthSim, 5 months ago
Commit 89821daa9f
53 changed files with 2821 additions and 508 deletions
  1. asyncbuffer/buffer.go (+6 -3)
  2. cli/main.go (+2 -2)
  3. config/config.go (+11 -12)
  4. errorreport/bugsnag/bugsnag.go (+6 -2)
  5. fetcher/transport/azure/azure_test.go (+3 -3)
  6. go.mod (+2 -2)
  7. go.sum (+3 -3)
  8. handlers/processing/request_methods.go (+12 -13)
  9. handlers/stream/handler.go (+3 -6)
  10. handlers/stream/handler_test.go (+3 -4)
  11. init.go (+10 -7)
  12. integration/suite.go (+3 -6)
  13. logger/buffer.go (+223 -0)
  14. logger/config.go (+90 -0)
  15. logger/formatter.go (+0 -177)
  16. logger/formatter_common.go (+119 -0)
  17. logger/formatter_json.go (+351 -0)
  18. logger/formatter_json_test.go (+321 -0)
  19. logger/formatter_pretty.go (+190 -0)
  20. logger/formatter_pretty_test.go (+345 -0)
  21. logger/formatter_structured.go (+133 -0)
  22. logger/formatter_structured_test.go (+299 -0)
  23. logger/gliblog/gliblog.c (+0 -0)
  24. logger/gliblog/gliblog.go (+3 -5)
  25. logger/gliblog/gliblog.h (+0 -0)
  26. logger/handler.go (+225 -0)
  27. logger/handler_test.go (+105 -0)
  28. logger/log.go (+0 -61)
  29. logger/logger.go (+70 -0)
  30. logger/syslog.go (+0 -85)
  31. logger/syslog/config.go (+76 -0)
  32. logger/syslog/syslog.go (+54 -0)
  33. memory/stats.go (+11 -8)
  34. monitoring/cloudwatch/cloudwatch.go (+2 -2)
  35. monitoring/datadog/datadog.go (+4 -3)
  36. monitoring/newrelic/newrelic.go (+8 -3)
  37. monitoring/otel/otel.go (+13 -15)
  38. monitoring/otel/otel_test.go (+6 -3)
  39. monitoring/prometheus/prometheus.go (+3 -3)
  40. options/apply.go (+3 -2)
  41. options/parse.go (+3 -3)
  42. options/processing_options.go (+5 -0)
  43. processing/fix_size.go (+18 -5)
  44. processing/processing.go (+7 -7)
  45. processing/processing_test.go (+6 -3)
  46. processing/scale_on_load.go (+2 -3)
  47. reuseport/listen_no_reuseport.go (+2 -3)
  48. security/config.go (+3 -4)
  49. server/logging.go (+27 -26)
  50. server/responsewriter/config_test.go (+5 -6)
  51. server/server.go (+7 -8)
  52. structdiff/diff.go (+9 -0)
  53. vips/vips.go (+9 -10)

+ 6 - 3
asyncbuffer/buffer.go

@@ -17,11 +17,10 @@ import (
 	"context"
 	"errors"
 	"io"
+	"log/slog"
 	"sync"
 	"sync/atomic"
 
-	"github.com/sirupsen/logrus"
-
 	"github.com/imgproxy/imgproxy/v3/ierrors"
 	"github.com/imgproxy/imgproxy/v3/ioutil"
 )
@@ -154,7 +153,11 @@ func (ab *AsyncBuffer) readChunks() {
 
 		// Close the upstream reader
 		if err := ab.r.Close(); err != nil {
-			logrus.WithField("source", "asyncbuffer.AsyncBuffer.readChunks").Warningf("error closing upstream reader: %s", err)
+			slog.Warn(
+				"error closing upstream reader",
+				"error", err,
+				"source", "asyncbuffer.AsyncBuffer.readChunks",
+			)
 		}
 
 		ab.callFinishFn()
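
This first hunk shows the basic shape of the whole migration: logrus's chained WithField(...).Warningf(...) call becomes a single slog.Warn call with alternating key-value arguments. A minimal standalone sketch of the same call using only the standard library (the TextHandler choice is illustrative, not imgproxy's own handler):

package main

import (
	"errors"
	"log/slog"
	"os"
)

func main() {
	// Any slog handler works; TextHandler makes the key-value pairs visible.
	slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, nil)))

	err := errors.New("connection reset by peer")

	// Loosely typed form: message first, then alternating keys and values.
	slog.Warn(
		"error closing upstream reader",
		"error", err,
		"source", "asyncbuffer.AsyncBuffer.readChunks",
	)

	// Equivalent strongly typed form using attribute constructors.
	slog.Warn(
		"error closing upstream reader",
		slog.Any("error", err),
		slog.String("source", "asyncbuffer.AsyncBuffer.readChunks"),
	)
}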

+ 2 - 2
cli/main.go

@@ -3,12 +3,12 @@ package main
 import (
 	"context"
 	"fmt"
-	"log"
 	"os"
 	"os/signal"
 	"syscall"
 
 	"github.com/imgproxy/imgproxy/v3"
+	"github.com/imgproxy/imgproxy/v3/logger"
 	"github.com/imgproxy/imgproxy/v3/version"
 	"github.com/urfave/cli/v3"
 )
@@ -75,6 +75,6 @@ func main() {
 	}
 
 	if err := cmd.Run(context.Background(), os.Args); err != nil {
-		log.Fatal(err)
+		logger.Fatal(err.Error())
 	}
 }
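
logger.Fatal here is imgproxy's own helper, and its body is not shown in this section, so the following is only a hedged sketch of what a slog-based Fatal typically looks like (log at error level, then exit), not the project's actual code:

package logger

import (
	"log/slog"
	"os"
)

// Fatal logs the message through the default slog logger and terminates the
// process, mirroring the old log.Fatal behavior. Sketch only; the real
// imgproxy helper may differ.
func Fatal(msg string, args ...any) {
	slog.Error(msg, args...)
	os.Exit(1)
}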

+ 11 - 12
config/config.go

@@ -4,14 +4,13 @@ import (
 	"errors"
 	"flag"
 	"fmt"
+	"log/slog"
 	"math"
 	"os"
 	"regexp"
 	"runtime"
 	"strings"
 
-	log "github.com/sirupsen/logrus"
-
 	"github.com/imgproxy/imgproxy/v3/config/configurators"
 	"github.com/imgproxy/imgproxy/v3/imagetype"
 	"github.com/imgproxy/imgproxy/v3/version"
@@ -441,7 +440,7 @@ func Configure() error {
 	configurators.String(&Bind, "IMGPROXY_BIND")
 
 	if _, ok := os.LookupEnv("IMGPROXY_WRITE_TIMEOUT"); ok {
-		log.Warning("IMGPROXY_WRITE_TIMEOUT is deprecated, use IMGPROXY_TIMEOUT instead")
+		slog.Warn("IMGPROXY_WRITE_TIMEOUT is deprecated, use IMGPROXY_TIMEOUT instead")
 		configurators.Int(&Timeout, "IMGPROXY_WRITE_TIMEOUT")
 	}
 	configurators.Int(&Timeout, "IMGPROXY_TIMEOUT")
@@ -450,7 +449,7 @@ func Configure() error {
 	configurators.Int(&GracefulStopTimeout, "IMGPROXY_GRACEFUL_STOP_TIMEOUT")
 
 	if _, ok := os.LookupEnv("IMGPROXY_READ_TIMEOUT"); ok {
-		log.Warning("IMGPROXY_READ_TIMEOUT is deprecated, use IMGPROXY_READ_REQUEST_TIMEOUT instead")
+		slog.Warn("IMGPROXY_READ_TIMEOUT is deprecated, use IMGPROXY_READ_REQUEST_TIMEOUT instead")
 		configurators.Int(&ReadRequestTimeout, "IMGPROXY_READ_TIMEOUT")
 	}
 	configurators.Int(&ReadRequestTimeout, "IMGPROXY_READ_REQUEST_TIMEOUT")
@@ -464,7 +463,7 @@ func Configure() error {
 
 	if lambdaFn := os.Getenv("AWS_LAMBDA_FUNCTION_NAME"); len(lambdaFn) > 0 {
 		Workers = 1
-		log.Info("AWS Lambda environment detected, setting workers to 1")
+		slog.Info("AWS Lambda environment detected, setting workers to 1")
 	} else {
 		configurators.Int(&Workers, "IMGPROXY_CONCURRENCY")
 		configurators.Int(&Workers, "IMGPROXY_WORKERS")
@@ -528,11 +527,11 @@ func Configure() error {
 	configurators.Bool(&ReturnAttachment, "IMGPROXY_RETURN_ATTACHMENT")
 
 	if _, ok := os.LookupEnv("IMGPROXY_ENABLE_WEBP_DETECTION"); ok {
-		log.Warning("IMGPROXY_ENABLE_WEBP_DETECTION is deprecated, use IMGPROXY_AUTO_WEBP instead")
+		slog.Warn("IMGPROXY_ENABLE_WEBP_DETECTION is deprecated, use IMGPROXY_AUTO_WEBP instead")
 		configurators.Bool(&AutoWebp, "IMGPROXY_ENABLE_WEBP_DETECTION")
 	}
 	if _, ok := os.LookupEnv("IMGPROXY_ENABLE_AVIF_DETECTION"); ok {
-		log.Warning("IMGPROXY_ENABLE_AVIF_DETECTION is deprecated, use IMGPROXY_AUTO_AVIF instead")
+		slog.Warn("IMGPROXY_ENABLE_AVIF_DETECTION is deprecated, use IMGPROXY_AUTO_AVIF instead")
 		configurators.Bool(&AutoAvif, "IMGPROXY_ENABLE_AVIF_DETECTION")
 	}
 
@@ -695,10 +694,10 @@ func Configure() error {
 		return fmt.Errorf("Number of keys and number of salts should be equal. Keys: %d, salts: %d", len(Keys), len(Salts))
 	}
 	if len(Keys) == 0 {
-		log.Warning("No keys defined, so signature checking is disabled")
+		slog.Warn("No keys defined, so signature checking is disabled")
 	}
 	if len(Salts) == 0 {
-		log.Warning("No salts defined, so signature checking is disabled")
+		slog.Warn("No salts defined, so signature checking is disabled")
 	}
 
 	if SignatureSize < 1 || SignatureSize > 32 {
@@ -792,7 +791,7 @@ func Configure() error {
 	}
 
 	if IgnoreSslVerification {
-		log.Warning("Ignoring SSL verification is very unsafe")
+		slog.Warn("Ignoring SSL verification is very unsafe")
 	}
 
 	if LocalFileSystemRoot != "" {
@@ -806,12 +805,12 @@ func Configure() error {
 		}
 
 		if LocalFileSystemRoot == "/" {
-			log.Warning("Exposing root via IMGPROXY_LOCAL_FILESYSTEM_ROOT is unsafe")
+			slog.Warn("Exposing root via IMGPROXY_LOCAL_FILESYSTEM_ROOT is unsafe")
 		}
 	}
 
 	if _, ok := os.LookupEnv("IMGPROXY_USE_GCS"); !ok && len(GCSKey) > 0 {
-		log.Warning("Set IMGPROXY_USE_GCS to true since it may be required by future versions to enable GCS support")
+		slog.Warn("Set IMGPROXY_USE_GCS to true since it may be required by future versions to enable GCS support")
 		GCSEnabled = true
 	}
 

+ 6 - 2
errorreport/bugsnag/bugsnag.go

@@ -1,10 +1,11 @@
 package bugsnag
 
 import (
+	"fmt"
+	"log/slog"
 	"net/http"
 
 	"github.com/bugsnag/bugsnag-go/v2"
-	"github.com/sirupsen/logrus"
 
 	"github.com/imgproxy/imgproxy/v3/config"
 )
@@ -14,7 +15,10 @@ var enabled bool
 type logger struct{}
 
 func (l logger) Printf(format string, v ...interface{}) {
-	logrus.WithField("source", "bugsnag").Debugf(format, v...)
+	slog.Debug(
+		fmt.Sprintf(format, v...),
+		"source", "bugsnag",
+	)
 }
 
 func Init() {
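
bugsnag-go wants a Printf-style logger, so the commit bridges it with fmt.Sprintf and hands the result to slog.Debug. The same bridge works for any dependency with that interface; a self-contained toy version (the printfLogger type and the sample call are hypothetical):

package main

import (
	"fmt"
	"log/slog"
)

// printfLogger adapts the Printf-style interface many libraries expect to slog.
type printfLogger struct{}

func (printfLogger) Printf(format string, v ...interface{}) {
	slog.Debug(fmt.Sprintf(format, v...), "source", "bugsnag")
}

func main() {
	var l interface{ Printf(string, ...interface{}) } = printfLogger{}
	// Note: the default slog logger drops debug messages unless its level is lowered.
	l.Printf("notifying about %d events", 3)
}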

+ 3 - 3
fetcher/transport/azure/azure_test.go

@@ -3,16 +3,15 @@ package azure
 import (
 	"net/http"
 	"net/http/httptest"
-	"os"
 	"testing"
 	"time"
 
-	"github.com/sirupsen/logrus"
 	"github.com/stretchr/testify/suite"
 
 	"github.com/imgproxy/imgproxy/v3/config"
 	"github.com/imgproxy/imgproxy/v3/fetcher/transport/generichttp"
 	"github.com/imgproxy/imgproxy/v3/httpheaders"
+	"github.com/imgproxy/imgproxy/v3/logger"
 )
 
 type AzureTestSuite struct {
@@ -27,7 +26,7 @@ type AzureTestSuite struct {
 func (s *AzureTestSuite) SetupSuite() {
 	data := make([]byte, 32)
 
-	logrus.SetOutput(os.Stdout)
+	logger.Mute()
 
 	s.etag = "testetag"
 	s.lastModified, _ = time.Parse(http.TimeFormat, "Wed, 21 Oct 2015 07:28:00 GMT")
@@ -60,6 +59,7 @@ func (s *AzureTestSuite) SetupSuite() {
 func (s *AzureTestSuite) TearDownSuite() {
 	s.server.Close()
 	config.IgnoreSslVerification = false
+	logger.Unmute()
 }
 
 func (s *AzureTestSuite) TestRoundTripWithETag() {
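
Tests switch from logrus.SetOutput to logger.Mute()/logger.Unmute(), whose bodies are not part of this section. A plausible sketch, assuming they simply swap the default slog handler for a discarding one and restore it afterwards (not imgproxy's actual code):

package logger

import (
	"io"
	"log/slog"
)

var saved *slog.Logger

// Mute replaces the process-wide default logger with one that discards everything.
func Mute() {
	saved = slog.Default()
	slog.SetDefault(slog.New(slog.NewTextHandler(io.Discard, nil)))
}

// Unmute restores the logger that was active before Mute.
func Unmute() {
	if saved != nil {
		slog.SetDefault(saved)
	}
}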

+ 2 - 2
go.mod

@@ -11,7 +11,6 @@ require (
 	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0
 	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2
 	github.com/DarthSim/godotenv v1.3.1
-	github.com/DataDog/datadog-agent/pkg/trace v0.67.0
 	github.com/DataDog/datadog-go/v5 v5.7.1
 	github.com/DataDog/dd-trace-go/v2 v2.2.3
 	github.com/airbrake/gobrake/v5 v5.6.2
@@ -33,13 +32,13 @@ require (
 	github.com/honeybadger-io/honeybadger-go v0.8.0
 	github.com/johannesboyne/gofakes3 v0.0.0-20250916175020-ebf3e50324d3
 	github.com/matoous/go-nanoid/v2 v2.1.0
+	github.com/mattn/go-isatty v0.0.20
 	github.com/ncw/swift/v2 v2.0.4
 	github.com/newrelic/go-agent/v3 v3.40.1
 	github.com/newrelic/newrelic-telemetry-sdk-go v0.8.1
 	github.com/pkg/errors v0.9.1
 	github.com/prometheus/client_golang v1.23.2
 	github.com/shirou/gopsutil v3.21.11+incompatible
-	github.com/sirupsen/logrus v1.9.3
 	github.com/stretchr/testify v1.11.1
 	github.com/tdewolff/parse/v2 v2.8.3
 	github.com/trimmer-io/go-xmp v1.0.0
@@ -83,6 +82,7 @@ require (
 	github.com/DataDog/datadog-agent/pkg/obfuscate v0.67.0 // indirect
 	github.com/DataDog/datadog-agent/pkg/proto v0.67.0 // indirect
 	github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.69.0 // indirect
+	github.com/DataDog/datadog-agent/pkg/trace v0.67.0 // indirect
 	github.com/DataDog/datadog-agent/pkg/util/log v0.67.0 // indirect
 	github.com/DataDog/datadog-agent/pkg/util/scrubber v0.67.0 // indirect
 	github.com/DataDog/datadog-agent/pkg/version v0.67.0 // indirect

+ 3 - 3
go.sum

@@ -328,6 +328,8 @@ github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4
 github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
 github.com/matoous/go-nanoid/v2 v2.1.0 h1:P64+dmq21hhWdtvZfEAofnvJULaRR1Yib0+PnU669bE=
 github.com/matoous/go-nanoid/v2 v2.1.0/go.mod h1:KlbGNQ+FhrUNIHUxZdL63t7tl4LaPkZNpUULS8H4uVM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/minio/crc64nvme v1.0.1 h1:DHQPrYPdqK7jQG/Ls5CTBZWeex/2FMS3G5XGkycuFrY=
 github.com/minio/crc64nvme v1.0.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
@@ -407,8 +409,6 @@ github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMT
 github.com/shirou/gopsutil/v4 v4.25.6 h1:kLysI2JsKorfaFPcYmcJqbzROzsBWEOAtw6A7dIfqXs=
 github.com/shirou/gopsutil/v4 v4.25.6/go.mod h1:PfybzyydfZcN+JMMjkF6Zb8Mq1A/VcogFFg7hj50W9c=
 github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
-github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
 github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY=
@@ -622,8 +622,8 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
 golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=

+ 12 - 13
handlers/processing/request_methods.go

@@ -3,6 +3,7 @@ package processing
 import (
 	"context"
 	"io"
+	"log/slog"
 	"net/http"
 	"strconv"
 
@@ -16,7 +17,6 @@ import (
 	"github.com/imgproxy/imgproxy/v3/options"
 	"github.com/imgproxy/imgproxy/v3/processing"
 	"github.com/imgproxy/imgproxy/v3/server"
-	log "github.com/sirupsen/logrus"
 )
 
 // makeImageRequestHeaders creates headers for the image request
@@ -106,9 +106,12 @@ func (r *request) handleDownloadError(
 		errorreport.Report(err, r.req)
 	}
 
-	log.
-		WithField("request_id", r.reqID).
-		Warningf("Could not load image %s. Using fallback image. %s", r.imageURL, err.Error())
+	slog.Warn(
+		"Could not load image. Using fallback image",
+		"request_id", r.reqID,
+		"image_url", r.imageURL,
+		"error", err.Error(),
+	)
 
 	var statusCode int
 
@@ -142,7 +145,7 @@ func (r *request) getFallbackImage(
 
 	data, h, err := fbi.Get(ctx, po)
 	if err != nil {
-		log.Warning(err.Error())
+		slog.Warn(err.Error())
 
 		if ierr := r.wrapDownloadingErr(err); ierr.ShouldReport() {
 			errorreport.Report(ierr, r.req)
@@ -201,10 +204,8 @@ func (r *request) respondWithNotModified() error {
 
 	server.LogResponse(
 		r.reqID, r.req, http.StatusNotModified, nil,
-		log.Fields{
-			"image_url":          r.imageURL,
-			"processing_options": r.po,
-		},
+		slog.String("image_url", r.imageURL),
+		slog.Any("processing_options", r.po),
 	)
 
 	return nil
@@ -255,10 +256,8 @@ func (r *request) respondWithImage(statusCode int, resultData imagedata.ImageDat
 
 	server.LogResponse(
 		r.reqID, r.req, statusCode, ierr,
-		log.Fields{
-			"image_url":          r.imageURL,
-			"processing_options": r.po,
-		},
+		slog.String("image_url", r.imageURL),
+		slog.Any("processing_options", r.po),
 	)
 
 	return nil
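
server.LogResponse now receives slog.Attr values instead of a log.Fields map. Its real signature lives in server/logging.go (changed in this commit but not shown here); a hedged sketch of how such a variadic-attribute helper can forward to slog:

package server

import (
	"log/slog"
	"net/http"
)

// logResponse is a sketch of a LogResponse-style helper: fixed request fields
// first, then whatever extra attributes the caller passed in.
func logResponse(reqID string, req *http.Request, status int, err error, attrs ...slog.Attr) {
	level := slog.LevelInfo
	if err != nil {
		level = slog.LevelError
	}

	args := []any{
		slog.String("request_id", reqID),
		slog.String("method", req.Method),
		slog.Int("status", status),
	}
	for _, a := range attrs {
		args = append(args, a)
	}
	if err != nil {
		args = append(args, slog.Any("error", err))
	}

	slog.Log(req.Context(), level, "Completed request", args...)
}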

+ 3 - 6
handlers/stream/handler.go

@@ -3,11 +3,10 @@ package stream
 import (
 	"context"
 	"io"
+	"log/slog"
 	"net/http"
 	"sync"
 
-	log "github.com/sirupsen/logrus"
-
 	"github.com/imgproxy/imgproxy/v3/cookies"
 	"github.com/imgproxy/imgproxy/v3/fetcher"
 	"github.com/imgproxy/imgproxy/v3/httpheaders"
@@ -177,10 +176,8 @@ func (s *request) streamData(res *http.Response) {
 
 	server.LogResponse(
 		s.reqID, s.imageRequest, res.StatusCode, nil,
-		log.Fields{
-			"image_url":          s.imageURL,
-			"processing_options": s.po,
-		},
+		slog.String("image_url", s.imageURL),
+		slog.Any("processing_options", s.po),
 	)
 
 	// We've got to skip logging here

+ 3 - 4
handlers/stream/handler_test.go

@@ -5,17 +5,16 @@ import (
 	"io"
 	"net/http"
 	"net/http/httptest"
-	"os"
 	"strconv"
 	"testing"
 	"time"
 
-	"github.com/sirupsen/logrus"
 	"github.com/stretchr/testify/suite"
 
 	"github.com/imgproxy/imgproxy/v3/config"
 	"github.com/imgproxy/imgproxy/v3/fetcher"
 	"github.com/imgproxy/imgproxy/v3/httpheaders"
+	"github.com/imgproxy/imgproxy/v3/logger"
 	"github.com/imgproxy/imgproxy/v3/options"
 	"github.com/imgproxy/imgproxy/v3/server/responsewriter"
 	"github.com/imgproxy/imgproxy/v3/testutil"
@@ -79,11 +78,11 @@ func (s *HandlerTestSuite) SetupSuite() {
 	s.testServer, _ = testutil.NewLazySuiteTestServer(s)
 
 	// Silence logs during tests
-	logrus.SetOutput(io.Discard)
+	logger.Mute()
 }
 
 func (s *HandlerTestSuite) TearDownSuite() {
-	logrus.SetOutput(os.Stdout)
+	logger.Unmute()
 }
 
 func (s *HandlerTestSuite) SetupSubTest() {

+ 10 - 7
init.go

@@ -3,16 +3,18 @@
 package imgproxy
 
 import (
-	"github.com/DataDog/datadog-agent/pkg/trace/log"
+	"fmt"
+	"log/slog"
+
+	"go.uber.org/automaxprocs/maxprocs"
+
 	"github.com/imgproxy/imgproxy/v3/config"
 	"github.com/imgproxy/imgproxy/v3/config/loadenv"
 	"github.com/imgproxy/imgproxy/v3/errorreport"
-	"github.com/imgproxy/imgproxy/v3/gliblog"
 	"github.com/imgproxy/imgproxy/v3/logger"
 	"github.com/imgproxy/imgproxy/v3/monitoring"
 	"github.com/imgproxy/imgproxy/v3/processing"
 	"github.com/imgproxy/imgproxy/v3/vips"
-	"go.uber.org/automaxprocs/maxprocs"
 )
 
 // Init performs the global resources initialization. This should be done once per process.
@@ -21,7 +23,8 @@ func Init() error {
 		return err
 	}
 
-	if err := logger.Init(); err != nil {
+	logCfg := logger.LoadConfigFromEnv(nil)
+	if err := logger.Init(logCfg); err != nil {
 		return err
 	}
 
@@ -33,9 +36,9 @@ func Init() error {
 	}
 	// NOTE: End of temporary workaround.
 
-	gliblog.Init()
-
-	maxprocs.Set(maxprocs.Logger(log.Debugf))
+	maxprocs.Set(maxprocs.Logger(func(msg string, args ...any) {
+		slog.Debug(fmt.Sprintf(msg, args...))
+	}))
 
 	if err := monitoring.Init(); err != nil {
 		return err

+ 3 - 6
integration/suite.go

@@ -3,15 +3,12 @@ package integration
 import (
 	"context"
 	"fmt"
-	"io"
 	"net"
 	"net/http"
-	"os"
-
-	"github.com/sirupsen/logrus"
 
 	"github.com/imgproxy/imgproxy/v3"
 	"github.com/imgproxy/imgproxy/v3/httpheaders"
+	"github.com/imgproxy/imgproxy/v3/logger"
 	"github.com/imgproxy/imgproxy/v3/testutil"
 )
 
@@ -41,7 +38,7 @@ type Suite struct {
 
 func (s *Suite) SetupSuite() {
 	// Silence all the logs
-	logrus.SetOutput(io.Discard)
+	logger.Mute()
 
 	// Initialize test data provider (local test files)
 	s.TestData = testutil.NewTestDataProvider(s.T)
@@ -76,7 +73,7 @@ func (s *Suite) SetupSuite() {
 }
 
 func (s *Suite) TearDownSuite() {
-	logrus.SetOutput(os.Stdout)
+	logger.Unmute()
 }
 
 // startServer starts imgproxy instance's server for the tests.

+ 223 - 0
logger/buffer.go

@@ -0,0 +1,223 @@
+package logger
+
+import (
+	"strconv"
+	"sync"
+	"unicode/utf8"
+)
+
+var bufPool = sync.Pool{
+	New: func() interface{} {
+		// Reserve some capacity to not re-allocate on short logs.
+		buf := make(buffer, 0, 1024)
+		return &buf
+	},
+}
+
+// buffer is a slice of bytes with some additional convenience methods.
+type buffer []byte
+
+// newBuffer creates a new buffer from the pool.
+func newBuffer() *buffer {
+	return bufPool.Get().(*buffer)
+}
+
+// free truncates the buffer and returns it to the pool.
+func (b *buffer) free() {
+	// Don't keep large buffers around.
+	if len(*b) > 16*1024 {
+		return
+	}
+
+	*b = (*b)[:0]
+	bufPool.Put(b)
+}
+
+// Write writes data to the buffer.
+func (b *buffer) Write(p []byte) (n int, err error) {
+	b.append(p...)
+	return len(p), nil
+}
+
+// String returns the contents of the buffer as a string.
+func (b *buffer) String() string {
+	return string(*b)
+}
+
+// len returns the number of bytes written to the buffer.
+func (b *buffer) len() int {
+	return len(*b)
+}
+
+// append appends data to the buffer.
+func (b *buffer) append(data ...byte) {
+	*b = append(*b, data...)
+}
+
+// appendString appends a string value to the buffer.
+// If the string does not require escaping, it is appended directly.
+// Otherwise, it is escaped and quoted.
+func (b *buffer) appendString(data string) {
+	if b.isStringQuoteSafe(data) {
+		b.appendStringRaw(data)
+	} else {
+		b.appendStringQuoted(data)
+	}
+}
+
+// appendStringRaw appends a string value to the buffer without escaping.
+func (b *buffer) appendStringRaw(data string) {
+	*b = append(*b, data...)
+}
+
+// appendStringQuoted appends a string value to the buffer, escaping and quoting it as necessary.
+func (b *buffer) appendStringQuoted(data string) {
+	*b = strconv.AppendQuote(*b, data)
+}
+
+// appendInt appends an integer value to the buffer.
+func (b *buffer) appendInt(data int64) {
+	*b = strconv.AppendInt(*b, data, 10)
+}
+
+// appendUint appends an unsigned integer value to the buffer.
+func (b *buffer) appendUint(data uint64) {
+	*b = strconv.AppendUint(*b, data, 10)
+}
+
+// appendFloat appends a float value to the buffer.
+func (b *buffer) appendFloat(data float64) {
+	*b = strconv.AppendFloat(*b, data, 'g', -1, 64)
+}
+
+// appendBool appends a boolean value to the buffer.
+func (b *buffer) appendBool(data bool) {
+	*b = strconv.AppendBool(*b, data)
+}
+
+// remove removes the last n bytes from the buffer.
+func (b *buffer) remove(n int) {
+	n = max(0, n)
+	trimTo := max(0, len(*b)-n)
+	*b = (*b)[:trimTo]
+}
+
+// removeNewline removes the trailing newline character from the buffer, if present.
+func (b *buffer) removeNewline() {
+	if len(*b) > 0 && (*b)[len(*b)-1] == '\n' {
+		*b = (*b)[:len(*b)-1]
+	}
+}
+
+// isStringQuoteSafe checks if a string is safe to append without quoting.
+func (b *buffer) isStringQuoteSafe(val string) bool {
+	for i := 0; i < len(val); i++ {
+		if b := val[i]; b >= utf8.RuneSelf || !quoteSafeSet[b] {
+			return false
+		}
+	}
+	return true
+}
+
+// quoteSafeSet is a set of runes that are safe to append without quoting.
+// Some runes here are explicitly marked as unsafe for clarity.
+// The unlisted runes are considered unsafe by default.
+// Shamelessly stolen from https://github.com/golang/go/blob/master/src/encoding/json/tables.go
+// and tuned for our needs.
+var quoteSafeSet = [utf8.RuneSelf]bool{
+	' ':  false,
+	'!':  true,
+	'"':  false,
+	'#':  true,
+	'$':  true,
+	'%':  true,
+	'&':  true,
+	'\'': false,
+	'(':  true,
+	')':  true,
+	'*':  true,
+	'+':  true,
+	',':  true,
+	'-':  true,
+	'.':  true,
+	'/':  true,
+	'0':  true,
+	'1':  true,
+	'2':  true,
+	'3':  true,
+	'4':  true,
+	'5':  true,
+	'6':  true,
+	'7':  true,
+	'8':  true,
+	'9':  true,
+	':':  false,
+	';':  true,
+	'<':  true,
+	'=':  false,
+	'>':  true,
+	'?':  true,
+	'@':  true,
+	'A':  true,
+	'B':  true,
+	'C':  true,
+	'D':  true,
+	'E':  true,
+	'F':  true,
+	'G':  true,
+	'H':  true,
+	'I':  true,
+	'J':  true,
+	'K':  true,
+	'L':  true,
+	'M':  true,
+	'N':  true,
+	'O':  true,
+	'P':  true,
+	'Q':  true,
+	'R':  true,
+	'S':  true,
+	'T':  true,
+	'U':  true,
+	'V':  true,
+	'W':  true,
+	'X':  true,
+	'Y':  true,
+	'Z':  true,
+	'[':  true,
+	'\\': false,
+	']':  true,
+	'^':  true,
+	'_':  true,
+	'`':  false,
+	'a':  true,
+	'b':  true,
+	'c':  true,
+	'd':  true,
+	'e':  true,
+	'f':  true,
+	'g':  true,
+	'h':  true,
+	'i':  true,
+	'j':  true,
+	'k':  true,
+	'l':  true,
+	'm':  true,
+	'n':  true,
+	'o':  true,
+	'p':  true,
+	'q':  true,
+	'r':  true,
+	's':  true,
+	't':  true,
+	'u':  true,
+	'v':  true,
+	'w':  true,
+	'x':  true,
+	'y':  true,
+	'z':  true,
+	'{':  false,
+	'|':  true,
+	'}':  false,
+	'~':  true,
+	'~':  true,
+}
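
buffer.go gives the formatters a pooled byte slice with typed append helpers and a 16 KiB cap on what is returned to the pool. A short sketch of the intended lifecycle inside the logger package, using only the methods defined above:

package logger

// formatLine is a sketch of how the formatters in this commit use the pooled
// buffer: take one, append pieces, copy the result out, return it to the pool.
func formatLine(status int64) string {
	buf := newBuffer()
	defer buf.free() // truncated and pooled again unless it grew past 16 KiB

	buf.appendStringRaw("status")
	buf.append('=')
	buf.appendInt(status)

	return buf.String()
}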

+ 90 - 0
logger/config.go

@@ -0,0 +1,90 @@
+package logger
+
+import (
+	"log/slog"
+	"os"
+	"strings"
+
+	"github.com/mattn/go-isatty"
+
+	"github.com/imgproxy/imgproxy/v3/config/configurators"
+	"github.com/imgproxy/imgproxy/v3/ensure"
+	"github.com/imgproxy/imgproxy/v3/logger/syslog"
+)
+
+type Config struct {
+	Level  slog.Leveler
+	Format Format
+
+	Syslog syslog.Config
+}
+
+func NewDefaultConfig() Config {
+	o := Config{
+		Level:  slog.LevelInfo,
+		Format: FormatStructured,
+
+		Syslog: syslog.NewDefaultConfig(),
+	}
+
+	if isatty.IsTerminal(os.Stdout.Fd()) {
+		o.Format = FormatPretty
+	}
+
+	return o
+}
+
+func LoadConfigFromEnv(o *Config) *Config {
+	o = ensure.Ensure(o, NewDefaultConfig)
+
+	var logFormat, logLevel string
+	configurators.String(&logFormat, "IMGPROXY_LOG_FORMAT")
+	configurators.String(&logLevel, "IMGPROXY_LOG_LEVEL")
+
+	if logFormat != "" {
+		o.Format = parseFormat(logFormat)
+	}
+	if logLevel != "" {
+		o.Level = parseLevel(logLevel)
+	}
+
+	// Load syslog config
+	syslog.LoadConfigFromEnv(&o.Syslog)
+
+	return o
+}
+
+func (c *Config) Validate() error {
+	return c.Syslog.Validate()
+}
+
+func parseFormat(str string) Format {
+	switch str {
+	case "pretty":
+		return FormatPretty
+	case "structured":
+		return FormatStructured
+	case "json":
+		return FormatJSON
+	case "gcp":
+		return FormatGCP
+	default:
+		if isatty.IsTerminal(os.Stdout.Fd()) {
+			return FormatPretty
+		}
+		return FormatStructured
+	}
+}
+
+func parseLevel(str string) slog.Level {
+	switch strings.ToLower(str) {
+	case "debug":
+		return slog.LevelDebug
+	case "warn":
+		return slog.LevelWarn
+	case "error":
+		return slog.LevelError
+	default:
+		return slog.LevelInfo
+	}
+}
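
The log format and level now come from IMGPROXY_LOG_FORMAT and IMGPROXY_LOG_LEVEL, and the default format depends on whether stdout is a terminal. A standalone illustration of that default logic, using only the standard library and github.com/mattn/go-isatty (the dependency added in go.mod above):

package main

import (
	"fmt"
	"os"

	"github.com/mattn/go-isatty"
)

func main() {
	// Pretty output for humans at a terminal, structured output otherwise.
	format := "structured"
	if isatty.IsTerminal(os.Stdout.Fd()) {
		format = "pretty"
	}

	// An explicit IMGPROXY_LOG_FORMAT value overrides the TTY heuristic.
	if v := os.Getenv("IMGPROXY_LOG_FORMAT"); v != "" {
		format = v
	}

	fmt.Println("log format:", format)
}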

+ 0 - 177
logger/formatter.go

@@ -1,177 +0,0 @@
-package logger
-
-import (
-	"bytes"
-	"fmt"
-	"regexp"
-	"slices"
-	"strings"
-	"time"
-	"unicode/utf8"
-
-	logrus "github.com/sirupsen/logrus"
-)
-
-var logQuotingRe = regexp.MustCompile(`^[a-zA-Z0-9\-._/@^+]+$`)
-
-func logKeyPriority(k string) int {
-	switch k {
-	case "request_id":
-		return 3
-	case "method":
-		return 2
-	case "status":
-		return 1
-	case "error":
-		return -1
-	case "source":
-		return -2
-	case "stack":
-		return -3
-	default:
-		return 0
-	}
-}
-
-func sortKeys(keys []string) {
-	slices.SortFunc(keys, func(key1, key2 string) int {
-		if d := logKeyPriority(key2) - logKeyPriority(key1); d != 0 {
-			return d
-		}
-		return strings.Compare(key1, key2)
-	})
-}
-
-type prettyFormatter struct {
-	levelFormat string
-}
-
-func newPrettyFormatter() *prettyFormatter {
-	f := new(prettyFormatter)
-
-	levelLenMax := 0
-	for _, level := range logrus.AllLevels {
-		levelLen := utf8.RuneCount([]byte(level.String()))
-		if levelLen > levelLenMax {
-			levelLenMax = levelLen
-		}
-	}
-
-	f.levelFormat = fmt.Sprintf("%%-%ds", levelLenMax)
-
-	return f
-}
-
-func (f *prettyFormatter) Format(entry *logrus.Entry) ([]byte, error) {
-	var keys []string
-
-	if len(entry.Data) > 0 {
-		keys = make([]string, 0, len(entry.Data))
-
-		for k := range entry.Data {
-			if k != "stack" {
-				keys = append(keys, k)
-			}
-		}
-
-		sortKeys(keys)
-	}
-
-	levelColor := 36
-	switch entry.Level {
-	case logrus.DebugLevel, logrus.TraceLevel:
-		levelColor = 37
-	case logrus.WarnLevel:
-		levelColor = 33
-	case logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel:
-		levelColor = 31
-	}
-
-	levelText := fmt.Sprintf(f.levelFormat, strings.ToUpper(entry.Level.String()))
-	msg := strings.TrimSuffix(entry.Message, "\n")
-
-	var b *bytes.Buffer
-	if entry.Buffer != nil {
-		b = entry.Buffer
-	} else {
-		b = new(bytes.Buffer)
-	}
-
-	fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m [%s] %s ", levelColor, levelText, entry.Time.Format(time.RFC3339), msg)
-
-	for _, k := range keys {
-		v := entry.Data[k]
-		fmt.Fprintf(b, " \x1b[1m%s\x1b[0m=", k)
-		f.appendValue(b, v)
-	}
-
-	b.WriteByte('\n')
-
-	if stack, ok := entry.Data["stack"]; ok {
-		fmt.Fprintln(b, stack)
-	}
-
-	return b.Bytes(), nil
-}
-
-func (f *prettyFormatter) appendValue(b *bytes.Buffer, value interface{}) {
-	strValue, ok := value.(string)
-	if !ok {
-		strValue = fmt.Sprint(value)
-	}
-
-	if logQuotingRe.MatchString(strValue) {
-		b.WriteString(strValue)
-	} else {
-		fmt.Fprintf(b, "%q", strValue)
-	}
-}
-
-type structuredFormatter struct{}
-
-func (f *structuredFormatter) Format(entry *logrus.Entry) ([]byte, error) {
-	var keys []string
-
-	if len(entry.Data) > 0 {
-		keys = make([]string, 0, len(entry.Data))
-
-		for k := range entry.Data {
-			keys = append(keys, k)
-		}
-
-		sortKeys(keys)
-	}
-
-	msg := strings.TrimSuffix(entry.Message, "\n")
-
-	var b *bytes.Buffer
-	if entry.Buffer != nil {
-		b = entry.Buffer
-	} else {
-		b = new(bytes.Buffer)
-	}
-
-	f.appendKeyValue(b, "time", entry.Time.Format(time.RFC3339))
-	f.appendKeyValue(b, "level", entry.Level.String())
-	f.appendKeyValue(b, "message", msg)
-
-	for _, k := range keys {
-		f.appendKeyValue(b, k, entry.Data[k])
-	}
-
-	b.WriteByte('\n')
-	return b.Bytes(), nil
-}
-
-func (f *structuredFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
-	if b.Len() != 0 {
-		b.WriteByte(' ')
-	}
-
-	strValue, ok := value.(string)
-	if !ok {
-		strValue = fmt.Sprint(value)
-	}
-
-	fmt.Fprintf(b, "%s=%q", key, strValue)
-}

+ 119 - 0
logger/formatter_common.go

@@ -0,0 +1,119 @@
+package logger
+
+import (
+	"encoding"
+	"fmt"
+	"log/slog"
+	"time"
+)
+
+// formatterCommon holds the common logic for both pretty and structured formatting.
+type formatterCommon struct {
+	buf *buffer
+
+	groups []attrGroup
+
+	// Attributes that should be handled specially
+	error  slog.Attr
+	source slog.Attr
+	stack  slog.Attr
+}
+
+// newFormatterCommon creates a new formatterCommon instance.
+func newFormatterCommon(groups []attrGroup, buf *buffer) formatterCommon {
+	return formatterCommon{
+		buf:    buf,
+		groups: groups,
+	}
+}
+
+// levelName returns the name of the log level.
+func (s *formatterCommon) levelName(lvl slog.Level) string {
+	switch {
+	case lvl < slog.LevelInfo:
+		return "DEBUG"
+	case lvl < slog.LevelWarn:
+		return "INFO"
+	case lvl < slog.LevelError:
+		return "WARNING"
+	case lvl < LevelCritical:
+		return "ERROR"
+	default:
+		return "CRITICAL"
+	}
+}
+
+// saveSpecialAttr saves special attributes for later use.
+// It returns true if the attribute was saved (meaning it was a special attribute).
+func (s *formatterCommon) saveSpecialAttr(attr slog.Attr) bool {
+	switch attr.Key {
+	case "error":
+		s.error = attr
+	case "source":
+		s.source = attr
+	case "stack":
+		s.stack = attr
+	default:
+		return false
+	}
+
+	return true
+}
+
+// appendValue appends a value to the buffer, applying quoting rules as necessary.
+func (s *formatterCommon) appendValue(val slog.Value, forceQuote bool) {
+	switch val.Kind() {
+	case slog.KindString:
+		s.appendString(val.String(), forceQuote)
+	case slog.KindInt64:
+		s.buf.appendInt(val.Int64())
+	case slog.KindUint64:
+		s.buf.appendUint(val.Uint64())
+	case slog.KindFloat64:
+		s.buf.appendFloat(val.Float64())
+	case slog.KindBool:
+		s.buf.appendBool(val.Bool())
+	case slog.KindDuration:
+		s.appendString(val.Duration().String(), forceQuote)
+	case slog.KindTime:
+		s.appendTime(val.Time())
+	default:
+		s.appendAny(val.Any(), forceQuote)
+	}
+}
+
+// appendString appends a string value to the buffer, applying quoting rules as necessary.
+func (s *formatterCommon) appendString(val string, forceQuote bool) {
+	if forceQuote {
+		s.buf.appendStringQuoted(val)
+	} else {
+		s.buf.appendString(val)
+	}
+}
+
+// appendTime appends a time value to the buffer, wrapping it in quotes
+// ([time.DateTime] output always contains a space).
+func (s *formatterCommon) appendTime(val time.Time) {
+	s.buf.append('"')
+	s.buf.appendStringRaw(val.Format(time.DateTime))
+	s.buf.append('"')
+}
+
+// appendAny appends a value of any type to the buffer, applying quoting rules as necessary.
+func (s *formatterCommon) appendAny(val any, forceQuote bool) {
+	switch v := val.(type) {
+	case fmt.Stringer:
+		s.appendString(v.String(), forceQuote)
+		return
+	case error:
+		s.appendString(v.Error(), forceQuote)
+		return
+	case encoding.TextMarshaler:
+		if data, err := v.MarshalText(); err == nil {
+			s.appendString(string(data), forceQuote)
+			return
+		}
+	}
+	// Fallback to default string representation
+	s.appendString(fmt.Sprintf("%+v", val), forceQuote)
+}
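
levelName maps anything at or above LevelCritical to CRITICAL. LevelCritical itself is defined elsewhere in this commit (not visible in this section); with slog, such a custom level is just an offset constant. A sketch under that assumption:

package logger

import (
	"context"
	"log/slog"
)

// LevelCritical is assumed to sit one "step" above slog.LevelError,
// matching how slog spaces its built-in levels (4 apart).
const LevelCritical = slog.LevelError + 4

// critical logs at the custom level through the default logger.
func critical(msg string, args ...any) {
	slog.Log(context.Background(), LevelCritical, msg, args...)
}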

+ 351 - 0
logger/formatter_json.go

@@ -0,0 +1,351 @@
+package logger
+
+import (
+	"encoding/json"
+	"log/slog"
+	"time"
+	"unicode/utf8"
+)
+
+const (
+	jsonGroupOpenToken  = '{'
+	jsonGroupCloseToken = '}'
+)
+
+var jsonAttributeSep = []byte(",")
+
+// formatterJSON is a JSON log formatter.
+type formatterJSON struct {
+	formatterCommon
+
+	levelKey   string
+	messageKey string
+
+	sep          []byte
+	groupsOpened int
+}
+
+// newFormatterJSON creates a new formatterJSON instance.
+func newFormatterJSON(groups []attrGroup, buf *buffer, gcpStyle bool) *formatterJSON {
+	f := &formatterJSON{
+		formatterCommon: newFormatterCommon(groups, buf),
+	}
+
+	// Set the level and message keys based on the style.
+	if gcpStyle {
+		f.levelKey = "severity"
+		f.messageKey = "message"
+	} else {
+		f.levelKey = slog.LevelKey
+		f.messageKey = slog.MessageKey
+	}
+
+	return f
+}
+
+// format formats a log record.
+func (s *formatterJSON) format(r slog.Record) {
+	// Open the JSON object and defer closing it.
+	s.buf.append(jsonGroupOpenToken)
+	defer func() {
+		s.buf.append(jsonGroupCloseToken)
+	}()
+
+	// Append timestamp
+	s.appendKey(slog.TimeKey)
+	s.appendTime(r.Time)
+
+	// Append log level
+	s.appendKey(s.levelKey)
+	s.appendString(s.levelName(r.Level))
+
+	// Append message
+	s.appendKey(s.messageKey)
+	s.appendString(r.Message)
+
+	// Append groups added with [Handler.WithAttrs] and [Handler.WithGroup]
+	for _, g := range s.groups {
+		if g.name != "" {
+			s.openGroup(g.name)
+		}
+
+		s.appendAttributes(g.attrs)
+	}
+
+	// Append attributes from the record
+	r.Attrs(func(attr slog.Attr) bool {
+		s.appendAttribute(attr)
+		return true
+	})
+
+	// Close all opened groups.
+	for s.groupsOpened > 0 {
+		s.closeGroup()
+	}
+
+	// Append error, source, and stack if present
+	if s.error.Key != "" {
+		s.appendKey(s.error.Key)
+		s.appendValue(s.error.Value)
+	}
+	if s.source.Key != "" {
+		s.appendKey(s.source.Key)
+		s.appendValue(s.source.Value)
+	}
+	if s.stack.Key != "" {
+		s.appendKey(s.stack.Key)
+		s.appendValue(s.stack.Value)
+	}
+}
+
+// appendAttributes appends a list of attributes to the buffer.
+func (s *formatterJSON) appendAttributes(attrs []slog.Attr) {
+	for _, attr := range attrs {
+		s.appendAttribute(attr)
+	}
+}
+
+// appendAttribute appends a single attribute to the buffer.
+func (s *formatterJSON) appendAttribute(attr slog.Attr) {
+	// Resolve [slog.LogValuer] values
+	attr.Value = attr.Value.Resolve()
+
+	// If there are no groups opened, save special attributes for later
+	if s.groupsOpened == 0 && s.saveSpecialAttr(attr) {
+		return
+	}
+
+	// Groups need special handling
+	if attr.Value.Kind() == slog.KindGroup {
+		s.appendGroup(attr.Key, attr.Value.Group())
+		return
+	}
+
+	s.appendKey(attr.Key)
+	s.appendValue(attr.Value)
+}
+
+// appendKey appends an attribute key to the buffer.
+func (s *formatterJSON) appendKey(key string) {
+	s.buf.append(s.sep...)
+	s.sep = jsonAttributeSep
+
+	s.appendString(key)
+	s.buf.append(':')
+}
+
+// appendValue appends a value to the buffer, applying quoting rules as necessary.
+func (s *formatterJSON) appendValue(val slog.Value) {
+	switch val.Kind() {
+	case slog.KindString:
+		s.appendString(val.String())
+	case slog.KindInt64:
+		s.buf.appendInt(val.Int64())
+	case slog.KindUint64:
+		s.buf.appendUint(val.Uint64())
+	case slog.KindFloat64:
+		// strconv.FormatFloat result sometimes differs from json.Marshal,
+		// so we use json.Marshal for consistency.
+		s.appendJSONMarshal(val.Float64())
+	case slog.KindBool:
+		s.buf.appendBool(val.Bool())
+	case slog.KindDuration:
+		s.buf.appendInt(int64(val.Duration()))
+	case slog.KindTime:
+		s.appendTime(val.Time())
+	default:
+		s.appendJSONMarshal(val.Any())
+	}
+}
+
+// appendString appends a string value to the buffer.
+// If the string does not require escaping, it is appended directly.
+// Otherwise, it is JSON marshaled.
+func (s *formatterJSON) appendString(val string) {
+	if !s.isStringSafe(val) {
+		s.appendJSONMarshal(val)
+		return
+	}
+
+	s.buf.append('"')
+	s.buf.appendStringRaw(val)
+	s.buf.append('"')
+}
+
+// isStringSafe checks if a string is safe to append without escaping.
+func (s *formatterJSON) isStringSafe(val string) bool {
+	for i := 0; i < len(val); i++ {
+		if b := val[i]; b >= utf8.RuneSelf || !jsonSafeSet[b] {
+			return false
+		}
+	}
+	return true
+}
+
+// appendTime appends a time value to the buffer.
+func (s *formatterJSON) appendTime(val time.Time) {
+	s.buf.append('"')
+	s.buf.appendStringRaw(val.Format(time.RFC3339))
+	s.buf.append('"')
+}
+
+// appendJSONMarshal appends a JSON marshaled value to the buffer.
+func (s *formatterJSON) appendJSONMarshal(val any) {
+	if err, ok := val.(error); ok && err != nil {
+		s.appendString(err.Error())
+		return
+	}
+
+	buf := newBuffer()
+	defer func() {
+		buf.free()
+	}()
+
+	enc := json.NewEncoder(buf)
+	enc.SetEscapeHTML(false)
+
+	if err := enc.Encode(val); err != nil {
+		// This should be a very unlikely situation, but just in case...
+		s.buf.appendStringRaw(`"<json marshal error>"`)
+		return
+	}
+
+	buf.removeNewline()
+	s.buf.append(*buf...)
+}
+
+// appendGroup appends a group of attributes to the buffer.
+func (s *formatterJSON) appendGroup(name string, attrs []slog.Attr) {
+	if len(attrs) == 0 {
+		return
+	}
+
+	if len(name) > 0 {
+		// If the group has a name, open it and defer closing it.
+		// Unnamed groups should be treated as sets of regular attributes.
+		s.openGroup(name)
+		defer s.closeGroup()
+	}
+
+	s.appendAttributes(attrs)
+}
+
+// openGroup opens a new group in the buffer.
+func (s *formatterJSON) openGroup(name string) {
+	s.groupsOpened++
+
+	s.appendKey(name)
+	s.buf.append(jsonGroupOpenToken)
+	s.sep = nil
+}
+
+// closeGroup closes the most recently opened group in the buffer.
+func (s *formatterJSON) closeGroup() {
+	s.groupsOpened--
+
+	s.buf.append(jsonGroupCloseToken)
+	s.sep = jsonAttributeSep
+}
+
+// jsonSafeSet is a set of runes that are safe to include in JSON strings without escaping.
+// Some runes here are explicitly marked as unsafe for clarity.
+// The unlisted runes are considered unsafe by default.
+// Shamelessly stolen from https://github.com/golang/go/blob/master/src/encoding/json/tables.go.
+var jsonSafeSet = [utf8.RuneSelf]bool{
+	' ':      true,
+	'!':      true,
+	'"':      false,
+	'#':      true,
+	'$':      true,
+	'%':      true,
+	'&':      true,
+	'\'':     true,
+	'(':      true,
+	')':      true,
+	'*':      true,
+	'+':      true,
+	',':      true,
+	'-':      true,
+	'.':      true,
+	'/':      true,
+	'0':      true,
+	'1':      true,
+	'2':      true,
+	'3':      true,
+	'4':      true,
+	'5':      true,
+	'6':      true,
+	'7':      true,
+	'8':      true,
+	'9':      true,
+	':':      true,
+	';':      true,
+	'<':      true,
+	'=':      true,
+	'>':      true,
+	'?':      true,
+	'@':      true,
+	'A':      true,
+	'B':      true,
+	'C':      true,
+	'D':      true,
+	'E':      true,
+	'F':      true,
+	'G':      true,
+	'H':      true,
+	'I':      true,
+	'J':      true,
+	'K':      true,
+	'L':      true,
+	'M':      true,
+	'N':      true,
+	'O':      true,
+	'P':      true,
+	'Q':      true,
+	'R':      true,
+	'S':      true,
+	'T':      true,
+	'U':      true,
+	'V':      true,
+	'W':      true,
+	'X':      true,
+	'Y':      true,
+	'Z':      true,
+	'[':      true,
+	'\\':     false,
+	']':      true,
+	'^':      true,
+	'_':      true,
+	'`':      true,
+	'a':      true,
+	'b':      true,
+	'c':      true,
+	'd':      true,
+	'e':      true,
+	'f':      true,
+	'g':      true,
+	'h':      true,
+	'i':      true,
+	'j':      true,
+	'k':      true,
+	'l':      true,
+	'm':      true,
+	'n':      true,
+	'o':      true,
+	'p':      true,
+	'q':      true,
+	'r':      true,
+	's':      true,
+	't':      true,
+	'u':      true,
+	'v':      true,
+	'w':      true,
+	'x':      true,
+	'y':      true,
+	'z':      true,
+	'{':      true,
+	'|':      true,
+	'}':      true,
+	'~':      true,
+	'\u007f': true,
+}
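
Compared to slog's stock JSONHandler, this formatter pins the key order (time, level, message first; error, source, and stack last) and, in GCP mode, renames the level and message keys to severity and message. A hedged usage sketch based on the tests later in this commit (NewHandler's signature and the Format constants are taken from them):

package main

import (
	"log/slog"
	"os"

	"github.com/imgproxy/imgproxy/v3/logger"
)

func main() {
	cfg := logger.NewDefaultConfig()
	cfg.Format = logger.FormatJSON // logger.FormatGCP would emit severity/message keys instead

	log := slog.New(logger.NewHandler(os.Stdout, &cfg))
	log.Info("completed request", "request_id", "abc123", "status", 200)
	// Roughly: {"time":"...","level":"INFO","msg":"completed request","request_id":"abc123","status":200}
}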

+ 321 - 0
logger/formatter_json_test.go

@@ -0,0 +1,321 @@
+package logger
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"log/slog"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/imgproxy/imgproxy/v3/testutil"
+	"github.com/stretchr/testify/suite"
+)
+
+type FormatterJsonTestSuite struct {
+	testutil.LazySuite
+
+	buf     testutil.LazyObj[*bytes.Buffer]
+	config  testutil.LazyObj[*Config]
+	handler testutil.LazyObj[*Handler]
+	logger  testutil.LazyObj[*slog.Logger]
+}
+
+func (s *FormatterJsonTestSuite) SetupTest() {
+	s.buf, _ = testutil.NewLazySuiteObj(
+		s,
+		func() (*bytes.Buffer, error) {
+			return new(bytes.Buffer), nil
+		},
+	)
+
+	s.config, _ = testutil.NewLazySuiteObj(
+		s,
+		func() (*Config, error) {
+			cfg := NewDefaultConfig()
+			cfg.Format = FormatJSON
+			return &cfg, nil
+		},
+	)
+
+	s.handler, _ = testutil.NewLazySuiteObj(
+		s,
+		func() (*Handler, error) {
+			return NewHandler(s.buf(), s.config()), nil
+		},
+	)
+
+	s.logger, _ = testutil.NewLazySuiteObj(
+		s,
+		func() (*slog.Logger, error) {
+			return slog.New(s.handler()), nil
+		},
+	)
+}
+
+func (s *FormatterJsonTestSuite) SetupSubTest() {
+	s.ResetLazyObjects()
+}
+
+func (s *FormatterJsonTestSuite) checkNextEntry(lvl string, msg map[string]any) {
+	str, err := s.buf().ReadString('\n')
+	s.Require().NoError(err)
+
+	var parsed map[string]any
+	err = json.Unmarshal([]byte(str), &parsed)
+	s.Require().NoError(err)
+
+	s.Require().IsType("", parsed["time"])
+	s.Require().IsType("", parsed["level"])
+
+	now := time.Now()
+	t, err := time.ParseInLocation(time.RFC3339, parsed["time"].(string), now.Location())
+	s.Require().NoError(err)
+	s.Require().WithinDuration(time.Now(), t, time.Minute)
+
+	s.Equal(lvl, parsed["level"].(string))
+
+	// Remove time and level as they are not included in `msg`
+	delete(parsed, "time")
+	delete(parsed, "level")
+
+	// Check the message
+	s.Equal(msg, parsed)
+}
+
+func (s *FormatterJsonTestSuite) TestLevel() {
+	type testEntry struct {
+		level     slog.Level
+		levelName string
+		message   string
+	}
+
+	testEntries := []testEntry{
+		{level: slog.LevelDebug, levelName: "DEBUG", message: "Debug message"},
+		{level: slog.LevelInfo, levelName: "INFO", message: "Info message"},
+		{level: slog.LevelWarn, levelName: "WARNING", message: "Warning message"},
+		{level: slog.LevelError, levelName: "ERROR", message: "Error message"},
+		{level: LevelCritical, levelName: "CRITICAL", message: "Critical message"},
+	}
+
+	testCases := []struct {
+		level   slog.Level
+		entries []testEntry
+	}{
+		{level: slog.LevelDebug, entries: testEntries},
+		{level: slog.LevelInfo, entries: testEntries[1:]},
+		{level: slog.LevelWarn, entries: testEntries[2:]},
+		{level: slog.LevelError, entries: testEntries[3:]},
+		{level: LevelCritical, entries: testEntries[4:]},
+	}
+
+	for _, tc := range testCases {
+		s.Run(tc.level.String(), func() {
+			s.config().Level = tc.level
+
+			for _, entry := range testEntries {
+				s.logger().Log(s.T().Context(), entry.level, entry.message)
+			}
+
+			for _, entry := range tc.entries {
+				s.checkNextEntry(entry.levelName, map[string]any{
+					"msg": entry.message,
+				})
+			}
+		})
+	}
+}
+
+func (s *FormatterJsonTestSuite) TestAttributes() {
+	s.logger().Info(
+		"Test message",
+		slog.String("string", "value"),
+		slog.Int("int", -100),
+		slog.Uint64("uint64", 200),
+		slog.Float64("float64", 3.14),
+		slog.Bool("bool", true),
+		slog.Time("timearg", time.Date(1984, 1, 2, 3, 4, 5, 6, time.UTC)),
+		slog.Duration("duration", time.Minute),
+		slog.Any("err", errors.New("error value")),
+		slog.Any("any", struct {
+			Field1 string
+			Field2 int
+		}{"value", 42}),
+	)
+
+	s.checkNextEntry(
+		"INFO",
+		map[string]any{
+			"msg":      "Test message",
+			"string":   "value",
+			"int":      -100.0,
+			"uint64":   200.0,
+			"float64":  3.14,
+			"bool":     true,
+			"timearg":  "1984-01-02T03:04:05Z",
+			"duration": float64(time.Minute),
+			"err":      "error value",
+			"any":      map[string]any{"Field1": "value", "Field2": 42.0},
+		},
+	)
+}
+
+func (s *FormatterJsonTestSuite) TestGroups() {
+	s.Run("LastGroupNotEmpty", func() {
+		s.logger().
+			With(
+				slog.String("string", "value"),
+				slog.Int("int", -100),
+			).
+			WithGroup("group1").
+			With(
+				slog.Uint64("uint64", 200),
+				slog.Float64("float64", 3.14),
+			).
+			WithGroup("group2").
+			With(slog.Group("group3",
+				slog.Bool("bool", true),
+				slog.Time("timearg", time.Date(1984, 1, 2, 3, 4, 5, 6, time.UTC)),
+			)).
+			With(slog.Group("empty_group")).
+			WithGroup("group4").
+			Info(
+				"Test message",
+				slog.Duration("duration", time.Minute),
+				slog.Any("any", struct {
+					Field1 string
+					Field2 int
+				}{"value", 42}),
+			)
+
+		s.checkNextEntry(
+			"INFO",
+			map[string]any{
+				"msg":    "Test message",
+				"string": "value",
+				"int":    -100.0,
+				"group1": map[string]any{
+					"uint64":  200.0,
+					"float64": 3.14,
+					"group2": map[string]any{
+						"group3": map[string]any{
+							"bool":    true,
+							"timearg": "1984-01-02T03:04:05Z",
+						},
+						"group4": map[string]any{
+							"duration": float64(time.Minute),
+							"any":      map[string]any{"Field1": "value", "Field2": 42.0},
+						},
+					},
+				},
+			},
+		)
+	})
+
+	s.Run("LastGroupsEmpty", func() {
+		s.logger().
+			With(
+				slog.String("string", "value"),
+				slog.Int("int", -100),
+			).
+			WithGroup("group1").
+			With(
+				slog.Uint64("uint64", 200),
+				slog.Float64("float64", 3.14),
+			).
+			WithGroup("group2").
+			With(slog.Group("group3",
+				slog.Bool("bool", true),
+				slog.Time("timearg", time.Date(1984, 1, 2, 3, 4, 5, 6, time.UTC)),
+			)).
+			With(slog.Group("empty_group")).
+			WithGroup("group4").
+			WithGroup("group5").
+			Info("Test message")
+
+		s.checkNextEntry(
+			"INFO",
+			map[string]any{
+				"msg":    "Test message",
+				"string": "value",
+				"int":    -100.0,
+				"group1": map[string]any{
+					"uint64":  200.0,
+					"float64": 3.14,
+					"group2": map[string]any{
+						"group3": map[string]any{
+							"bool":    true,
+							"timearg": "1984-01-02T03:04:05Z",
+						},
+					},
+				},
+			},
+		)
+	})
+}
+
+func (s *FormatterJsonTestSuite) TestEscaping() {
+	s.logger().Info(
+		"Test message",
+		"key", "value",
+		"key 1", "value 1",
+		`"key"`, `"value"`,
+		`<key>`, `<value>`,
+		"\nkey\n", "\nvalue\n",
+		slog.Group("group name", slog.String("key", "value")),
+	)
+
+	s.checkNextEntry(
+		"INFO",
+		map[string]any{
+			"msg":        "Test message",
+			"key":        "value",
+			"key 1":      "value 1",
+			`"key"`:      `"value"`,
+			`<key>`:      `<value>`,
+			"\nkey\n":    "\nvalue\n",
+			"group name": map[string]any{"key": "value"},
+		},
+	)
+}
+
+func (s *FormatterJsonTestSuite) TestSpecialFields() {
+	s.logger().Info(
+		"Test message",
+		"stack", "stack value\nwith new lines",
+		"key1", "value1",
+		"error", errors.New("error value"),
+		"key2", "value2",
+		"source", "source value",
+		"key3", "value3",
+		slog.Group(
+			"group",
+			"stack", "stack in group",
+			"error", "error in group",
+			"source", "source in group",
+		),
+	)
+
+	expectedJSON := strings.Join([]string{
+		`"msg":"Test message",`,
+		`"key1":"value1",`,
+		`"key2":"value2",`,
+		`"key3":"value3",`,
+		`"group":{`,
+		`"stack":"stack in group",`,
+		`"error":"error in group",`,
+		`"source":"source in group"`,
+		`},`,
+		`"error":"error value",`,
+		`"source":"source value",`,
+		`"stack":"stack value\nwith new lines"`,
+		"}\n",
+	}, "")
+
+	s.Require().Contains(s.buf().String(), expectedJSON)
+}
+
+func TestFormatterJson(t *testing.T) {
+	suite.Run(t, new(FormatterJsonTestSuite))
+}

+ 190 - 0
logger/formatter_pretty.go

@@ -0,0 +1,190 @@
+package logger
+
+import (
+	"log/slog"
+	"time"
+)
+
+var (
+	prettyGroupOpenToken  = []byte("{")
+	prettyGroupCloseToken = []byte(" }")
+
+	prettyColorDebug     = []byte("\x1b[37m")   // Gray
+	prettyColorDebugBold = []byte("\x1b[1;37m") // Bold Gray
+	prettyColorInfo      = []byte("\x1b[36m")   // Cyan
+	prettyColorInfoBold  = []byte("\x1b[1;36m") // Bold Cyan
+	prettyColorWarn      = []byte("\x1b[33m")   // Yellow
+	prettyColorWarnBold  = []byte("\x1b[1;33m") // Bold Yellow
+	prettyColorError     = []byte("\x1b[31m")   // Red
+	prettyColorErrorBold = []byte("\x1b[1;31m") // Bold Red
+	prettyColorReset     = []byte("\x1b[0m")
+)
+
+// formatterPretty is a pretty printer for log records.
+type formatterPretty struct {
+	formatterCommon
+
+	colorThin []byte
+	colorBold []byte
+
+	groupsOpened int
+}
+
+// newFormatterPretty creates a new instance of formatterPretty.
+func newFormatterPretty(groups []attrGroup, buf *buffer) *formatterPretty {
+	return &formatterPretty{
+		formatterCommon: newFormatterCommon(groups, buf),
+	}
+}
+
+// format formats a log record as a pretty-printed string.
+func (s *formatterPretty) format(r slog.Record) {
+	s.colorThin, s.colorBold = s.getColor(r.Level)
+
+	// Append timestamp
+	s.buf.appendStringRaw(r.Time.Format(time.DateTime))
+	s.buf.append(' ')
+
+	// Append level marker
+	s.buf.append(s.colorBold...)
+	s.buf.appendStringRaw(s.levelName(r.Level))
+	s.buf.append(prettyColorReset...)
+	s.buf.append(' ')
+
+	// Append message
+	s.buf.appendStringRaw(r.Message)
+
+	// Append groups added with [Handler.WithAttrs] and [Handler.WithGroup]
+	for _, g := range s.groups {
+		if g.name != "" {
+			s.openGroup(g.name)
+		}
+
+		s.appendAttributes(g.attrs)
+	}
+
+	// Append attributes from the record
+	r.Attrs(func(attr slog.Attr) bool {
+		s.appendAttribute(attr)
+		return true
+	})
+
+	// Close all opened groups.
+	for s.groupsOpened > 0 {
+		s.closeGroup()
+	}
+
+	// Append error, source, and stack if present
+	if s.error.Key != "" {
+		s.appendKey(s.error.Key)
+		s.appendValue(s.error.Value, false)
+	}
+	if s.source.Key != "" {
+		s.appendKey(s.source.Key)
+		s.appendValue(s.source.Value, false)
+	}
+	if s.stack.Key != "" {
+		s.buf.append('\n')
+		s.buf.append(prettyColorDebug...)
+		s.buf.appendStringRaw(s.stack.Value.String())
+		s.buf.append(prettyColorReset...)
+	}
+}
+
+// getColor returns the terminal color sequences for a given log level.
+func (s *formatterPretty) getColor(lvl slog.Level) ([]byte, []byte) {
+	switch {
+	case lvl < slog.LevelInfo:
+		return prettyColorDebug, prettyColorDebugBold
+	case lvl < slog.LevelWarn:
+		return prettyColorInfo, prettyColorInfoBold
+	case lvl < slog.LevelError:
+		return prettyColorWarn, prettyColorWarnBold
+	default:
+		return prettyColorError, prettyColorErrorBold
+	}
+}
+
+// levelName returns the string representation of a log level.
+func (s *formatterPretty) levelName(lvl slog.Level) string {
+	switch {
+	case lvl < slog.LevelInfo:
+		return "[DBG]"
+	case lvl < slog.LevelWarn:
+		return "[INF]"
+	case lvl < slog.LevelError:
+		return "[WRN]"
+	case lvl < LevelCritical:
+		return "[ERR]"
+	default:
+		return "[CRT]"
+	}
+}
+
+// appendAttributes appends a list of attributes to the buffer.
+func (s *formatterPretty) appendAttributes(attrs []slog.Attr) {
+	for _, attr := range attrs {
+		s.appendAttribute(attr)
+	}
+}
+
+// appendAttribute appends a single attribute to the buffer.
+func (s *formatterPretty) appendAttribute(attr slog.Attr) {
+	// Resolve [slog.LogValuer] values
+	attr.Value = attr.Value.Resolve()
+
+	// If there are no groups opened, save special attributes for later
+	if s.groupsOpened == 0 && s.saveSpecialAttr(attr) {
+		return
+	}
+
+	// Groups need special handling
+	if attr.Value.Kind() == slog.KindGroup {
+		s.appendGroup(attr.Key, attr.Value.Group())
+		return
+	}
+
+	s.appendKey(attr.Key)
+	s.appendValue(attr.Value, false)
+}
+
+// appendKey appends an attribute key to the buffer.
+func (s *formatterPretty) appendKey(key string) {
+	s.buf.append(' ')
+	s.buf.append(s.colorThin...)
+	s.buf.appendString(key)
+	s.buf.append(prettyColorReset...)
+	s.buf.append('=')
+}
+
+// appendGroup appends a group of attributes to the buffer.
+func (s *formatterPretty) appendGroup(name string, attrs []slog.Attr) {
+	// Ignore empty groups
+	if len(attrs) == 0 {
+		return
+	}
+
+	if len(name) > 0 {
+		// If the group has a name, open it and defer closing it.
+		// Unnamed groups should be treated as sets of regular attributes.
+		s.openGroup(name)
+		defer s.closeGroup()
+	}
+
+	s.appendAttributes(attrs)
+}
+
+// openGroup opens a new group of attributes.
+func (s *formatterPretty) openGroup(name string) {
+	s.groupsOpened++
+
+	s.appendKey(name)
+	s.buf.append(prettyGroupOpenToken...)
+}
+
+// closeGroup closes the most recently opened group of attributes.
+func (s *formatterPretty) closeGroup() {
+	s.groupsOpened--
+
+	s.buf.append(prettyGroupCloseToken...)
+}
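
The pretty formatter targets terminals: a DateTime timestamp, a colored three-letter level marker, key=value pairs with { ... } around groups, and the stack printed on its own line. A tiny illustration of the ANSI sequences it relies on (plain fmt, nothing imgproxy-specific):

package main

import "fmt"

func main() {
	const (
		boldCyan = "\x1b[1;36m" // level marker color for INFO (prettyColorInfoBold above)
		reset    = "\x1b[0m"
	)

	// Roughly what a pretty INFO line looks like in a terminal:
	fmt.Println("2025-06-01 12:00:00 " + boldCyan + "[INF]" + reset + " Completed request status=200")
}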

+ 345 - 0
logger/formatter_pretty_test.go

@@ -0,0 +1,345 @@
+package logger
+
+import (
+	"bytes"
+	"errors"
+	"log/slog"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/imgproxy/imgproxy/v3/testutil"
+	"github.com/stretchr/testify/suite"
+)
+
+type FormatterPrettyTestSuite struct {
+	testutil.LazySuite
+
+	buf     testutil.LazyObj[*bytes.Buffer]
+	config  testutil.LazyObj[*Config]
+	handler testutil.LazyObj[*Handler]
+	logger  testutil.LazyObj[*slog.Logger]
+}
+
+func (s *FormatterPrettyTestSuite) SetupTest() {
+	s.buf, _ = testutil.NewLazySuiteObj(
+		s,
+		func() (*bytes.Buffer, error) {
+			return new(bytes.Buffer), nil
+		},
+	)
+
+	s.config, _ = testutil.NewLazySuiteObj(
+		s,
+		func() (*Config, error) {
+			cfg := NewDefaultConfig()
+			cfg.Format = FormatPretty
+			return &cfg, nil
+		},
+	)
+
+	s.handler, _ = testutil.NewLazySuiteObj(
+		s,
+		func() (*Handler, error) {
+			return NewHandler(s.buf(), s.config()), nil
+		},
+	)
+
+	s.logger, _ = testutil.NewLazySuiteObj(
+		s,
+		func() (*slog.Logger, error) {
+			return slog.New(s.handler()), nil
+		},
+	)
+}
+
+func (s *FormatterPrettyTestSuite) SetupSubTest() {
+	s.ResetLazyObjects()
+}
+
+func (s *FormatterPrettyTestSuite) removeColorCodes() {
+	p := s.buf().Bytes()
+	q := p[:0]
+
+	inEscape := false
+
+	for i := 0; i < len(p); i++ {
+		switch {
+		case p[i] == '\x1b':
+			// Skip ANSI escape codes
+			inEscape = true
+		case inEscape && p[i] == 'm':
+			inEscape = false
+		case !inEscape:
+			q = append(q, p[i])
+		}
+	}
+
+	s.buf().Truncate(len(q))
+}
+
+func (s *FormatterPrettyTestSuite) checkNextEntry(lvl, msg string) {
+	// Remove color codes from the log entry,
+	// we're not going to test coloring
+	s.removeColorCodes()
+
+	// Pretty level names are always 3 characters long
+	s.Require().Len(lvl, 3)
+
+	str, err := s.buf().ReadString('\n')
+	s.Require().NoError(err)
+
+	const timeLen = 19
+	const lvlLen = 3 + 4 // +4 for the space and brackets
+	const prefixLen = timeLen + lvlLen
+
+	s.Require().GreaterOrEqual(len(str), prefixLen)
+
+	timePart := str[:timeLen]
+	levelPart := str[timeLen:prefixLen]
+
+	now := time.Now()
+	t, err := time.ParseInLocation(time.DateTime, timePart, now.Location())
+	s.Require().NoError(err)
+	s.Require().WithinDuration(time.Now(), t, time.Minute)
+
+	s.Equal(" ["+lvl+"] ", levelPart)
+
+	// Check the message
+	s.Equal(msg+"\n", str[prefixLen:])
+}
+
+func (s *FormatterPrettyTestSuite) TestLevel() {
+	type testEntry struct {
+		level     slog.Level
+		levelName string
+		message   string
+	}
+
+	testEntries := []testEntry{
+		{level: slog.LevelDebug, levelName: "DBG", message: "Debug message"},
+		{level: slog.LevelInfo, levelName: "INF", message: "Info message"},
+		{level: slog.LevelWarn, levelName: "WRN", message: "Warning message"},
+		{level: slog.LevelError, levelName: "ERR", message: "Error message"},
+		{level: LevelCritical, levelName: "CRT", message: "Critical message"},
+	}
+
+	testCases := []struct {
+		level   slog.Level
+		entries []testEntry
+	}{
+		{level: slog.LevelDebug, entries: testEntries},
+		{level: slog.LevelInfo, entries: testEntries[1:]},
+		{level: slog.LevelWarn, entries: testEntries[2:]},
+		{level: slog.LevelError, entries: testEntries[3:]},
+		{level: LevelCritical, entries: testEntries[4:]},
+	}
+
+	for _, tc := range testCases {
+		s.Run(tc.level.String(), func() {
+			s.config().Level = tc.level
+
+			for _, entry := range testEntries {
+				s.logger().Log(s.T().Context(), entry.level, entry.message)
+			}
+
+			for _, entry := range tc.entries {
+				s.checkNextEntry(entry.levelName, entry.message)
+			}
+		})
+	}
+}
+
+func (s *FormatterPrettyTestSuite) TestAttributes() {
+	s.logger().Info(
+		"Test message",
+		slog.String("string", "value"),
+		slog.Int("int", -100),
+		slog.Uint64("uint64", 200),
+		slog.Float64("float64", 3.14),
+		slog.Bool("bool", true),
+		slog.Time("time", time.Date(1984, 1, 2, 3, 4, 5, 6, time.UTC)),
+		slog.Duration("duration", time.Minute),
+		slog.Any("err", errors.New("error value")),
+		slog.Any("any", struct {
+			Field1 string
+			Field2 int
+		}{"value", 42}),
+	)
+
+	s.checkNextEntry(
+		"INF",
+		strings.Join([]string{
+			"Test message",
+			"string=value",
+			"int=-100",
+			"uint64=200",
+			"float64=3.14",
+			"bool=true",
+			`time="1984-01-02 03:04:05"`,
+			"duration=1m0s",
+			`err="error value"`,
+			`any="{Field1:value Field2:42}"`,
+		}, " "),
+	)
+}
+
+func (s *FormatterPrettyTestSuite) TestGroups() {
+	s.Run("LastGroupNotEmpty", func() {
+		s.logger().
+			With(
+				slog.String("string", "value"),
+				slog.Int("int", -100),
+			).
+			WithGroup("group1").
+			With(
+				slog.Uint64("uint64", 200),
+				slog.Float64("float64", 3.14),
+			).
+			WithGroup("group2").
+			With(slog.Group("group3",
+				slog.Bool("bool", true),
+				slog.Time("time", time.Date(1984, 1, 2, 3, 4, 5, 6, time.UTC)),
+			)).
+			With(slog.Group("empty_group")).
+			WithGroup("group4").
+			Info(
+				"Test message",
+				slog.Duration("duration", time.Minute),
+				slog.Any("any", struct {
+					Field1 string
+					Field2 int
+				}{"value", 42}),
+			)
+
+		s.checkNextEntry(
+			"INF",
+			strings.Join([]string{
+				"Test message",
+				"string=value",
+				"int=-100",
+				"group1={",
+				"uint64=200",
+				"float64=3.14",
+				"group2={",
+				"group3={",
+				"bool=true",
+				`time="1984-01-02 03:04:05"`,
+				"}",
+				"group4={",
+				"duration=1m0s",
+				`any="{Field1:value Field2:42}"`,
+				"}",
+				"}",
+				"}",
+			}, " "),
+		)
+	})
+
+	s.Run("LastGroupsEmpty", func() {
+		s.logger().
+			With(
+				slog.String("string", "value"),
+				slog.Int("int", -100),
+			).
+			WithGroup("group1").
+			With(
+				slog.Uint64("uint64", 200),
+				slog.Float64("float64", 3.14),
+			).
+			WithGroup("group2").
+			With(slog.Group("group3",
+				slog.Bool("bool", true),
+				slog.Time("time", time.Date(1984, 1, 2, 3, 4, 5, 6, time.UTC)),
+			)).
+			With(slog.Group("empty_group")).
+			WithGroup("group4").
+			WithGroup("group5").
+			Info("Test message")
+
+		s.checkNextEntry(
+			"INF",
+			strings.Join([]string{
+				"Test message",
+				"string=value",
+				"int=-100",
+				"group1={",
+				"uint64=200",
+				"float64=3.14",
+				"group2={",
+				"group3={",
+				"bool=true",
+				`time="1984-01-02 03:04:05"`,
+				"}",
+				"}",
+				"}",
+			}, " "),
+		)
+	})
+}
+
+func (s *FormatterPrettyTestSuite) TestQuoting() {
+	s.logger().Info(
+		"Test message",
+		"key", "value",
+		"key with spaces", "value with spaces",
+		`key"with"quotes`, `value"with"quotes`,
+		"key\nwith\nnewlines", "value\nwith\nnewlines",
+		slog.Group("group name", slog.String("key", "value")),
+	)
+
+	s.checkNextEntry(
+		"INF",
+		strings.Join([]string{
+			"Test message",
+			"key=value",
+			`"key with spaces"="value with spaces"`,
+			`"key\"with\"quotes"="value\"with\"quotes"`,
+			`"key\nwith\nnewlines"="value\nwith\nnewlines"`,
+			`"group name"={ key=value }`,
+		}, " "),
+	)
+}
+
+func (s *FormatterPrettyTestSuite) TestSpecialFields() {
+	s.logger().Info(
+		"Test message",
+		"stack", "stack value\nwith new lines",
+		"key1", "value1",
+		"error", errors.New("error value"),
+		"key2", "value2",
+		"source", "source value",
+		"key3", "value3",
+		slog.Group(
+			"group",
+			"stack", "stack in group",
+			"error", "error in group",
+			"source", "source in group",
+		),
+	)
+
+	s.checkNextEntry(
+		"INF",
+		strings.Join([]string{
+			"Test message",
+			"key1=value1",
+			"key2=value2",
+			"key3=value3",
+			"group={",
+			`stack="stack in group"`,
+			`error="error in group"`,
+			`source="source in group"`,
+			"}",
+			`error="error value"`,
+			`source="source value"`,
+		}, " "),
+	)
+
+	s.removeColorCodes()
+
+	s.Require().Equal("stack value\nwith new lines\n", s.buf().String())
+}
+
+func TestFormatterPretty(t *testing.T) {
+	suite.Run(t, new(FormatterPrettyTestSuite))
+}

+ 133 - 0
logger/formatter_structured.go

@@ -0,0 +1,133 @@
+package logger
+
+import (
+	"log/slog"
+)
+
+// formatterStructured is a flat structured log formatter.
+type formatterStructured struct {
+	formatterCommon
+
+	// Current group prefix
+	prefix *buffer
+}
+
+// newFormatterStructured creates a new formatterStructured instance.
+func newFormatterStructured(groups []attrGroup, buf *buffer) *formatterStructured {
+	return &formatterStructured{
+		formatterCommon: newFormatterCommon(groups, buf),
+	}
+}
+
+// format formats a log record.
+func (s *formatterStructured) format(r slog.Record) {
+	s.prefix = newBuffer()
+	defer func() {
+		s.prefix.free()
+	}()
+
+	// Append timestamp
+	s.appendKey(slog.TimeKey)
+	s.appendTime(r.Time)
+
+	// Append log level
+	s.appendKey(slog.LevelKey)
+	s.appendString(s.levelName(r.Level), true)
+
+	// Append message
+	s.appendKey(slog.MessageKey)
+	s.appendString(r.Message, true)
+
+	// Append groups added with [Handler.WithAttrs] and [Handler.WithGroup]
+	for _, g := range s.groups {
+		if g.name != "" {
+			s.openGroup(g.name)
+		}
+
+		s.appendAttributes(g.attrs)
+	}
+
+	// Append attributes from the record
+	r.Attrs(func(attr slog.Attr) bool {
+		s.appendAttribute(attr)
+		return true
+	})
+
+	// Append error, source, and stack if present
+	if s.error.Key != "" {
+		s.appendKey(s.error.Key)
+		s.appendValue(s.error.Value, false)
+	}
+	if s.source.Key != "" {
+		s.appendKey(s.source.Key)
+		s.appendValue(s.source.Value, false)
+	}
+	if s.stack.Key != "" {
+		s.appendKey(s.stack.Key)
+		s.appendValue(s.stack.Value, false)
+	}
+}
+
+// appendAttributes appends a list of attributes to the buffer.
+func (s *formatterStructured) appendAttributes(attrs []slog.Attr) {
+	for _, attr := range attrs {
+		s.appendAttribute(attr)
+	}
+}
+
+// appendAttribute appends a single attribute to the buffer.
+func (s *formatterStructured) appendAttribute(attr slog.Attr) {
+	// Resolve [slog.LogValuer] values
+	attr.Value = attr.Value.Resolve()
+
+	// If there are no groups opened, save special attributes for later
+	if s.prefix.len() == 0 && s.saveSpecialAttr(attr) {
+		return
+	}
+
+	// Groups need special handling
+	if attr.Value.Kind() == slog.KindGroup {
+		s.appendGroup(attr.Key, attr.Value.Group())
+		return
+	}
+
+	s.appendKey(attr.Key)
+	s.appendValue(attr.Value, true)
+}
+
+// appendKey appends an attribute key to the buffer.
+func (s *formatterStructured) appendKey(key string) {
+	if len(*s.buf) > 0 {
+		s.buf.append(' ')
+	}
+
+	s.buf.appendString(s.prefix.String() + key)
+	s.buf.append('=')
+}
+
+// appendGroup appends a group of attributes to the buffer.
+func (s *formatterStructured) appendGroup(name string, attrs []slog.Attr) {
+	if len(attrs) == 0 {
+		return
+	}
+
+	if len(name) > 0 {
+		// If the group has a name, open it and defer closing it.
+		// Unnamed groups should be treated as sets of regular attributes.
+		s.openGroup(name)
+		defer s.closeGroup(name)
+	}
+
+	s.appendAttributes(attrs)
+}
+
+// openGroup opens a new group of attributes.
+func (s *formatterStructured) openGroup(name string) {
+	s.prefix.appendStringRaw(name)
+	s.prefix.append('.')
+}
+
+// closeGroup closes the most recently opened group of attributes.
+func (s *formatterStructured) closeGroup(name string) {
+	s.prefix.remove(len(name) + 1) // +1 for the dot
+}

+ 299 - 0
logger/formatter_structured_test.go

@@ -0,0 +1,299 @@
+package logger
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"log/slog"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/imgproxy/imgproxy/v3/testutil"
+	"github.com/stretchr/testify/suite"
+)
+
+type FormatterStructuredTestSuite struct {
+	testutil.LazySuite
+
+	buf     testutil.LazyObj[*bytes.Buffer]
+	config  testutil.LazyObj[*Config]
+	handler testutil.LazyObj[*Handler]
+	logger  testutil.LazyObj[*slog.Logger]
+}
+
+func (s *FormatterStructuredTestSuite) SetupTest() {
+	s.buf, _ = testutil.NewLazySuiteObj(
+		s,
+		func() (*bytes.Buffer, error) {
+			return new(bytes.Buffer), nil
+		},
+	)
+
+	s.config, _ = testutil.NewLazySuiteObj(
+		s,
+		func() (*Config, error) {
+			cfg := NewDefaultConfig()
+			cfg.Format = FormatStructured
+			return &cfg, nil
+		},
+	)
+
+	s.handler, _ = testutil.NewLazySuiteObj(
+		s,
+		func() (*Handler, error) {
+			return NewHandler(s.buf(), s.config()), nil
+		},
+	)
+
+	s.logger, _ = testutil.NewLazySuiteObj(
+		s,
+		func() (*slog.Logger, error) {
+			return slog.New(s.handler()), nil
+		},
+	)
+}
+
+func (s *FormatterStructuredTestSuite) SetupSubTest() {
+	s.ResetLazyObjects()
+}
+
+func (s *FormatterStructuredTestSuite) checkNextEntry(lvl, msg string) {
+	str, err := s.buf().ReadString('\n')
+	s.Require().NoError(err)
+
+	const timeLen = 19 + 7  // +7 for key, separator, and quotes
+	lvlLen := len(lvl) + 10 // +10 for key, separator, quotes, and spaces
+	prefixLen := timeLen + lvlLen
+
+	s.Require().GreaterOrEqual(len(str), prefixLen)
+
+	timePart := str[:timeLen]
+	levelPart := str[timeLen:prefixLen]
+
+	now := time.Now()
+	t, err := time.ParseInLocation(time.DateTime, timePart[6:timeLen-1], now.Location())
+	s.Require().NoError(err)
+	s.Require().WithinDuration(time.Now(), t, time.Minute)
+
+	s.Equal(` level="`+lvl+`" `, levelPart)
+
+	// Check the message
+	s.Equal(msg+"\n", str[prefixLen:])
+}
+
+func (s *FormatterStructuredTestSuite) TestLevel() {
+	type testEntry struct {
+		level     slog.Level
+		levelName string
+		message   string
+	}
+
+	testEntries := []testEntry{
+		{level: slog.LevelDebug, levelName: "DEBUG", message: "Debug message"},
+		{level: slog.LevelInfo, levelName: "INFO", message: "Info message"},
+		{level: slog.LevelWarn, levelName: "WARNING", message: "Warning message"},
+		{level: slog.LevelError, levelName: "ERROR", message: "Error message"},
+		{level: LevelCritical, levelName: "CRITICAL", message: "Critical message"},
+	}
+
+	testCases := []struct {
+		level   slog.Level
+		entries []testEntry
+	}{
+		{level: slog.LevelDebug, entries: testEntries},
+		{level: slog.LevelInfo, entries: testEntries[1:]},
+		{level: slog.LevelWarn, entries: testEntries[2:]},
+		{level: slog.LevelError, entries: testEntries[3:]},
+		{level: LevelCritical, entries: testEntries[4:]},
+	}
+
+	for _, tc := range testCases {
+		s.Run(tc.level.String(), func() {
+			s.config().Level = tc.level
+
+			for _, entry := range testEntries {
+				s.logger().Log(s.T().Context(), entry.level, entry.message)
+			}
+
+			for _, entry := range tc.entries {
+				s.checkNextEntry(entry.levelName, fmt.Sprintf(`msg="%s"`, entry.message))
+			}
+		})
+	}
+}
+
+func (s *FormatterStructuredTestSuite) TestAttributes() {
+	s.logger().Info(
+		"Test message",
+		slog.String("string", "value"),
+		slog.Int("int", -100),
+		slog.Uint64("uint64", 200),
+		slog.Float64("float64", 3.14),
+		slog.Bool("bool", true),
+		slog.Time("time", time.Date(1984, 1, 2, 3, 4, 5, 6, time.UTC)),
+		slog.Duration("duration", time.Minute),
+		slog.Any("err", errors.New("error value")),
+		slog.Any("any", struct {
+			Field1 string
+			Field2 int
+		}{"value", 42}),
+	)
+
+	s.checkNextEntry(
+		"INFO",
+		strings.Join([]string{
+			`msg="Test message"`,
+			`string="value"`,
+			`int=-100`,
+			`uint64=200`,
+			`float64=3.14`,
+			`bool=true`,
+			`time="1984-01-02 03:04:05"`,
+			`duration="1m0s"`,
+			`err="error value"`,
+			`any="{Field1:value Field2:42}"`,
+		}, " "),
+	)
+}
+
+func (s *FormatterStructuredTestSuite) TestGroups() {
+	s.Run("LastGroupNotEmpty", func() {
+		s.logger().
+			With(
+				slog.String("string", "value"),
+				slog.Int("int", -100),
+			).
+			WithGroup("group1").
+			With(
+				slog.Uint64("uint64", 200),
+				slog.Float64("float64", 3.14),
+			).
+			WithGroup("group2").
+			With(slog.Group("group3",
+				slog.Bool("bool", true),
+				slog.Time("time", time.Date(1984, 1, 2, 3, 4, 5, 6, time.UTC)),
+			)).
+			With(slog.Group("empty_group")).
+			WithGroup("group4").
+			Info(
+				"Test message",
+				slog.Duration("duration", time.Minute),
+				slog.Any("any", struct {
+					Field1 string
+					Field2 int
+				}{"value", 42}),
+			)
+
+		s.checkNextEntry(
+			"INFO",
+			strings.Join([]string{
+				`msg="Test message"`,
+				`string="value"`,
+				`int=-100`,
+				`group1.uint64=200`,
+				`group1.float64=3.14`,
+				`group1.group2.group3.bool=true`,
+				`group1.group2.group3.time="1984-01-02 03:04:05"`,
+				`group1.group2.group4.duration="1m0s"`,
+				`group1.group2.group4.any="{Field1:value Field2:42}"`,
+			}, " "),
+		)
+	})
+
+	s.Run("LastGroupsEmpty", func() {
+		s.logger().
+			With(
+				slog.String("string", "value"),
+				slog.Int("int", -100),
+			).
+			WithGroup("group1").
+			With(
+				slog.Uint64("uint64", 200),
+				slog.Float64("float64", 3.14),
+			).
+			WithGroup("group2").
+			With(slog.Group("group3",
+				slog.Bool("bool", true),
+				slog.Time("time", time.Date(1984, 1, 2, 3, 4, 5, 6, time.UTC)),
+			)).
+			With(slog.Group("empty_group")).
+			WithGroup("group4").
+			WithGroup("group5").
+			Info("Test message")
+
+		s.checkNextEntry(
+			"INFO",
+			strings.Join([]string{
+				`msg="Test message"`,
+				`string="value"`,
+				`int=-100`,
+				`group1.uint64=200`,
+				`group1.float64=3.14`,
+				`group1.group2.group3.bool=true`,
+				`group1.group2.group3.time="1984-01-02 03:04:05"`,
+			}, " "),
+		)
+	})
+}
+
+func (s *FormatterStructuredTestSuite) TestQuoting() {
+	s.logger().Info(
+		"Test message",
+		"key", "value",
+		"key with spaces", "value with spaces",
+		`key"with"quotes`, `value"with"quotes`,
+		"key\nwith\nnewlines", "value\nwith\nnewlines",
+		slog.Group("group name", slog.String("key", "value")),
+	)
+
+	s.checkNextEntry(
+		"INFO",
+		strings.Join([]string{
+			`msg="Test message"`,
+			`key="value"`,
+			`"key with spaces"="value with spaces"`,
+			`"key\"with\"quotes"="value\"with\"quotes"`,
+			`"key\nwith\nnewlines"="value\nwith\nnewlines"`,
+			`"group name.key"="value"`,
+		}, " "),
+	)
+}
+
+func (s *FormatterStructuredTestSuite) TestSpecialFields() {
+	s.logger().Info(
+		"Test message",
+		"stack", "stack value\nwith new lines",
+		"key1", "value1",
+		"error", errors.New("error value"),
+		"key2", "value2",
+		"source", "source value",
+		"key3", "value3",
+		slog.Group(
+			"group",
+			"stack", "stack in group",
+			"error", "error in group",
+			"source", "source in group",
+		),
+	)
+
+	s.checkNextEntry(
+		"INFO",
+		strings.Join([]string{
+			`msg="Test message"`,
+			`key1="value1"`,
+			`key2="value2"`,
+			`key3="value3"`,
+			`group.stack="stack in group"`,
+			`group.error="error in group"`,
+			`group.source="source in group"`,
+			`error="error value"`,
+			`source="source value"`,
+			`stack="stack value\nwith new lines"`,
+		}, " "),
+	)
+}
+
+func TestFormatterStructured(t *testing.T) {
+	suite.Run(t, new(FormatterStructuredTestSuite))
+}

+ 0 - 0
gliblog/gliblog.c → logger/gliblog/gliblog.c


+ 3 - 5
gliblog/gliblog.go → logger/gliblog/gliblog.go

@@ -5,7 +5,7 @@ package gliblog
 #include "gliblog.h"
 */
 import "C"
-import log "github.com/sirupsen/logrus"
+import "log/slog"
 
 //export logGLib
 func logGLib(cdomain *C.char, logLevel C.GLogLevelFlags, cstr *C.char) {
@@ -19,13 +19,11 @@ func logGLib(cdomain *C.char, logLevel C.GLogLevelFlags, cstr *C.char) {
 		domain = "GLib"
 	}
 
-	entry := log.WithField("source", domain)
-
 	switch logLevel {
 	case C.G_LOG_LEVEL_WARNING:
-		entry.Warn(str)
+		slog.Warn(str, "source", domain)
 	default:
-		entry.Error(str)
+		slog.Error(str, "source", domain)
 	}
 }
 

+ 0 - 0
gliblog/gliblog.h → logger/gliblog/gliblog.h


+ 225 - 0
logger/handler.go

@@ -0,0 +1,225 @@
+package logger
+
+import (
+	"context"
+	"errors"
+	"io"
+	"log/slog"
+	"os"
+	"slices"
+	"sync"
+	"time"
+)
+
+// LevelCritical is a log level for fatal errors
+const LevelCritical = slog.LevelError + 8
+
+// Format represents the log format
+type Format int
+
+const (
+	// FormatStructured is a key=value structured format
+	FormatStructured Format = iota
+	// FormatPretty is a human-readable format with colorization
+	FormatPretty
+	// FormatJSON is a JSON format
+	FormatJSON
+	// FormatGCP is a JSON format for Google Cloud Platform
+	FormatGCP
+)
+
+// attrGroup represents a named group of attributes.
+//
+// Both the group name and the attributes are optional.
+// A non-empty name starts a new nested group.
+type attrGroup struct {
+	name  string
+	attrs []slog.Attr
+}
+
+// Hook is an interface that defines a log hook.
+type Hook interface {
+	// Enabled checks if the hook is enabled for the given log level.
+	Enabled(lvl slog.Level) bool
+
+	// Fire is a function that gets called on log events.
+	//
+	// The slice provided in the msg parameter contains the formatted log message,
+	// followed by a newline character.
+	// It is guaranteed to be available for the duration of the hook call.
+	// The hook should not modify the contents of the msg slice except for appending.
+	Fire(time time.Time, lvl slog.Level, msg []byte) error
+}
+
+// Handler is an implementation of [slog.Handler] with support for hooks.
+type Handler struct {
+	out    io.Writer
+	config *Config
+
+	mu *sync.Mutex // Mutex is shared between all instances
+
+	groups []attrGroup
+
+	hooks []Hook
+}
+
+// NewHandler creates a new [Handler] instance.
+func NewHandler(out io.Writer, config *Config) *Handler {
+	return &Handler{
+		out:    out,
+		config: config,
+		mu:     new(sync.Mutex),
+	}
+}
+
+// AddHook adds a new hook to the handler.
+func (h *Handler) AddHook(hook Hook) {
+	if hook == nil {
+		return
+	}
+
+	h.mu.Lock()
+	defer h.mu.Unlock()
+
+	h.hooks = append(h.hooks, hook)
+}
+
+// Enabled checks if the given log level is enabled.
+func (h *Handler) Enabled(ctx context.Context, level slog.Level) bool {
+	if level >= h.config.Level.Level() {
+		return true
+	}
+
+	for _, hook := range h.hooks {
+		if hook.Enabled(level) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// withGroup returns a new handler with the given attribute group added.
+func (h *Handler) withGroup(group attrGroup) *Handler {
+	h2 := *h
+	h2.groups = append(slices.Clip(h.groups), group)
+	h2.hooks = slices.Clip(h.hooks)
+	return &h2
+}
+
+// WithAttrs returns a new handler with the given attributes added.
+func (h *Handler) WithAttrs(attrs []slog.Attr) slog.Handler {
+	if len(attrs) == 0 {
+		return h
+	}
+
+	return h.withGroup(attrGroup{
+		name:  "",
+		attrs: attrs,
+	})
+}
+
+// WithGroup returns a new handler with the given group name added.
+func (h *Handler) WithGroup(name string) slog.Handler {
+	if name == "" {
+		return h
+	}
+
+	return h.withGroup(attrGroup{
+		name:  name,
+		attrs: nil,
+	})
+}
+
+// Handle processes a log record.
+func (h *Handler) Handle(ctx context.Context, r slog.Record) error {
+	buf := newBuffer()
+	defer func() {
+		buf.free()
+	}()
+
+	h.format(r, buf)
+
+	h.mu.Lock()
+	defer h.mu.Unlock()
+
+	var errs []error
+
+	// Write log entry to output
+	_, err := h.out.Write(*buf)
+	if err != nil {
+		errs = append(errs, err)
+	}
+
+	// Fire hooks
+	for _, hook := range h.hooks {
+		if !hook.Enabled(r.Level) {
+			continue
+		}
+		if err = hook.Fire(r.Time, r.Level, slices.Clip(*buf)); err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	// If writing to output or firing hooks returned errors,
+	// join them, write to STDERR, and return
+	if err = h.joinErrors(errs); err != nil {
+		h.writeError(err)
+		return err
+	}
+
+	return nil
+}
+
+// format formats a log record and writes it to the buffer.
+func (h *Handler) format(r slog.Record, buf *buffer) {
+	groups := h.groups
+
+	// If there are no attributes in the record itself,
+	// remove empty groups from the end
+	if r.NumAttrs() == 0 {
+		for len(groups) > 0 && len(groups[len(groups)-1].attrs) == 0 {
+			groups = groups[:len(groups)-1]
+		}
+	}
+
+	// Format the log record according to the format specified in options
+	switch h.config.Format {
+	case FormatPretty:
+		newFormatterPretty(groups, buf).format(r)
+	case FormatJSON:
+		newFormatterJSON(groups, buf, false).format(r)
+	case FormatGCP:
+		newFormatterJSON(groups, buf, true).format(r)
+	default:
+		newFormatterStructured(groups, buf).format(r)
+	}
+
+	// Add line break after each log entry
+	buf.append('\n')
+}
+
+// joinErrors joins a slice of errors into a single error.
+func (h *Handler) joinErrors(errs []error) error {
+	if len(errs) == 0 {
+		return nil
+	}
+	if len(errs) == 1 {
+		return errs[0]
+	}
+	return errors.Join(errs...)
+}
+
+// writeError writes a logging error message to STDERR.
+func (h *Handler) writeError(err error) {
+	buf := newBuffer()
+	defer func() {
+		buf.free()
+	}()
+
+	r := slog.NewRecord(time.Now(), slog.LevelError, "An error occurred during logging", 0)
+	r.Add("error", err)
+
+	h.format(r, buf)
+
+	_, _ = os.Stderr.Write(*buf)
+}
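
A minimal usage sketch of the handler and hook API above. Only NewHandler, NewDefaultConfig, AddHook, and the Hook interface come from this change; the countingHook type and the main wiring are illustrative.

package main

import (
	"errors"
	"log/slog"
	"os"
	"sync/atomic"
	"time"

	"github.com/imgproxy/imgproxy/v3/logger"
)

// countingHook is a hypothetical logger.Hook that counts formatted entries
// at or above the error level.
type countingHook struct {
	n atomic.Int64
}

func (h *countingHook) Enabled(lvl slog.Level) bool {
	return lvl >= slog.LevelError
}

func (h *countingHook) Fire(t time.Time, lvl slog.Level, msg []byte) error {
	// msg already holds the formatted entry followed by a newline;
	// hooks must not modify it except by appending.
	h.n.Add(1)
	return nil
}

func main() {
	cfg := logger.NewDefaultConfig()
	cfg.Format = logger.FormatPretty

	h := logger.NewHandler(os.Stdout, &cfg)
	h.AddHook(&countingHook{})

	slog.SetDefault(slog.New(h))
	slog.Error("processing failed", "error", errors.New("boom"))
}
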

+ 105 - 0
logger/handler_test.go

@@ -0,0 +1,105 @@
+package logger
+
+import (
+	"errors"
+	"io"
+	"log/slog"
+	"testing"
+	"time"
+)
+
+var handlerBenchmarkMsg = "test message"
+var handlerBenchmarkAttrs = []any{
+	slog.String("string", "value"),
+	slog.Int("int", -100),
+	slog.Uint64("uint64", 200),
+	slog.Float64("float64", 3.14),
+	slog.Bool("bool", true),
+	slog.Time("time", time.Now()),
+	slog.Duration("duration", time.Minute+time.Second),
+	slog.Group("group", "group_key", "group_value"),
+	slog.Any("err", errors.New("error value")),
+	slog.Any("any", struct {
+		Field1 string
+		Field2 int
+	}{"value", 42}),
+}
+
+func BenchmarkFormatterPretty(b *testing.B) {
+	testHandler := NewHandler(io.Discard, &Config{
+		Level:  slog.LevelDebug,
+		Format: FormatPretty,
+	})
+	testLogger := slog.New(testHandler)
+
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		testLogger.Info(
+			handlerBenchmarkMsg,
+			handlerBenchmarkAttrs...,
+		)
+	}
+}
+
+func BenchmarkFormatterStructured(b *testing.B) {
+	testHandler := NewHandler(io.Discard, &Config{
+		Level:  slog.LevelDebug,
+		Format: FormatStructured,
+	})
+	testLogger := slog.New(testHandler)
+
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		testLogger.Info(
+			handlerBenchmarkMsg,
+			handlerBenchmarkAttrs...,
+		)
+	}
+}
+
+func BenchmarkFormatterJSON(b *testing.B) {
+	testHandler := NewHandler(io.Discard, &Config{
+		Level:  slog.LevelDebug,
+		Format: FormatJSON,
+	})
+	testLogger := slog.New(testHandler)
+
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		testLogger.Info(
+			handlerBenchmarkMsg,
+			handlerBenchmarkAttrs...,
+		)
+	}
+}
+
+func BenchmarkNativeText(b *testing.B) {
+	testHandler := slog.NewTextHandler(io.Discard, &slog.HandlerOptions{Level: slog.LevelDebug})
+	testLogger := slog.New(testHandler)
+
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		testLogger.Info(
+			handlerBenchmarkMsg,
+			handlerBenchmarkAttrs...,
+		)
+	}
+}
+
+func BenchmarkNativeJSON(b *testing.B) {
+	testHandler := slog.NewJSONHandler(io.Discard, &slog.HandlerOptions{Level: slog.LevelDebug})
+	testLogger := slog.New(testHandler)
+
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		testLogger.Info(
+			handlerBenchmarkMsg,
+			handlerBenchmarkAttrs...,
+		)
+	}
+}

+ 0 - 61
logger/log.go

@@ -1,61 +0,0 @@
-package logger
-
-import (
-	"fmt"
-	"os"
-
-	logrus "github.com/sirupsen/logrus"
-
-	"github.com/imgproxy/imgproxy/v3/config/configurators"
-)
-
-func init() {
-	// Configure logrus so it can be used before Init().
-	// Structured formatter is a compromise between JSON and pretty formatters.
-	logrus.SetOutput(os.Stdout)
-	logrus.SetFormatter(&structuredFormatter{})
-}
-
-func Init() error {
-	logrus.SetOutput(os.Stdout)
-
-	logFormat := "pretty"
-	logLevel := "info"
-
-	configurators.String(&logFormat, "IMGPROXY_LOG_FORMAT")
-	configurators.String(&logLevel, "IMGPROXY_LOG_LEVEL")
-
-	switch logFormat {
-	case "structured":
-		logrus.SetFormatter(&structuredFormatter{})
-	case "json":
-		logrus.SetFormatter(&logrus.JSONFormatter{})
-	case "gcp":
-		logrus.SetFormatter(&logrus.JSONFormatter{
-			FieldMap: logrus.FieldMap{
-				"level": "severity",
-				"msg":   "message",
-			},
-		})
-	default:
-		logrus.SetFormatter(newPrettyFormatter())
-	}
-
-	levelLogLevel, err := logrus.ParseLevel(logLevel)
-	if err != nil {
-		levelLogLevel = logrus.InfoLevel
-	}
-
-	logrus.SetLevel(levelLogLevel)
-
-	if isSyslogEnabled() {
-		slHook, err := newSyslogHook()
-		if err != nil {
-			return fmt.Errorf("Unable to connect to syslog daemon: %s", err)
-		}
-
-		logrus.AddHook(slHook)
-	}
-
-	return nil
-}

+ 70 - 0
logger/logger.go

@@ -0,0 +1,70 @@
+package logger
+
+import (
+	"context"
+	"fmt"
+	"log/slog"
+	"os"
+
+	"github.com/imgproxy/imgproxy/v3/logger/gliblog"
+	"github.com/imgproxy/imgproxy/v3/logger/syslog"
+)
+
+// We store a [Handler] instance here so that we can restore it in [Unmute]
+var handler *Handler
+
+// init initializes the default logger
+func init() {
+	cfg := NewDefaultConfig()
+	handler = NewHandler(os.Stdout, &cfg)
+	setDefaultHandler(handler)
+}
+
+// Init creates a logger and sets it as the default log/slog logger
+func Init(config *Config) error {
+	if err := config.Validate(); err != nil {
+		return err
+	}
+
+	handler = NewHandler(os.Stdout, config)
+	setDefaultHandler(handler)
+
+	gliblog.Init()
+
+	if config.Syslog.Enabled {
+		slHook, err := syslog.NewHook(&config.Syslog)
+		if err != nil {
+			return fmt.Errorf("unable to connect to syslog daemon: %w", err)
+		}
+		if slHook != nil {
+			AddHook(slHook)
+		}
+	}
+
+	return nil
+}
+
+// AddHook adds a hook to the default logger's handler.
+func AddHook(hook Hook) {
+	if handler != nil {
+		handler.AddHook(hook)
+	}
+}
+
+// Fatal logs the message at the critical level and exits the process.
+func Fatal(msg string, args ...any) {
+	slog.Log(context.Background(), LevelCritical, msg, args...)
+	os.Exit(1)
+}
+
+// Mute replaces the default handler with [slog.DiscardHandler], muting all log output
+func Mute() {
+	setDefaultHandler(slog.DiscardHandler)
+}
+
+// Unmute restores the default logger to the one created in [Init]
+func Unmute() {
+	setDefaultHandler(handler)
+}
+
+func setDefaultHandler(h slog.Handler) {
+	slog.SetDefault(slog.New(h))
+}
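
In tests, logger.Mute and logger.Unmute replace the old logrus.SetOutput(io.Discard) dance, as the updated test suites further down show. For application code, here is a sketch of the intended initialization flow; the setupLogging helper and the chosen values are hypothetical:

package main

import (
	"log/slog"

	"github.com/imgproxy/imgproxy/v3/logger"
)

// setupLogging is a hypothetical helper showing the expected call order.
func setupLogging() {
	cfg := logger.NewDefaultConfig()
	cfg.Format = logger.FormatJSON
	cfg.Level = slog.LevelDebug

	if err := logger.Init(&cfg); err != nil {
		logger.Fatal("can't initialize logging", "error", err)
	}

	// From here on, plain log/slog calls go through the imgproxy handler.
	slog.Info("logger initialized")
}
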

+ 0 - 85
logger/syslog.go

@@ -1,85 +0,0 @@
-package logger
-
-import (
-	"fmt"
-	"log/syslog"
-	"os"
-
-	"github.com/imgproxy/imgproxy/v3/config/configurators"
-	"github.com/sirupsen/logrus"
-)
-
-var (
-	syslogLevels = map[string]logrus.Level{
-		"crit":    logrus.FatalLevel,
-		"error":   logrus.ErrorLevel,
-		"warning": logrus.WarnLevel,
-		"info":    logrus.InfoLevel,
-	}
-)
-
-type syslogHook struct {
-	writer    *syslog.Writer
-	levels    []logrus.Level
-	formatter logrus.Formatter
-}
-
-func isSyslogEnabled() (enabled bool) {
-	configurators.Bool(&enabled, "IMGPROXY_SYSLOG_ENABLE")
-	return
-}
-
-func newSyslogHook() (*syslogHook, error) {
-	var (
-		network, addr string
-		level         logrus.Level
-
-		tag      = "imgproxy"
-		levelStr = "notice"
-	)
-
-	configurators.String(&network, "IMGPROXY_SYSLOG_NETWORK")
-	configurators.String(&addr, "IMGPROXY_SYSLOG_ADDRESS")
-	configurators.String(&tag, "IMGPROXY_SYSLOG_TAG")
-	configurators.String(&levelStr, "IMGPROXY_SYSLOG_LEVEL")
-
-	if l, ok := syslogLevels[levelStr]; ok {
-		level = l
-	} else {
-		level = logrus.InfoLevel
-		logrus.Warningf("Syslog level '%s' is invalid, 'info' is used", levelStr)
-	}
-
-	w, err := syslog.Dial(network, addr, syslog.LOG_NOTICE, tag)
-
-	return &syslogHook{
-		writer:    w,
-		levels:    logrus.AllLevels[:int(level)+1],
-		formatter: &structuredFormatter{},
-	}, err
-}
-
-func (hook *syslogHook) Fire(entry *logrus.Entry) error {
-	line, err := hook.formatter.Format(entry)
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "Unable to read entry, %v\n", err)
-		return err
-	}
-
-	switch entry.Level {
-	case logrus.PanicLevel, logrus.FatalLevel:
-		return hook.writer.Crit(string(line))
-	case logrus.ErrorLevel:
-		return hook.writer.Err(string(line))
-	case logrus.WarnLevel:
-		return hook.writer.Warning(string(line))
-	case logrus.InfoLevel:
-		return hook.writer.Info(string(line))
-	default:
-		return nil
-	}
-}
-
-func (hook *syslogHook) Levels() []logrus.Level {
-	return hook.levels
-}

+ 76 - 0
logger/syslog/config.go

@@ -0,0 +1,76 @@
+package syslog
+
+import (
+	"errors"
+	"fmt"
+	"log/slog"
+	"strings"
+
+	"github.com/imgproxy/imgproxy/v3/config/configurators"
+	"github.com/imgproxy/imgproxy/v3/ensure"
+)
+
+// Config is the syslog hook configuration.
+type Config struct {
+	Enabled bool
+	Level   slog.Leveler
+	Network string
+	Addr    string
+	Tag     string
+}
+
+// NewDefaultConfig returns a Config with default values.
+func NewDefaultConfig() Config {
+	return Config{
+		Enabled: false,
+		Level:   slog.LevelInfo,
+		Tag:     "imgproxy",
+	}
+}
+
+// LoadConfigFromEnv fills the config from the IMGPROXY_SYSLOG_* environment variables.
+func LoadConfigFromEnv(c *Config) *Config {
+	c = ensure.Ensure(c, NewDefaultConfig)
+
+	configurators.Bool(&c.Enabled, "IMGPROXY_SYSLOG_ENABLE")
+
+	configurators.String(&c.Network, "IMGPROXY_SYSLOG_NETWORK")
+	configurators.String(&c.Addr, "IMGPROXY_SYSLOG_ADDRESS")
+	configurators.String(&c.Tag, "IMGPROXY_SYSLOG_TAG")
+
+	var levelStr string
+	configurators.String(&levelStr, "IMGPROXY_SYSLOG_LEVEL")
+
+	if levelStr != "" {
+		c.Level = parseLevel(levelStr)
+	}
+
+	return c
+}
+
+// Validate checks the config for errors.
+func (c *Config) Validate() error {
+	if !c.Enabled {
+		return nil
+	}
+
+	if c.Network != "" && c.Addr == "" {
+		return errors.New("Syslog address is required if syslog network is set")
+	}
+
+	return nil
+}
+
+func parseLevel(str string) slog.Level {
+	switch strings.ToLower(str) {
+	case "debug":
+		return slog.LevelDebug
+	case "info":
+		return slog.LevelInfo
+	case "warn":
+		return slog.LevelWarn
+	case "error":
+		return slog.LevelError
+	case "crit":
+		return slog.LevelError + 8
+		return slog.LevelError + 8 // matches logger.LevelCritical
+		slog.Warn(fmt.Sprintf("Syslog level '%s' is invalid, 'info' is used", str))
+		return slog.LevelInfo
+	}
+}

+ 54 - 0
logger/syslog/syslog.go

@@ -0,0 +1,54 @@
+package syslog
+
+import (
+	"log/slog"
+	"log/syslog"
+	"time"
+)
+
+// Hook is a [logger.Hook] implementation for syslog.
+type Hook struct {
+	writer *syslog.Writer
+	level  slog.Leveler
+}
+
+// NewHook creates a new syslog hook.
+// It returns nil if the syslog hook is not enabled in the config.
+func NewHook(config *Config) (*Hook, error) {
+	if err := config.Validate(); err != nil {
+		return nil, err
+	}
+
+	if !config.Enabled {
+		return nil, nil
+	}
+
+	w, err := syslog.Dial(config.Network, config.Addr, syslog.LOG_INFO, config.Tag)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Hook{
+		writer: w,
+		level:  config.Level.Level(),
+	}, nil
+}
+
+// Enabled reports whether the hook should be fired for the given log level.
+func (hook *Hook) Enabled(level slog.Level) bool {
+	return level >= hook.level.Level()
+}
+
+// Fire writes the formatted log message to syslog with a severity mapped from the slog level.
+func (hook *Hook) Fire(time time.Time, lvl slog.Level, msg []byte) error {
+	msgStr := string(msg)
+
+	switch {
+	case lvl < slog.LevelInfo:
+		return hook.writer.Debug(msgStr)
+	case lvl < slog.LevelWarn:
+		return hook.writer.Info(msgStr)
+	case lvl < slog.LevelError:
+		return hook.writer.Warning(msgStr)
+	default:
+		return hook.writer.Err(msgStr)
+	}
+}
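
For reference, the wiring that logger.Init performs when syslog is enabled, written out by hand; the attachSyslog helper is illustrative:

package main

import (
	"github.com/imgproxy/imgproxy/v3/logger"
	"github.com/imgproxy/imgproxy/v3/logger/syslog"
)

// attachSyslog is a hypothetical helper mirroring what logger.Init does
// when config.Syslog.Enabled is true.
func attachSyslog() error {
	cfg := syslog.NewDefaultConfig()
	loaded := syslog.LoadConfigFromEnv(&cfg)

	hook, err := syslog.NewHook(loaded)
	if err != nil {
		return err
	}

	// NewHook returns a nil hook when syslog is disabled in the config.
	if hook != nil {
		logger.AddHook(hook)
	}

	return nil
}
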

+ 11 - 8
memory/stats.go

@@ -1,23 +1,26 @@
 package memory
 
 import (
+	"log/slog"
 	"runtime"
 
-	log "github.com/sirupsen/logrus"
-
 	"github.com/imgproxy/imgproxy/v3/vips"
 )
 
 func LogStats() {
 	var m runtime.MemStats
 	runtime.ReadMemStats(&m)
-	log.Debugf(
-		"GO MEMORY USAGE: Sys=%d HeapIdle=%d HeapInuse=%d",
-		m.Sys/1024/1024, m.HeapIdle/1024/1024, m.HeapInuse/1024/1024,
+	slog.Debug(
+		"GO MEMORY USAGE",
+		"sys", m.Sys/1024/1024,
+		"heap_idle", m.HeapIdle/1024/1024,
+		"heap_inuse", m.HeapInuse/1024/1024,
 	)
 
-	log.Debugf(
-		"VIPS MEMORY USAGE: Cur=%d Max=%d Allocs=%d",
-		int(vips.GetMem())/1024/1024, int(vips.GetMemHighwater())/1024/1024, int(vips.GetAllocs()),
+	slog.Debug(
+		"VIPS MEMORY USAGE",
+		"cur", int(vips.GetMem())/1024/1024,
+		"max", int(vips.GetMemHighwater())/1024/1024,
+		"allocs", int(vips.GetAllocs()),
 	)
 }

+ 2 - 2
monitoring/cloudwatch/cloudwatch.go

@@ -3,6 +3,7 @@ package cloudwatch
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"slices"
 	"sync"
 	"time"
@@ -11,7 +12,6 @@ import (
 	awsConfig "github.com/aws/aws-sdk-go-v2/config"
 	"github.com/aws/aws-sdk-go-v2/service/cloudwatch"
 	cloudwatchTypes "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types"
-	"github.com/sirupsen/logrus"
 
 	"github.com/imgproxy/imgproxy/v3/config"
 	"github.com/imgproxy/imgproxy/v3/monitoring/stats"
@@ -268,7 +268,7 @@ func runMetricsCollector() {
 				defer cancel()
 
 				if _, err := client.PutMetricData(ctx, &input); err != nil {
-					logrus.Warnf("Can't send CloudWatch metrics: %s", err)
+					slog.Warn(fmt.Sprintf("Can't send CloudWatch metrics: %s", err))
 				}
 			}()
 		case <-collectorCtx.Done():

+ 4 - 3
monitoring/datadog/datadog.go

@@ -2,6 +2,8 @@ package datadog
 
 import (
 	"context"
+	"fmt"
+	"log/slog"
 	"net"
 	"net/http"
 	"os"
@@ -14,7 +16,6 @@ import (
 	"github.com/DataDog/dd-trace-go/v2/ddtrace/ext"
 	"github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
 	"github.com/felixge/httpsnoop"
-	log "github.com/sirupsen/logrus"
 
 	"github.com/imgproxy/imgproxy/v3/config"
 	"github.com/imgproxy/imgproxy/v3/monitoring/errformat"
@@ -86,7 +87,7 @@ func Init() {
 		enabledMetrics = true
 		go runMetricsCollector()
 	} else {
-		log.Warnf("Can't initialize DogStatsD client: %s", err)
+		slog.Warn(fmt.Sprintf("Can't initialize DogStatsD client: %s", err))
 	}
 }
 
@@ -239,5 +240,5 @@ type dataDogLogger struct {
 }
 
 func (l dataDogLogger) Log(msg string) {
-	log.Info(msg)
+	slog.Info(msg)
 }

+ 8 - 3
monitoring/newrelic/newrelic.go

@@ -3,6 +3,7 @@ package newrelic
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"math"
 	"net/http"
 	"reflect"
@@ -12,7 +13,6 @@ import (
 
 	"github.com/newrelic/go-agent/v3/newrelic"
 	"github.com/newrelic/newrelic-telemetry-sdk-go/telemetry"
-	log "github.com/sirupsen/logrus"
 
 	"github.com/imgproxy/imgproxy/v3/config"
 	"github.com/imgproxy/imgproxy/v3/monitoring/errformat"
@@ -89,19 +89,24 @@ func Init() error {
 		metricsURL = euMetricURL
 	}
 
+	errLogger := slog.NewLogLogger(
+		slog.With("from", "newrelic").Handler(),
+		slog.LevelWarn,
+	)
+
 	harvester, err = telemetry.NewHarvester(
 		telemetry.ConfigAPIKey(config.NewRelicKey),
 		telemetry.ConfigCommonAttributes(harvesterAttributes),
 		telemetry.ConfigHarvestPeriod(0), // Don't harvest automatically
 		telemetry.ConfigMetricsURLOverride(metricsURL),
-		telemetry.ConfigBasicErrorLogger(log.StandardLogger().WithField("from", "newrelic").WriterLevel(log.WarnLevel)),
+		telemetry.ConfigBasicErrorLogger(errLogger.Writer()),
 	)
 	if err == nil {
 		harvesterCtx, harvesterCtxCancel = context.WithCancel(context.Background())
 		enabledHarvester = true
 		go runMetricsCollector()
 	} else {
-		log.Warnf("Can't init New Relic telemetry harvester: %s", err)
+		slog.Warn(fmt.Sprintf("Can't init New Relic telemetry harvester: %s", err))
 	}
 
 	enabled = true

+ 13 - 15
monitoring/otel/otel.go

@@ -6,6 +6,7 @@ import (
 	"crypto/x509"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net/http"
 	"os"
 	"reflect"
@@ -17,7 +18,6 @@ import (
 
 	"github.com/felixge/httpsnoop"
 	"github.com/shirou/gopsutil/process"
-	"github.com/sirupsen/logrus"
 	ec2 "go.opentelemetry.io/contrib/detectors/aws/ec2/v2"
 	"go.opentelemetry.io/contrib/detectors/aws/ecs"
 	"go.opentelemetry.io/contrib/detectors/aws/eks"
@@ -78,7 +78,7 @@ func Init() error {
 		return nil
 	}
 
-	otel.SetErrorHandler(&errorHandler{entry: logrus.WithField("from", "opentelemetry")})
+	otel.SetErrorHandler(errorHandler{})
 
 	var (
 		traceExporter  *otlptrace.Exporter
@@ -123,7 +123,7 @@ func Init() error {
 	if merged, merr := resource.Merge(awsRes, res); merr == nil {
 		res = merged
 	} else {
-		logrus.Warnf("Can't add AWS attributes to OpenTelemetry: %s", merr)
+		slog.Warn(fmt.Sprintf("Can't add AWS attributes to OpenTelemetry: %s", merr))
 	}
 
 	opts := []sdktrace.TracerProviderOption{
@@ -185,7 +185,7 @@ func Init() error {
 func mapDeprecatedConfig() {
 	endpoint := os.Getenv("IMGPROXY_OPEN_TELEMETRY_ENDPOINT")
 	if len(endpoint) > 0 {
-		logrus.Warn("The IMGPROXY_OPEN_TELEMETRY_ENDPOINT config is deprecated. Use IMGPROXY_OPEN_TELEMETRY_ENABLE and OTEL_EXPORTER_OTLP_ENDPOINT instead. See https://docs.imgproxy.net/latest/monitoring/open_telemetry#deprecated-environment-variables")
+		slog.Warn("The IMGPROXY_OPEN_TELEMETRY_ENDPOINT config is deprecated. Use IMGPROXY_OPEN_TELEMETRY_ENABLE and OTEL_EXPORTER_OTLP_ENDPOINT instead. See https://docs.imgproxy.net/latest/monitoring/open_telemetry#deprecated-environment-variables")
 		config.OpenTelemetryEnable = true
 	}
 
@@ -196,7 +196,7 @@ func mapDeprecatedConfig() {
 	protocol := "grpc"
 
 	if prot := os.Getenv("IMGPROXY_OPEN_TELEMETRY_PROTOCOL"); len(prot) > 0 {
-		logrus.Warn("The IMGPROXY_OPEN_TELEMETRY_PROTOCOL config is deprecated. Use OTEL_EXPORTER_OTLP_PROTOCOL instead. See https://docs.imgproxy.net/latest/monitoring/open_telemetry#deprecated-environment-variables")
+		slog.Warn("The IMGPROXY_OPEN_TELEMETRY_PROTOCOL config is deprecated. Use OTEL_EXPORTER_OTLP_PROTOCOL instead. See https://docs.imgproxy.net/latest/monitoring/open_telemetry#deprecated-environment-variables")
 		protocol = prot
 		os.Setenv("OTEL_EXPORTER_OTLP_PROTOCOL", protocol)
 	}
@@ -207,7 +207,7 @@ func mapDeprecatedConfig() {
 		switch protocol {
 		case "grpc":
 			if insecure, _ := strconv.ParseBool(os.Getenv("IMGPROXY_OPEN_TELEMETRY_GRPC_INSECURE")); insecure {
-				logrus.Warn("The IMGPROXY_OPEN_TELEMETRY_GRPC_INSECURE config is deprecated. Use OTEL_EXPORTER_OTLP_ENDPOINT with the `http://` schema instead. See https://docs.imgproxy.net/latest/monitoring/open_telemetry#deprecated-environment-variables")
+				slog.Warn("The IMGPROXY_OPEN_TELEMETRY_GRPC_INSECURE config is deprecated. Use OTEL_EXPORTER_OTLP_ENDPOINT with the `http://` schema instead. See https://docs.imgproxy.net/latest/monitoring/open_telemetry#deprecated-environment-variables")
 				schema = "http"
 			}
 		case "http":
@@ -218,17 +218,17 @@ func mapDeprecatedConfig() {
 	}
 
 	if serviceName := os.Getenv("IMGPROXY_OPEN_TELEMETRY_SERVICE_NAME"); len(serviceName) > 0 {
-		logrus.Warn("The IMGPROXY_OPEN_TELEMETRY_SERVICE_NAME config is deprecated. Use OTEL_SERVICE_NAME instead. See https://docs.imgproxy.net/latest/monitoring/open_telemetry#deprecated-environment-variables")
+		slog.Warn("The IMGPROXY_OPEN_TELEMETRY_SERVICE_NAME config is deprecated. Use OTEL_SERVICE_NAME instead. See https://docs.imgproxy.net/latest/monitoring/open_telemetry#deprecated-environment-variables")
 		os.Setenv("OTEL_SERVICE_NAME", serviceName)
 	}
 
 	if propagators := os.Getenv("IMGPROXY_OPEN_TELEMETRY_PROPAGATORS"); len(propagators) > 0 {
-		logrus.Warn("The IMGPROXY_OPEN_TELEMETRY_PROPAGATORS config is deprecated. Use OTEL_PROPAGATORS instead. See https://docs.imgproxy.net/latest/monitoring/open_telemetry#deprecated-environment-variables")
+		slog.Warn("The IMGPROXY_OPEN_TELEMETRY_PROPAGATORS config is deprecated. Use OTEL_PROPAGATORS instead. See https://docs.imgproxy.net/latest/monitoring/open_telemetry#deprecated-environment-variables")
 		os.Setenv("OTEL_PROPAGATORS", propagators)
 	}
 
 	if timeout := os.Getenv("IMGPROXY_OPEN_TELEMETRY_CONNECTION_TIMEOUT"); len(timeout) > 0 {
-		logrus.Warn("The IMGPROXY_OPEN_TELEMETRY_CONNECTION_TIMEOUT config is deprecated. Use OTEL_EXPORTER_OTLP_TIMEOUT instead. See https://docs.imgproxy.net/latest/monitoring/open_telemetry#deprecated-environment-variables")
+		slog.Warn("The IMGPROXY_OPEN_TELEMETRY_CONNECTION_TIMEOUT config is deprecated. Use OTEL_EXPORTER_OTLP_TIMEOUT instead. See https://docs.imgproxy.net/latest/monitoring/open_telemetry#deprecated-environment-variables")
 
 		if to, _ := strconv.Atoi(timeout); to > 0 {
 			os.Setenv("OTEL_EXPORTER_OTLP_TIMEOUT", strconv.Itoa(to*1000))
@@ -711,7 +711,7 @@ func AddGaugeFunc(name, desc, u string, f GaugeFunc) {
 		}),
 	)
 	if err != nil {
-		logrus.Warnf("Can't add %s gauge to OpenTelemetry: %s", name, err)
+		slog.Warn(fmt.Sprintf("Can't add %s gauge to OpenTelemetry: %s", name, err))
 	}
 }
 
@@ -739,10 +739,8 @@ func SetBufferMaxSize(t string, size int) {
 	}
 }
 
-type errorHandler struct {
-	entry *logrus.Entry
-}
+type errorHandler struct{}
 
-func (h *errorHandler) Handle(err error) {
-	h.entry.Warn(err.Error())
+func (h errorHandler) Handle(err error) {
+	slog.Warn(err.Error(), "source", "opentelemetry")
 }

+ 6 - 3
monitoring/otel/otel_test.go

@@ -1,21 +1,24 @@
 package otel
 
 import (
-	"io"
 	"os"
 	"strings"
 	"testing"
 
-	"github.com/sirupsen/logrus"
 	"github.com/stretchr/testify/suite"
 
 	"github.com/imgproxy/imgproxy/v3/config"
+	"github.com/imgproxy/imgproxy/v3/logger"
 )
 
 type OtelTestSuite struct{ suite.Suite }
 
 func (s *OtelTestSuite) SetupSuite() {
-	logrus.SetOutput(io.Discard)
+	logger.Mute()
+}
+
+func (s *OtelTestSuite) TearDownSuite() {
+	logger.Unmute()
 }
 
 func (s *OtelTestSuite) SetupTest() {

+ 3 - 3
monitoring/prometheus/prometheus.go

@@ -3,6 +3,7 @@ package prometheus
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"net/http"
 	"strconv"
 	"time"
@@ -10,7 +11,6 @@ import (
 	"github.com/felixge/httpsnoop"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
-	log "github.com/sirupsen/logrus"
 
 	"github.com/imgproxy/imgproxy/v3/config"
 	"github.com/imgproxy/imgproxy/v3/monitoring/stats"
@@ -167,9 +167,9 @@ func StartServer(cancel context.CancelFunc) error {
 	}
 
 	go func() {
-		log.Infof("Starting Prometheus server at %s", config.PrometheusBind)
+		slog.Info(fmt.Sprintf("Starting Prometheus server at %s", config.PrometheusBind))
 		if err := s.Serve(l); err != nil && err != http.ErrServerClosed {
-			log.Error(err)
+			slog.Error(err.Error())
 		}
 		cancel()
 	}()

+ 3 - 2
options/apply.go

@@ -2,13 +2,14 @@ package options
 
 import (
 	"encoding/base64"
+	"fmt"
+	"log/slog"
 	"slices"
 	"strconv"
 	"time"
 
 	"github.com/imgproxy/imgproxy/v3/imagetype"
 	"github.com/imgproxy/imgproxy/v3/vips"
-	log "github.com/sirupsen/logrus"
 )
 
 func applyWidthOption(po *ProcessingOptions, args []string) error {
@@ -540,7 +541,7 @@ func applyPresetOption(f *Factory, po *ProcessingOptions, args []string, usedPre
 	for _, preset := range args {
 		if p, ok := f.presets[preset]; ok {
 			if slices.Contains(usedPresets, preset) {
-				log.Warningf("Recursive preset usage is detected: %s", preset)
+				slog.Warn(fmt.Sprintf("Recursive preset usage is detected: %s", preset))
 				continue
 			}
 

+ 3 - 3
options/parse.go

@@ -1,10 +1,10 @@
 package options
 
 import (
+	"fmt"
+	"log/slog"
 	"slices"
 	"strconv"
-
-	log "github.com/sirupsen/logrus"
 )
 
 // ensureMaxArgs checks if the number of arguments is as expected
@@ -24,7 +24,7 @@ func parseBool(value *bool, name string, args ...string) error {
 	b, err := strconv.ParseBool(args[0])
 
 	if err != nil {
-		log.Warningf("%s `%s` is not a valid boolean value. Treated as false", name, args[0])
+		slog.Warn(fmt.Sprintf("%s `%s` is not a valid boolean value. Treated as false", name, args[0]))
 	}
 
 	*value = b

+ 5 - 0
options/processing_options.go

@@ -1,6 +1,7 @@
 package options
 
 import (
+	"log/slog"
 	"maps"
 	"net/http"
 	"slices"
@@ -183,6 +184,10 @@ func (po *ProcessingOptions) MarshalJSON() ([]byte, error) {
 	return po.Diff().MarshalJSON()
 }
 
+func (po *ProcessingOptions) LogValue() slog.Value {
+	return po.Diff().LogValue()
+}
+
 // Default returns the ProcessingOptions instance with defaults set
 func (po *ProcessingOptions) Default() *ProcessingOptions {
 	return po.defaultOptions.clone()

+ 18 - 5
processing/fix_size.go

@@ -1,11 +1,12 @@
 package processing
 
 import (
+	"fmt"
+	"log/slog"
 	"math"
 
 	"github.com/imgproxy/imgproxy/v3/imagetype"
 	"github.com/imgproxy/imgproxy/v3/vips"
-	log "github.com/sirupsen/logrus"
 )
 
 const (
@@ -28,7 +29,10 @@ func fixWebpSize(img *vips.Image) error {
 		return err
 	}
 
-	log.Warningf("WebP dimension size is limited to %d. The image is rescaled to %dx%d", int(webpMaxDimension), img.Width(), img.Height())
+	slog.Warn(fmt.Sprintf(
+		"WebP dimension size is limited to %d. The image is rescaled to %dx%d",
+		int(webpMaxDimension), img.Width(), img.Height(),
+	))
 
 	return nil
 }
@@ -45,7 +49,10 @@ func fixHeifSize(img *vips.Image) error {
 		return err
 	}
 
-	log.Warningf("AVIF/HEIC dimension size is limited to %d. The image is rescaled to %dx%d", int(heifMaxDimension), img.Width(), img.Height())
+	slog.Warn(fmt.Sprintf(
+		"AVIF/HEIC dimension size is limited to %d. The image is rescaled to %dx%d",
+		int(heifMaxDimension), img.Width(), img.Height(),
+	))
 
 	return nil
 }
@@ -66,7 +73,10 @@ func fixGifSize(img *vips.Image) error {
 		return err
 	}
 
-	log.Warningf("GIF resolution is limited to %d and dimension size is limited to %d. The image is rescaled to %dx%d", int(gifMaxResolution), int(gifMaxDimension), img.Width(), img.Height())
+	slog.Warn(fmt.Sprintf(
+		"GIF resolution is limited to %d and dimension size is limited to %d. The image is rescaled to %dx%d",
+		int(gifMaxResolution), int(gifMaxDimension), img.Width(), img.Height(),
+	))
 
 	return nil
 }
@@ -83,7 +93,10 @@ func fixIcoSize(img *vips.Image) error {
 		return err
 	}
 
-	log.Warningf("ICO dimension size is limited to %d. The image is rescaled to %dx%d", int(icoMaxDimension), img.Width(), img.Height())
+	slog.Warn(fmt.Sprintf(
+		"ICO dimension size is limited to %d. The image is rescaled to %dx%d",
+		int(icoMaxDimension), img.Width(), img.Height(),
+	))
 
 	return nil
 }

+ 7 - 7
processing/processing.go

@@ -3,11 +3,11 @@ package processing
 import (
 	"context"
 	"errors"
+	"fmt"
+	"log/slog"
 	"runtime"
 	"slices"
 
-	log "github.com/sirupsen/logrus"
-
 	"github.com/imgproxy/imgproxy/v3/auximageprovider"
 	"github.com/imgproxy/imgproxy/v3/config"
 	"github.com/imgproxy/imgproxy/v3/imagedata"
@@ -53,7 +53,7 @@ func ValidatePreferredFormats() error {
 
 	for _, t := range config.PreferredFormats {
 		if !vips.SupportsSave(t) {
-			log.Warnf("%s can't be a preferred format as it's saving is not supported", t)
+			slog.Warn(fmt.Sprintf("%s can't be a preferred format as it's saving is not supported", t))
 		} else {
 			filtered = append(filtered, t)
 		}
@@ -183,7 +183,7 @@ func initialLoadImage(
 		if err := img.LoadThumbnail(imgdata); err == nil {
 			return true, nil
 		} else {
-			log.Debugf("Can't load thumbnail: %s", err)
+			slog.Debug(fmt.Sprintf("Can't load thumbnail: %s", err))
 		}
 	}
 
@@ -425,7 +425,7 @@ func transformAnimated(
 	// NOTE: END TEMPORARY BLOCK
 
 	if po.Trim.Enabled {
-		log.Warning("Trim is not supported for animated images")
+		slog.Warn("Trim is not supported for animated images")
 		po.Trim.Enabled = false
 	}
 
@@ -550,10 +550,10 @@ func saveImage(
 			po.Format = imagetype.JPEG
 		}
 
-		log.Warningf(
+		slog.Warn(fmt.Sprintf(
 			"Minimal dimension of AVIF is 16, current image size is %dx%d. Image will be saved as %s",
 			img.Width(), img.Height(), po.Format,
-		)
+		))
 	}
 
 	// If we want and can fit the image into the specified number of bytes,

+ 6 - 3
processing/processing_test.go

@@ -3,18 +3,17 @@ package processing
 import (
 	"context"
 	"fmt"
-	"io"
 	"os"
 	"path/filepath"
 	"testing"
 
-	"github.com/sirupsen/logrus"
 	"github.com/stretchr/testify/suite"
 
 	"github.com/imgproxy/imgproxy/v3/config"
 	"github.com/imgproxy/imgproxy/v3/fetcher"
 	"github.com/imgproxy/imgproxy/v3/ierrors"
 	"github.com/imgproxy/imgproxy/v3/imagedata"
+	"github.com/imgproxy/imgproxy/v3/logger"
 	"github.com/imgproxy/imgproxy/v3/options"
 	"github.com/imgproxy/imgproxy/v3/security"
 	"github.com/imgproxy/imgproxy/v3/vips"
@@ -36,7 +35,7 @@ func (s *ProcessingTestSuite) SetupSuite() {
 
 	s.Require().NoError(vips.Init())
 
-	logrus.SetOutput(io.Discard)
+	logger.Mute()
 
 	fc := fetcher.NewDefaultConfig()
 	f, err := fetcher.New(&fc)
@@ -57,6 +56,10 @@ func (s *ProcessingTestSuite) SetupSuite() {
 	s.Require().NoError(err)
 }
 
+func (s *ProcessingTestSuite) TearDownSuite() {
+	logger.Unmute()
+}
+
 func (s *ProcessingTestSuite) openFile(name string) imagedata.ImageData {
 	wd, err := os.Getwd()
 	s.Require().NoError(err)

+ 2 - 3
processing/scale_on_load.go

@@ -1,10 +1,9 @@
 package processing
 
 import (
+	"log/slog"
 	"math"
 
-	log "github.com/sirupsen/logrus"
-
 	"github.com/imgproxy/imgproxy/v3/imagedata"
 	"github.com/imgproxy/imgproxy/v3/imagetype"
 	"github.com/imgproxy/imgproxy/v3/imath"
@@ -66,7 +65,7 @@ func scaleOnLoad(c *Context) error {
 		defer thumbnail.Clear()
 
 		if err := thumbnail.LoadThumbnail(c.ImgData); err != nil {
-			log.Debugf("Can't load thumbnail: %s", err)
+			slog.Debug("Can't load thumbnail", "error", err)
 			return nil
 		}
 

+ 2 - 3
reuseport/listen_no_reuseport.go

@@ -4,14 +4,13 @@
 package reuseport
 
 import (
+	"log/slog"
 	"net"
-
-	log "github.com/sirupsen/logrus"
 )
 
 func Listen(network, address string, reuse bool) (net.Listener, error) {
 	if reuse {
-		log.Warning("SO_REUSEPORT support is not implemented for your OS or Go version")
+		slog.Warn("SO_REUSEPORT support is not implemented for your OS or Go version")
 	}
 
 	return net.Listen(network, address)

+ 3 - 4
security/config.go

@@ -2,12 +2,11 @@ package security
 
 import (
 	"fmt"
+	"log/slog"
 	"regexp"
 
 	"github.com/imgproxy/imgproxy/v3/config"
 	"github.com/imgproxy/imgproxy/v3/ensure"
-
-	log "github.com/sirupsen/logrus"
 )
 
 // Config is the package-local configuration
@@ -75,11 +74,11 @@ func (c *Config) Validate() error {
 	}
 
 	if len(c.Keys) == 0 {
-		log.Warning("No keys defined, so signature checking is disabled")
+		slog.Warn("No keys defined, so signature checking is disabled")
 	}
 
 	if len(c.Salts) == 0 {
-		log.Warning("No salts defined, so signature checking is disabled")
+		slog.Warn("No salts defined, so signature checking is disabled")
 	}
 
 	if c.SignatureSize < 1 || c.SignatureSize > 32 {

+ 27 - 26
server/logging.go

@@ -1,11 +1,13 @@
 package server
 
 import (
+	"context"
+	"fmt"
+	"log/slog"
 	"net"
 	"net/http"
 
 	"github.com/imgproxy/imgproxy/v3/ierrors"
-	log "github.com/sirupsen/logrus"
 )
 
 func LogRequest(reqID string, r *http.Request) {
@@ -13,52 +15,51 @@ func LogRequest(reqID string, r *http.Request) {
 
 	clientIP, _, _ := net.SplitHostPort(r.RemoteAddr)
 
-	log.WithFields(log.Fields{
-		"request_id": reqID,
-		"method":     r.Method,
-		"client_ip":  clientIP,
-	}).Infof("Started %s", path)
+	slog.Info(
+		fmt.Sprintf("Started %s", path),
+		"request_id", reqID,
+		"method", r.Method,
+		"client_ip", clientIP,
+	)
 }
 
-func LogResponse(reqID string, r *http.Request, status int, err *ierrors.Error, additional ...log.Fields) {
-	var level log.Level
+func LogResponse(reqID string, r *http.Request, status int, err *ierrors.Error, additional ...slog.Attr) {
+	var level slog.Level
 
 	switch {
 	case status >= 500 || (err != nil && err.StatusCode() >= 500):
-		level = log.ErrorLevel
+		level = slog.LevelError
 	case status >= 400:
-		level = log.WarnLevel
+		level = slog.LevelWarn
 	default:
-		level = log.InfoLevel
+		level = slog.LevelInfo
 	}
 
 	clientIP, _, _ := net.SplitHostPort(r.RemoteAddr)
 
-	fields := log.Fields{
-		"request_id": reqID,
-		"method":     r.Method,
-		"status":     status,
-		"client_ip":  clientIP,
+	attrs := []slog.Attr{
+		slog.String("request_id", reqID),
+		slog.String("method", r.Method),
+		slog.Int("status", status),
+		slog.String("client_ip", clientIP),
 	}
 
 	if err != nil {
-		fields["error"] = err
+		attrs = append(attrs, slog.String("error", err.Error()))
 
-		if level <= log.ErrorLevel {
+		if level >= slog.LevelError {
 			if stack := err.FormatStack(); len(stack) > 0 {
-				fields["stack"] = stack
+				attrs = append(attrs, slog.String("stack", stack))
 			}
 		}
 	}
 
-	for _, f := range additional {
-		for k, v := range f {
-			fields[k] = v
-		}
-	}
+	attrs = append(attrs, additional...)
 
-	log.WithFields(fields).Logf(
+	slog.LogAttrs(
+		context.Background(),
 		level,
-		"Completed in %s %s", requestStartedAt(r.Context()), r.RequestURI,
+		fmt.Sprintf("Completed in %s %s", requestStartedAt(r.Context()), r.RequestURI),
+		attrs...,
 	)
 }
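
Call sites now pass extra response fields as slog.Attr values instead of logrus Fields. A hypothetical caller (the package, attribute names, and variables are illustrative):

package handlers

import (
	"log/slog"
	"net/http"
	"time"

	"github.com/imgproxy/imgproxy/v3/server"
)

// logOK shows the new LogResponse signature with additional attributes.
func logOK(reqID string, r *http.Request, imageURL string, elapsed time.Duration) {
	server.LogResponse(
		reqID, r, http.StatusOK, nil,
		slog.String("image_url", imageURL),
		slog.Int64("processing_time_ms", elapsed.Milliseconds()),
	)
}
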

+ 5 - 6
server/responsewriter/config_test.go

@@ -2,13 +2,12 @@ package responsewriter
 
 import (
 	"fmt"
-	"io"
-	"os"
 	"testing"
 
-	"github.com/imgproxy/imgproxy/v3/config"
-	"github.com/sirupsen/logrus"
 	"github.com/stretchr/testify/suite"
+
+	"github.com/imgproxy/imgproxy/v3/config"
+	"github.com/imgproxy/imgproxy/v3/logger"
 )
 
 type ResponseWriterConfigSuite struct {
@@ -16,11 +15,11 @@ type ResponseWriterConfigSuite struct {
 }
 
 func (s *ResponseWriterConfigSuite) SetupSuite() {
-	logrus.SetOutput(io.Discard)
+	logger.Mute()
 }
 
 func (s *ResponseWriterConfigSuite) TearDownSuite() {
-	logrus.SetOutput(os.Stdout)
+	logger.Unmute()
 }
 
 func (s *ResponseWriterConfigSuite) TestLoadingVaryValueFromEnv() {

+ 7 - 8
server/server.go

@@ -3,11 +3,10 @@ package server
 import (
 	"context"
 	"fmt"
-	golog "log"
+	"log/slog"
 	"net"
 	"net/http"
 
-	log "github.com/sirupsen/logrus"
 	"golang.org/x/net/netutil"
 
 	"github.com/imgproxy/imgproxy/v3/config"
@@ -39,9 +38,9 @@ func Start(cancel context.CancelFunc, router *Router) (*Server, error) {
 		l = netutil.LimitListener(l, router.config.MaxClients)
 	}
 
-	errLogger := golog.New(
-		log.WithField("source", "http_server").WriterLevel(log.ErrorLevel),
-		"", 0,
+	errLogger := slog.NewLogLogger(
+		slog.With("source", "http_server").Handler(),
+		slog.LevelError,
 	)
 
 	addr := l.Addr()
@@ -60,10 +59,10 @@ func Start(cancel context.CancelFunc, router *Router) (*Server, error) {
 	}
 
 	go func() {
-		log.Infof("Starting server at %s", router.config.Bind)
+		slog.Info(fmt.Sprintf("Starting server at %s", router.config.Bind))
 
 		if err := srv.Serve(l); err != nil && err != http.ErrServerClosed {
-			log.Error(err)
+			slog.Error(err.Error(), "source", "http_server")
 		}
 
 		cancel()
@@ -78,7 +77,7 @@ func Start(cancel context.CancelFunc, router *Router) (*Server, error) {
 
 // Shutdown gracefully shuts down the server
 func (s *Server) Shutdown(ctx context.Context) {
-	log.Info("Shutting down the server...")
+	slog.Info("Shutting down the server...")
 
 	ctx, close := context.WithTimeout(ctx, s.router.config.GracefulTimeout)
 	defer close()
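
Both this server and the New Relic integration rely on the standard slog.NewLogLogger bridge to hand a *log.Logger (or an io.Writer via its Writer method) to code that doesn't speak slog. A condensed sketch of the pattern; the address is illustrative:

package main

import (
	"log/slog"
	"net/http"
)

// newServer builds an http.Server whose internal errors are routed
// through the default slog handler with a "source" attribute.
func newServer() *http.Server {
	errLog := slog.NewLogLogger(
		slog.With("source", "http_server").Handler(),
		slog.LevelError,
	)

	return &http.Server{
		Addr:     ":8080",
		ErrorLog: errLog,
	}
}
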

+ 9 - 0
structdiff/diff.go

@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
+	"log/slog"
 	"reflect"
 	"strings"
 )
@@ -67,6 +68,14 @@ func (d Entries) MarshalJSON() ([]byte, error) {
 	return buf.Bytes(), nil
 }
 
+func (d Entries) LogValue() slog.Value {
+	attrs := make([]slog.Attr, 0, len(d))
+	for _, e := range d {
+		attrs = append(attrs, slog.Any(e.Name, e.Value))
+	}
+	return slog.GroupValue(attrs...)
+}
+
 func (d Entries) flatten(m map[string]interface{}, prefix string) {
 	for _, e := range d {
 		key := e.Name
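
Since ProcessingOptions and structdiff.Entries implement slog.LogValuer, they can be passed to slog directly and the formatters expand them into a group. A sketch (the logOptions helper, the po parameter, and the field names in the comment are illustrative):

package main

import (
	"log/slog"

	"github.com/imgproxy/imgproxy/v3/options"
)

// logOptions shows how the LogValue implementations plug into slog.
func logOptions(po *options.ProcessingOptions) {
	// The structured formatter renders the group with a dotted prefix,
	// e.g. options.width=300 options.height=200; only options that differ
	// from the defaults are included.
	slog.Info("Processing image", "options", po)
}
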

+ 9 - 10
vips/vips.go

@@ -11,6 +11,7 @@ import "C"
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"math"
 	"net/http"
 	"os"
@@ -21,8 +22,6 @@ import (
 	"time"
 	"unsafe"
 
-	log "github.com/sirupsen/logrus"
-
 	"github.com/imgproxy/imgproxy/v3/config"
 	"github.com/imgproxy/imgproxy/v3/ierrors"
 	"github.com/imgproxy/imgproxy/v3/imagedata"
@@ -414,7 +413,7 @@ func (img *Image) Load(imgdata imagedata.ImageData, shrink int, scale float64, p
 		if C.vips_fix_float_tiff(img.VipsImage, &tmp) == 0 {
 			img.swapAndUnref(tmp)
 		} else {
-			log.Warnf("Can't fix TIFF: %s", Error())
+			slog.Warn("Can't fix TIFF", "error", Error())
 		}
 	}
 
@@ -808,7 +807,7 @@ func (img *Image) BackupColourProfile() {
 	if C.vips_icc_backup(img.VipsImage, &tmp) == 0 {
 		img.swapAndUnref(tmp)
 	} else {
-		log.Warningf("Can't backup ICC profile: %s", Error())
+		slog.Warn("Can't backup ICC profile", "error", Error())
 	}
 }
 
@@ -818,7 +817,7 @@ func (img *Image) RestoreColourProfile() {
 	if C.vips_icc_restore(img.VipsImage, &tmp) == 0 {
 		img.swapAndUnref(tmp)
 	} else {
-		log.Warningf("Can't restore ICC profile: %s", Error())
+		slog.Warn("Can't restore ICC profile", "error", Error())
 	}
 }
 
@@ -843,7 +842,7 @@ func (img *Image) ImportColourProfile() error {
 	if C.vips_icc_import_go(img.VipsImage, &tmp) == 0 {
 		img.swapAndUnref(tmp)
 	} else {
-		log.Warningf("Can't import ICC profile: %s", Error())
+		slog.Warn("Can't import ICC profile", "error", Error())
 	}
 
 	return nil
@@ -865,7 +864,7 @@ func (img *Image) ExportColourProfile() error {
 	if C.vips_icc_export_go(img.VipsImage, &tmp) == 0 {
 		img.swapAndUnref(tmp)
 	} else {
-		log.Warningf("Can't export ICC profile: %s", Error())
+		slog.Warn("Can't export ICC profile", "error", Error())
 	}
 
 	return nil
@@ -882,7 +881,7 @@ func (img *Image) ExportColourProfileToSRGB() error {
 	if C.vips_icc_export_srgb(img.VipsImage, &tmp) == 0 {
 		img.swapAndUnref(tmp)
 	} else {
-		log.Warningf("Can't export ICC profile: %s", Error())
+		slog.Warn("Can't export ICC profile", "error", Error())
 	}
 
 	return nil
@@ -901,7 +900,7 @@ func (img *Image) TransformColourProfileToSRGB() error {
 	if C.vips_icc_transform_srgb(img.VipsImage, &tmp) == 0 {
 		img.swapAndUnref(tmp)
 	} else {
-		log.Warningf("Can't transform ICC profile to sRGB: %s", Error())
+		slog.Warn("Can't transform ICC profile to sRGB", "error", Error())
 	}
 
 	return nil
@@ -913,7 +912,7 @@ func (img *Image) RemoveColourProfile() error {
 	if C.vips_icc_remove(img.VipsImage, &tmp) == 0 {
 		img.swapAndUnref(tmp)
 	} else {
-		log.Warningf("Can't remove ICC profile: %s", Error())
+		slog.Warn("Can't remove ICC profile", "error", Error())
 	}
 
 	return nil