Victor Sokolov 3 months ago
parent
commit
a6952ef62c

+ 4 - 0
CHANGELOG.v4.md

@@ -1,5 +1,9 @@
 # 📑 Changelog (version/4 dev)
 
+## 2021-10-29
+
+- Introduced `IMGPROXY_(ABS|GCS|S3|SWIFT)_(ALLOWED|DENIED)_BUCKETS` env vars
+
 ## 2021-10-20
 
 ### 🆕 Added

+ 4 - 0
env/desc.go

@@ -31,6 +31,10 @@ func DescribeByMap[T any](name string, m map[string]T) Desc {
 
 // Get returns the value of the env variable
 func (d Desc) Get() (string, bool) {
+	if len(d.Name) == 0 {
+		return "", false
+	}
+
 	value := os.Getenv(d.Name)
 	return value, len(value) > 0
 }
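The guard matters because env descriptions are now passed around as plain struct fields (see the `ConfigDesc` types later in this commit), so a zero-value `Desc` can legitimately reach `Get()`. A minimal sketch of the intended behavior, using only the `env` APIs visible in this diff:

```go
package main

import (
	"fmt"

	"github.com/imgproxy/imgproxy/v3/env"
)

func main() {
	// A zero-value Desc has no name; Get() now reports "not set"
	// instead of querying os.Getenv("").
	var empty env.Desc
	if _, ok := empty.Get(); !ok {
		fmt.Println("empty descriptor: value not set")
	}

	// A described variable is looked up as before.
	d := env.Describe("IMGPROXY_USE_S3", "boolean")
	if v, ok := d.Get(); ok {
		fmt.Println("IMGPROXY_USE_S3 =", v)
	}
}
```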

+ 1 - 2
fetcher/fetcher.go

@@ -8,7 +8,6 @@ import (
 	"time"
 
 	"github.com/imgproxy/imgproxy/v3/fetcher/transport"
-	"github.com/imgproxy/imgproxy/v3/fetcher/transport/common"
 	"github.com/imgproxy/imgproxy/v3/httpheaders"
 )
 
@@ -56,7 +55,7 @@ func (f *Fetcher) newHttpClient() *http.Client {
 
 // BuildRequest creates a new Request with the provided context, URL, headers, and cookie jar
 func (f *Fetcher) BuildRequest(ctx context.Context, url string, header http.Header, jar http.CookieJar) (*Request, error) {
-	url = common.EscapeURL(url)
+	url = transport.EscapeURL(url)
 
 	// Set request timeout and get cancel function
 	ctx, cancel := context.WithTimeout(ctx, f.config.DownloadTimeout)

+ 0 - 119
fetcher/transport/azure/azure_test.go

@@ -1,119 +0,0 @@
-package azure
-
-import (
-	"net/http"
-	"net/http/httptest"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/suite"
-
-	"github.com/imgproxy/imgproxy/v3/fetcher/transport/generichttp"
-	"github.com/imgproxy/imgproxy/v3/httpheaders"
-	"github.com/imgproxy/imgproxy/v3/logger"
-)
-
-type AzureTestSuite struct {
-	suite.Suite
-
-	server       *httptest.Server // TODO: use testutils.TestServer
-	transport    http.RoundTripper
-	etag         string
-	lastModified time.Time
-}
-
-func (s *AzureTestSuite) SetupSuite() {
-	data := make([]byte, 32)
-
-	logger.Mute()
-
-	s.etag = "testetag"
-	s.lastModified, _ = time.Parse(http.TimeFormat, "Wed, 21 Oct 2015 07:28:00 GMT")
-
-	s.server = httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
-		s.Equal("/test/foo/test.png", r.URL.Path)
-
-		rw.Header().Set(httpheaders.Etag, s.etag)
-		rw.Header().Set(httpheaders.LastModified, s.lastModified.Format(http.TimeFormat))
-		rw.WriteHeader(200)
-		rw.Write(data)
-	}))
-
-	config := NewDefaultConfig()
-	config.Endpoint = s.server.URL
-	config.Name = "testname"
-	config.Key = "dGVzdGtleQ=="
-
-	tc := generichttp.NewDefaultConfig()
-	tc.IgnoreSslVerification = true
-
-	trans, gerr := generichttp.New(false, &tc)
-	s.Require().NoError(gerr)
-
-	var err error
-	s.transport, err = New(&config, trans, "?")
-	s.Require().NoError(err)
-}
-
-func (s *AzureTestSuite) TearDownSuite() {
-	s.server.Close()
-	logger.Unmute()
-}
-
-func (s *AzureTestSuite) TestRoundTripWithETag() {
-	request, _ := http.NewRequest("GET", "abs://test/foo/test.png", nil)
-
-	response, err := s.transport.RoundTrip(request)
-	s.Require().NoError(err)
-	s.Require().Equal(200, response.StatusCode)
-	s.Require().Equal(s.etag, response.Header.Get(httpheaders.Etag))
-}
-
-func (s *AzureTestSuite) TestRoundTripWithIfNoneMatchReturns304() {
-	request, _ := http.NewRequest("GET", "abs://test/foo/test.png", nil)
-	request.Header.Set(httpheaders.IfNoneMatch, s.etag)
-
-	response, err := s.transport.RoundTrip(request)
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusNotModified, response.StatusCode)
-}
-
-func (s *AzureTestSuite) TestRoundTripWithUpdatedETagReturns200() {
-	request, _ := http.NewRequest("GET", "abs://test/foo/test.png", nil)
-	request.Header.Set(httpheaders.IfNoneMatch, s.etag+"_wrong")
-
-	response, err := s.transport.RoundTrip(request)
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusOK, response.StatusCode)
-}
-
-func (s *AzureTestSuite) TestRoundTripWithLastModifiedEnabled() {
-	request, _ := http.NewRequest("GET", "abs://test/foo/test.png", nil)
-
-	response, err := s.transport.RoundTrip(request)
-	s.Require().NoError(err)
-	s.Require().Equal(200, response.StatusCode)
-	s.Require().Equal(s.lastModified.Format(http.TimeFormat), response.Header.Get(httpheaders.LastModified))
-}
-
-func (s *AzureTestSuite) TestRoundTripWithIfModifiedSinceReturns304() {
-	request, _ := http.NewRequest("GET", "abs://test/foo/test.png", nil)
-	request.Header.Set(httpheaders.IfModifiedSince, s.lastModified.Format(http.TimeFormat))
-
-	response, err := s.transport.RoundTrip(request)
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusNotModified, response.StatusCode)
-}
-
-func (s *AzureTestSuite) TestRoundTripWithUpdatedLastModifiedReturns200() {
-	request, _ := http.NewRequest("GET", "abs://test/foo/test.png", nil)
-	request.Header.Set(httpheaders.IfModifiedSince, s.lastModified.Add(-24*time.Hour).Format(http.TimeFormat))
-
-	response, err := s.transport.RoundTrip(request)
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusOK, response.StatusCode)
-}
-
-func TestAzureTransport(t *testing.T) {
-	suite.Run(t, new(AzureTestSuite))
-}

+ 0 - 52
fetcher/transport/azure/config.go

@@ -1,52 +0,0 @@
-package azure
-
-import (
-	"errors"
-
-	"github.com/imgproxy/imgproxy/v3/ensure"
-	"github.com/imgproxy/imgproxy/v3/env"
-)
-
-var (
-	IMGPROXY_ABS_NAME     = env.Describe("IMGPROXY_ABS_NAME", "string")
-	IMGPROXY_ABS_ENDPOINT = env.Describe("IMGPROXY_ABS_ENDPOINT", "string")
-	IMGPROXY_ABS_KEY      = env.Describe("IMGPROXY_ABS_KEY", "string")
-)
-
-// Config holds the configuration for Azure Blob Storage transport
-type Config struct {
-	Name     string // Azure storage account name
-	Endpoint string // Azure Blob Storage endpoint URL
-	Key      string // Azure storage account key
-}
-
-// NewDefaultConfig returns a new default configuration for Azure Blob Storage transport
-func NewDefaultConfig() Config {
-	return Config{
-		Name:     "",
-		Endpoint: "",
-		Key:      "",
-	}
-}
-
-// LoadConfigFromEnv loads configuration from the global config package
-func LoadConfigFromEnv(c *Config) (*Config, error) {
-	c = ensure.Ensure(c, NewDefaultConfig)
-
-	err := errors.Join(
-		env.String(&c.Name, IMGPROXY_ABS_NAME),
-		env.String(&c.Endpoint, IMGPROXY_ABS_ENDPOINT),
-		env.String(&c.Key, IMGPROXY_ABS_KEY),
-	)
-
-	return c, err
-}
-
-// Validate checks if the configuration is valid
-func (c *Config) Validate() error {
-	if len(c.Name) == 0 {
-		return IMGPROXY_ABS_NAME.ErrorEmpty()
-	}
-
-	return nil
-}

+ 87 - 17
fetcher/transport/config.go

@@ -8,12 +8,12 @@ import (
 
 	"github.com/imgproxy/imgproxy/v3/ensure"
 	"github.com/imgproxy/imgproxy/v3/env"
-	"github.com/imgproxy/imgproxy/v3/fetcher/transport/azure"
-	"github.com/imgproxy/imgproxy/v3/fetcher/transport/fs"
-	"github.com/imgproxy/imgproxy/v3/fetcher/transport/gcs"
 	"github.com/imgproxy/imgproxy/v3/fetcher/transport/generichttp"
-	"github.com/imgproxy/imgproxy/v3/fetcher/transport/s3"
-	"github.com/imgproxy/imgproxy/v3/fetcher/transport/swift"
+	azure "github.com/imgproxy/imgproxy/v3/storage/abs"
+	"github.com/imgproxy/imgproxy/v3/storage/fs"
+	"github.com/imgproxy/imgproxy/v3/storage/gcs"
+	"github.com/imgproxy/imgproxy/v3/storage/s3"
+	"github.com/imgproxy/imgproxy/v3/storage/swift"
 )
 
 var (
@@ -22,6 +22,49 @@ var (
 	IMGPROXY_USE_S3                     = env.Describe("IMGPROXY_USE_S3", "boolean")
 	IMGPROXY_USE_SWIFT                  = env.Describe("IMGPROXY_USE_SWIFT", "boolean")
 	IMGPROXY_SOURCE_URL_QUERY_SEPARATOR = env.Describe("IMGPROXY_SOURCE_URL_QUERY_SEPARATOR", "string")
+
+	fsDesc = fs.ConfigDesc{
+		Root: env.Describe("IMGPROXY_LOCAL_FILESYSTEM_ROOT", "path"),
+	}
+
+	absConfigDesc = azure.ConfigDesc{
+		Name:           env.Describe("IMGPROXY_ABS_NAME", "string"),
+		Endpoint:       env.Describe("IMGPROXY_ABS_ENDPOINT", "string"),
+		Key:            env.Describe("IMGPROXY_ABS_KEY", "string"),
+		AllowedBuckets: env.Describe("IMGPROXY_ABS_ALLOWED_BUCKETS", "comma-separated list"),
+		DeniedBuckets:  env.Describe("IMGPROXY_ABS_DENIED_BUCKETS", "comma-separated list"),
+	}
+
+	gcsConfigDesc = gcs.ConfigDesc{
+		Key:            env.Describe("IMGPROXY_GCS_KEY", "string"),
+		Endpoint:       env.Describe("IMGPROXY_GCS_ENDPOINT", "string"),
+		AllowedBuckets: env.Describe("IMGPROXY_GCS_ALLOWED_BUCKETS", "comma-separated list"),
+		DeniedBuckets:  env.Describe("IMGPROXY_GCS_DENIED_BUCKETS", "comma-separated list"),
+	}
+
+	s3ConfigDesc = s3.ConfigDesc{
+		Region:                  env.Describe("IMGPROXY_S3_REGION", "string"),
+		Endpoint:                env.Describe("IMGPROXY_S3_ENDPOINT", "string"),
+		EndpointUsePathStyle:    env.Describe("IMGPROXY_S3_ENDPOINT_USE_PATH_STYLE", "boolean"),
+		AssumeRoleArn:           env.Describe("IMGPROXY_S3_ASSUME_ROLE_ARN", "string"),
+		AssumeRoleExternalID:    env.Describe("IMGPROXY_S3_ASSUME_ROLE_EXTERNAL_ID", "string"),
+		DecryptionClientEnabled: env.Describe("IMGPROXY_S3_DECRYPTION_CLIENT_ENABLED", "boolean"),
+		AllowedBuckets:          env.Describe("IMGPROXY_S3_ALLOWED_BUCKETS", "comma-separated list"),
+		DeniedBuckets:           env.Describe("IMGPROXY_S3_DENIED_BUCKETS", "comma-separated list"),
+	}
+
+	swiftConfigDesc = swift.ConfigDesc{
+		Username:       env.Describe("IMGPROXY_SWIFT_USERNAME", "string"),
+		APIKey:         env.Describe("IMGPROXY_SWIFT_API_KEY", "string"),
+		AuthURL:        env.Describe("IMGPROXY_SWIFT_AUTH_URL", "string"),
+		Domain:         env.Describe("IMGPROXY_SWIFT_DOMAIN", "string"),
+		Tenant:         env.Describe("IMGPROXY_SWIFT_TENANT", "string"),
+		AuthVersion:    env.Describe("IMGPROXY_SWIFT_AUTH_VERSION", "number"),
+		ConnectTimeout: env.Describe("IMGPROXY_SWIFT_CONNECT_TIMEOUT_SECONDS", "number"),
+		Timeout:        env.Describe("IMGPROXY_SWIFT_TIMEOUT_SECONDS", "number"),
+		AllowedBuckets: env.Describe("IMGPROXY_SWIFT_ALLOWED_BUCKETS", "comma-separated list"),
+		DeniedBuckets:  env.Describe("IMGPROXY_SWIFT_DENIED_BUCKETS", "comma-separated list"),
+	}
 )
 
 // Config represents configuration of the transport package
@@ -70,23 +113,23 @@ func LoadConfigFromEnv(c *Config) (*Config, error) {
 	c = ensure.Ensure(c, NewDefaultConfig)
 
 	_, genericErr := generichttp.LoadConfigFromEnv(&c.HTTP)
-	_, localErr := fs.LoadConfigFromEnv(&c.Local)
-	_, azureErr := azure.LoadConfigFromEnv(&c.ABS)
-	_, gcsErr := gcs.LoadConfigFromEnv(&c.GCS)
-	_, s3Err := s3.LoadConfigFromEnv(&c.S3)
-	_, swiftErr := swift.LoadConfigFromEnv(&c.Swift)
+	_, localErr := fs.LoadConfigFromEnv(fsDesc, &c.Local)
+	_, absErr := azure.LoadConfigFromEnv(absConfigDesc, &c.ABS)
+	_, gcsErr := gcs.LoadConfigFromEnv(gcsConfigDesc, &c.GCS)
+	_, s3Err := s3.LoadConfigFromEnv(s3ConfigDesc, &c.S3)
+	_, swiftErr := swift.LoadConfigFromEnv(swiftConfigDesc, &c.Swift)
 
 	err := errors.Join(
+		env.Bool(&c.ABSEnabled, IMGPROXY_USE_ABS),
+		env.Bool(&c.GCSEnabled, IMGPROXY_USE_GCS),
+		env.Bool(&c.S3Enabled, IMGPROXY_USE_S3),
+		env.Bool(&c.SwiftEnabled, IMGPROXY_USE_SWIFT),
 		genericErr,
 		localErr,
-		azureErr,
+		absErr,
 		gcsErr,
 		s3Err,
 		swiftErr,
-		env.Bool(&c.ABSEnabled, IMGPROXY_USE_ABS),
-		env.Bool(&c.GCSEnabled, IMGPROXY_USE_GCS),
-		env.Bool(&c.S3Enabled, IMGPROXY_USE_S3),
-		env.Bool(&c.SwiftEnabled, IMGPROXY_USE_SWIFT),
 	)
 
 	// empty value is a valid value for this separator, we can't rely on env.String,
@@ -99,6 +142,33 @@ func LoadConfigFromEnv(c *Config) (*Config, error) {
 }
 
 func (c *Config) Validate() error {
-	// We won't validate upstream config here: they might not be used
-	return nil
+	// Since all the nested configs are part of the base config,
+	// we need to forward validation calls downstream.
+	//
+	// We assume the transport uses all the enabled transports at
+	// once when created, so we make an exception here and call the
+	// specific validators one level up.
+	var errs []error
+
+	if c.Local.Root != "" {
+		errs = append(errs, c.Local.Validate())
+	}
+
+	if c.ABSEnabled {
+		errs = append(errs, c.ABS.Validate())
+	}
+
+	if c.GCSEnabled {
+		errs = append(errs, c.GCS.Validate())
+	}
+
+	if c.S3Enabled {
+		errs = append(errs, c.S3.Validate())
+	}
+
+	if c.SwiftEnabled {
+		errs = append(errs, c.Swift.Validate())
+	}
+
+	return errors.Join(errs...)
 }
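In other words, validation is now gated by the `IMGPROXY_USE_*` flags: every storage sub-config is loaded, but only the enabled ones are validated. A rough sketch of the resulting flow at the call site (passing `nil` is assumed to be handled by `ensure.Ensure`, as in the other loaders):

```go
package main

import (
	"log"

	"github.com/imgproxy/imgproxy/v3/fetcher/transport"
)

func main() {
	// Load the transport config from the environment; nil gets a
	// default config (assumption based on ensure.Ensure usage above).
	cfg, err := transport.LoadConfigFromEnv(nil)
	if err != nil {
		log.Fatal(err)
	}

	// Only enabled storages are validated, so an untouched S3 config
	// no longer fails startup when IMGPROXY_USE_S3 is false.
	if err := cfg.Validate(); err != nil {
		log.Fatal(err)
	}
}
```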

+ 0 - 151
fetcher/transport/fs/fs.go

@@ -1,151 +0,0 @@
-package fs
-
-import (
-	"crypto/md5"
-	"encoding/base64"
-	"fmt"
-	"io"
-	"io/fs"
-	"mime"
-	"net/http"
-	"os"
-	"path/filepath"
-	"strconv"
-	"strings"
-
-	"github.com/imgproxy/imgproxy/v3/fetcher/transport/common"
-	"github.com/imgproxy/imgproxy/v3/fetcher/transport/notmodified"
-	"github.com/imgproxy/imgproxy/v3/httpheaders"
-	"github.com/imgproxy/imgproxy/v3/httprange"
-)
-
-type transport struct {
-	fs             http.Dir
-	querySeparator string
-}
-
-func New(config *Config, querySeparator string) (transport, error) {
-	if err := config.Validate(); err != nil {
-		return transport{}, err
-	}
-
-	return transport{fs: http.Dir(config.Root), querySeparator: querySeparator}, nil
-}
-
-func (t transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
-	header := make(http.Header)
-
-	_, path, _ := common.GetBucketAndKey(req.URL, t.querySeparator)
-	path = "/" + path
-
-	f, err := t.fs.Open(path)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return respNotFound(req, fmt.Sprintf("%s doesn't exist", path)), nil
-		}
-		return nil, err
-	}
-
-	fi, err := f.Stat()
-	if err != nil {
-		return nil, err
-	}
-
-	if fi.IsDir() {
-		return respNotFound(req, fmt.Sprintf("%s is directory", path)), nil
-	}
-
-	statusCode := 200
-	size := fi.Size()
-	body := io.ReadCloser(f)
-
-	if mimetype := detectContentType(f, fi); len(mimetype) > 0 {
-		header.Set(httpheaders.ContentType, mimetype)
-	}
-	f.Seek(0, io.SeekStart)
-
-	start, end, err := httprange.Parse(req.Header.Get(httpheaders.Range))
-	switch {
-	case err != nil:
-		f.Close()
-		return httprange.InvalidHTTPRangeResponse(req), nil
-
-	case end != 0:
-		if end < 0 {
-			end = size - 1
-		}
-
-		f.Seek(start, io.SeekStart)
-
-		statusCode = http.StatusPartialContent
-		size = end - start + 1
-		body = &fileLimiter{f: f, left: int(size)}
-		header.Set(httpheaders.ContentRange, fmt.Sprintf("bytes %d-%d/%d", start, end, fi.Size()))
-
-	default:
-		etag := BuildEtag(path, fi)
-		header.Set(httpheaders.Etag, etag)
-
-		lastModified := fi.ModTime().Format(http.TimeFormat)
-		header.Set(httpheaders.LastModified, lastModified)
-	}
-
-	if resp := notmodified.Response(req, header); resp != nil {
-		f.Close()
-		return resp, nil
-	}
-
-	header.Set(httpheaders.AcceptRanges, "bytes")
-	header.Set(httpheaders.ContentLength, strconv.Itoa(int(size)))
-
-	return &http.Response{
-		StatusCode:    statusCode,
-		Proto:         "HTTP/1.0",
-		ProtoMajor:    1,
-		ProtoMinor:    0,
-		Header:        header,
-		ContentLength: size,
-		Body:          body,
-		Close:         true,
-		Request:       req,
-	}, nil
-}
-
-func BuildEtag(path string, fi fs.FileInfo) string {
-	tag := fmt.Sprintf("%s__%d__%d", path, fi.Size(), fi.ModTime().UnixNano())
-	hash := md5.Sum([]byte(tag))
-	return `"` + string(base64.RawURLEncoding.EncodeToString(hash[:])) + `"`
-}
-
-func respNotFound(req *http.Request, msg string) *http.Response {
-	return &http.Response{
-		StatusCode:    http.StatusNotFound,
-		Proto:         "HTTP/1.0",
-		ProtoMajor:    1,
-		ProtoMinor:    0,
-		Header:        http.Header{httpheaders.ContentType: {"text/plain"}},
-		ContentLength: int64(len(msg)),
-		Body:          io.NopCloser(strings.NewReader(msg)),
-		Close:         false,
-		Request:       req,
-	}
-}
-
-func detectContentType(f http.File, fi fs.FileInfo) string {
-	var (
-		tmp      [512]byte
-		mimetype string
-	)
-
-	if n, err := io.ReadFull(f, tmp[:]); err == nil {
-		mimetype = http.DetectContentType(tmp[:n])
-	}
-
-	if len(mimetype) == 0 || strings.HasPrefix(mimetype, "text/plain") || strings.HasPrefix(mimetype, "application/octet-stream") {
-		if m := mime.TypeByExtension(filepath.Ext(fi.Name())); len(m) > 0 {
-			mimetype = m
-		}
-	}
-
-	return mimetype
-}

+ 0 - 92
fetcher/transport/fs/fs_test.go

@@ -1,92 +0,0 @@
-package fs
-
-import (
-	"net/http"
-	"os"
-	"path/filepath"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/suite"
-
-	"github.com/imgproxy/imgproxy/v3/httpheaders"
-)
-
-type FsTestSuite struct {
-	suite.Suite
-
-	transport http.RoundTripper
-	etag      string
-	modTime   time.Time
-}
-
-func (s *FsTestSuite) SetupSuite() {
-	wd, err := os.Getwd()
-	s.Require().NoError(err)
-
-	fsRoot := filepath.Join(wd, "..", "..", "..", "testdata")
-
-	fi, err := os.Stat(filepath.Join(fsRoot, "test1.png"))
-	s.Require().NoError(err)
-
-	s.etag = BuildEtag("/test1.png", fi)
-	s.modTime = fi.ModTime()
-	s.transport, _ = New(&Config{Root: fsRoot}, "?")
-}
-
-func (s *FsTestSuite) TestRoundTripWithETagEnabled() {
-	request, _ := http.NewRequest("GET", "local:///test1.png", nil)
-
-	response, err := s.transport.RoundTrip(request)
-	s.Require().NoError(err)
-	s.Require().Equal(200, response.StatusCode)
-	s.Require().Equal(s.etag, response.Header.Get(httpheaders.Etag))
-}
-func (s *FsTestSuite) TestRoundTripWithIfNoneMatchReturns304() {
-	request, _ := http.NewRequest("GET", "local:///test1.png", nil)
-	request.Header.Set(httpheaders.IfNoneMatch, s.etag)
-
-	response, err := s.transport.RoundTrip(request)
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusNotModified, response.StatusCode)
-}
-
-func (s *FsTestSuite) TestRoundTripWithUpdatedETagReturns200() {
-	request, _ := http.NewRequest("GET", "local:///test1.png", nil)
-	request.Header.Set(httpheaders.IfNoneMatch, s.etag+"_wrong")
-
-	response, err := s.transport.RoundTrip(request)
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusOK, response.StatusCode)
-}
-
-func (s *FsTestSuite) TestRoundTripWithLastModifiedEnabledReturns200() {
-	request, _ := http.NewRequest("GET", "local:///test1.png", nil)
-
-	response, err := s.transport.RoundTrip(request)
-	s.Require().NoError(err)
-	s.Require().Equal(200, response.StatusCode)
-	s.Require().Equal(s.modTime.Format(http.TimeFormat), response.Header.Get(httpheaders.LastModified))
-}
-
-func (s *FsTestSuite) TestRoundTripWithIfModifiedSinceReturns304() {
-	request, _ := http.NewRequest("GET", "local:///test1.png", nil)
-	request.Header.Set(httpheaders.IfModifiedSince, s.modTime.Format(http.TimeFormat))
-
-	response, err := s.transport.RoundTrip(request)
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusNotModified, response.StatusCode)
-}
-
-func (s *FsTestSuite) TestRoundTripWithUpdatedLastModifiedReturns200() {
-	request, _ := http.NewRequest("GET", "local:///test1.png", nil)
-	request.Header.Set(httpheaders.IfModifiedSince, s.modTime.Add(-time.Minute).Format(http.TimeFormat))
-
-	response, err := s.transport.RoundTrip(request)
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusOK, response.StatusCode)
-}
-
-func TestFSTransport(t *testing.T) {
-	suite.Run(t, new(FsTestSuite))
-}

+ 0 - 44
fetcher/transport/gcs/config.go

@@ -1,44 +0,0 @@
-package gcs
-
-import (
-	"errors"
-
-	"github.com/imgproxy/imgproxy/v3/ensure"
-	"github.com/imgproxy/imgproxy/v3/env"
-)
-
-var (
-	IMGPROXY_GCS_KEY      = env.Describe("IMGPROXY_GCS_KEY", "string")
-	IMGPROXY_GCS_ENDPOINT = env.Describe("IMGPROXY_GCS_ENDPOINT", "string")
-)
-
-// Config holds the configuration for Google Cloud Storage transport
-type Config struct {
-	Key      string // Google Cloud Storage service account key
-	Endpoint string // Google Cloud Storage endpoint URL
-}
-
-// NewDefaultConfig returns a new default configuration for Google Cloud Storage transport
-func NewDefaultConfig() Config {
-	return Config{
-		Key:      "",
-		Endpoint: "",
-	}
-}
-
-// LoadConfigFromEnv loads configuration from the global config package
-func LoadConfigFromEnv(c *Config) (*Config, error) {
-	c = ensure.Ensure(c, NewDefaultConfig)
-
-	err := errors.Join(
-		env.String(&c.Key, IMGPROXY_GCS_KEY),
-		env.String(&c.Endpoint, IMGPROXY_GCS_ENDPOINT),
-	)
-
-	return c, err
-}
-
-// Validate checks the configuration for errors
-func (c *Config) Validate() error {
-	return nil
-}

+ 0 - 197
fetcher/transport/gcs/gcs.go

@@ -1,197 +0,0 @@
-package gcs
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"net/http"
-	"strconv"
-	"strings"
-
-	"cloud.google.com/go/storage"
-	"github.com/pkg/errors"
-	"google.golang.org/api/option"
-	raw "google.golang.org/api/storage/v1"
-	htransport "google.golang.org/api/transport/http"
-
-	"github.com/imgproxy/imgproxy/v3/fetcher/transport/common"
-	"github.com/imgproxy/imgproxy/v3/fetcher/transport/notmodified"
-	"github.com/imgproxy/imgproxy/v3/httpheaders"
-	"github.com/imgproxy/imgproxy/v3/httprange"
-	"github.com/imgproxy/imgproxy/v3/ierrors"
-)
-
-// For tests
-var noAuth bool = false
-
-type transport struct {
-	client      *storage.Client
-	qsSeparator string
-}
-
-func buildHTTPClient(config *Config, trans *http.Transport, opts ...option.ClientOption) (*http.Client, error) {
-	if err := config.Validate(); err != nil {
-		return nil, err
-	}
-
-	htrans, err := htransport.NewTransport(context.Background(), trans, opts...)
-	if err != nil {
-		return nil, errors.Wrap(err, "error creating GCS transport")
-	}
-
-	return &http.Client{Transport: htrans}, nil
-}
-
-func New(config *Config, trans *http.Transport, sep string) (http.RoundTripper, error) {
-	var client *storage.Client
-
-	opts := []option.ClientOption{
-		option.WithScopes(raw.DevstorageReadOnlyScope),
-	}
-
-	if len(config.Key) > 0 {
-		opts = append(opts, option.WithCredentialsJSON([]byte(config.Key)))
-	}
-
-	if len(config.Endpoint) > 0 {
-		opts = append(opts, option.WithEndpoint(config.Endpoint))
-	}
-
-	if noAuth {
-		opts = append(opts, option.WithoutAuthentication())
-	}
-
-	httpClient, err := buildHTTPClient(config, trans, opts...)
-	if err != nil {
-		return nil, err
-	}
-	opts = append(opts, option.WithHTTPClient(httpClient))
-
-	client, err = storage.NewClient(context.Background(), opts...)
-
-	if err != nil {
-		return nil, ierrors.Wrap(err, 0, ierrors.WithPrefix("Can't create GCS client"))
-	}
-
-	return transport{client, sep}, nil
-}
-
-func (t transport) RoundTrip(req *http.Request) (*http.Response, error) {
-	bucket, key, query := common.GetBucketAndKey(req.URL, t.qsSeparator)
-
-	if len(bucket) == 0 || len(key) == 0 {
-		body := strings.NewReader("Invalid GCS URL: bucket name or object key is empty")
-		return &http.Response{
-			StatusCode:    http.StatusNotFound,
-			Proto:         "HTTP/1.0",
-			ProtoMajor:    1,
-			ProtoMinor:    0,
-			Header:        http.Header{"Content-Type": {"text/plain"}},
-			ContentLength: int64(body.Len()),
-			Body:          io.NopCloser(body),
-			Close:         false,
-			Request:       req,
-		}, nil
-	}
-
-	bkt := t.client.Bucket(bucket)
-	obj := bkt.Object(key)
-
-	if g, err := strconv.ParseInt(query, 10, 64); err == nil && g > 0 {
-		obj = obj.Generation(g)
-	}
-
-	var (
-		reader     *storage.Reader
-		statusCode int
-		size       int64
-	)
-
-	header := make(http.Header)
-
-	if r := req.Header.Get(httpheaders.Range); len(r) != 0 {
-		start, end, err := httprange.Parse(r)
-		if err != nil {
-			return httprange.InvalidHTTPRangeResponse(req), nil
-		}
-
-		if end != 0 {
-			length := end - start + 1
-			if end < 0 {
-				length = -1
-			}
-
-			reader, err = obj.NewRangeReader(req.Context(), start, length)
-			if err != nil {
-				return nil, err
-			}
-
-			if end < 0 || end >= reader.Attrs.Size {
-				end = reader.Attrs.Size - 1
-			}
-
-			size = end - reader.Attrs.StartOffset + 1
-
-			statusCode = http.StatusPartialContent
-			header.Set(httpheaders.ContentRange, fmt.Sprintf("bytes %d-%d/%d", reader.Attrs.StartOffset, end, reader.Attrs.Size))
-		}
-	}
-
-	// We haven't initialize reader yet, this means that we need non-ranged reader
-	if reader == nil {
-		attrs, aerr := obj.Attrs(req.Context())
-		if aerr != nil {
-			return handleError(req, aerr)
-		}
-		header.Set(httpheaders.Etag, attrs.Etag)
-		header.Set(httpheaders.LastModified, attrs.Updated.Format(http.TimeFormat))
-
-		if resp := notmodified.Response(req, header); resp != nil {
-			return resp, nil
-		}
-
-		var err error
-		reader, err = obj.NewReader(req.Context())
-		if err != nil {
-			return handleError(req, err)
-		}
-
-		statusCode = 200
-		size = reader.Attrs.Size
-	}
-
-	header.Set(httpheaders.AcceptRanges, "bytes")
-	header.Set(httpheaders.ContentLength, strconv.Itoa(int(size)))
-	header.Set(httpheaders.ContentType, reader.Attrs.ContentType)
-	header.Set(httpheaders.CacheControl, reader.Attrs.CacheControl)
-
-	return &http.Response{
-		StatusCode:    statusCode,
-		Proto:         "HTTP/1.0",
-		ProtoMajor:    1,
-		ProtoMinor:    0,
-		Header:        header,
-		ContentLength: reader.Attrs.Size,
-		Body:          reader,
-		Close:         true,
-		Request:       req,
-	}, nil
-}
-
-func handleError(req *http.Request, err error) (*http.Response, error) {
-	if err != storage.ErrBucketNotExist && err != storage.ErrObjectNotExist {
-		return nil, err
-	}
-
-	return &http.Response{
-		StatusCode:    http.StatusNotFound,
-		Proto:         "HTTP/1.0",
-		ProtoMajor:    1,
-		ProtoMinor:    0,
-		Header:        http.Header{httpheaders.ContentType: {"text/plain"}},
-		ContentLength: int64(len(err.Error())),
-		Body:          io.NopCloser(strings.NewReader(err.Error())),
-		Close:         false,
-		Request:       req,
-	}, nil
-}

+ 0 - 53
fetcher/transport/notmodified/notmodified.go

@@ -1,53 +0,0 @@
-package notmodified
-
-import (
-	"net/http"
-	"time"
-
-	"github.com/imgproxy/imgproxy/v3/httpheaders"
-)
-
-func Response(req *http.Request, header http.Header) *http.Response {
-	etag := header.Get(httpheaders.Etag)
-	ifNoneMatch := req.Header.Get(httpheaders.IfNoneMatch)
-
-	if len(ifNoneMatch) > 0 && ifNoneMatch == etag {
-		return response(req, header)
-	}
-
-	lastModifiedRaw := header.Get(httpheaders.LastModified)
-	if len(lastModifiedRaw) == 0 {
-		return nil
-	}
-	ifModifiedSinceRaw := req.Header.Get(httpheaders.IfModifiedSince)
-	if len(ifModifiedSinceRaw) == 0 {
-		return nil
-	}
-	lastModified, err := time.Parse(http.TimeFormat, lastModifiedRaw)
-	if err != nil {
-		return nil
-	}
-	ifModifiedSince, err := time.Parse(http.TimeFormat, ifModifiedSinceRaw)
-	if err != nil {
-		return nil
-	}
-	if !ifModifiedSince.Before(lastModified) {
-		return response(req, header)
-	}
-
-	return nil
-}
-
-func response(req *http.Request, header http.Header) *http.Response {
-	return &http.Response{
-		StatusCode:    http.StatusNotModified,
-		Proto:         "HTTP/1.0",
-		ProtoMajor:    1,
-		ProtoMinor:    0,
-		Header:        header,
-		ContentLength: 0,
-		Body:          nil,
-		Close:         false,
-		Request:       req,
-	}
-}

+ 37 - 0
fetcher/transport/round_tripper.go

@@ -0,0 +1,37 @@
+package transport
+
+import (
+	"net/http"
+
+	"github.com/imgproxy/imgproxy/v3/storage"
+)
+
+// RoundTripper wraps a storage.Reader with http.RoundTripper
+type RoundTripper struct {
+	http.RoundTripper
+
+	storage        storage.Reader
+	querySeparator string
+}
+
+// NewRoundTripper creates a new RoundTripper
+func NewRoundTripper(storage storage.Reader, querySeparator string) *RoundTripper {
+	return &RoundTripper{
+		storage:        storage,
+		querySeparator: querySeparator,
+	}
+}
+
+// RoundTrip implements the http.RoundTripper interface
+func (t RoundTripper) RoundTrip(req *http.Request) (resp *http.Response, err error) {
+	// Parse container and object name from the URL
+	container, key, query := GetBucketAndKey(req.URL, t.querySeparator)
+
+	// Call GetObject
+	r, err := t.storage.GetObject(req.Context(), req.Header, container, key, query)
+	if err != nil {
+		return nil, err
+	}
+
+	return r.Response(req), nil
+}
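This is the adapter that lets each backend keep a plain `GetObject` interface while `net/http` still sees an `http.RoundTripper`. A sketch of the wiring, mirroring what `registerAllProtocols` does later in this commit; the filesystem root below is hypothetical:

```go
package main

import (
	"log"
	"net/http"

	"github.com/imgproxy/imgproxy/v3/fetcher/transport"
	fsStorage "github.com/imgproxy/imgproxy/v3/storage/fs"
)

func main() {
	// NOTE: the root path is hypothetical; it must exist for
	// Config.Validate to pass.
	st, err := fsStorage.New(&fsStorage.Config{Root: "/path/to/images"}, "?")
	if err != nil {
		log.Fatal(err)
	}

	// Any storage.Reader can be exposed through a custom URL scheme.
	t := &http.Transport{}
	t.RegisterProtocol("local", transport.NewRoundTripper(st, "?"))

	client := &http.Client{Transport: t}

	// The URL is parsed into (bucket, key, query) by the round tripper
	// and forwarded to GetObject.
	resp, err := client.Get("local:///test1.png")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
}
```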

+ 99 - 0
fetcher/transport/round_tripper_test.go

@@ -0,0 +1,99 @@
+package transport
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/suite"
+
+	"github.com/imgproxy/imgproxy/v3/httpheaders"
+	"github.com/imgproxy/imgproxy/v3/storage/response"
+)
+
+// mockStorage is a simple mock implementation of storage.Reader
+type mockStorage struct {
+	getObject func(ctx context.Context, reqHeader http.Header, bucket, key, query string) (*response.Object, error)
+}
+
+func (m *mockStorage) GetObject(ctx context.Context, reqHeader http.Header, bucket, key, query string) (*response.Object, error) {
+	if m.getObject == nil {
+		return nil, nil
+	}
+
+	return m.getObject(ctx, reqHeader, bucket, key, query)
+}
+
+type RoundTripperTestSuite struct {
+	suite.Suite
+}
+
+func (s *RoundTripperTestSuite) TestRoundTripperSuccess() {
+	// Create mock storage that returns a successful response
+	mock := &mockStorage{
+		getObject: func(ctx context.Context, reqHeader http.Header, bucket, key, query string) (*response.Object, error) {
+			s.Equal("test-bucket", bucket)
+			s.Equal("test-key", key)
+			s.Equal("version=123", query)
+
+			headers := make(http.Header)
+			headers.Set(httpheaders.ContentType, "image/png")
+			headers.Set(httpheaders.Etag, "test-etag")
+
+			body := io.NopCloser(strings.NewReader("test data"))
+			return response.NewOK(headers, body), nil
+		},
+	}
+
+	rt := NewRoundTripper(mock, "?")
+
+	// Create a test request
+	req, err := http.NewRequest("GET", EscapeURL("s3://test-bucket/test-key?version=123"), nil)
+	s.Require().NoError(err)
+
+	// Execute RoundTrip
+	resp, err := rt.RoundTrip(req)
+	s.Require().NoError(err)
+	s.Require().NotNil(resp)
+
+	// Verify response
+	s.Equal(http.StatusOK, resp.StatusCode)
+	s.Equal("image/png", resp.Header.Get(httpheaders.ContentType))
+	s.Equal("test-etag", resp.Header.Get(httpheaders.Etag))
+
+	// Read and verify body
+	defer resp.Body.Close()
+	data, err := io.ReadAll(resp.Body)
+	s.Require().NoError(err)
+	s.Equal("test data", string(data))
+}
+
+func (s *RoundTripperTestSuite) TestRoundTripperNotFound() {
+	// Create mock storage that returns 404
+	mock := &mockStorage{
+		getObject: func(ctx context.Context, reqHeader http.Header, bucket, key, query string) (*response.Object, error) {
+			return response.NewNotFound("object not found"), nil
+		},
+	}
+
+	rt := NewRoundTripper(mock, "?")
+
+	req, err := http.NewRequest("GET", "s3://bucket/key", nil)
+	s.Require().NoError(err)
+
+	resp, err := rt.RoundTrip(req)
+	s.Require().NoError(err)
+	s.Require().NotNil(resp)
+
+	if resp.Body != nil {
+		resp.Body.Close()
+	}
+
+	s.Equal(http.StatusNotFound, resp.StatusCode)
+}
+
+func TestRoundTripper(t *testing.T) {
+	suite.Run(t, new(RoundTripperTestSuite))
+}

+ 0 - 60
fetcher/transport/s3/config.go

@@ -1,60 +0,0 @@
-package s3
-
-import (
-	"errors"
-
-	"github.com/imgproxy/imgproxy/v3/ensure"
-	"github.com/imgproxy/imgproxy/v3/env"
-)
-
-var (
-	IMGPROXY_S3_REGION                    = env.Describe("IMGPROXY_S3_REGION", "string")
-	IMGPROXY_S3_ENDPOINT                  = env.Describe("IMGPROXY_S3_ENDPOINT", "string")
-	IMGPROXY_S3_ENDPOINT_USE_PATH_STYLE   = env.Describe("IMGPROXY_S3_ENDPOINT_USE_PATH_STYLE", "boolean")
-	IMGPROXY_S3_ASSUME_ROLE_ARN           = env.Describe("IMGPROXY_S3_ASSUME_ROLE_ARN", "string")
-	IMGPROXY_S3_ASSUME_ROLE_EXTERNAL_ID   = env.Describe("IMGPROXY_S3_ASSUME_ROLE_EXTERNAL_ID", "string")
-	IMGPROXY_S3_DECRYPTION_CLIENT_ENABLED = env.Describe("IMGPROXY_S3_DECRYPTION_CLIENT_ENABLED", "boolean")
-)
-
-// Config holds the configuration for S3 transport
-type Config struct {
-	Region                  string // AWS region for S3 (default: "")
-	Endpoint                string // Custom endpoint for S3 (default: "")
-	EndpointUsePathStyle    bool   // Use path-style URLs for S3 (default: true)
-	AssumeRoleArn           string // ARN for assuming an AWS role (default: "")
-	AssumeRoleExternalID    string // External ID for assuming an AWS role (default: "")
-	DecryptionClientEnabled bool   // Enables S3 decryption client (default: false)
-}
-
-// NewDefaultConfig returns a new default configuration for S3 transport
-func NewDefaultConfig() Config {
-	return Config{
-		Region:                  "",
-		Endpoint:                "",
-		EndpointUsePathStyle:    true,
-		AssumeRoleArn:           "",
-		AssumeRoleExternalID:    "",
-		DecryptionClientEnabled: false,
-	}
-}
-
-// LoadConfigFromEnv loads configuration from the global config package
-func LoadConfigFromEnv(c *Config) (*Config, error) {
-	c = ensure.Ensure(c, NewDefaultConfig)
-
-	err := errors.Join(
-		env.String(&c.Region, IMGPROXY_S3_REGION),
-		env.String(&c.Endpoint, IMGPROXY_S3_ENDPOINT),
-		env.Bool(&c.EndpointUsePathStyle, IMGPROXY_S3_ENDPOINT_USE_PATH_STYLE),
-		env.String(&c.AssumeRoleArn, IMGPROXY_S3_ASSUME_ROLE_ARN),
-		env.String(&c.AssumeRoleExternalID, IMGPROXY_S3_ASSUME_ROLE_EXTERNAL_ID),
-		env.Bool(&c.DecryptionClientEnabled, IMGPROXY_S3_DECRYPTION_CLIENT_ENABLED),
-	)
-
-	return c, err
-}
-
-// Validate checks the configuration for errors
-func (c *Config) Validate() error {
-	return nil
-}

+ 0 - 69
fetcher/transport/swift/config.go

@@ -1,69 +0,0 @@
-package swift
-
-import (
-	"errors"
-	"time"
-
-	"github.com/imgproxy/imgproxy/v3/ensure"
-	"github.com/imgproxy/imgproxy/v3/env"
-)
-
-var (
-	IMGPROXY_SWIFT_USERNAME                = env.Describe("IMGPROXY_SWIFT_USERNAME", "string")
-	IMGPROXY_SWIFT_API_KEY                 = env.Describe("IMGPROXY_SWIFT_API_KEY", "string")
-	IMGPROXY_SWIFT_AUTH_URL                = env.Describe("IMGPROXY_SWIFT_AUTH_URL", "string")
-	IMGPROXY_SWIFT_DOMAIN                  = env.Describe("IMGPROXY_SWIFT_DOMAIN", "string")
-	IMGPROXY_SWIFT_TENANT                  = env.Describe("IMGPROXY_SWIFT_TENANT", "string")
-	IMGPROXY_SWIFT_AUTH_VERSION            = env.Describe("IMGPROXY_SWIFT_AUTH_VERSION", "number")
-	IMGPROXY_SWIFT_CONNECT_TIMEOUT_SECONDS = env.Describe("IMGPROXY_SWIFT_CONNECT_TIMEOUT_SECONDS", "number")
-	IMGPROXY_SWIFT_TIMEOUT_SECONDS         = env.Describe("IMGPROXY_SWIFT_TIMEOUT_SECONDS", "number")
-)
-
-// Config holds the configuration for Swift transport
-type Config struct {
-	Username       string        // Username for Swift authentication
-	APIKey         string        // API key for Swift authentication
-	AuthURL        string        // Authentication URL for Swift
-	Domain         string        // Domain for Swift authentication
-	Tenant         string        // Tenant for Swift authentication
-	AuthVersion    int           // Authentication version for Swift
-	ConnectTimeout time.Duration // Connection timeout for Swift
-	Timeout        time.Duration // Request timeout for Swift
-}
-
-// NewDefaultConfig returns a new default configuration for Swift transport
-func NewDefaultConfig() Config {
-	return Config{
-		Username:       "",
-		APIKey:         "",
-		AuthURL:        "",
-		Domain:         "",
-		Tenant:         "",
-		AuthVersion:    0,
-		ConnectTimeout: 10 * time.Second,
-		Timeout:        60 * time.Second,
-	}
-}
-
-// LoadConfigFromEnv loads configuration from the global config package
-func LoadConfigFromEnv(c *Config) (*Config, error) {
-	c = ensure.Ensure(c, NewDefaultConfig)
-
-	err := errors.Join(
-		env.String(&c.Username, IMGPROXY_SWIFT_USERNAME),
-		env.String(&c.APIKey, IMGPROXY_SWIFT_API_KEY),
-		env.String(&c.AuthURL, IMGPROXY_SWIFT_AUTH_URL),
-		env.String(&c.Domain, IMGPROXY_SWIFT_DOMAIN),
-		env.String(&c.Tenant, IMGPROXY_SWIFT_TENANT),
-		env.Int(&c.AuthVersion, IMGPROXY_SWIFT_AUTH_VERSION),
-		env.Duration(&c.ConnectTimeout, IMGPROXY_SWIFT_CONNECT_TIMEOUT_SECONDS),
-		env.Duration(&c.Timeout, IMGPROXY_SWIFT_TIMEOUT_SECONDS),
-	)
-
-	return c, err
-}
-
-// Validate checks the configuration for errors
-func (c *Config) Validate() error {
-	return nil
-}

+ 0 - 123
fetcher/transport/swift/swift.go

@@ -1,123 +0,0 @@
-package swift
-
-import (
-	"context"
-	"errors"
-	"io"
-	"net/http"
-	"strings"
-
-	"github.com/ncw/swift/v2"
-
-	"github.com/imgproxy/imgproxy/v3/fetcher/transport/common"
-	"github.com/imgproxy/imgproxy/v3/fetcher/transport/notmodified"
-	"github.com/imgproxy/imgproxy/v3/ierrors"
-)
-
-type transport struct {
-	con            *swift.Connection
-	querySeparator string
-}
-
-func New(config *Config, trans *http.Transport, querySeparator string) (http.RoundTripper, error) {
-	if err := config.Validate(); err != nil {
-		return nil, err
-	}
-
-	c := &swift.Connection{
-		UserName:       config.Username,
-		ApiKey:         config.APIKey,
-		AuthUrl:        config.AuthURL,
-		AuthVersion:    config.AuthVersion,
-		Domain:         config.Domain, // v3 auth only
-		Tenant:         config.Tenant, // v2 auth only
-		Timeout:        config.Timeout,
-		ConnectTimeout: config.ConnectTimeout,
-		Transport:      trans,
-	}
-
-	ctx := context.Background()
-
-	err := c.Authenticate(ctx)
-
-	if err != nil {
-		return nil, ierrors.Wrap(err, 0, ierrors.WithPrefix("swift authentication error"))
-	}
-
-	return transport{con: c, querySeparator: querySeparator}, nil
-}
-
-func (t transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
-	container, objectName, _ := common.GetBucketAndKey(req.URL, t.querySeparator)
-
-	if len(container) == 0 || len(objectName) == 0 {
-		body := strings.NewReader("Invalid Swift URL: container name or object name is empty")
-		return &http.Response{
-			StatusCode:    http.StatusNotFound,
-			Proto:         "HTTP/1.0",
-			ProtoMajor:    1,
-			ProtoMinor:    0,
-			Header:        http.Header{"Content-Type": {"text/plain"}},
-			ContentLength: int64(body.Len()),
-			Body:          io.NopCloser(body),
-			Close:         false,
-			Request:       req,
-		}, nil
-	}
-
-	reqHeaders := make(swift.Headers)
-	if r := req.Header.Get("Range"); len(r) > 0 {
-		reqHeaders["Range"] = r
-	}
-
-	object, objectHeaders, err := t.con.ObjectOpen(req.Context(), container, objectName, false, reqHeaders)
-
-	header := make(http.Header)
-
-	if err != nil {
-		if errors.Is(err, swift.ObjectNotFound) || errors.Is(err, swift.ContainerNotFound) {
-			return &http.Response{
-				StatusCode:    http.StatusNotFound,
-				Proto:         "HTTP/1.0",
-				ProtoMajor:    1,
-				ProtoMinor:    0,
-				Header:        http.Header{"Content-Type": {"text/plain"}},
-				ContentLength: int64(len(err.Error())),
-				Body:          io.NopCloser(strings.NewReader(err.Error())),
-				Close:         false,
-				Request:       req,
-			}, nil
-		}
-
-		return nil, ierrors.Wrap(err, 0, ierrors.WithPrefix("error opening object"))
-	}
-
-	if etag, ok := objectHeaders["Etag"]; ok {
-		header.Set("ETag", etag)
-	}
-
-	if lastModified, ok := objectHeaders["Last-Modified"]; ok {
-		header.Set("Last-Modified", lastModified)
-	}
-
-	if resp := notmodified.Response(req, header); resp != nil {
-		object.Close()
-		return resp, nil
-	}
-
-	for k, v := range objectHeaders {
-		header.Set(k, v)
-	}
-
-	return &http.Response{
-		Status:     "200 OK",
-		StatusCode: 200,
-		Proto:      "HTTP/1.0",
-		ProtoMajor: 1,
-		ProtoMinor: 0,
-		Header:     header,
-		Body:       object,
-		Close:      true,
-		Request:    req,
-	}, nil
-}

+ 23 - 15
fetcher/transport/transport.go

@@ -3,15 +3,17 @@
 package transport
 
 import (
+	"context"
+	"log/slog"
 	"net/http"
 
 	"github.com/imgproxy/imgproxy/v3/fetcher/transport/generichttp"
 
-	azureTransport "github.com/imgproxy/imgproxy/v3/fetcher/transport/azure"
-	fsTransport "github.com/imgproxy/imgproxy/v3/fetcher/transport/fs"
-	gcsTransport "github.com/imgproxy/imgproxy/v3/fetcher/transport/gcs"
-	s3Transport "github.com/imgproxy/imgproxy/v3/fetcher/transport/s3"
-	swiftTransport "github.com/imgproxy/imgproxy/v3/fetcher/transport/swift"
+	absStorage "github.com/imgproxy/imgproxy/v3/storage/abs"
+	fsStorage "github.com/imgproxy/imgproxy/v3/storage/fs"
+	gcsStorage "github.com/imgproxy/imgproxy/v3/storage/gcs"
+	s3Storage "github.com/imgproxy/imgproxy/v3/storage/s3"
+	swiftStorage "github.com/imgproxy/imgproxy/v3/storage/swift"
 )
 
 // Transport is a wrapper around http.Transport which allows tracking registered protocols
@@ -61,6 +63,7 @@ func (t *Transport) Transport() *http.Transport {
 func (t *Transport) RegisterProtocol(scheme string, rt http.RoundTripper) {
 	t.transport.RegisterProtocol(scheme, rt)
 	t.schemes[scheme] = struct{}{}
+	slog.Info("Scheme registered", "scheme", scheme)
 }
 
 // IsProtocolRegistered checks if a protocol is registered in the transport
@@ -79,43 +82,48 @@ func (t *Transport) registerAllProtocols() error {
 	}
 
 	if t.config.Local.Root != "" {
-		p, err := fsTransport.New(&t.config.Local, sep)
+		tr, err := fsStorage.New(&t.config.Local, sep)
 		if err != nil {
 			return err
 		}
-		t.RegisterProtocol("local", p)
+		t.RegisterProtocol("local", NewRoundTripper(tr, sep))
 	}
 
 	if t.config.S3Enabled {
-		tr, err := s3Transport.New(&t.config.S3, transp, sep)
+		tr, err := s3Storage.New(&t.config.S3, transp)
 		if err != nil {
 			return err
 		}
-		t.RegisterProtocol("s3", tr)
+		t.RegisterProtocol("s3", NewRoundTripper(tr, sep))
 	}
 
 	if t.config.GCSEnabled {
-		tr, err := gcsTransport.New(&t.config.GCS, transp, sep)
+		tr, err := gcsStorage.New(&t.config.GCS, transp, true)
 		if err != nil {
 			return err
 		}
-		t.RegisterProtocol("gs", tr)
+		t.RegisterProtocol("gs", NewRoundTripper(tr, sep))
 	}
 
 	if t.config.ABSEnabled {
-		tr, err := azureTransport.New(&t.config.ABS, transp, sep)
+		tr, err := absStorage.New(&t.config.ABS, transp)
 		if err != nil {
 			return err
 		}
-		t.RegisterProtocol("abs", tr)
+		t.RegisterProtocol("abs", NewRoundTripper(tr, sep))
 	}
 
 	if t.config.SwiftEnabled {
-		tr, err := swiftTransport.New(&t.config.Swift, transp, sep)
+		tr, err := swiftStorage.New(
+			context.Background(),
+			&t.config.Swift,
+			transp,
+		)
 		if err != nil {
 			return err
 		}
-		t.RegisterProtocol("swift", tr)
+
+		t.RegisterProtocol("swift", NewRoundTripper(tr, sep))
 	}
 
 	return nil

+ 2 - 1
fetcher/transport/common/common.go → fetcher/transport/url.go

@@ -1,4 +1,4 @@
-package common
+package transport
 
 import (
 	"net/url"
@@ -26,6 +26,7 @@ func EscapeURL(u string) string {
 	return u
 }
 
+// GetBucketAndKey extracts the bucket, key, and query from the provided URL.
 func GetBucketAndKey(u *url.URL, sep string) (bucket, key, query string) {
 	bucket = u.Host
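A rough usage sketch of the two helpers that now live directly in the transport package; only `EscapeURL`'s signature, the doc comment, and the `bucket = u.Host` assignment are visible in this hunk, so the example asserts nothing beyond the bucket part:

```go
package main

import (
	"fmt"
	"log"
	"net/url"

	"github.com/imgproxy/imgproxy/v3/fetcher/transport"
)

func main() {
	// EscapeURL is applied by the fetcher before the URL is parsed.
	u, err := url.Parse(transport.EscapeURL("s3://my-bucket/images/cat.png"))
	if err != nil {
		log.Fatal(err)
	}

	// The host becomes the bucket; the key/query split depends on the
	// configured separator and is left to the function itself.
	bucket, key, query := transport.GetBucketAndKey(u, "?")
	fmt.Println(bucket, key, query) // bucket == "my-bucket"
}
```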
 

+ 77 - 78
fetcher/transport/azure/azure.go → storage/abs/abs.go

@@ -1,12 +1,11 @@
 package azure
 
 import (
+	"context"
 	"fmt"
-	"io"
 	"net/http"
 	"net/url"
 	"strconv"
-	"strings"
 
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
@@ -15,22 +14,20 @@ import (
 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
 
-	"github.com/imgproxy/imgproxy/v3/fetcher/transport/common"
-	"github.com/imgproxy/imgproxy/v3/fetcher/transport/notmodified"
 	"github.com/imgproxy/imgproxy/v3/httpheaders"
 	"github.com/imgproxy/imgproxy/v3/httprange"
+	"github.com/imgproxy/imgproxy/v3/storage/common"
+	"github.com/imgproxy/imgproxy/v3/storage/response"
 )
 
-type transport struct {
-	client         *azblob.Client
-	querySeparator string
+// Storage represents Azure Storage
+type Storage struct {
+	config *Config
+	client *azblob.Client
 }
 
-func New(config *Config, trans *http.Transport, querySeparator string) (http.RoundTripper, error) {
-	if err := config.Validate(); err != nil {
-		return nil, err
-	}
-
+// New creates a new Azure Storage instance
+func New(config *Config, trans *http.Transport) (*Storage, error) {
 	var (
 		client                 *azblob.Client
 		sharedKeyCredential    *azblob.SharedKeyCredential
@@ -38,6 +35,10 @@ func New(config *Config, trans *http.Transport, querySeparator string) (http.Rou
 		err                    error
 	)
 
+	if err = config.Validate(); err != nil {
+		return nil, err
+	}
+
 	endpoint := config.Endpoint
 	if len(endpoint) == 0 {
 		endpoint = fmt.Sprintf("https://%s.blob.core.windows.net", config.Name)
@@ -74,73 +75,47 @@ func New(config *Config, trans *http.Transport, querySeparator string) (http.Rou
 		return nil, err
 	}
 
-	return transport{client, querySeparator}, nil
+	return &Storage{config, client}, nil
 }
 
-func (t transport) RoundTrip(req *http.Request) (*http.Response, error) {
-	container, key, _ := common.GetBucketAndKey(req.URL, t.querySeparator)
-
+// GetObject retrieves an object from Azure Blob Storage
+func (s *Storage) GetObject(
+	ctx context.Context,
+	reqHeader http.Header,
+	container, key, _ string,
+) (*response.Object, error) {
+	// If either container or object name is empty, return 404
 	if len(container) == 0 || len(key) == 0 {
-		body := strings.NewReader("Invalid ABS URL: container name or object key is empty")
-		return &http.Response{
-			StatusCode:    http.StatusNotFound,
-			Proto:         "HTTP/1.0",
-			ProtoMajor:    1,
-			ProtoMinor:    0,
-			Header:        http.Header{httpheaders.ContentType: {"text/plain"}},
-			ContentLength: int64(body.Len()),
-			Body:          io.NopCloser(body),
-			Close:         false,
-			Request:       req,
-		}, nil
-	}
-
-	statusCode := http.StatusOK
+		return response.NewNotFound(
+			"invalid Azure Storage URL: container name or object key is empty",
+		), nil
+	}
+
+	// Check if access to the container is allowed
+	if !common.IsBucketAllowed(container, s.config.AllowedBuckets, s.config.DeniedBuckets) {
+		return nil, fmt.Errorf("access to the Azure Storage container %s is denied", container)
+	}
 
 	header := make(http.Header)
 	opts := &blob.DownloadStreamOptions{}
 
-	if r := req.Header.Get(httpheaders.Range); len(r) != 0 {
-		start, end, err := httprange.Parse(r)
-		if err != nil {
-			return httprange.InvalidHTTPRangeResponse(req), nil
-		}
-
-		if end != 0 {
-			length := end - start + 1
-			if end <= 0 {
-				length = blockblob.CountToEnd
-			}
-
-			opts.Range = blob.HTTPRange{
-				Offset: start,
-				Count:  length,
-			}
-		}
-
-		statusCode = http.StatusPartialContent
+	// Check if this is a partial request
+	partial, err := parseRangeHeader(opts, reqHeader)
+	if err != nil {
+		return response.NewInvalidRange(), nil
 	}
 
-	result, err := t.client.DownloadStream(req.Context(), container, key, opts)
+	// Open the object
+	result, err := s.client.DownloadStream(ctx, container, key, opts)
 	if err != nil {
 		if azError, ok := err.(*azcore.ResponseError); !ok || azError.StatusCode < 100 || azError.StatusCode == 301 {
 			return nil, err
 		} else {
-			body := strings.NewReader(azError.Error())
-			return &http.Response{
-				StatusCode:    azError.StatusCode,
-				Proto:         "HTTP/1.0",
-				ProtoMajor:    1,
-				ProtoMinor:    0,
-				Header:        http.Header{"Content-Type": {"text/plain"}},
-				ContentLength: int64(body.Len()),
-				Body:          io.NopCloser(body),
-				Close:         false,
-				Request:       req,
-			}, nil
+			return response.NewError(azError.StatusCode, azError.Error()), nil
 		}
 	}
 
+	// Pass through etag and last modified
 	if result.ETag != nil {
 		etag := string(*result.ETag)
 		header.Set(httpheaders.Etag, etag)
@@ -151,18 +126,19 @@ func (t transport) RoundTrip(req *http.Request) (*http.Response, error) {
 		header.Set(httpheaders.LastModified, lastModified)
 	}
 
-	if resp := notmodified.Response(req, header); resp != nil {
+	// Return early if the response was not modified
+	if !partial && common.IsNotModified(reqHeader, header) {
 		if result.Body != nil {
 			result.Body.Close()
 		}
-		return resp, nil
+
+		return response.NewNotModified(header), nil
 	}
 
+	// Pass through important headers
 	header.Set(httpheaders.AcceptRanges, "bytes")
 
-	contentLength := int64(0)
 	if result.ContentLength != nil {
-		contentLength = *result.ContentLength
 		header.Set(httpheaders.ContentLength, strconv.FormatInt(*result.ContentLength, 10))
 	}
 
@@ -178,15 +154,38 @@ func (t transport) RoundTrip(req *http.Request) (*http.Response, error) {
 		header.Set(httpheaders.CacheControl, *result.CacheControl)
 	}
 
-	return &http.Response{
-		StatusCode:    statusCode,
-		Proto:         "HTTP/1.0",
-		ProtoMajor:    1,
-		ProtoMinor:    0,
-		Header:        header,
-		ContentLength: contentLength,
-		Body:          result.Body,
-		Close:         true,
-		Request:       req,
-	}, nil
+	// If the request was partial, respond with partial content
+	if partial {
+		return response.NewPartialContent(header, result.Body), nil
+	}
+
+	return response.NewOK(header, result.Body), nil
+}
+
+func parseRangeHeader(opts *blob.DownloadStreamOptions, reqHeader http.Header) (bool, error) {
+	r := reqHeader.Get(httpheaders.Range)
+	if len(r) == 0 {
+		return false, nil
+	}
+
+	start, end, err := httprange.Parse(r)
+	if err != nil {
+		return false, err
+	}
+
+	if end == 0 {
+		return false, nil
+	}
+
+	length := end - start + 1
+	if end <= 0 {
+		length = blockblob.CountToEnd
+	}
+
+	opts.Range = blob.HTTPRange{
+		Offset: start,
+		Count:  length,
+	}
+
+	return true, nil
 }

+ 145 - 0
storage/abs/abs_test.go

@@ -0,0 +1,145 @@
+package azure
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/suite"
+
+	"github.com/imgproxy/imgproxy/v3/fetcher/transport/generichttp"
+	"github.com/imgproxy/imgproxy/v3/httpheaders"
+	"github.com/imgproxy/imgproxy/v3/logger"
+	"github.com/imgproxy/imgproxy/v3/storage"
+)
+
+type AbsTest struct {
+	suite.Suite
+
+	server       *httptest.Server // TODO: use testutils.TestServer
+	storage      storage.Reader
+	etag         string
+	lastModified time.Time
+}
+
+func (s *AbsTest) SetupSuite() {
+	data := make([]byte, 32)
+
+	logger.Mute()
+
+	s.etag = "testetag"
+	s.lastModified, _ = time.Parse(http.TimeFormat, "Wed, 21 Oct 2015 07:28:00 GMT")
+
+	s.server = httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+		s.Equal("/test/foo/test.png", r.URL.Path)
+
+		rw.Header().Set(httpheaders.Etag, s.etag)
+		rw.Header().Set(httpheaders.LastModified, s.lastModified.Format(http.TimeFormat))
+		rw.WriteHeader(200)
+		rw.Write(data)
+	}))
+
+	config := NewDefaultConfig()
+	config.Endpoint = s.server.URL
+	config.Name = "testname"
+	config.Key = "dGVzdGtleQ=="
+
+	c := generichttp.NewDefaultConfig()
+	c.IgnoreSslVerification = true
+
+	trans, err := generichttp.New(false, &c)
+	s.Require().NoError(err)
+
+	s.storage, err = New(&config, trans)
+	s.Require().NoError(err)
+}
+
+func (s *AbsTest) TearDownSuite() {
+	s.server.Close()
+	logger.Unmute()
+}
+
+func (s *AbsTest) TestRoundTripWithETag() {
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
+	s.Require().NoError(err)
+	s.Require().Equal(200, response.Status)
+	s.Require().Equal(s.etag, response.Headers.Get(httpheaders.Etag))
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
+}
+
+func (s *AbsTest) TestRoundTripWithIfNoneMatchReturns304() {
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+	reqHeader.Set(httpheaders.IfNoneMatch, s.etag)
+
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
+	s.Require().NoError(err)
+	s.Require().Equal(http.StatusNotModified, response.Status)
+
+	if response.Body != nil {
+		response.Body.Close()
+	}
+}
+
+func (s *AbsTest) TestRoundTripWithUpdatedETagReturns200() {
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+	reqHeader.Set(httpheaders.IfNoneMatch, s.etag+"_wrong")
+
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
+	s.Require().NoError(err)
+	s.Require().Equal(http.StatusOK, response.Status)
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
+}
+
+func (s *AbsTest) TestRoundTripWithLastModifiedEnabled() {
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
+	s.Require().NoError(err)
+	s.Require().Equal(200, response.Status)
+	s.Require().Equal(s.lastModified.Format(http.TimeFormat), response.Headers.Get(httpheaders.LastModified))
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
+}
+
+func (s *AbsTest) TestRoundTripWithIfModifiedSinceReturns304() {
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+	reqHeader.Set(httpheaders.IfModifiedSince, s.lastModified.Format(http.TimeFormat))
+
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
+	s.Require().NoError(err)
+	s.Require().Equal(http.StatusNotModified, response.Status)
+
+	if response.Body != nil {
+		response.Body.Close()
+	}
+}
+
+func (s *AbsTest) TestRoundTripWithUpdatedLastModifiedReturns200() {
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+	reqHeader.Set(httpheaders.IfModifiedSince, s.lastModified.Add(-24*time.Hour).Format(http.TimeFormat))
+
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
+	s.Require().NoError(err)
+	s.Require().Equal(http.StatusOK, response.Status)
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
+}
+
+func TestAzureTransport(t *testing.T) {
+	suite.Run(t, new(AbsTest))
+}

+ 67 - 0
storage/abs/config.go

@@ -0,0 +1,67 @@
+package azure
+
+import (
+	"errors"
+
+	"github.com/imgproxy/imgproxy/v3/ensure"
+	"github.com/imgproxy/imgproxy/v3/env"
+)
+
+// ConfigDesc holds the configuration descriptions for
+// Azure Blob Storage transport
+type ConfigDesc struct {
+	Name           env.Desc
+	Endpoint       env.Desc
+	Key            env.Desc
+	AllowedBuckets env.Desc
+	DeniedBuckets  env.Desc
+}
+
+// Config holds the configuration for Azure Blob Storage transport
+type Config struct {
+	Name           string   // Azure storage account name
+	Endpoint       string   // Azure Blob Storage endpoint URL
+	Key            string   // Azure storage account key
+	ReadOnly       bool     // Read-only access
+	AllowedBuckets []string // List of allowed buckets (containers)
+	DeniedBuckets  []string // List of denied buckets (containers)
+	desc           ConfigDesc
+}
+
+// NewDefaultConfig returns a new default configuration for Azure Blob Storage transport
+func NewDefaultConfig() Config {
+	return Config{
+		Name:           "",
+		Endpoint:       "",
+		Key:            "",
+		ReadOnly:       true,
+		AllowedBuckets: nil,
+		DeniedBuckets:  nil,
+	}
+}
+
+// LoadConfigFromEnv loads configuration from the environment using the provided descriptions
+func LoadConfigFromEnv(desc ConfigDesc, c *Config) (*Config, error) {
+	c = ensure.Ensure(c, NewDefaultConfig)
+
+	err := errors.Join(
+		env.String(&c.Name, desc.Name),
+		env.String(&c.Endpoint, desc.Endpoint),
+		env.String(&c.Key, desc.Key),
+		env.StringSlice(&c.AllowedBuckets, desc.AllowedBuckets),
+		env.StringSlice(&c.DeniedBuckets, desc.DeniedBuckets),
+	)
+
+	c.desc = desc
+
+	return c, err
+}
+
+// Validate checks if the configuration is valid
+func (c *Config) Validate() error {
+	if len(c.Name) == 0 {
+		return c.desc.Name.ErrorEmpty()
+	}
+
+	return nil
+}
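The point of `ConfigDesc` is that the storage package no longer hard-codes its environment variable names; the caller (here, `fetcher/transport/config.go`) supplies them. A sketch of standalone usage, assuming `nil` is acceptable for the config argument as in the other `LoadConfigFromEnv` helpers:

```go
package main

import (
	"log"

	"github.com/imgproxy/imgproxy/v3/env"
	azure "github.com/imgproxy/imgproxy/v3/storage/abs"
)

func main() {
	// Environment variable names are injected from the outside.
	desc := azure.ConfigDesc{
		Name:           env.Describe("IMGPROXY_ABS_NAME", "string"),
		Endpoint:       env.Describe("IMGPROXY_ABS_ENDPOINT", "string"),
		Key:            env.Describe("IMGPROXY_ABS_KEY", "string"),
		AllowedBuckets: env.Describe("IMGPROXY_ABS_ALLOWED_BUCKETS", "comma-separated list"),
		DeniedBuckets:  env.Describe("IMGPROXY_ABS_DENIED_BUCKETS", "comma-separated list"),
	}

	// nil config is assumed to be filled in by ensure.Ensure.
	cfg, err := azure.LoadConfigFromEnv(desc, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Validate reports the descriptor-specific error when the account
	// name is missing.
	if err := cfg.Validate(); err != nil {
		log.Fatal(err)
	}
}
```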

+ 18 - 0
storage/common/lists.go

@@ -0,0 +1,18 @@
+package common
+
+import (
+	"slices"
+)
+
+// IsBucketAllowed checks if the provided bucket is allowed based on the allowed and denied bucket lists.
+func IsBucketAllowed(bucket string, allowedBuckets, deniedBuckets []string) bool {
+	if len(allowedBuckets) > 0 && !slices.Contains(allowedBuckets, bucket) {
+		return false
+	}
+
+	if slices.Contains(deniedBuckets, bucket) {
+		return false
+	}
+
+	return true
+}
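A quick illustration of the semantics: an empty allow list means everything is allowed unless explicitly denied, while a non-empty allow list is exclusive. The bucket names below are made up:

```go
package main

import (
	"fmt"

	"github.com/imgproxy/imgproxy/v3/storage/common"
)

func main() {
	// Hypothetical bucket names for illustration only.
	allowed := []string{"public-images", "thumbnails"}
	denied := []string{"secrets"}

	fmt.Println(common.IsBucketAllowed("public-images", allowed, denied)) // true
	fmt.Println(common.IsBucketAllowed("secrets", nil, denied))           // false: explicitly denied
	fmt.Println(common.IsBucketAllowed("staging", allowed, nil))          // false: not in the allow list
	fmt.Println(common.IsBucketAllowed("anything", nil, nil))             // true: no lists configured
}
```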

+ 45 - 0
storage/common/notmodified.go

@@ -0,0 +1,45 @@
+package common
+
+import (
+	"net/http"
+	"time"
+
+	"github.com/imgproxy/imgproxy/v3/httpheaders"
+)
+
+// IsNotModified returns true if a file was not modified according to
+// request/response headers
+func IsNotModified(reqHeader http.Header, header http.Header) bool {
+	etag := header.Get(httpheaders.Etag)
+	ifNoneMatch := reqHeader.Get(httpheaders.IfNoneMatch)
+
+	if len(ifNoneMatch) > 0 && ifNoneMatch == etag {
+		return true
+	}
+
+	lastModifiedRaw := header.Get(httpheaders.LastModified)
+	if len(lastModifiedRaw) == 0 {
+		return false
+	}
+
+	ifModifiedSinceRaw := reqHeader.Get(httpheaders.IfModifiedSince)
+	if len(ifModifiedSinceRaw) == 0 {
+		return false
+	}
+
+	lastModified, err := time.Parse(http.TimeFormat, lastModifiedRaw)
+	if err != nil {
+		return false
+	}
+
+	ifModifiedSince, err := time.Parse(http.TimeFormat, ifModifiedSinceRaw)
+	if err != nil {
+		return false
+	}
+
+	if !ifModifiedSince.Before(lastModified) {
+		return true
+	}
+
+	return false
+}
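A small sketch of how a storage uses this for conditional GETs, following the header handling shown in `storage/abs/abs.go` above:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/imgproxy/imgproxy/v3/httpheaders"
	"github.com/imgproxy/imgproxy/v3/storage/common"
)

func main() {
	// Headers the client sent with the request.
	reqHeader := make(http.Header)
	reqHeader.Set(httpheaders.IfNoneMatch, `"abc123"`)

	// Headers describing the stored object.
	objHeader := make(http.Header)
	objHeader.Set(httpheaders.Etag, `"abc123"`)

	// The ETag matches, so the storage can answer with
	// response.NewNotModified(objHeader) and skip streaming the body.
	fmt.Println(common.IsNotModified(reqHeader, objHeader)) // true
}
```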

+ 16 - 9
fetcher/transport/fs/config.go → storage/fs/config.go

@@ -8,37 +8,44 @@ import (
 	"github.com/imgproxy/imgproxy/v3/env"
 )
 
-var (
-	IMGPROXY_LOCAL_FILESYSTEM_ROOT = env.Describe("IMGPROXY_LOCAL_FILESYSTEM_ROOT", "path")
-)
+// ConfigDesc holds the configuration descriptions for
+// local file system storage
+type ConfigDesc struct {
+	Root env.Desc
+}
 
 // Config holds the configuration for local file system transport
 type Config struct {
-	Root string // Root directory for the local file system transport
+	Root     string // Root directory for the local file system transport
+	ReadOnly bool   // Read-only access
+	desc     ConfigDesc
 }
 
 // NewDefaultConfig returns a new default configuration for local file system transport
 func NewDefaultConfig() Config {
 	return Config{
-		Root: "",
+		Root:     "",
+		ReadOnly: true,
 	}
 }
 
 // LoadConfigFromEnv loads configuration from the global config package
-func LoadConfigFromEnv(c *Config) (*Config, error) {
+func LoadConfigFromEnv(desc ConfigDesc, c *Config) (*Config, error) {
 	c = ensure.Ensure(c, NewDefaultConfig)
 
-	err := env.String(&c.Root, IMGPROXY_LOCAL_FILESYSTEM_ROOT)
+	err := env.String(&c.Root, desc.Root)
+
+	c.desc = desc
 
 	return c, err
 }
 
 // Validate checks if the configuration is valid
 func (c *Config) Validate() error {
-	e := IMGPROXY_LOCAL_FILESYSTEM_ROOT
+	e := c.desc.Root
 
 	if c.Root == "" {
-		return e.ErrorEmpty()
+		return nil
 	}
 
 	stat, err := os.Stat(c.Root)

+ 0 - 0
fetcher/transport/fs/file_limiter.go → storage/fs/file_limiter.go


+ 157 - 0
storage/fs/fs.go

@@ -0,0 +1,157 @@
+package fs
+
+import (
+	"context"
+	"crypto/md5"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"io/fs"
+	"mime"
+	"net/http"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"github.com/imgproxy/imgproxy/v3/httpheaders"
+	"github.com/imgproxy/imgproxy/v3/httprange"
+	"github.com/imgproxy/imgproxy/v3/storage/common"
+	"github.com/imgproxy/imgproxy/v3/storage/response"
+)
+
+// Storage represents fs file storage
+type Storage struct {
+	fs             http.Dir
+	querySeparator string
+}
+
+// New creates a new Storage instance.
+func New(config *Config, qsSeparator string) (*Storage, error) {
+	if err := config.Validate(); err != nil {
+		return nil, err
+	}
+
+	return &Storage{fs: http.Dir(config.Root), querySeparator: qsSeparator}, nil
+}
+
+// GetObject retrieves an object from file system.
+func (s *Storage) GetObject(
+	ctx context.Context,
+	reqHeader http.Header,
+	_, name, _ string,
+) (*response.Object, error) {
+	// If the object name is empty, return 404
+	if len(name) == 0 {
+		return response.NewNotFound(
+			"invalid FS Storage URL: object name is empty",
+		), nil
+	}
+
+	name = "/" + name
+
+	// check that file exists
+	f, err := s.fs.Open(name)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return response.NewNotFound(fmt.Sprintf("%s doesn't exist", name)), nil
+		}
+
+		return nil, err
+	}
+
+	// check that file is not a directory
+	fi, err := f.Stat()
+	if err != nil {
+		return nil, err
+	}
+
+	if fi.IsDir() {
+		return response.NewNotFound(fmt.Sprintf("%s is directory", name)), nil
+	}
+
+	// file basic properties
+	size := fi.Size()
+	body := io.ReadCloser(f)
+
+	// result headers
+	header := make(http.Header)
+
+	// set default headers
+	header.Set(httpheaders.AcceptRanges, "bytes")
+
+	// try to detect content type from magic bytes or extension
+	if mimetype := detectContentType(f, fi); len(mimetype) > 0 {
+		header.Set(httpheaders.ContentType, mimetype)
+	}
+
+	// calculate Etag and Last-Modified date
+	etag := buildEtag(name, fi)
+	lastModified := fi.ModTime().Format(http.TimeFormat)
+
+	// try requested range
+	start, end, err := httprange.Parse(reqHeader.Get(httpheaders.Range))
+	switch {
+	case err != nil:
+		f.Close()
+		return response.NewInvalidRange(), nil
+
+	// Range requested: partial content should be returned
+	case end != 0:
+		if end < 0 {
+			end = size - 1
+		}
+
+		f.Seek(start, io.SeekStart)
+
+		size = end - start + 1
+		body = &fileLimiter{f: f, left: int(size)}
+		header.Set(httpheaders.ContentLength, strconv.Itoa(int(size)))
+		header.Set(httpheaders.ContentRange, fmt.Sprintf("bytes %d-%d/%d", start, end, fi.Size()))
+
+		return response.NewPartialContent(header, body), nil
+
+	// Full object requested
+	default:
+		header.Set(httpheaders.Etag, etag)
+		header.Set(httpheaders.LastModified, lastModified)
+	}
+
+	// Content length of the full object
+	header.Set(httpheaders.ContentLength, strconv.Itoa(int(size)))
+
+	// If the file was not modified, don't return a reader
+	if common.IsNotModified(reqHeader, header) {
+		f.Close()
+		return response.NewNotModified(header), nil
+	}
+
+	return response.NewOK(header, body), nil
+}
+
+func buildEtag(path string, fi fs.FileInfo) string {
+	tag := fmt.Sprintf("%s__%d__%d", path, fi.Size(), fi.ModTime().UnixNano())
+	hash := md5.Sum([]byte(tag))
+	return `"` + string(base64.RawURLEncoding.EncodeToString(hash[:])) + `"`
+}
+
+// detectContentType detects the content type of a file by mime or extension
+func detectContentType(f http.File, fi fs.FileInfo) string {
+	var (
+		tmp      [512]byte
+		mimetype string
+	)
+
+	if n, err := io.ReadFull(f, tmp[:]); err == nil || err == io.ErrUnexpectedEOF {
+		mimetype = http.DetectContentType(tmp[:n])
+	}
+
+	f.Seek(0, io.SeekStart) // rewind file position
+
+	if len(mimetype) == 0 || strings.HasPrefix(mimetype, "text/plain") || strings.HasPrefix(mimetype, "application/octet-stream") {
+		if m := mime.TypeByExtension(filepath.Ext(fi.Name())); len(m) > 0 {
+			mimetype = m
+		}
+	}
+
+	return mimetype
+}
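
As a sketch of how the range handling above is meant to be consumed (the root path and object name are placeholders, and ctx comes from the caller); for the fs storage the bucket argument is unused, so it is passed empty:

    // openRange is a hypothetical caller that requests the first KiB of an object.
    func openRange(ctx context.Context, root string) (*response.Object, error) {
        st, err := New(&Config{Root: root}, "?")
        if err != nil {
            return nil, err
        }

        reqHeader := make(http.Header)
        reqHeader.Set(httpheaders.Range, "bytes=0-1023")

        // The result is expected to have Status http.StatusPartialContent,
        // with Content-Range and Content-Length set.
        return st.GetObject(ctx, reqHeader, "", "photos/cat.png", "")
    }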

+ 119 - 0
storage/fs/fs_test.go

@@ -0,0 +1,119 @@
+package fs
+
+import (
+	"net/http"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/suite"
+
+	"github.com/imgproxy/imgproxy/v3/httpheaders"
+	"github.com/imgproxy/imgproxy/v3/storage"
+	"github.com/imgproxy/imgproxy/v3/testutil"
+)
+
+type FsTestSuite struct {
+	suite.Suite
+
+	storage storage.Reader
+	etag    string
+	modTime time.Time
+}
+
+func (s *FsTestSuite) SetupSuite() {
+	tdp := testutil.NewTestDataProvider(s.T)
+	fsRoot := tdp.Root()
+
+	fi, err := os.Stat(filepath.Join(fsRoot, "test1.png"))
+	s.Require().NoError(err)
+
+	s.etag = buildEtag("/test1.png", fi)
+	s.modTime = fi.ModTime()
+
+	s.storage, _ = New(&Config{Root: fsRoot}, "?")
+}
+
+func (s *FsTestSuite) TestRoundTripWithETagEnabled() {
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+
+	response, err := s.storage.GetObject(ctx, reqHeader, "", "test1.png", "")
+	s.Require().NoError(err)
+	s.Require().Equal(200, response.Status)
+	s.Require().Equal(s.etag, response.Headers.Get(httpheaders.Etag))
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
+}
+func (s *FsTestSuite) TestRoundTripWithIfNoneMatchReturns304() {
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+	reqHeader.Set(httpheaders.IfNoneMatch, s.etag)
+
+	response, err := s.storage.GetObject(ctx, reqHeader, "", "test1.png", "")
+	s.Require().NoError(err)
+	s.Require().Equal(http.StatusNotModified, response.Status)
+
+	if response.Body != nil {
+		response.Body.Close()
+	}
+}
+
+func (s *FsTestSuite) TestRoundTripWithUpdatedETagReturns200() {
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+	reqHeader.Set(httpheaders.IfNoneMatch, s.etag+"_wrong")
+
+	response, err := s.storage.GetObject(ctx, reqHeader, "", "test1.png", "")
+	s.Require().NoError(err)
+	s.Require().Equal(http.StatusOK, response.Status)
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
+}
+
+func (s *FsTestSuite) TestRoundTripWithLastModifiedEnabledReturns200() {
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+
+	response, err := s.storage.GetObject(ctx, reqHeader, "", "test1.png", "")
+	s.Require().NoError(err)
+	s.Require().Equal(200, response.Status)
+	s.Require().Equal(s.modTime.Format(http.TimeFormat), response.Headers.Get(httpheaders.LastModified))
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
+}
+
+func (s *FsTestSuite) TestRoundTripWithIfModifiedSinceReturns304() {
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+	reqHeader.Set(httpheaders.IfModifiedSince, s.modTime.Format(http.TimeFormat))
+
+	response, err := s.storage.GetObject(ctx, reqHeader, "", "test1.png", "")
+	s.Require().NoError(err)
+	s.Require().Equal(http.StatusNotModified, response.Status)
+
+	if response.Body != nil {
+		response.Body.Close()
+	}
+}
+
+func (s *FsTestSuite) TestRoundTripWithUpdatedLastModifiedReturns200() {
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+	reqHeader.Set(httpheaders.IfModifiedSince, s.modTime.Add(-time.Minute).Format(http.TimeFormat))
+
+	response, err := s.storage.GetObject(ctx, reqHeader, "", "test1.png", "")
+	s.Require().NoError(err)
+	s.Require().Equal(http.StatusOK, response.Status)
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
+}
+
+func TestFSTransport(t *testing.T) {
+	suite.Run(t, new(FsTestSuite))
+}

+ 59 - 0
storage/gcs/config.go

@@ -0,0 +1,59 @@
+package gcs
+
+import (
+	"errors"
+
+	"github.com/imgproxy/imgproxy/v3/ensure"
+	"github.com/imgproxy/imgproxy/v3/env"
+)
+
+// ConfigDesc holds the configuration descriptions
+// for Google Cloud Storage
+type ConfigDesc struct {
+	Key            env.Desc
+	Endpoint       env.Desc
+	AllowedBuckets env.Desc
+	DeniedBuckets  env.Desc
+}
+
+// Config holds the configuration for Google Cloud Storage
+type Config struct {
+	Key            string   // Google Cloud Storage service account key
+	Endpoint       string   // Google Cloud Storage endpoint URL
+	ReadOnly       bool     // Read-only access
+	AllowedBuckets []string // List of allowed buckets
+	DeniedBuckets  []string // List of denied buckets
+	desc           ConfigDesc
+}
+
+// NewDefaultConfig returns a new default configuration for Google Cloud Storage
+func NewDefaultConfig() Config {
+	return Config{
+		Key:            "",
+		Endpoint:       "",
+		ReadOnly:       true,
+		AllowedBuckets: nil,
+		DeniedBuckets:  nil,
+	}
+}
+
+// LoadConfigFromEnv loads configuration from the global config package
+func LoadConfigFromEnv(desc ConfigDesc, c *Config) (*Config, error) {
+	c = ensure.Ensure(c, NewDefaultConfig)
+
+	err := errors.Join(
+		env.String(&c.Key, desc.Key),
+		env.String(&c.Endpoint, desc.Endpoint),
+		env.StringSlice(&c.AllowedBuckets, desc.AllowedBuckets),
+		env.StringSlice(&c.DeniedBuckets, desc.DeniedBuckets),
+	)
+
+	c.desc = desc
+
+	return c, err
+}
+
+// Validate checks the configuration for errors
+func (c *Config) Validate() error {
+	return nil
+}

+ 196 - 0
storage/gcs/gcs.go

@@ -0,0 +1,196 @@
+package gcs
+
+import (
+	"context"
+	"fmt"
+	"log/slog"
+	"net/http"
+	"strconv"
+
+	"cloud.google.com/go/storage"
+	"github.com/imgproxy/imgproxy/v3/httpheaders"
+	"github.com/imgproxy/imgproxy/v3/httprange"
+	"github.com/imgproxy/imgproxy/v3/ierrors"
+	"github.com/imgproxy/imgproxy/v3/storage/common"
+	"github.com/imgproxy/imgproxy/v3/storage/response"
+	"github.com/pkg/errors"
+	"google.golang.org/api/option"
+	raw "google.golang.org/api/storage/v1"
+	htransport "google.golang.org/api/transport/http"
+)
+
+// Storage represents Google Cloud Storage implementation
+type Storage struct {
+	config *Config
+	client *storage.Client
+}
+
+// New creates a new Storage instance.
+func New(
+	config *Config,
+	trans *http.Transport,
+	auth bool, // use authentication, should be false in tests
+) (*Storage, error) {
+	var client *storage.Client
+
+	if err := config.Validate(); err != nil {
+		return nil, err
+	}
+
+	opts := []option.ClientOption{
+		option.WithScopes(raw.DevstorageReadOnlyScope),
+	}
+
+	if !config.ReadOnly {
+		opts = append(opts, option.WithScopes(raw.DevstorageReadWriteScope))
+	}
+
+	if len(config.Key) > 0 {
+		opts = append(opts, option.WithCredentialsJSON([]byte(config.Key)))
+	}
+
+	if len(config.Endpoint) > 0 {
+		opts = append(opts, option.WithEndpoint(config.Endpoint))
+	}
+
+	if !auth {
+		slog.Warn("GCS storage: authentication disabled")
+		opts = append(opts, option.WithoutAuthentication())
+	}
+
+	htrans, err := htransport.NewTransport(context.TODO(), trans, opts...)
+	if err != nil {
+		return nil, errors.Wrap(err, "error creating GCS transport")
+	}
+
+	httpClient := &http.Client{Transport: htrans}
+	opts = append(opts, option.WithHTTPClient(httpClient))
+
+	client, err = storage.NewClient(context.Background(), opts...)
+
+	if err != nil {
+		return nil, ierrors.Wrap(err, 0, ierrors.WithPrefix("Can't create GCS client"))
+	}
+
+	return &Storage{config, client}, nil
+}
+
+// GetObject retrieves an object from Google Cloud Storage
+func (s *Storage) GetObject(
+	ctx context.Context,
+	reqHeader http.Header,
+	bucket, key, query string,
+) (*response.Object, error) {
+	// If either bucket or object key is empty, return 404
+	if len(bucket) == 0 || len(key) == 0 {
+		return response.NewNotFound(
+			"invalid GCS Storage URL: bucket name or object key are empty",
+		), nil
+	}
+
+	// Check if access to the bucket is allowed
+	if !common.IsBucketAllowed(bucket, s.config.AllowedBuckets, s.config.DeniedBuckets) {
+		return nil, fmt.Errorf("access to the GCS bucket %s is denied", bucket)
+	}
+
+	var (
+		reader *storage.Reader
+		size   int64
+	)
+
+	bkt := s.client.Bucket(bucket)
+	obj := bkt.Object(key)
+
+	if g, err := strconv.ParseInt(query, 10, 64); err == nil && g > 0 {
+		obj = obj.Generation(g)
+	}
+
+	header := make(http.Header)
+
+	// Try to respond with partial content: if this was a range request,
+	// we either return an error or an Object
+	if r, err := s.tryRespondWithPartial(ctx, obj, reqHeader, header); r != nil || err != nil {
+		return r, err
+	}
+
+	attrs, aerr := obj.Attrs(ctx)
+	if aerr != nil {
+		return handleError(aerr)
+	}
+	header.Set(httpheaders.Etag, attrs.Etag)
+	header.Set(httpheaders.LastModified, attrs.Updated.Format(http.TimeFormat))
+
+	if common.IsNotModified(reqHeader, header) {
+		return response.NewNotModified(header), nil
+	}
+
+	var err error
+	reader, err = obj.NewReader(ctx)
+	if err != nil {
+		return handleError(err)
+	}
+
+	size = reader.Attrs.Size
+	setHeadersFromReader(header, reader, size)
+
+	return response.NewOK(header, reader), nil
+}
+
+// tryRespondWithPartial tries to respond with a partial object
+// if the Range header is set.
+func (s *Storage) tryRespondWithPartial(
+	ctx context.Context,
+	obj *storage.ObjectHandle,
+	reqHeader http.Header,
+	header http.Header,
+) (*response.Object, error) {
+	r := reqHeader.Get(httpheaders.Range)
+	if len(r) == 0 {
+		return nil, nil
+	}
+
+	start, end, err := httprange.Parse(r)
+	if err != nil {
+		return response.NewInvalidRange(), nil
+	}
+
+	if end == 0 {
+		return nil, nil
+	}
+
+	length := end - start + 1
+	if end < 0 {
+		length = -1
+	}
+
+	reader, err := obj.NewRangeReader(ctx, start, length)
+	if err != nil {
+		return nil, err
+	}
+
+	if end < 0 || end >= reader.Attrs.Size {
+		end = reader.Attrs.Size - 1
+	}
+
+	size := end - reader.Attrs.StartOffset + 1
+
+	header.Set(httpheaders.ContentRange, fmt.Sprintf("bytes %d-%d/%d", reader.Attrs.StartOffset, end, reader.Attrs.Size))
+	setHeadersFromReader(header, reader, size)
+
+	return response.NewPartialContent(header, reader), nil
+}
+
+func handleError(err error) (*response.Object, error) {
+	if err != storage.ErrBucketNotExist && err != storage.ErrObjectNotExist {
+		return nil, err
+	}
+
+	return response.NewNotFound(err.Error()), nil
+}
+
+func setHeadersFromReader(header http.Header, reader *storage.Reader, size int64) {
+	header.Set(httpheaders.AcceptRanges, "bytes")
+	header.Set(httpheaders.ContentLength, strconv.Itoa(int(size)))
+	header.Set(httpheaders.ContentType, reader.Attrs.ContentType)
+	header.Set(httpheaders.CacheControl, reader.Attrs.CacheControl)
+}
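
A hypothetical call against this storage: the non-empty query string is parsed as a GCS object generation number (gcsStorage and all values here are placeholders):

    obj, err := gcsStorage.GetObject(
        ctx, make(http.Header),
        "my-bucket", "foo/test.png",
        "1696000000000000", // generation, passed as the query part
    )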

+ 57 - 32
fetcher/transport/gcs/gcs_test.go → storage/gcs/gcs_test.go

@@ -12,6 +12,7 @@ import (
 
 	"github.com/imgproxy/imgproxy/v3/fetcher/transport/generichttp"
 	"github.com/imgproxy/imgproxy/v3/httpheaders"
+	"github.com/imgproxy/imgproxy/v3/storage"
 )
 
 func getFreePort() (int, error) {
@@ -32,14 +33,12 @@ type GCSTestSuite struct {
 	suite.Suite
 
 	server       *fakestorage.Server
-	transport    http.RoundTripper
+	storage      storage.Reader
 	etag         string
 	lastModified time.Time
 }
 
 func (s *GCSTestSuite) SetupSuite() {
-	noAuth = true
-
 	// s.etag = "testetag"
 	s.lastModified, _ = time.Parse(http.TimeFormat, "Wed, 21 Oct 2015 07:28:00 GMT")
 
@@ -71,13 +70,13 @@ func (s *GCSTestSuite) SetupSuite() {
 	config := NewDefaultConfig()
 	config.Endpoint = s.server.PublicURL() + "/storage/v1/"
 
-	tc := generichttp.NewDefaultConfig()
-	tc.IgnoreSslVerification = true
+	c := generichttp.NewDefaultConfig()
+	c.IgnoreSslVerification = true
 
-	trans, gerr := generichttp.New(false, &tc)
-	s.Require().NoError(gerr)
+	trans, err := generichttp.New(false, &c)
+	s.Require().NoError(err)
 
-	s.transport, err = New(&config, trans, "?")
+	s.storage, err = New(&config, trans, false)
 	s.Require().NoError(err)
 }
 
@@ -86,56 +85,82 @@ func (s *GCSTestSuite) TearDownSuite() {
 }
 
 func (s *GCSTestSuite) TestRoundTripWithETagEnabled() {
-	request, _ := http.NewRequest("GET", "gcs://test/foo/test.png", nil)
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
 
-	response, err := s.transport.RoundTrip(request)
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
 	s.Require().NoError(err)
-	s.Require().Equal(200, response.StatusCode)
-	s.Require().Equal(s.etag, response.Header.Get(httpheaders.Etag))
+	s.Require().Equal(200, response.Status)
+	s.Require().Equal(s.etag, response.Headers.Get(httpheaders.Etag))
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
 }
 
 func (s *GCSTestSuite) TestRoundTripWithIfNoneMatchReturns304() {
-	request, _ := http.NewRequest("GET", "gcs://test/foo/test.png", nil)
-	request.Header.Set(httpheaders.IfNoneMatch, s.etag)
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+	reqHeader.Set(httpheaders.IfNoneMatch, s.etag)
 
-	response, err := s.transport.RoundTrip(request)
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
 	s.Require().NoError(err)
-	s.Require().Equal(http.StatusNotModified, response.StatusCode)
+	s.Require().Equal(http.StatusNotModified, response.Status)
+
+	if response.Body != nil {
+		response.Body.Close()
+	}
 }
 
 func (s *GCSTestSuite) TestRoundTripWithUpdatedETagReturns200() {
-	request, _ := http.NewRequest("GET", "gcs://test/foo/test.png", nil)
-	request.Header.Set(httpheaders.IfNoneMatch, s.etag+"_wrong")
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+	reqHeader.Set(httpheaders.IfNoneMatch, s.etag+"_wrong")
 
-	response, err := s.transport.RoundTrip(request)
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
 	s.Require().NoError(err)
-	s.Require().Equal(http.StatusOK, response.StatusCode)
+	s.Require().Equal(http.StatusOK, response.Status)
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
 }
 
 func (s *GCSTestSuite) TestRoundTripWithLastModifiedEnabled() {
-	request, _ := http.NewRequest("GET", "gcs://test/foo/test.png", nil)
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
 
-	response, err := s.transport.RoundTrip(request)
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
 	s.Require().NoError(err)
-	s.Require().Equal(200, response.StatusCode)
-	s.Require().Equal(s.lastModified.Format(http.TimeFormat), response.Header.Get(httpheaders.LastModified))
+	s.Require().Equal(200, response.Status)
+	s.Require().Equal(s.lastModified.Format(http.TimeFormat), response.Headers.Get(httpheaders.LastModified))
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
 }
 func (s *GCSTestSuite) TestRoundTripWithIfModifiedSinceReturns304() {
-	request, _ := http.NewRequest("GET", "gcs://test/foo/test.png", nil)
-	request.Header.Set(httpheaders.IfModifiedSince, s.lastModified.Format(http.TimeFormat))
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+	reqHeader.Set(httpheaders.IfModifiedSince, s.lastModified.Format(http.TimeFormat))
 
-	response, err := s.transport.RoundTrip(request)
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
 	s.Require().NoError(err)
-	s.Require().Equal(http.StatusNotModified, response.StatusCode)
+	s.Require().Equal(http.StatusNotModified, response.Status)
+
+	if response.Body != nil {
+		response.Body.Close()
+	}
 }
 
 func (s *GCSTestSuite) TestRoundTripWithUpdatedLastModifiedReturns200() {
-	request, _ := http.NewRequest("GET", "gcs://test/foo/test.png", nil)
-	request.Header.Set(httpheaders.IfModifiedSince, s.lastModified.Add(-24*time.Hour).Format(http.TimeFormat))
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+	reqHeader.Set(httpheaders.IfModifiedSince, s.lastModified.Add(-24*time.Hour).Format(http.TimeFormat))
 
-	response, err := s.transport.RoundTrip(request)
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
 	s.Require().NoError(err)
-	s.Require().Equal(http.StatusOK, response.StatusCode)
+	s.Require().Equal(http.StatusOK, response.Status)
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
 }
 func TestGCSTransport(t *testing.T) {
 	suite.Run(t, new(GCSTestSuite))

+ 20 - 0
storage/reader.go

@@ -0,0 +1,20 @@
+package storage
+
+import (
+	"context"
+	"net/http"
+
+	"github.com/imgproxy/imgproxy/v3/storage/response"
+)
+
+// Reader represents a generic storage interface, which can read
+// objects from a storage backend.
+type Reader interface {
+	// GetObject retrieves an object from the storage and returns
+	// an Object with the result.
+	GetObject(
+		ctx context.Context,
+		reqHeader http.Header,
+		bucket, key, query string,
+	) (*response.Object, error)
+}
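
For illustration, a hypothetical helper that works against any backend through this interface (not part of this commit; imports of context, fmt, net/http, httpheaders, and storage are assumed):

    // fetchETag reads an object's ETag via any storage.Reader implementation.
    func fetchETag(ctx context.Context, r storage.Reader, bucket, key string) (string, error) {
        obj, err := r.GetObject(ctx, make(http.Header), bucket, key, "")
        if err != nil {
            return "", err
        }
        if obj.Body != nil {
            defer obj.Body.Close()
        }
        if obj.Status != http.StatusOK {
            return "", fmt.Errorf("unexpected status %d", obj.Status)
        }
        return obj.Headers.Get(httpheaders.Etag), nil
    }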

+ 112 - 0
storage/response/object.go

@@ -0,0 +1,112 @@
+package response
+
+import (
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+
+	"github.com/imgproxy/imgproxy/v3/httpheaders"
+)
+
+// Object represents a generic response for a storage object.
+// It can be converted to HTTP response or used as-is.
+type Object struct {
+	Status        int           // HTTP status code
+	Headers       http.Header   // Response headers returned by the storage backend
+	Body          io.ReadCloser // Response body reader
+	contentLength int64
+}
+
+// NewOK creates a new Object with a 200 OK status.
+func NewOK(headers http.Header, body io.ReadCloser) *Object {
+	return &Object{
+		Status:        http.StatusOK,
+		Headers:       headers,
+		Body:          body,
+		contentLength: -1, // is set in Response()
+	}
+}
+
+// NewPartialContent creates a new Object with a 206 Partial Content status.
+func NewPartialContent(headers http.Header, body io.ReadCloser) *Object {
+	return &Object{
+		Status:        http.StatusPartialContent,
+		Headers:       headers,
+		Body:          body,
+		contentLength: -1, // is set in Response()
+	}
+}
+
+// NewNotFound creates a new Object with a 404 Not Found status.
+func NewNotFound(message string) *Object {
+	return NewError(http.StatusNotFound, message)
+}
+
+// NewError creates a new Object with a custom status code.
+func NewError(statusCode int, message string) *Object {
+	return &Object{
+		Status:        statusCode,
+		Body:          io.NopCloser(strings.NewReader(message)),
+		Headers:       http.Header{httpheaders.ContentType: {"text/plain"}},
+		contentLength: int64(len(message)),
+	}
+}
+
+// NewNotModified creates a new Object with a 304 Not Modified status.
+func NewNotModified(headers http.Header) *Object {
+	// Copy headers relevant to NotModified response only
+	nmHeaders := make(http.Header)
+	httpheaders.Copy(
+		headers,
+		nmHeaders,
+		[]string{httpheaders.Etag, httpheaders.LastModified},
+	)
+
+	return &Object{
+		Status:        http.StatusNotModified,
+		Headers:       nmHeaders,
+		contentLength: 0,
+	}
+}
+
+// NewInvalidRange creates a new Object with a 416 Range Not Satisfiable status.
+func NewInvalidRange() *Object {
+	return &Object{
+		Status:        http.StatusRequestedRangeNotSatisfiable,
+		contentLength: 0,
+	}
+}
+
+// ContentLength returns the content length of the response.
+func (r *Object) ContentLength() int64 {
+	if r.contentLength >= 0 {
+		return r.contentLength
+	}
+
+	h := r.Headers.Get(httpheaders.ContentLength)
+	if len(h) > 0 {
+		p, err := strconv.ParseInt(h, 10, 64)
+		if err == nil {
+			return p
+		}
+	}
+
+	return -1
+}
+
+// Response converts ObjectReader to http.Response
+func (r *Object) Response(req *http.Request) *http.Response {
+	return &http.Response{
+		Status:        http.StatusText(r.Status),
+		StatusCode:    r.Status,
+		Proto:         "HTTP/1.0",
+		ProtoMajor:    1,
+		ProtoMinor:    0,
+		Header:        r.Headers,
+		Body:          r.Body,
+		Close:         true,
+		Request:       req,
+		ContentLength: r.ContentLength(),
+	}
+}
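
A sketch of how Response() could bridge a storage.Reader back into an http.RoundTripper-based fetcher; splitURL is a hypothetical helper for extracting bucket, key, and query from the URL, not something defined in this commit:

    type storageTransport struct {
        reader storage.Reader
    }

    func (t storageTransport) RoundTrip(req *http.Request) (*http.Response, error) {
        bucket, key, query := splitURL(req.URL) // hypothetical URL parsing helper

        obj, err := t.reader.GetObject(req.Context(), req.Header, bucket, key, query)
        if err != nil {
            return nil, err
        }

        return obj.Response(req), nil
    }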

+ 74 - 0
storage/s3/config.go

@@ -0,0 +1,74 @@
+package s3
+
+import (
+	"errors"
+
+	"github.com/imgproxy/imgproxy/v3/ensure"
+	"github.com/imgproxy/imgproxy/v3/env"
+)
+
+// ConfigDesc holds the configuration descriptions for S3 storage
+type ConfigDesc struct {
+	Region                  env.Desc
+	Endpoint                env.Desc
+	EndpointUsePathStyle    env.Desc
+	AssumeRoleArn           env.Desc
+	AssumeRoleExternalID    env.Desc
+	DecryptionClientEnabled env.Desc
+	AllowedBuckets          env.Desc
+	DeniedBuckets           env.Desc
+}
+
+// Config holds the configuration for S3 storage
+type Config struct {
+	Region                  string   // AWS region for S3 (default: "")
+	Endpoint                string   // Custom endpoint for S3 (default: "")
+	EndpointUsePathStyle    bool     // Use path-style URLs for S3 (default: true)
+	AssumeRoleArn           string   // ARN for assuming an AWS role (default: "")
+	AssumeRoleExternalID    string   // External ID for assuming an AWS role (default: "")
+	DecryptionClientEnabled bool     // Enables S3 decryption client (default: false)
+	ReadOnly                bool     // Read-only access
+	AllowedBuckets          []string // List of allowed buckets
+	DeniedBuckets           []string // List of denied buckets
+	desc                    ConfigDesc
+}
+
+// NewDefaultConfig returns a new default configuration for S3 storage
+func NewDefaultConfig() Config {
+	return Config{
+		Region:                  "",
+		Endpoint:                "",
+		EndpointUsePathStyle:    true,
+		AssumeRoleArn:           "",
+		AssumeRoleExternalID:    "",
+		DecryptionClientEnabled: false,
+		ReadOnly:                true,
+		AllowedBuckets:          nil,
+		DeniedBuckets:           nil,
+	}
+}
+
+// LoadConfigFromEnv loads configuration from the global config package
+func LoadConfigFromEnv(desc ConfigDesc, c *Config) (*Config, error) {
+	c = ensure.Ensure(c, NewDefaultConfig)
+
+	err := errors.Join(
+		env.String(&c.Region, desc.Region),
+		env.String(&c.Endpoint, desc.Endpoint),
+		env.Bool(&c.EndpointUsePathStyle, desc.EndpointUsePathStyle),
+		env.String(&c.AssumeRoleArn, desc.AssumeRoleArn),
+		env.String(&c.AssumeRoleExternalID, desc.AssumeRoleExternalID),
+		env.Bool(&c.DecryptionClientEnabled, desc.DecryptionClientEnabled),
+		env.StringSlice(&c.AllowedBuckets, desc.AllowedBuckets),
+		env.StringSlice(&c.DeniedBuckets, desc.DeniedBuckets),
+	)
+
+	c.desc = desc
+
+	return c, err
+}
+
+// Validate checks the configuration for errors
+func (c *Config) Validate() error {
+	return nil
+}

+ 43 - 65
fetcher/transport/s3/s3.go → storage/s3/s3.go

@@ -3,7 +3,7 @@ package s3
 import (
 	"context"
 	"errors"
-	"io"
+	"fmt"
 	"net/http"
 	"strconv"
 	"strings"
@@ -20,17 +20,18 @@ import (
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	"github.com/aws/aws-sdk-go-v2/service/sts"
 
-	"github.com/imgproxy/imgproxy/v3/fetcher/transport/common"
 	"github.com/imgproxy/imgproxy/v3/httpheaders"
 	"github.com/imgproxy/imgproxy/v3/ierrors"
+	"github.com/imgproxy/imgproxy/v3/storage/common"
+	"github.com/imgproxy/imgproxy/v3/storage/response"
 )
 
 type s3Client interface {
 	GetObject(ctx context.Context, input *s3.GetObjectInput, opts ...func(*s3.Options)) (*s3.GetObjectOutput, error)
 }
 
-// transport implements RoundTripper for the 's3' protocol.
-type transport struct {
+// Storage implements S3 Storage
+type Storage struct {
 	clientOptions []func(*s3.Options)
 
 	defaultClient s3Client
@@ -41,11 +42,11 @@ type transport struct {
 
 	mu sync.RWMutex
 
-	config         *Config
-	querySeparator string
+	config *Config
 }
 
-func New(config *Config, trans *http.Transport, querySeparator string) (http.RoundTripper, error) {
+// New creates a new S3 storage instance
+func New(config *Config, trans *http.Transport) (*Storage, error) {
 	if err := config.Validate(); err != nil {
 		return nil, err
 	}
@@ -96,33 +97,32 @@ func New(config *Config, trans *http.Transport, querySeparator string) (http.Rou
 		return nil, ierrors.Wrap(err, 0, ierrors.WithPrefix("can't create S3 client"))
 	}
 
-	return &transport{
+	return &Storage{
 		clientOptions:   clientOptions,
 		defaultClient:   client,
 		defaultConfig:   conf,
 		clientsByRegion: map[string]s3Client{conf.Region: client},
 		clientsByBucket: make(map[string]s3Client),
 		config:          config,
-		querySeparator:  querySeparator,
 	}, nil
 }
 
-func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
-	bucket, key, query := common.GetBucketAndKey(req.URL, t.querySeparator)
-
+// GetObject retrieves an object from S3
+func (s *Storage) GetObject(
+	ctx context.Context,
+	reqHeader http.Header,
+	bucket, key, query string,
+) (*response.Object, error) {
+	// If either bucket or object key is empty, return 404
 	if len(bucket) == 0 || len(key) == 0 {
-		body := strings.NewReader("Invalid S3 URL: bucket name or object key is empty")
-		return &http.Response{
-			StatusCode:    http.StatusNotFound,
-			Proto:         "HTTP/1.0",
-			ProtoMajor:    1,
-			ProtoMinor:    0,
-			Header:        http.Header{httpheaders.ContentType: {"text/plain"}},
-			ContentLength: int64(body.Len()),
-			Body:          io.NopCloser(body),
-			Close:         false,
-			Request:       req,
-		}, nil
+		return response.NewNotFound(
+			"invalid S3 Storage URL: bucket name or object key are empty",
+		), nil
+	}
+
+	// Check if access to the bucket is allowed
+	if !common.IsBucketAllowed(bucket, s.config.AllowedBuckets, s.config.DeniedBuckets) {
+		return nil, fmt.Errorf("access to the S3 bucket %s is denied", bucket)
 	}
 
 	input := &s3.GetObjectInput{
@@ -134,16 +134,14 @@ func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
 		input.VersionId = aws.String(query)
 	}
 
-	statusCode := http.StatusOK
-
-	if r := req.Header.Get("Range"); len(r) != 0 {
+	if r := reqHeader.Get(httpheaders.Range); len(r) != 0 {
 		input.Range = aws.String(r)
 	} else {
-		if ifNoneMatch := req.Header.Get("If-None-Match"); len(ifNoneMatch) > 0 {
+		if ifNoneMatch := reqHeader.Get(httpheaders.IfNoneMatch); len(ifNoneMatch) > 0 {
 			input.IfNoneMatch = aws.String(ifNoneMatch)
 		}
 
-		if ifModifiedSince := req.Header.Get("If-Modified-Since"); len(ifModifiedSince) > 0 {
+		if ifModifiedSince := reqHeader.Get(httpheaders.IfModifiedSince); len(ifModifiedSince) > 0 {
 			parsedIfModifiedSince, err := time.Parse(http.TimeFormat, ifModifiedSince)
 			if err == nil {
 				input.IfModifiedSince = &parsedIfModifiedSince
@@ -151,9 +149,9 @@ func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
 		}
 	}
 
-	client := t.getBucketClient(bucket)
+	client := s.getBucketClient(bucket)
 
-	output, err := client.GetObject(req.Context(), input)
+	output, err := client.GetObject(ctx, input)
 
 	defer func() {
 		if err != nil && output != nil && output.Body != nil {
@@ -165,17 +163,17 @@ func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
 		// Check if the error is the region mismatch error.
 		// If so, create a new client with the correct region and retry the request.
 		if region := regionFromError(err); len(region) != 0 {
-			client, err = t.createBucketClient(bucket, region)
+			client, err = s.createBucketClient(bucket, region)
 			if err != nil {
-				return handleError(req, err)
+				return handleError(err)
 			}
 
-			output, err = client.GetObject(req.Context(), input)
+			output, err = client.GetObject(ctx, input)
 		}
 	}
 
 	if err != nil {
-		return handleError(req, err)
+		return handleError(err)
 	}
 
 	contentLength := int64(-1)
@@ -183,11 +181,11 @@ func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
 		contentLength = *output.ContentLength
 	}
 
-	if t.config.DecryptionClientEnabled {
+	if s.config.DecryptionClientEnabled {
 		if unencryptedContentLength := output.Metadata["X-Amz-Meta-X-Amz-Unencrypted-Content-Length"]; len(unencryptedContentLength) != 0 {
 			cl, err := strconv.ParseInt(unencryptedContentLength, 10, 64)
 			if err != nil {
-				handleError(req, err)
+				return handleError(err)
 			}
 			contentLength = cl
 		}
@@ -220,23 +218,13 @@ func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
 	}
 	if output.ContentRange != nil {
 		header.Set(httpheaders.ContentRange, *output.ContentRange)
-		statusCode = http.StatusPartialContent
-	}
-
-	return &http.Response{
-		StatusCode:    statusCode,
-		Proto:         "HTTP/1.0",
-		ProtoMajor:    1,
-		ProtoMinor:    0,
-		Header:        header,
-		ContentLength: contentLength,
-		Body:          output.Body,
-		Close:         true,
-		Request:       req,
-	}, nil
+		return response.NewPartialContent(header, output.Body), nil
+	}
+
+	return response.NewOK(header, output.Body), nil
 }
 
-func (t *transport) getBucketClient(bucket string) s3Client {
+func (t *Storage) getBucketClient(bucket string) s3Client {
 	var client s3Client
 
 	func() {
@@ -252,7 +240,7 @@ func (t *transport) getBucketClient(bucket string) s3Client {
 	return t.defaultClient
 }
 
-func (t *transport) createBucketClient(bucket, region string) (s3Client, error) {
+func (t *Storage) createBucketClient(bucket, region string) (s3Client, error) {
 	t.mu.Lock()
 	defer t.mu.Unlock()
 
@@ -311,7 +299,7 @@ func regionFromError(err error) string {
 	return rerr.Response.Header.Get("X-Amz-Bucket-Region")
 }
 
-func handleError(req *http.Request, err error) (*http.Response, error) {
+func handleError(err error) (*response.Object, error) {
 	var rerr *awsHttp.ResponseError
 	if !errors.As(err, &rerr) {
 		return nil, ierrors.Wrap(err, 0)
@@ -321,15 +309,5 @@ func handleError(req *http.Request, err error) (*http.Response, error) {
 		return nil, ierrors.Wrap(err, 0)
 	}
 
-	return &http.Response{
-		StatusCode:    rerr.Response.StatusCode,
-		Proto:         "HTTP/1.0",
-		ProtoMajor:    1,
-		ProtoMinor:    0,
-		Header:        http.Header{"Content-Type": {"text/plain"}},
-		ContentLength: int64(len(err.Error())),
-		Body:          io.NopCloser(strings.NewReader(err.Error())),
-		Close:         false,
-		Request:       req,
-	}, nil
+	return response.NewError(rerr.Response.StatusCode, err.Error()), nil
 }
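
A hypothetical call against this storage: when non-empty, the query part is used as the S3 object version ID (s3Storage and all values here are placeholders):

    obj, err := s3Storage.GetObject(
        ctx, make(http.Header),
        "my-bucket", "foo/test.png",
        "3HL4kqtJlcpXrof3vjVBH40Nrjfkd", // VersionId, passed as the query part
    )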

+ 59 - 32
fetcher/transport/s3/s3_test.go → storage/s3/s3_test.go

@@ -16,13 +16,15 @@ import (
 	"github.com/stretchr/testify/suite"
 
 	"github.com/imgproxy/imgproxy/v3/fetcher/transport/generichttp"
+	"github.com/imgproxy/imgproxy/v3/httpheaders"
+	"github.com/imgproxy/imgproxy/v3/storage"
 )
 
 type S3TestSuite struct {
 	suite.Suite
 
 	server       *httptest.Server
-	transport    http.RoundTripper
+	storage      storage.Reader
 	etag         string
 	lastModified time.Time
 }
@@ -39,20 +41,19 @@ func (s *S3TestSuite) SetupSuite() {
 	os.Setenv("AWS_ACCESS_KEY_ID", "Foo")
 	os.Setenv("AWS_SECRET_ACCESS_KEY", "Bar")
 
-	tc := generichttp.NewDefaultConfig()
-	tc.IgnoreSslVerification = true
+	c := generichttp.NewDefaultConfig()
+	c.IgnoreSslVerification = true
 
-	trans, gerr := generichttp.New(false, &tc)
-	s.Require().NoError(gerr)
+	trans, err := generichttp.New(false, &c)
+	s.Require().NoError(err)
 
-	var err error
-	s.transport, err = New(&config, trans, "?")
+	s.storage, err = New(&config, trans)
 	s.Require().NoError(err)
 
 	err = backend.CreateBucket("test")
 	s.Require().NoError(err)
 
-	svc := s.transport.(*transport).defaultClient
+	svc := s.storage.(*Storage).defaultClient
 	s.Require().NotNil(svc)
 	s.Require().IsType(&s3.Client{}, svc)
 
@@ -81,57 +82,83 @@ func (s *S3TestSuite) TearDownSuite() {
 }
 
 func (s *S3TestSuite) TestRoundTripWithETagEnabled() {
-	request, _ := http.NewRequest("GET", "s3://test/foo/test.png", nil)
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
 
-	response, err := s.transport.RoundTrip(request)
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
 	s.Require().NoError(err)
-	s.Require().Equal(200, response.StatusCode)
-	s.Require().Equal(s.etag, response.Header.Get("ETag"))
+	s.Require().Equal(200, response.Status)
+	s.Require().Equal(s.etag, response.Headers.Get(httpheaders.Etag))
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
 }
 
 func (s *S3TestSuite) TestRoundTripWithIfNoneMatchReturns304() {
-	request, _ := http.NewRequest("GET", "s3://test/foo/test.png", nil)
-	request.Header.Set("If-None-Match", s.etag)
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+	reqHeader.Set(httpheaders.IfNoneMatch, s.etag)
 
-	response, err := s.transport.RoundTrip(request)
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
 	s.Require().NoError(err)
-	s.Require().Equal(http.StatusNotModified, response.StatusCode)
+	s.Require().Equal(http.StatusNotModified, response.Status)
+
+	if response.Body != nil {
+		response.Body.Close()
+	}
 }
 
 func (s *S3TestSuite) TestRoundTripWithUpdatedETagReturns200() {
-	request, _ := http.NewRequest("GET", "s3://test/foo/test.png", nil)
-	request.Header.Set("If-None-Match", s.etag+"_wrong")
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+	reqHeader.Set(httpheaders.IfNoneMatch, s.etag+"_wrong")
 
-	response, err := s.transport.RoundTrip(request)
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
 	s.Require().NoError(err)
-	s.Require().Equal(http.StatusOK, response.StatusCode)
+	s.Require().Equal(http.StatusOK, response.Status)
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
 }
 
 func (s *S3TestSuite) TestRoundTripWithLastModifiedEnabled() {
-	request, _ := http.NewRequest("GET", "s3://test/foo/test.png", nil)
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
 
-	response, err := s.transport.RoundTrip(request)
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
 	s.Require().NoError(err)
-	s.Require().Equal(200, response.StatusCode)
-	s.Require().Equal(s.lastModified.Format(http.TimeFormat), response.Header.Get("Last-Modified"))
+	s.Require().Equal(200, response.Status)
+	s.Require().Equal(s.lastModified.Format(http.TimeFormat), response.Headers.Get(httpheaders.LastModified))
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
 }
 
 func (s *S3TestSuite) TestRoundTripWithIfModifiedSinceReturns304() {
-	request, _ := http.NewRequest("GET", "s3://test/foo/test.png", nil)
-	request.Header.Set("If-Modified-Since", s.lastModified.Format(http.TimeFormat))
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+	reqHeader.Set(httpheaders.IfModifiedSince, s.lastModified.Format(http.TimeFormat))
 
-	response, err := s.transport.RoundTrip(request)
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
 	s.Require().NoError(err)
-	s.Require().Equal(http.StatusNotModified, response.StatusCode)
+	s.Require().Equal(http.StatusNotModified, response.Status)
+
+	if response.Body != nil {
+		response.Body.Close()
+	}
 }
 
 func (s *S3TestSuite) TestRoundTripWithUpdatedLastModifiedReturns200() {
-	request, _ := http.NewRequest("GET", "s3://test/foo/test.png", nil)
-	request.Header.Set("If-Modified-Since", s.lastModified.Add(-24*time.Hour).Format(http.TimeFormat))
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+	reqHeader.Set(httpheaders.IfModifiedSince, s.lastModified.Add(-24*time.Hour).Format(http.TimeFormat))
 
-	response, err := s.transport.RoundTrip(request)
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
 	s.Require().NoError(err)
-	s.Require().Equal(http.StatusOK, response.StatusCode)
+	s.Require().Equal(http.StatusOK, response.Status)
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
 }
 
 func TestS3Transport(t *testing.T) {

+ 82 - 0
storage/swift/config.go

@@ -0,0 +1,82 @@
+package swift
+
+import (
+	"errors"
+	"time"
+
+	"github.com/imgproxy/imgproxy/v3/ensure"
+	"github.com/imgproxy/imgproxy/v3/env"
+)
+
+// ConfigDesc holds the configuration descriptions for Swift storage
+type ConfigDesc struct {
+	Username       env.Desc
+	APIKey         env.Desc
+	AuthURL        env.Desc
+	Domain         env.Desc
+	Tenant         env.Desc
+	AuthVersion    env.Desc
+	ConnectTimeout env.Desc
+	Timeout        env.Desc
+	AllowedBuckets env.Desc
+	DeniedBuckets  env.Desc
+}
+
+// Config holds the configuration for Swift storage
+type Config struct {
+	Username       string        // Username for Swift authentication
+	APIKey         string        // API key for Swift authentication
+	AuthURL        string        // Authentication URL for Swift
+	Domain         string        // Domain for Swift authentication
+	Tenant         string        // Tenant for Swift authentication
+	AuthVersion    int           // Authentication version for Swift
+	ConnectTimeout time.Duration // Connection timeout for Swift
+	Timeout        time.Duration // Request timeout for Swift
+	ReadOnly       bool          // Read-only access
+	AllowedBuckets []string      // List of allowed buckets (containers)
+	DeniedBuckets  []string      // List of denied buckets (containers)
+	desc           ConfigDesc
+}
+
+// NewDefaultConfig returns a new default configuration for Swift storage
+func NewDefaultConfig() Config {
+	return Config{
+		Username:       "",
+		APIKey:         "",
+		AuthURL:        "",
+		Domain:         "",
+		Tenant:         "",
+		AuthVersion:    0,
+		ConnectTimeout: 10 * time.Second,
+		Timeout:        60 * time.Second,
+		ReadOnly:       true,
+		AllowedBuckets: nil,
+		DeniedBuckets:  nil,
+	}
+}
+
+// LoadConfigFromEnv loads configuration from the global config package
+func LoadConfigFromEnv(desc ConfigDesc, c *Config) (*Config, error) {
+	c = ensure.Ensure(c, NewDefaultConfig)
+
+	err := errors.Join(
+		env.String(&c.Username, desc.Username),
+		env.String(&c.APIKey, desc.APIKey),
+		env.String(&c.AuthURL, desc.AuthURL),
+		env.String(&c.Domain, desc.Domain),
+		env.String(&c.Tenant, desc.Tenant),
+		env.Int(&c.AuthVersion, desc.AuthVersion),
+		env.Duration(&c.ConnectTimeout, desc.ConnectTimeout),
+		env.Duration(&c.Timeout, desc.Timeout),
+		env.StringSlice(&c.AllowedBuckets, desc.AllowedBuckets),
+		env.StringSlice(&c.DeniedBuckets, desc.DeniedBuckets),
+	)
+
+	c.desc = desc
+
+	return c, err
+}
+
+// Validate checks the configuration for errors
+func (c *Config) Validate() error {
+	return nil
+}

+ 120 - 0
storage/swift/swift.go

@@ -0,0 +1,120 @@
+package swift
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+
+	"github.com/ncw/swift/v2"
+
+	"github.com/imgproxy/imgproxy/v3/httpheaders"
+	"github.com/imgproxy/imgproxy/v3/storage/common"
+	"github.com/imgproxy/imgproxy/v3/storage/response"
+)
+
+// Storage implements OpenStack Swift storage.
+type Storage struct {
+	config     *Config
+	connection *swift.Connection
+}
+
+// New creates a new Swift storage with the provided configuration.
+func New(
+	ctx context.Context,
+	config *Config,
+	trans *http.Transport,
+) (*Storage, error) {
+	if err := config.Validate(); err != nil {
+		return nil, err
+	}
+
+	c := &swift.Connection{
+		UserName:       config.Username,
+		ApiKey:         config.APIKey,
+		AuthUrl:        config.AuthURL,
+		AuthVersion:    config.AuthVersion,
+		Domain:         config.Domain, // v3 auth only
+		Tenant:         config.Tenant, // v2 auth only
+		Timeout:        config.Timeout,
+		ConnectTimeout: config.ConnectTimeout,
+		Transport:      trans,
+	}
+
+	err := c.Authenticate(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("swift authentication failed: %v", err)
+	}
+
+	return &Storage{
+		config:     config,
+		connection: c,
+	}, nil
+}
+
+// GetObject retrieves an object from Swift storage.
+func (s *Storage) GetObject(
+	ctx context.Context, reqHeader http.Header, bucket, name, _ string,
+) (*response.Object, error) {
+	// If either bucket or object key is empty, return 404
+	if len(bucket) == 0 || len(name) == 0 {
+		return response.NewNotFound(
+			"invalid Swift URL: bucket name or object name are empty",
+		), nil
+	}
+
+	// Check if access to the container is allowed
+	if !common.IsBucketAllowed(bucket, s.config.AllowedBuckets, s.config.DeniedBuckets) {
+		return nil, fmt.Errorf("access to the Swift bucket %s is denied", bucket)
+	}
+
+	// Copy if-modified-since, if-none-match and range headers from
+	// the original request. They act as the parameters for this storage.
+	h := make(swift.Headers)
+
+	for _, k := range []string{
+		httpheaders.Range,           // Range for partial requests
+		httpheaders.IfNoneMatch,     // If-None-Match for caching
+		httpheaders.IfModifiedSince, // If-Modified-Since for caching
+	} {
+		v := reqHeader.Get(k)
+		if len(v) > 0 {
+			h[k] = v
+		}
+	}
+
+	// Fetch the object from Swift
+	object, objectHeaders, err := s.connection.ObjectOpen(ctx, bucket, name, false, h)
+
+	// Convert Swift response headers to normal headers (if any)
+	header := make(http.Header)
+	for k, v := range objectHeaders {
+		header.Set(k, v)
+	}
+
+	if err != nil {
+		// Handle not found errors gracefully
+		if errors.Is(err, swift.ObjectNotFound) || errors.Is(err, swift.ContainerNotFound) {
+			return response.NewNotFound(err.Error()), nil
+		}
+
+		// Same for NotModified
+		if errors.Is(err, swift.NotModified) {
+			return response.NewNotModified(header), nil
+		}
+
+		return nil, fmt.Errorf("error opening swift object: %v", err)
+	}
+
+	// Range header: means partial content
+	partial := len(reqHeader.Get(httpheaders.Range)) > 0
+
+	// By default, Swift storage handles this.
+	// Just in case, let's double check.
+	if !partial && common.IsNotModified(reqHeader, header) {
+		object.Close()
+		return response.NewNotModified(header), nil
+	}
+
+	return response.NewOK(header, object), nil
+}
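
A minimal sketch of bringing this storage up and reading one object; the credentials, URLs, and names are placeholders, and trans is an *http.Transport such as the one produced by generichttp.New:

    // openSwiftObject is a hypothetical caller.
    func openSwiftObject(ctx context.Context, trans *http.Transport) (*response.Object, error) {
        cfg := NewDefaultConfig()
        cfg.Username = "demo"
        cfg.APIKey = "secret"
        cfg.AuthURL = "http://127.0.0.1:8080/auth/v1.0"

        st, err := New(ctx, &cfg, trans)
        if err != nil {
            return nil, err
        }

        return st.GetObject(ctx, make(http.Header), "test", "foo/test.png", "")
    }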

+ 66 - 37
fetcher/transport/swift/swift_test.go → storage/swift/swift_test.go

@@ -11,6 +11,8 @@ import (
 	"github.com/stretchr/testify/suite"
 
 	"github.com/imgproxy/imgproxy/v3/fetcher/transport/generichttp"
+	"github.com/imgproxy/imgproxy/v3/httpheaders"
+	"github.com/imgproxy/imgproxy/v3/storage"
 )
 
 const (
@@ -21,7 +23,7 @@ const (
 type SwiftTestSuite struct {
 	suite.Suite
 	server       *swifttest.SwiftServer
-	transport    http.RoundTripper
+	storage      storage.Reader
 	etag         string
 	lastModified time.Time
 }
@@ -38,14 +40,13 @@ func (s *SwiftTestSuite) SetupSuite() {
 
 	s.setupTestFile(&config)
 
-	tc := generichttp.NewDefaultConfig()
-	tc.IgnoreSslVerification = true
+	c := generichttp.NewDefaultConfig()
+	c.IgnoreSslVerification = true
 
-	trans, gerr := generichttp.New(false, &tc)
-	s.Require().NoError(gerr)
+	trans, err := generichttp.New(false, &c)
+	s.Require().NoError(err)
 
-	var err error
-	s.transport, err = New(&config, trans, "?")
+	s.storage, err = New(s.T().Context(), &config, trans)
 	s.Require().NoError(err, "failed to initialize swift transport")
 }
 
@@ -90,73 +91,101 @@ func (s *SwiftTestSuite) TearDownSuite() {
 }
 
 func (s *SwiftTestSuite) TestRoundTripReturns404WhenObjectNotFound() {
-	request, _ := http.NewRequest("GET", "swift://test/foo/not-here.png", nil)
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
 
-	response, err := s.transport.RoundTrip(request)
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/not-here.png", "")
 	s.Require().NoError(err)
-	s.Require().Equal(404, response.StatusCode)
+	s.Require().Equal(404, response.Status)
 }
 
 func (s *SwiftTestSuite) TestRoundTripReturns404WhenContainerNotFound() {
-	request, _ := http.NewRequest("GET", "swift://invalid/foo/test.png", nil)
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
 
-	response, err := s.transport.RoundTrip(request)
+	response, err := s.storage.GetObject(ctx, reqHeader, "invalid", "foo/test.png", "")
 	s.Require().NoError(err)
-	s.Require().Equal(404, response.StatusCode)
+	s.Require().Equal(404, response.Status)
 }
 
 func (s *SwiftTestSuite) TestRoundTripWithETagEnabled() {
-	request, _ := http.NewRequest("GET", "swift://test/foo/test.png", nil)
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
 
-	response, err := s.transport.RoundTrip(request)
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
 	s.Require().NoError(err)
-	s.Require().Equal(200, response.StatusCode)
-	s.Require().Equal(s.etag, response.Header.Get("ETag"))
+	s.Require().Equal(200, response.Status)
+	s.Require().Equal(s.etag, response.Headers.Get(httpheaders.Etag))
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
 }
 
 func (s *SwiftTestSuite) TestRoundTripWithIfNoneMatchReturns304() {
-	request, _ := http.NewRequest("GET", "swift://test/foo/test.png", nil)
-	request.Header.Set("If-None-Match", s.etag)
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+	reqHeader.Set(httpheaders.IfNoneMatch, s.etag)
 
-	response, err := s.transport.RoundTrip(request)
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
 	s.Require().NoError(err)
-	s.Require().Equal(http.StatusNotModified, response.StatusCode)
+	s.Require().Equal(http.StatusNotModified, response.Status)
+
+	if response.Body != nil {
+		response.Body.Close()
+	}
 }
 
 func (s *SwiftTestSuite) TestRoundTripWithUpdatedETagReturns200() {
-	request, _ := http.NewRequest("GET", "swift://test/foo/test.png", nil)
-	request.Header.Set("If-None-Match", s.etag+"_wrong")
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+	reqHeader.Set(httpheaders.IfNoneMatch, s.etag+"_wrong")
 
-	response, err := s.transport.RoundTrip(request)
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
 	s.Require().NoError(err)
-	s.Require().Equal(http.StatusOK, response.StatusCode)
+	s.Require().Equal(http.StatusOK, response.Status)
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
 }
 
 func (s *SwiftTestSuite) TestRoundTripWithLastModifiedEnabled() {
-	request, _ := http.NewRequest("GET", "swift://test/foo/test.png", nil)
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
 
-	response, err := s.transport.RoundTrip(request)
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
 	s.Require().NoError(err)
-	s.Require().Equal(200, response.StatusCode)
-	s.Require().Equal(s.lastModified.Format(http.TimeFormat), response.Header.Get("Last-Modified"))
+	s.Require().Equal(200, response.Status)
+	s.Require().Equal(s.lastModified.Format(http.TimeFormat), response.Headers.Get(httpheaders.LastModified))
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
 }
 
 func (s *SwiftTestSuite) TestRoundTripWithIfModifiedSinceReturns304() {
-	request, _ := http.NewRequest("GET", "swift://test/foo/test.png", nil)
-	request.Header.Set("If-Modified-Since", s.lastModified.Format(http.TimeFormat))
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+	reqHeader.Set(httpheaders.IfModifiedSince, s.lastModified.Format(http.TimeFormat))
 
-	response, err := s.transport.RoundTrip(request)
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
 	s.Require().NoError(err)
-	s.Require().Equal(http.StatusNotModified, response.StatusCode)
+	s.Require().Equal(http.StatusNotModified, response.Status)
+
+	if response.Body != nil {
+		response.Body.Close()
+	}
 }
 
 func (s *SwiftTestSuite) TestRoundTripWithUpdatedLastModifiedReturns200() {
-	request, _ := http.NewRequest("GET", "swift://test/foo/test.png", nil)
-	request.Header.Set("If-Modified-Since", s.lastModified.Add(-24*time.Hour).Format(http.TimeFormat))
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+	reqHeader.Set(httpheaders.IfModifiedSince, s.lastModified.Add(-24*time.Hour).Format(http.TimeFormat))
 
-	response, err := s.transport.RoundTrip(request)
+	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
 	s.Require().NoError(err)
-	s.Require().Equal(http.StatusOK, response.StatusCode)
+	s.Require().Equal(http.StatusOK, response.Status)
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
 }
 
 func TestSwiftTransport(t *testing.T) {