
Adds range checks to storage tests (#1566)

* Adds range checks to storage test

* Storage tests unified
Victor Sokolov 3 months ago
parent
commit
d5370d8077

+ 1 - 29
fetcher/transport/config.go

@@ -142,33 +142,5 @@ func LoadConfigFromEnv(c *Config) (*Config, error) {
 }
 
 func (c *Config) Validate() error {
-	// Since all the subsequent configuration files are part of
-	// the base config, we need to forward validations downstream.
-	//
-	// We assume that transport is going to use all the transports
-	// at once when created so we make an exception here and move
-	// specific validator calls level up.
-	var err []error
-
-	if c.Local.Root != "" {
-		err = append(err, c.Local.Validate())
-	}
-
-	if c.ABSEnabled {
-		err = append(err, c.ABS.Validate())
-	}
-
-	if c.GCSEnabled {
-		err = append(err, c.GCS.Validate())
-	}
-
-	if c.S3Enabled {
-		err = append(err, c.S3.Validate())
-	}
-
-	if c.SwiftEnabled {
-		err = append(err, c.Swift.Validate())
-	}
-
-	return errors.Join(err...)
+	return nil
 }
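With the aggregated Validate gone, each storage now validates its own config inside its constructor; see the new storage/fs/storage.go and storage/abs/storage.go further down. A minimal caller-side sketch, assuming only the fs package's API from this commit (the path value and error handling are illustrative):

package main

import (
	"log"

	fsStorage "github.com/imgproxy/imgproxy/v3/storage/fs"
)

func main() {
	cfg := fsStorage.NewDefaultConfig()
	cfg.Root = "/srv/images" // hypothetical root

	// New runs cfg.Validate() itself, so an invalid local storage config
	// is reported here rather than by the transport-level Config.Validate().
	if _, err := fsStorage.New(&cfg); err != nil {
		log.Fatal(err)
	}
}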

+ 7 - 7
fetcher/transport/round_tripper_test.go

@@ -10,15 +10,15 @@ import (
 	"github.com/stretchr/testify/suite"
 
 	"github.com/imgproxy/imgproxy/v3/httpheaders"
-	"github.com/imgproxy/imgproxy/v3/storage/response"
+	"github.com/imgproxy/imgproxy/v3/storage"
 )
 
 // mockStorage is a simple mock implementation of storage.Reader
 type mockStorage struct {
-	getObject func(ctx context.Context, reqHeader http.Header, bucket, key, query string) (*response.Object, error)
+	getObject func(ctx context.Context, reqHeader http.Header, bucket, key, query string) (*storage.ObjectReader, error)
 }
 
-func (m *mockStorage) GetObject(ctx context.Context, reqHeader http.Header, bucket, key, query string) (*response.Object, error) {
+func (m *mockStorage) GetObject(ctx context.Context, reqHeader http.Header, bucket, key, query string) (*storage.ObjectReader, error) {
 	if m.getObject == nil {
 		return nil, nil
 	}
@@ -33,7 +33,7 @@ type RoundTripperTestSuite struct {
 func (s *RoundTripperTestSuite) TestRoundTripperSuccess() {
 	// Create mock storage that returns a successful response
 	mock := &mockStorage{
-		getObject: func(ctx context.Context, reqHeader http.Header, bucket, key, query string) (*response.Object, error) {
+		getObject: func(ctx context.Context, reqHeader http.Header, bucket, key, query string) (*storage.ObjectReader, error) {
 			s.Equal("test-bucket", bucket)
 			s.Equal("test-key", key)
 			s.Equal("version=123", query)
@@ -43,7 +43,7 @@ func (s *RoundTripperTestSuite) TestRoundTripperSuccess() {
 			headers.Set(httpheaders.Etag, "test-etag")
 
 			body := io.NopCloser(strings.NewReader("test data"))
-			return response.NewOK(headers, body), nil
+			return storage.NewObjectOK(headers, body), nil
 		},
 	}
 
@@ -73,8 +73,8 @@ func (s *RoundTripperTestSuite) TestRoundTripperSuccess() {
 func (s *RoundTripperTestSuite) TestRoundTripperNotFound() {
 	// Create mock storage that returns 404
 	mock := &mockStorage{
-		getObject: func(ctx context.Context, reqHeader http.Header, bucket, key, query string) (*response.Object, error) {
-			return response.NewNotFound("object not found"), nil
+		getObject: func(ctx context.Context, reqHeader http.Header, bucket, key, query string) (*storage.ObjectReader, error) {
+			return storage.NewObjectNotFound("object not found"), nil
 		},
 	}
 

+ 2 - 2
fetcher/transport/transport.go

@@ -82,7 +82,7 @@ func (t *Transport) registerAllProtocols() error {
 	}
 
 	if t.config.Local.Root != "" {
-		tr, err := fsStorage.New(&t.config.Local, sep)
+		tr, err := fsStorage.New(&t.config.Local)
 		if err != nil {
 			return err
 		}
@@ -98,7 +98,7 @@ func (t *Transport) registerAllProtocols() error {
 	}
 
 	if t.config.GCSEnabled {
-		tr, err := gcsStorage.New(&t.config.GCS, transp, true)
+		tr, err := gcsStorage.New(&t.config.GCS, transp)
 		if err != nil {
 			return err
 		}

+ 0 - 145
storage/abs/abs_test.go

@@ -1,145 +0,0 @@
-package azure
-
-import (
-	"net/http"
-	"net/http/httptest"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/suite"
-
-	"github.com/imgproxy/imgproxy/v3/fetcher/transport/generichttp"
-	"github.com/imgproxy/imgproxy/v3/httpheaders"
-	"github.com/imgproxy/imgproxy/v3/logger"
-	"github.com/imgproxy/imgproxy/v3/storage"
-)
-
-type AbsTest struct {
-	suite.Suite
-
-	server       *httptest.Server // TODO: use testutils.TestServer
-	storage      storage.Reader
-	etag         string
-	lastModified time.Time
-}
-
-func (s *AbsTest) SetupSuite() {
-	data := make([]byte, 32)
-
-	logger.Mute()
-
-	s.etag = "testetag"
-	s.lastModified, _ = time.Parse(http.TimeFormat, "Wed, 21 Oct 2015 07:28:00 GMT")
-
-	s.server = httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
-		s.Equal("/test/foo/test.png", r.URL.Path)
-
-		rw.Header().Set(httpheaders.Etag, s.etag)
-		rw.Header().Set(httpheaders.LastModified, s.lastModified.Format(http.TimeFormat))
-		rw.WriteHeader(200)
-		rw.Write(data)
-	}))
-
-	config := NewDefaultConfig()
-	config.Endpoint = s.server.URL
-	config.Name = "testname"
-	config.Key = "dGVzdGtleQ=="
-
-	c := generichttp.NewDefaultConfig()
-	c.IgnoreSslVerification = true
-
-	trans, err := generichttp.New(false, &c)
-	s.Require().NoError(err)
-
-	s.storage, err = New(&config, trans)
-	s.Require().NoError(err)
-}
-
-func (s *AbsTest) TearDownSuite() {
-	s.server.Close()
-	logger.Unmute()
-}
-
-func (s *AbsTest) TestRoundTripWithETag() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(200, response.Status)
-	s.Require().Equal(s.etag, response.Headers.Get(httpheaders.Etag))
-	s.Require().NotNil(response.Body)
-
-	response.Body.Close()
-}
-
-func (s *AbsTest) TestRoundTripWithIfNoneMatchReturns304() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-	reqHeader.Set(httpheaders.IfNoneMatch, s.etag)
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusNotModified, response.Status)
-
-	if response.Body != nil {
-		response.Body.Close()
-	}
-}
-
-func (s *AbsTest) TestRoundTripWithUpdatedETagReturns200() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-	reqHeader.Set(httpheaders.IfNoneMatch, s.etag+"_wrong")
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusOK, response.Status)
-	s.Require().NotNil(response.Body)
-
-	response.Body.Close()
-}
-
-func (s *AbsTest) TestRoundTripWithLastModifiedEnabled() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(200, response.Status)
-	s.Require().Equal(s.lastModified.Format(http.TimeFormat), response.Headers.Get(httpheaders.LastModified))
-	s.Require().NotNil(response.Body)
-
-	response.Body.Close()
-}
-
-func (s *AbsTest) TestRoundTripWithIfModifiedSinceReturns304() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-	reqHeader.Set(httpheaders.IfModifiedSince, s.lastModified.Format(http.TimeFormat))
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusNotModified, response.Status)
-
-	if response.Body != nil {
-		response.Body.Close()
-	}
-}
-
-func (s *AbsTest) TestRoundTripWithUpdatedLastModifiedReturns200() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-	reqHeader.Set(httpheaders.IfModifiedSince, s.lastModified.Add(-24*time.Hour).Format(http.TimeFormat))
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusOK, response.Status)
-	s.Require().NotNil(response.Body)
-
-	response.Body.Close()
-}
-
-func TestAzureTransport(t *testing.T) {
-	suite.Run(t, new(AbsTest))
-}

+ 1 - 1
storage/abs/config.go

@@ -1,4 +1,4 @@
-package azure
+package abs
 
 import (
 	"errors"

+ 9 - 71
storage/abs/abs.go → storage/abs/reader.go

@@ -1,92 +1,30 @@
-package azure
+package abs
 
 import (
 	"context"
 	"fmt"
 	"net/http"
-	"net/url"
 	"strconv"
 
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
-	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
-	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
-	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
 
 	"github.com/imgproxy/imgproxy/v3/httpheaders"
 	"github.com/imgproxy/imgproxy/v3/httprange"
+	"github.com/imgproxy/imgproxy/v3/storage"
 	"github.com/imgproxy/imgproxy/v3/storage/common"
-	"github.com/imgproxy/imgproxy/v3/storage/response"
 )
 
-// Storage represents Azure Storage
-type Storage struct {
-	config *Config
-	client *azblob.Client
-}
-
-// New creates a new Azure Storage instance
-func New(config *Config, trans *http.Transport) (*Storage, error) {
-	var (
-		client                 *azblob.Client
-		sharedKeyCredential    *azblob.SharedKeyCredential
-		defaultAzureCredential *azidentity.DefaultAzureCredential
-		err                    error
-	)
-
-	if err = config.Validate(); err != nil {
-		return nil, err
-	}
-
-	endpoint := config.Endpoint
-	if len(endpoint) == 0 {
-		endpoint = fmt.Sprintf("https://%s.blob.core.windows.net", config.Name)
-	}
-
-	endpointURL, err := url.Parse(endpoint)
-	if err != nil {
-		return nil, err
-	}
-
-	opts := azblob.ClientOptions{
-		ClientOptions: policy.ClientOptions{
-			Transport: &http.Client{Transport: trans},
-		},
-	}
-
-	if len(config.Key) > 0 {
-		sharedKeyCredential, err = azblob.NewSharedKeyCredential(config.Name, config.Key)
-		if err != nil {
-			return nil, err
-		}
-
-		client, err = azblob.NewClientWithSharedKeyCredential(endpointURL.String(), sharedKeyCredential, &opts)
-	} else {
-		defaultAzureCredential, err = azidentity.NewDefaultAzureCredential(nil)
-		if err != nil {
-			return nil, err
-		}
-
-		client, err = azblob.NewClient(endpointURL.String(), defaultAzureCredential, &opts)
-	}
-
-	if err != nil {
-		return nil, err
-	}
-
-	return &Storage{config, client}, nil
-}
-
 // GetObject retrieves an object from Azure cloud
 func (s *Storage) GetObject(
 	ctx context.Context,
 	reqHeader http.Header,
 	container, key, _ string,
-) (*response.Object, error) {
+) (*storage.ObjectReader, error) {
 	// If either container or object name is empty, return 404
 	if len(container) == 0 || len(key) == 0 {
-		return response.NewNotFound(
+		return storage.NewObjectNotFound(
 			"invalid Azure Storage URL: container name or object key are empty",
 		), nil
 	}
@@ -102,7 +40,7 @@ func (s *Storage) GetObject(
 	// Check if this is partial request
 	partial, err := parseRangeHeader(opts, reqHeader)
 	if err != nil {
-		return response.NewInvalidRange(), nil
+		return storage.NewObjectInvalidRange(), nil
 	}
 
 	// Open the object
@@ -111,7 +49,7 @@ func (s *Storage) GetObject(
 		if azError, ok := err.(*azcore.ResponseError); !ok || azError.StatusCode < 100 || azError.StatusCode == 301 {
 			return nil, err
 		} else {
-			return response.NewError(azError.StatusCode, azError.Error()), nil
+			return storage.NewObjectError(azError.StatusCode, azError.Error()), nil
 		}
 	}
 
@@ -132,7 +70,7 @@ func (s *Storage) GetObject(
 			result.Body.Close()
 		}
 
-		return response.NewNotModified(header), nil
+		return storage.NewObjectNotModified(header), nil
 	}
 
 	// Pass through important headers
@@ -156,10 +94,10 @@ func (s *Storage) GetObject(
 
 	// If the request was partial, let's respond with partial
 	if partial {
-		return response.NewPartialContent(header, result.Body), nil
+		return storage.NewObjectPartialContent(header, result.Body), nil
 	}
 
-	return response.NewOK(header, result.Body), nil
+	return storage.NewObjectOK(header, result.Body), nil
 }
 
 func parseRangeHeader(opts *blob.DownloadStreamOptions, reqHeader http.Header) (bool, error) {

+ 52 - 0
storage/abs/reader_test.go

@@ -0,0 +1,52 @@
+package abs
+
+import (
+	"crypto/rand"
+	"testing"
+
+	"github.com/imgproxy/imgproxy/v3/storage"
+	"github.com/imgproxy/imgproxy/v3/storage/testsuite"
+	"github.com/imgproxy/imgproxy/v3/testutil"
+	"github.com/stretchr/testify/suite"
+)
+
+const (
+	testDataSize = 128
+)
+
+type ReaderTestSuite struct {
+	testsuite.ReaderSuite
+
+	absStorage testutil.LazyObj[*absStorageWrapper]
+}
+
+func (s *ReaderTestSuite) SetupSuite() {
+	s.ReaderSuite.SetupSuite()
+
+	// Generate random test data for content verification
+	s.TestData = make([]byte, testDataSize)
+	rand.Read(s.TestData)
+
+	s.TestContainer = "test-container"
+	s.TestObjectKey = "test-object.txt"
+
+	// Initialize ABS storage
+	s.absStorage, _ = NewLazySuiteStorage(s.Lazy())
+
+	s.Storage, _ = testutil.NewLazySuiteObj(s,
+		func() (storage.Reader, error) {
+			return s.absStorage().Storage, nil
+		},
+	)
+}
+
+func (s *ReaderTestSuite) SetupTest() {
+	// Recreate ABS blob for each test
+	abs := s.absStorage().Client().ServiceClient().NewContainerClient(s.TestContainer).NewBlockBlobClient(s.TestObjectKey)
+	_, err := abs.UploadBuffer(s.T().Context(), s.TestData, nil)
+	s.Require().NoError(err)
+}
+
+func TestReader(t *testing.T) {
+	suite.Run(t, new(ReaderTestSuite))
+}

+ 69 - 0
storage/abs/storage.go

@@ -0,0 +1,69 @@
+package abs
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+)
+
+// Storage represents Azure Storage
+type Storage struct {
+	config *Config
+	client *azblob.Client
+}
+
+// New creates a new Azure Storage instance
+func New(config *Config, trans *http.Transport) (*Storage, error) {
+	var (
+		client                 *azblob.Client
+		sharedKeyCredential    *azblob.SharedKeyCredential
+		defaultAzureCredential *azidentity.DefaultAzureCredential
+		err                    error
+	)
+
+	if err = config.Validate(); err != nil {
+		return nil, err
+	}
+
+	endpoint := config.Endpoint
+	if len(endpoint) == 0 {
+		endpoint = fmt.Sprintf("https://%s.blob.core.windows.net", config.Name)
+	}
+
+	endpointURL, err := url.Parse(endpoint)
+	if err != nil {
+		return nil, err
+	}
+
+	opts := azblob.ClientOptions{
+		ClientOptions: policy.ClientOptions{
+			Transport: &http.Client{Transport: trans},
+		},
+	}
+
+	if len(config.Key) > 0 {
+		sharedKeyCredential, err = azblob.NewSharedKeyCredential(config.Name, config.Key)
+		if err != nil {
+			return nil, err
+		}
+
+		client, err = azblob.NewClientWithSharedKeyCredential(endpointURL.String(), sharedKeyCredential, &opts)
+	} else {
+		defaultAzureCredential, err = azidentity.NewDefaultAzureCredential(nil)
+		if err != nil {
+			return nil, err
+		}
+
+		client, err = azblob.NewClient(endpointURL.String(), defaultAzureCredential, &opts)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return &Storage{config, client}, nil
+}

+ 240 - 0
storage/abs/test_server.go

@@ -0,0 +1,240 @@
+package abs
+
+import (
+	"crypto/md5"
+	"fmt"
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"slices"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/imgproxy/imgproxy/v3/httpheaders"
+	"github.com/imgproxy/imgproxy/v3/httprange"
+)
+
+// TestServer is a mock Azure Blob Storage server for testing
+// It's a very light version of Azurite
+type TestServer struct {
+	server         *httptest.Server
+	stagedBlocks   map[string][]string          // container/blob -> blockIDs
+	blockData      map[string]map[string][]byte // container/blob -> blockID -> data
+	committedBlobs map[string][]byte            // container/blob -> committed data
+	headers        map[string]http.Header       // container/blob -> HTTP headers (ETag, Last-Modified, Content-Type, etc.)
+	mu             sync.Mutex
+}
+
+// NewAbsServer creates and starts a new mock Azure Blob Storage server
+func NewAbsServer() (*TestServer, error) {
+	abs := &TestServer{
+		stagedBlocks:   make(map[string][]string),
+		blockData:      make(map[string]map[string][]byte),
+		committedBlobs: make(map[string][]byte),
+		headers:        make(map[string]http.Header),
+	}
+
+	abs.server = httptest.NewTLSServer(http.HandlerFunc(abs.handler))
+
+	return abs, nil
+}
+
+// handler handles Azure Blob Storage API requests
+func (s *TestServer) handler(rw http.ResponseWriter, r *http.Request) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	// Parse path: /{container}/{blob}
+	parts := strings.SplitN(strings.TrimPrefix(r.URL.Path, "/"), "/", 2)
+	if len(parts) < 2 {
+		rw.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	container := parts[0]
+	blobName := parts[1]
+	key := fmt.Sprintf("%s/%s", container, blobName)
+
+	// Handle different Azure Blob Storage operations
+	comp := r.URL.Query().Get("comp")
+	blockID := r.URL.Query().Get("blockid")
+
+	switch {
+	case r.Method == http.MethodPut && comp == "block" && blockID != "":
+		// StageBlock operation
+		data, err := io.ReadAll(r.Body)
+		if err != nil {
+			rw.WriteHeader(http.StatusInternalServerError)
+			return
+		}
+
+		// Initialize block data map if needed
+		if s.blockData[key] == nil {
+			s.blockData[key] = make(map[string][]byte)
+		}
+
+		// Store the block data
+		s.blockData[key][blockID] = data
+
+		// Track block ID in order
+		if s.stagedBlocks[key] == nil {
+			s.stagedBlocks[key] = []string{}
+		}
+
+		// Only add if not already present
+		if !slices.Contains(s.stagedBlocks[key], blockID) {
+			s.stagedBlocks[key] = append(s.stagedBlocks[key], blockID)
+		}
+
+		rw.WriteHeader(http.StatusCreated)
+
+	case r.Method == http.MethodPut && comp == "blocklist":
+		// CommitBlockList operation
+		body, _ := io.ReadAll(r.Body)
+
+		// Parse block IDs from XML (simplified - just extract blockid values)
+		blockIDs := []string{}
+		for _, id := range s.stagedBlocks[key] {
+			if strings.Contains(string(body), id) {
+				blockIDs = append(blockIDs, id)
+			}
+		}
+
+		// Commit the blocks
+		var result []byte
+		for _, blockID := range blockIDs {
+			if data, ok := s.blockData[key][blockID]; ok {
+				result = append(result, data...)
+			}
+		}
+
+		s.committedBlobs[key] = result
+
+		// Store headers
+		lastMod := time.Now().UTC()
+
+		headers := make(http.Header)
+		headers.Set(httpheaders.ContentType, r.Header.Get(httpheaders.ContentType))
+		headers.Set(httpheaders.Etag, fmt.Sprintf(`"%x"`, md5.Sum(result)))
+		headers.Set(httpheaders.LastModified, lastMod.Format(http.TimeFormat))
+		headers.Set(httpheaders.ContentLength, fmt.Sprintf("%d", len(result)))
+
+		s.headers[key] = headers
+
+		// Clean up staged blocks
+		delete(s.stagedBlocks, key)
+		delete(s.blockData, key)
+
+		rw.WriteHeader(http.StatusCreated)
+
+	case r.Method == http.MethodPut && comp == "" && blockID == "":
+		// Normal (non-partial) blob upload - PUT without block operations
+		data, err := io.ReadAll(r.Body)
+		if err != nil {
+			rw.WriteHeader(http.StatusInternalServerError)
+			return
+		}
+
+		// Store the blob data directly
+		s.committedBlobs[key] = data
+
+		// Store headers
+		lastMod := time.Now().UTC()
+
+		headers := make(http.Header)
+		etag := fmt.Sprintf(`"%x"`, md5.Sum(data))
+		headers.Set(httpheaders.Etag, etag)
+		headers.Set(httpheaders.LastModified, lastMod.Format(http.TimeFormat))
+		headers.Set(httpheaders.ContentType, r.Header.Get(httpheaders.ContentType))
+		headers.Set(httpheaders.ContentLength, fmt.Sprintf("%d", len(data)))
+		s.headers[key] = headers
+
+		// Set response headers
+		rw.Header().Set(httpheaders.Etag, etag)
+		rw.Header().Set(httpheaders.LastModified, lastMod.Format(http.TimeFormat))
+
+		rw.WriteHeader(http.StatusCreated)
+
+	case r.Method == http.MethodDelete:
+		// Delete blob operation
+		delete(s.committedBlobs, key)
+		delete(s.stagedBlocks, key)
+		delete(s.blockData, key)
+		delete(s.headers, key)
+		rw.WriteHeader(http.StatusAccepted)
+
+	case r.Method == http.MethodGet:
+		// Get blob operation
+		data, ok := s.committedBlobs[key]
+		if !ok {
+			rw.WriteHeader(http.StatusNotFound)
+			rw.Write([]byte(`<?xml version="1.0" encoding="utf-8"?>
+<Error>
+  <Code>BlobNotFound</Code>
+  <Message>The specified blob does not exist.</Message>
+</Error>`))
+			return
+		}
+
+		// Get stored headers
+		headers := s.headers[key].Clone()
+
+		// Handle range requests - Azure uses x-ms-range header
+		rangeHeader := r.Header.Get("x-ms-range")
+		if rangeHeader != "" {
+			headers.Del(httpheaders.ContentLength)
+		}
+		httpheaders.CopyAll(headers, rw.Header(), true)
+
+		rw.Header().Set(httpheaders.AcceptRanges, "bytes")
+
+		if rangeHeader == "" {
+			// Full content
+			rw.Header().Set(httpheaders.ContentLength, fmt.Sprintf("%d", len(data)))
+			rw.WriteHeader(http.StatusOK)
+			rw.Write(data)
+			return
+		}
+
+		// Parse range header
+		start, end, err := httprange.Parse(rangeHeader)
+		if err != nil {
+			rw.Header().Set(httpheaders.ContentRange, fmt.Sprintf("bytes */%d", len(data)))
+			rw.WriteHeader(http.StatusRequestedRangeNotSatisfiable)
+			return
+		}
+
+		// Handle open-ended range (e.g., "bytes=0-")
+		if end == -1 {
+			end = int64(len(data)) - 1
+		}
+
+		// Validate range
+		if start < 0 || start >= int64(len(data)) || end >= int64(len(data)) || start > end {
+			rw.Header().Set(httpheaders.ContentRange, fmt.Sprintf("bytes */%d", len(data)))
+			rw.WriteHeader(http.StatusRequestedRangeNotSatisfiable)
+			return
+		}
+
+		// Serve partial content
+		rangeData := data[start : end+1]
+		rw.Header().Set(httpheaders.ContentLength, fmt.Sprintf("%d", len(rangeData)))
+		rw.Header().Set(httpheaders.ContentRange, fmt.Sprintf("bytes %d-%d/%d", start, end, len(data)))
+		rw.WriteHeader(http.StatusPartialContent)
+		rw.Write(rangeData)
+
+	default:
+		rw.WriteHeader(http.StatusNotImplemented)
+	}
+}
+
+// Close stops the server
+func (s *TestServer) Close() {
+	s.server.Close()
+}
+
+// URL returns the server URL
+func (s *TestServer) URL() string {
+	return s.server.URL
+}
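The unified reader tests exercise this range handling through the Azure SDK; for reference, here is a hedged sketch of probing the mock server directly. rangeProbe is hypothetical and assumes a blob was already uploaded at the given path:

package abs

import (
	"crypto/tls"
	"net/http"
)

// rangeProbe sends a raw ranged GET to the mock server and returns the
// status code: 206 for a satisfiable range, 416 otherwise, 404 if nothing
// has been uploaded yet.
func rangeProbe(srv *TestServer, path, byteRange string) (int, error) {
	client := &http.Client{Transport: &http.Transport{
		// httptest.NewTLSServer uses a self-signed certificate
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}}

	req, err := http.NewRequest(http.MethodGet, srv.URL()+path, nil)
	if err != nil {
		return 0, err
	}
	// Azure clients send ranges via x-ms-range; the handler parses it
	// with httprange.Parse
	req.Header.Set("x-ms-range", byteRange)

	resp, err := client.Do(req)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()

	return resp.StatusCode, nil
}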

+ 100 - 0
storage/abs/test_storage.go

@@ -0,0 +1,100 @@
+package abs
+
+import (
+	"context"
+	"net/http"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+
+	"github.com/imgproxy/imgproxy/v3/fetcher/transport/generichttp"
+	"github.com/imgproxy/imgproxy/v3/testutil"
+)
+
+// absStorageWrapper wraps the storage and optionally holds a server for cleanup
+type absStorageWrapper struct {
+	*Storage
+	server      *TestServer
+	client      *azblob.Client
+	shouldClose bool
+}
+
+// Server returns the underlying AbsServer
+func (w *absStorageWrapper) Server() *TestServer {
+	return w.server
+}
+
+// Client returns the underlying ABS client for direct API access
+func (w *absStorageWrapper) Client() *azblob.Client {
+	return w.client
+}
+
+// Sugar alias
+type LazySuiteStorage = testutil.LazyObj[*absStorageWrapper]
+
+// NewLazySuiteStorage creates a lazy ABS Storage object for use in test suites
+// A new server will be created internally and cleaned up automatically
+func NewLazySuiteStorage(
+	l testutil.LazySuiteFrom,
+) (testutil.LazyObj[*absStorageWrapper], context.CancelFunc) {
+	return testutil.NewLazySuiteObj(
+		l,
+		func() (*absStorageWrapper, error) {
+			wrapper := &absStorageWrapper{}
+
+			// Create server internally
+			absServer, err := NewAbsServer()
+			if err != nil {
+				return nil, err
+			}
+			wrapper.server = absServer
+			wrapper.shouldClose = true
+
+			config := NewDefaultConfig()
+			config.Endpoint = absServer.URL()
+			config.Name = "testaccount"
+			config.Key = "dGVzdGtleQ=="
+
+			c := generichttp.NewDefaultConfig()
+			c.IgnoreSslVerification = true
+
+			trans, err := generichttp.New(false, &c)
+			if err != nil {
+				return nil, err
+			}
+
+			storage, err := New(&config, trans)
+			if err != nil {
+				return nil, err
+			}
+
+			// Create ABS client for direct API access
+			sharedKeyCredential, err := azblob.NewSharedKeyCredential(config.Name, config.Key)
+			if err != nil {
+				return nil, err
+			}
+
+			clientOpts := &azblob.ClientOptions{
+				ClientOptions: policy.ClientOptions{
+					Transport: &http.Client{Transport: trans},
+				},
+			}
+
+			client, err := azblob.NewClientWithSharedKeyCredential(absServer.URL(), sharedKeyCredential, clientOpts)
+			if err != nil {
+				return nil, err
+			}
+
+			wrapper.Storage = storage
+			wrapper.client = client
+			return wrapper, nil
+		},
+		func(w *absStorageWrapper) error {
+			// Clean up internal server if we created it
+			if w.shouldClose {
+				w.server.Close()
+			}
+			return nil
+		},
+	)
+}

+ 2 - 1
storage/fs/config.go

@@ -11,7 +11,8 @@ import (
 // ConfigDesc holds the configuration descriptions for
 // local file system storage
 type ConfigDesc struct {
-	Root env.Desc
+	Root   env.Desc
+	Config *Config
 }
 
 // Config holds the configuration for local file system transport

+ 0 - 138
storage/fs/fs_test.go

@@ -1,138 +0,0 @@
-package fs
-
-import (
-	"fmt"
-	"net/http"
-	"os"
-	"path/filepath"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/suite"
-
-	"github.com/imgproxy/imgproxy/v3/httpheaders"
-	"github.com/imgproxy/imgproxy/v3/storage"
-	"github.com/imgproxy/imgproxy/v3/testutil"
-)
-
-type FsTestSuite struct {
-	suite.Suite
-
-	storage storage.Reader
-	etag    string
-	modTime time.Time
-	size    int64
-}
-
-func (s *FsTestSuite) SetupSuite() {
-	tdp := testutil.NewTestDataProvider(s.T)
-	fsRoot := tdp.Root()
-
-	fi, err := os.Stat(filepath.Join(fsRoot, "test1.png"))
-	s.Require().NoError(err)
-
-	s.etag = buildEtag("/test1.png", fi)
-	s.modTime = fi.ModTime()
-	s.size = fi.Size()
-
-	s.storage, _ = New(&Config{Root: fsRoot}, "?")
-}
-
-func (s *FsTestSuite) TestRoundTripWithETagEnabled() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "", "test1.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(200, response.Status)
-	s.Require().Equal(s.etag, response.Headers.Get(httpheaders.Etag))
-	s.Require().NotNil(response.Body)
-
-	response.Body.Close()
-}
-func (s *FsTestSuite) TestRoundTripWithIfNoneMatchReturns304() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-	reqHeader.Set(httpheaders.IfNoneMatch, s.etag)
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "", "test1.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusNotModified, response.Status)
-
-	if response.Body != nil {
-		response.Body.Close()
-	}
-}
-
-func (s *FsTestSuite) TestRoundTripWithUpdatedETagReturns200() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-	reqHeader.Set(httpheaders.IfNoneMatch, s.etag+"_wrong")
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "", "test1.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusOK, response.Status)
-	s.Require().NotNil(response.Body)
-
-	response.Body.Close()
-}
-
-func (s *FsTestSuite) TestRoundTripWithLastModifiedEnabledReturns200() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "", "test1.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(200, response.Status)
-	s.Require().Equal(s.modTime.Format(http.TimeFormat), response.Headers.Get(httpheaders.LastModified))
-	s.Require().NotNil(response.Body)
-
-	response.Body.Close()
-}
-
-func (s *FsTestSuite) TestRoundTripWithIfModifiedSinceReturns304() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-	reqHeader.Set(httpheaders.IfModifiedSince, s.modTime.Format(http.TimeFormat))
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "", "test1.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusNotModified, response.Status)
-
-	if response.Body != nil {
-		response.Body.Close()
-	}
-}
-
-func (s *FsTestSuite) TestRoundTripWithUpdatedLastModifiedReturns200() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-	reqHeader.Set(httpheaders.IfModifiedSince, s.modTime.Add(-time.Minute).Format(http.TimeFormat))
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "", "test1.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusOK, response.Status)
-	s.Require().NotNil(response.Body)
-
-	response.Body.Close()
-}
-
-func (s *FsTestSuite) TestRoundTripWithRangeReturns206() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-	reqHeader.Set(httpheaders.Range, "bytes=10-19")
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "", "test1.png", "")
-
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusPartialContent, response.Status)
-	s.Require().Equal(fmt.Sprintf("bytes 10-19/%d", s.size), response.Headers.Get(httpheaders.ContentRange))
-	s.Require().Equal("10", response.Headers.Get(httpheaders.ContentLength))
-	s.Require().NotNil(response.Body)
-
-	response.Body.Close()
-}
-
-func TestFSTransport(t *testing.T) {
-	suite.Run(t, new(FsTestSuite))
-}

+ 9 - 24
storage/fs/fs.go → storage/fs/reader.go

@@ -16,34 +16,19 @@ import (
 
 	"github.com/imgproxy/imgproxy/v3/httpheaders"
 	"github.com/imgproxy/imgproxy/v3/httprange"
+	"github.com/imgproxy/imgproxy/v3/storage"
 	"github.com/imgproxy/imgproxy/v3/storage/common"
-	"github.com/imgproxy/imgproxy/v3/storage/response"
 )
 
-// Storage represents fs file storage
-type Storage struct {
-	fs             http.Dir
-	querySeparator string
-}
-
-// New creates a new Storage instance.
-func New(config *Config, qsSeparator string) (*Storage, error) {
-	if err := config.Validate(); err != nil {
-		return nil, err
-	}
-
-	return &Storage{fs: http.Dir(config.Root), querySeparator: qsSeparator}, nil
-}
-
 // GetObject retrieves an object from file system.
 func (s *Storage) GetObject(
 	ctx context.Context,
 	reqHeader http.Header,
 	_, name, _ string,
-) (*response.Object, error) {
+) (*storage.ObjectReader, error) {
 	// If either container or object name is empty, return 404
 	if len(name) == 0 {
-		return response.NewNotFound(
+		return storage.NewObjectNotFound(
 			"invalid FS Storage URL: object name is empty",
 		), nil
 	}
@@ -54,7 +39,7 @@ func (s *Storage) GetObject(
 	f, err := s.fs.Open(name)
 	if err != nil {
 		if os.IsNotExist(err) {
-			return response.NewNotFound(fmt.Sprintf("%s doesn't exist", name)), nil
+			return storage.NewObjectNotFound(fmt.Sprintf("%s doesn't exist", name)), nil
 		}
 
 		return nil, err
@@ -67,7 +52,7 @@ func (s *Storage) GetObject(
 	}
 
 	if fi.IsDir() {
-		return response.NewNotFound(fmt.Sprintf("%s is directory", name)), nil
+		return storage.NewObjectNotFound(fmt.Sprintf("%s is directory", name)), nil
 	}
 
 	// file basic properties
@@ -90,7 +75,7 @@ func (s *Storage) GetObject(
 	switch {
 	case err != nil:
 		f.Close()
-		return response.NewInvalidRange(), nil
+		return storage.NewObjectInvalidRange(), nil
 
 	// Range requested: partial content should be returned
 	case end != 0:
@@ -106,7 +91,7 @@ func (s *Storage) GetObject(
 		header.Set(httpheaders.ContentLength, strconv.Itoa(int(size)))
 		header.Set(httpheaders.ContentRange, fmt.Sprintf("bytes %d-%d/%d", start, end, fi.Size()))
 
-		return response.NewPartialContent(header, body), nil
+		return storage.NewObjectPartialContent(header, body), nil
 
 	// Full object requested
 	default:
@@ -120,10 +105,10 @@ func (s *Storage) GetObject(
 	// In case file was not modified, let's not return reader
 	if common.IsNotModified(reqHeader, header) {
 		f.Close()
-		return response.NewNotModified(header), nil
+		return storage.NewObjectNotModified(header), nil
 	}
 
-	return response.NewOK(header, body), nil
+	return storage.NewObjectOK(header, body), nil
 }
 
 func buildEtag(path string, fi fs.FileInfo) string {

+ 49 - 0
storage/fs/reader_test.go

@@ -0,0 +1,49 @@
+package fs
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/imgproxy/imgproxy/v3/storage"
+	"github.com/imgproxy/imgproxy/v3/storage/testsuite"
+	"github.com/imgproxy/imgproxy/v3/testutil"
+	"github.com/stretchr/testify/suite"
+)
+
+type ReaderTestSuite struct {
+	testsuite.ReaderSuite
+
+	fsStorage testutil.LazyObj[*Storage]
+	tmpDir    testutil.LazyObj[string]
+}
+
+func (s *ReaderTestSuite) SetupSuite() {
+	s.ReaderSuite.SetupSuite()
+	s.TestObjectKey = "test-object.txt"
+
+	s.tmpDir, _ = testutil.NewLazySuiteObj(s,
+		func() (string, error) {
+			return s.T().TempDir(), nil
+		})
+
+	s.fsStorage, _ = NewLazySuiteStorage(s.Lazy(), s.tmpDir())
+	s.Storage, _ = testutil.NewLazySuiteObj(s,
+		func() (storage.Reader, error) {
+			return s.fsStorage(), nil
+		},
+	)
+}
+
+func (s *ReaderTestSuite) SetupTest() {
+	// Prepare FS storage - write test file directly
+	testFile := filepath.Join(s.tmpDir(), s.TestObjectKey)
+	err := os.MkdirAll(filepath.Dir(testFile), 0755)
+	s.Require().NoError(err)
+	err = os.WriteFile(testFile, s.TestData, 0644)
+	s.Require().NoError(err)
+}
+
+func TestReader(t *testing.T) {
+	suite.Run(t, new(ReaderTestSuite))
+}

+ 18 - 0
storage/fs/storage.go

@@ -0,0 +1,18 @@
+package fs
+
+import "net/http"
+
+// Storage represents fs file storage
+type Storage struct {
+	fs     http.Dir
+	config *Config
+}
+
+// New creates a new Storage instance.
+func New(config *Config) (*Storage, error) {
+	if err := config.Validate(); err != nil {
+		return nil, err
+	}
+
+	return &Storage{config: config, fs: http.Dir(config.Root)}, nil
+}

+ 32 - 0
storage/fs/test_storage.go

@@ -0,0 +1,32 @@
+package fs
+
+import (
+	"context"
+
+	"github.com/imgproxy/imgproxy/v3/testutil"
+)
+
+// LazySuiteStorage is a lazy object that provides FS storage for tests
+type LazySuiteStorage = testutil.LazyObj[*Storage]
+
+// NewLazySuiteStorage creates a lazy FS Storage object for use in test suites
+// The tmpDir parameter specifies the root directory for the filesystem storage
+func NewLazySuiteStorage(
+	l testutil.LazySuiteFrom,
+	tmpDir string,
+) (testutil.LazyObj[*Storage], context.CancelFunc) {
+	return testutil.NewLazySuiteObj(
+		l,
+		func() (*Storage, error) {
+			config := NewDefaultConfig()
+			config.Root = tmpDir
+
+			storage, err := New(&config)
+			if err != nil {
+				return nil, err
+			}
+
+			return storage, nil
+		},
+	)
+}

+ 2 - 0
storage/gcs/config.go

@@ -23,6 +23,7 @@ type Config struct {
 	ReadOnly       bool     // Read-only access
 	AllowedBuckets []string // List of allowed buckets
 	DeniedBuckets  []string // List of denied buckets
+	TestNoAuth     bool     // disable authentication for tests
 	desc           ConfigDesc
 }
 
@@ -34,6 +35,7 @@ func NewDefaultConfig() Config {
 		ReadOnly:       true,
 		AllowedBuckets: nil,
 		DeniedBuckets:  nil,
+		TestNoAuth:     false,
 	}
 }
 

+ 0 - 167
storage/gcs/gcs_test.go

@@ -1,167 +0,0 @@
-package gcs
-
-import (
-	"fmt"
-	"net"
-	"net/http"
-	"testing"
-	"time"
-
-	"github.com/fsouza/fake-gcs-server/fakestorage"
-	"github.com/stretchr/testify/suite"
-
-	"github.com/imgproxy/imgproxy/v3/fetcher/transport/generichttp"
-	"github.com/imgproxy/imgproxy/v3/httpheaders"
-	"github.com/imgproxy/imgproxy/v3/storage"
-)
-
-func getFreePort() (int, error) {
-	addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
-	if err != nil {
-		return 0, err
-	}
-
-	l, err := net.ListenTCP("tcp", addr)
-	if err != nil {
-		return 0, err
-	}
-	defer l.Close()
-	return l.Addr().(*net.TCPAddr).Port, nil
-}
-
-type GCSTestSuite struct {
-	suite.Suite
-
-	server       *fakestorage.Server
-	storage      storage.Reader
-	etag         string
-	lastModified time.Time
-}
-
-func (s *GCSTestSuite) SetupSuite() {
-	// s.etag = "testetag"
-	s.lastModified, _ = time.Parse(http.TimeFormat, "Wed, 21 Oct 2015 07:28:00 GMT")
-
-	port, err := getFreePort()
-	s.Require().NoError(err)
-
-	s.server, err = fakestorage.NewServerWithOptions(fakestorage.Options{
-		Scheme:     "http",
-		Port:       uint16(port),
-		PublicHost: fmt.Sprintf("localhost:%d", port),
-		InitialObjects: []fakestorage.Object{
-			{
-				ObjectAttrs: fakestorage.ObjectAttrs{
-					BucketName: "test",
-					Name:       "foo/test.png",
-					// Etag:       s.etag,
-					Updated: s.lastModified,
-				},
-				Content: make([]byte, 32),
-			},
-		},
-	})
-	s.Require().NoError(err)
-
-	obj, err := s.server.GetObject("test", "foo/test.png")
-	s.Require().NoError(err)
-	s.etag = obj.Etag
-
-	config := NewDefaultConfig()
-	config.Endpoint = s.server.PublicURL() + "/storage/v1/"
-
-	c := generichttp.NewDefaultConfig()
-	c.IgnoreSslVerification = true
-
-	trans, err := generichttp.New(false, &c)
-	s.Require().NoError(err)
-
-	s.storage, err = New(&config, trans, false)
-	s.Require().NoError(err)
-}
-
-func (s *GCSTestSuite) TearDownSuite() {
-	s.server.Stop()
-}
-
-func (s *GCSTestSuite) TestRoundTripWithETagEnabled() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(200, response.Status)
-	s.Require().Equal(s.etag, response.Headers.Get(httpheaders.Etag))
-	s.Require().NotNil(response.Body)
-
-	response.Body.Close()
-}
-
-func (s *GCSTestSuite) TestRoundTripWithIfNoneMatchReturns304() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-	reqHeader.Set(httpheaders.IfNoneMatch, s.etag)
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusNotModified, response.Status)
-
-	if response.Body != nil {
-		response.Body.Close()
-	}
-}
-
-func (s *GCSTestSuite) TestRoundTripWithUpdatedETagReturns200() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-	reqHeader.Set(httpheaders.IfNoneMatch, s.etag+"_wrong")
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusOK, response.Status)
-	s.Require().NotNil(response.Body)
-
-	response.Body.Close()
-}
-
-func (s *GCSTestSuite) TestRoundTripWithLastModifiedEnabled() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(200, response.Status)
-	s.Require().Equal(s.lastModified.Format(http.TimeFormat), response.Headers.Get(httpheaders.LastModified))
-	s.Require().NotNil(response.Body)
-
-	response.Body.Close()
-}
-func (s *GCSTestSuite) TestRoundTripWithIfModifiedSinceReturns304() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-	reqHeader.Set(httpheaders.IfModifiedSince, s.lastModified.Format(http.TimeFormat))
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusNotModified, response.Status)
-
-	if response.Body != nil {
-		response.Body.Close()
-	}
-}
-
-func (s *GCSTestSuite) TestRoundTripWithUpdatedLastModifiedReturns200() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-	reqHeader.Set(httpheaders.IfModifiedSince, s.lastModified.Add(-24*time.Hour).Format(http.TimeFormat))
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusOK, response.Status)
-	s.Require().NotNil(response.Body)
-
-	response.Body.Close()
-}
-func TestGCSTransport(t *testing.T) {
-	suite.Run(t, new(GCSTestSuite))
-}

+ 15 - 76
storage/gcs/gcs.go → storage/gcs/reader.go

@@ -3,87 +3,26 @@ package gcs
 import (
 	"context"
 	"fmt"
-	"log/slog"
 	"net/http"
 	"strconv"
 
-	"cloud.google.com/go/storage"
+	gcs "cloud.google.com/go/storage"
 	"github.com/imgproxy/imgproxy/v3/httpheaders"
 	"github.com/imgproxy/imgproxy/v3/httprange"
-	"github.com/imgproxy/imgproxy/v3/ierrors"
+	"github.com/imgproxy/imgproxy/v3/storage"
 	"github.com/imgproxy/imgproxy/v3/storage/common"
-	"github.com/imgproxy/imgproxy/v3/storage/response"
 	"github.com/pkg/errors"
-	"google.golang.org/api/option"
-	raw "google.golang.org/api/storage/v1"
-	htransport "google.golang.org/api/transport/http"
 )
 
-// Storage represents Google Cloud Storage implementation
-type Storage struct {
-	config *Config
-	client *storage.Client
-}
-
-// New creates a new Storage instance.
-func New(
-	config *Config,
-	trans *http.Transport,
-	auth bool, // use authentication, should be false in tests
-) (*Storage, error) {
-	var client *storage.Client
-
-	if err := config.Validate(); err != nil {
-		return nil, err
-	}
-
-	opts := []option.ClientOption{
-		option.WithScopes(raw.DevstorageReadOnlyScope),
-	}
-
-	if !config.ReadOnly {
-		opts = append(opts, option.WithScopes(raw.DevstorageReadWriteScope))
-	}
-
-	if len(config.Key) > 0 {
-		opts = append(opts, option.WithCredentialsJSON([]byte(config.Key)))
-	}
-
-	if len(config.Endpoint) > 0 {
-		opts = append(opts, option.WithEndpoint(config.Endpoint))
-	}
-
-	if !auth {
-		slog.Warn("GCS storage: authentication disabled")
-		opts = append(opts, option.WithoutAuthentication())
-	}
-
-	htrans, err := htransport.NewTransport(context.TODO(), trans, opts...)
-	if err != nil {
-		return nil, errors.Wrap(err, "error creating GCS transport")
-	}
-
-	httpClient := &http.Client{Transport: htrans}
-	opts = append(opts, option.WithHTTPClient(httpClient))
-
-	client, err = storage.NewClient(context.Background(), opts...)
-
-	if err != nil {
-		return nil, ierrors.Wrap(err, 0, ierrors.WithPrefix("Can't create GCS client"))
-	}
-
-	return &Storage{config, client}, nil
-}
-
 // GetObject retrieves an object from Google Cloud Storage
 func (s *Storage) GetObject(
 	ctx context.Context,
 	reqHeader http.Header,
 	bucket, key, query string,
-) (*response.Object, error) {
+) (*storage.ObjectReader, error) {
 	// If either bucket or object key is empty, return 404
 	if len(bucket) == 0 || len(key) == 0 {
-		return response.NewNotFound(
+		return storage.NewObjectNotFound(
 			"invalid GCS Storage URL: bucket name or object key are empty",
 		), nil
 	}
@@ -94,7 +33,7 @@ func (s *Storage) GetObject(
 	}
 
 	var (
-		reader *storage.Reader
+		reader *gcs.Reader
 		size   int64
 	)
 
@@ -121,7 +60,7 @@ func (s *Storage) GetObject(
 	header.Set(httpheaders.LastModified, attrs.Updated.Format(http.TimeFormat))
 
 	if common.IsNotModified(reqHeader, header) {
-		return response.NewNotModified(header), nil
+		return storage.NewObjectNotModified(header), nil
 	}
 
 	var err error
@@ -133,17 +72,17 @@ func (s *Storage) GetObject(
 	size = reader.Attrs.Size
 	setHeadersFromReader(header, reader, size)
 
-	return response.NewOK(header, reader), nil
+	return storage.NewObjectOK(header, reader), nil
 }
 
 // tryRespondWithPartial tries to respond with a partial object
 // if the Range header is set.
 func (s *Storage) tryRespondWithPartial(
 	ctx context.Context,
-	obj *storage.ObjectHandle,
+	obj *gcs.ObjectHandle,
 	reqHeader http.Header,
 	header http.Header,
-) (*response.Object, error) {
+) (*storage.ObjectReader, error) {
 	r := reqHeader.Get(httpheaders.Range)
 	if len(r) == 0 {
 		return nil, nil
@@ -151,7 +90,7 @@ func (s *Storage) tryRespondWithPartial(
 
 	start, end, err := httprange.Parse(r)
 	if err != nil {
-		return response.NewInvalidRange(), nil
+		return storage.NewObjectInvalidRange(), nil
 	}
 
 	if end == 0 {
@@ -177,18 +116,18 @@ func (s *Storage) tryRespondWithPartial(
 	header.Set(httpheaders.ContentRange, fmt.Sprintf("bytes %d-%d/%d", reader.Attrs.StartOffset, end, reader.Attrs.Size))
 	setHeadersFromReader(header, reader, size)
 
-	return response.NewPartialContent(header, reader), nil
+	return storage.NewObjectPartialContent(header, reader), nil
 }
 
-func handleError(err error) (*response.Object, error) {
-	if err != storage.ErrBucketNotExist && err != storage.ErrObjectNotExist {
+func handleError(err error) (*storage.ObjectReader, error) {
+	if !errors.Is(err, gcs.ErrBucketNotExist) && !errors.Is(err, gcs.ErrObjectNotExist) {
 		return nil, err
 	}
 
-	return response.NewNotFound(err.Error()), nil
+	return storage.NewObjectNotFound(err.Error()), nil
 }
 
-func setHeadersFromReader(header http.Header, reader *storage.Reader, size int64) {
+func setHeadersFromReader(header http.Header, reader *gcs.Reader, size int64) {
 	header.Set(httpheaders.AcceptRanges, "bytes")
 	header.Set(httpheaders.ContentLength, strconv.Itoa(int(size)))
 	header.Set(httpheaders.ContentType, reader.Attrs.ContentType)

+ 50 - 0
storage/gcs/reader_test.go

@@ -0,0 +1,50 @@
+package gcs
+
+import (
+	"testing"
+	"time"
+
+	"github.com/fsouza/fake-gcs-server/fakestorage"
+	"github.com/imgproxy/imgproxy/v3/storage"
+	"github.com/imgproxy/imgproxy/v3/storage/testsuite"
+	"github.com/imgproxy/imgproxy/v3/testutil"
+	"github.com/stretchr/testify/suite"
+)
+
+type ReaderTestSuite struct {
+	testsuite.ReaderSuite
+
+	gcsStorage testutil.LazyObj[*gcsStorageWrapper]
+}
+
+func (s *ReaderTestSuite) SetupSuite() {
+	s.ReaderSuite.SetupSuite()
+
+	s.TestContainer = "test-container"
+	s.TestObjectKey = "test-object.txt"
+
+	// Prepare GCS storage with initial objects
+	gcsInitialObjects := []fakestorage.Object{
+		{
+			ObjectAttrs: fakestorage.ObjectAttrs{
+				BucketName: s.TestContainer,
+				Name:       s.TestObjectKey,
+				Updated:    time.Now(),
+			},
+			Content: s.TestData,
+		},
+	}
+
+	// Initialize GCS storage
+	s.gcsStorage, _ = NewLazySuiteStorage(s.Lazy(), gcsInitialObjects)
+
+	s.Storage, _ = testutil.NewLazySuiteObj(s,
+		func() (storage.Reader, error) {
+			return s.gcsStorage().Storage, nil
+		},
+	)
+}
+
+func TestReader(t *testing.T) {
+	suite.Run(t, new(ReaderTestSuite))
+}
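The abs, fs and gcs reader tests above all follow the same shape; here is a hedged template for wiring a further backend into the shared suite. The foo package, its Storage type and its NewLazySuiteStorage helper are hypothetical; testsuite.ReaderSuite supplies the shared range and conditional-request tests:

package foo

import (
	"testing"

	"github.com/imgproxy/imgproxy/v3/storage"
	"github.com/imgproxy/imgproxy/v3/storage/testsuite"
	"github.com/imgproxy/imgproxy/v3/testutil"
	"github.com/stretchr/testify/suite"
)

type ReaderTestSuite struct {
	testsuite.ReaderSuite

	fooStorage testutil.LazyObj[*Storage] // hypothetical backend type
}

func (s *ReaderTestSuite) SetupSuite() {
	s.ReaderSuite.SetupSuite()

	s.TestContainer = "test-container"
	s.TestObjectKey = "test-object.txt"

	s.fooStorage, _ = NewLazySuiteStorage(s.Lazy()) // hypothetical, mirrors abs
	s.Storage, _ = testutil.NewLazySuiteObj(s,
		func() (storage.Reader, error) { return s.fooStorage(), nil },
	)
}

func (s *ReaderTestSuite) SetupTest() {
	// Upload s.TestData to the backend here so every test starts from a
	// freshly written object.
}

func TestReader(t *testing.T) {
	suite.Run(t, new(ReaderTestSuite))
}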

+ 69 - 0
storage/gcs/storage.go

@@ -0,0 +1,69 @@
+package gcs
+
+import (
+	"context"
+	"log/slog"
+	"net/http"
+
+	gcs "cloud.google.com/go/storage"
+	"github.com/imgproxy/imgproxy/v3/ierrors"
+	"github.com/pkg/errors"
+	"google.golang.org/api/option"
+	raw "google.golang.org/api/storage/v1"
+	htransport "google.golang.org/api/transport/http"
+)
+
+// Storage represents Google Cloud Storage implementation
+type Storage struct {
+	config *Config
+	client *gcs.Client
+}
+
+// New creates a new Storage instance.
+func New(
+	config *Config,
+	trans *http.Transport,
+) (*Storage, error) {
+	var client *gcs.Client
+
+	if err := config.Validate(); err != nil {
+		return nil, err
+	}
+
+	opts := []option.ClientOption{
+		option.WithScopes(raw.DevstorageReadOnlyScope),
+	}
+
+	if !config.ReadOnly {
+		opts = append(opts, option.WithScopes(raw.DevstorageReadWriteScope))
+	}
+
+	if len(config.Key) > 0 {
+		opts = append(opts, option.WithCredentialsJSON([]byte(config.Key)))
+	}
+
+	if len(config.Endpoint) > 0 {
+		opts = append(opts, option.WithEndpoint(config.Endpoint))
+	}
+
+	if config.TestNoAuth {
+		slog.Warn("GCS storage: authentication disabled")
+		opts = append(opts, option.WithoutAuthentication())
+	}
+
+	htrans, err := htransport.NewTransport(context.TODO(), trans, opts...)
+	if err != nil {
+		return nil, errors.Wrap(err, "error creating GCS transport")
+	}
+
+	httpClient := &http.Client{Transport: htrans}
+	opts = append(opts, option.WithHTTPClient(httpClient))
+
+	client, err = gcs.NewClient(context.Background(), opts...)
+
+	if err != nil {
+		return nil, ierrors.Wrap(err, 0, ierrors.WithPrefix("Can't create GCS client"))
+	}
+
+	return &Storage{config, client}, nil
+}
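The old auth bool argument of New is gone; tests opt out of authentication through the config instead. A minimal sketch mirroring storage/gcs/test_storage.go (newUnauthenticatedStorage is hypothetical):

package gcs

import "net/http"

// newUnauthenticatedStorage builds a Storage against a local fake GCS
// endpoint; TestNoAuth replaces the removed auth parameter and makes New
// add option.WithoutAuthentication() (logging a warning).
func newUnauthenticatedStorage(endpoint string) (*Storage, error) {
	cfg := NewDefaultConfig()
	cfg.Endpoint = endpoint // e.g. fake-gcs-server's PublicURL() + "/storage/v1/"
	cfg.TestNoAuth = true

	return New(&cfg, http.DefaultTransport.(*http.Transport))
}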

+ 142 - 0
storage/gcs/test_storage.go

@@ -0,0 +1,142 @@
+package gcs
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+
+	"github.com/fsouza/fake-gcs-server/fakestorage"
+	"github.com/imgproxy/imgproxy/v3/testutil"
+)
+
+// TestServer is a mock Google Cloud Storage server for testing
+type TestServer struct {
+	server *fakestorage.Server
+}
+
+// gcsStorageWrapper wraps the storage and optionally holds a server for cleanup
+type gcsStorageWrapper struct {
+	*Storage
+	server      *TestServer
+	shouldClose bool
+}
+
+// Server returns the underlying GcsServer
+func (w *gcsStorageWrapper) Server() *TestServer {
+	return w.server
+}
+
+// Sugar alias
+type LazySuiteStorage = testutil.LazyObj[*gcsStorageWrapper]
+
+// NewTestServer creates and starts a new mock GCS server
+func NewTestServer() (*TestServer, error) {
+	port, err := getFreePort()
+	if err != nil {
+		return nil, err
+	}
+
+	server, err := fakestorage.NewServerWithOptions(fakestorage.Options{
+		Scheme:     "http",
+		Port:       uint16(port),
+		PublicHost: fmt.Sprintf("localhost:%d", port),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return &TestServer{
+		server: server,
+	}, nil
+}
+
+// getFreePort finds an available TCP port
+func getFreePort() (int, error) {
+	addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
+	if err != nil {
+		return 0, err
+	}
+
+	l, err := net.ListenTCP("tcp", addr)
+	if err != nil {
+		return 0, err
+	}
+	defer l.Close()
+	return l.Addr().(*net.TCPAddr).Port, nil
+}
+
+// Close stops the server
+func (s *TestServer) Close() {
+	s.server.Stop()
+}
+
+// URL returns the server URL for storage API
+func (s *TestServer) URL() string {
+	return s.server.URL() + "/storage/v1/"
+}
+
+// PublicURL returns the public server URL (for storage_test.go compatibility)
+func (s *TestServer) PublicURL() string {
+	return s.server.PublicURL()
+}
+
+// Server returns the underlying fake storage server
+func (s *TestServer) Server() *fakestorage.Server {
+	return s.server
+}
+
+// NewLazySuiteStorage creates a lazy GCS Storage object for use in test suites
+// A new server will be created internally with optional initial objects and cleaned up automatically
+func NewLazySuiteStorage(
+	l testutil.LazySuiteFrom,
+	initialObjects []fakestorage.Object,
+) (testutil.LazyObj[*gcsStorageWrapper], context.CancelFunc) {
+	return testutil.NewLazySuiteObj(
+		l,
+		func() (*gcsStorageWrapper, error) {
+			wrapper := &gcsStorageWrapper{}
+
+			// Create server internally with optional initial objects
+			port, err := getFreePort()
+			if err != nil {
+				return nil, err
+			}
+
+			server, err := fakestorage.NewServerWithOptions(fakestorage.Options{
+				Scheme:         "http",
+				Port:           uint16(port),
+				PublicHost:     fmt.Sprintf("localhost:%d", port),
+				InitialObjects: initialObjects,
+			})
+			if err != nil {
+				return nil, err
+			}
+
+			gcsServer := &TestServer{
+				server: server,
+			}
+			wrapper.server = gcsServer
+			wrapper.shouldClose = true
+
+			config := NewDefaultConfig()
+			config.Endpoint = gcsServer.PublicURL() + "/storage/v1/"
+			config.TestNoAuth = true
+
+			storage, err := New(&config, http.DefaultTransport.(*http.Transport))
+			if err != nil {
+				return nil, err
+			}
+
+			wrapper.Storage = storage
+			return wrapper, nil
+		},
+		func(w *gcsStorageWrapper) error {
+			// Clean up internal server if we created it
+			if w.shouldClose {
+				w.server.Close()
+			}
+			return nil
+		},
+	)
+}

+ 108 - 2
storage/reader.go

@@ -2,9 +2,12 @@ package storage
 
 import (
 	"context"
+	"io"
 	"net/http"
+	"strconv"
+	"strings"
 
-	"github.com/imgproxy/imgproxy/v3/storage/response"
+	"github.com/imgproxy/imgproxy/v3/httpheaders"
 )
 
 // Reader represents a generic storage interface, which can read
@@ -16,5 +19,108 @@ type Reader interface {
 		ctx context.Context,
 		reqHeader http.Header,
 		bucket, key, query string,
-	) (*response.Object, error)
+	) (*ObjectReader, error)
+}
+
+// ObjectReader represents a generic reader for a storage object.
+// It can be in any state: success, error, not found, etc.
+// It can be converted to HTTP response or used as-is.
+type ObjectReader struct {
+	Status        int           // HTTP status code
+	Headers       http.Header   // Response headers harvested from the engine response
+	Body          io.ReadCloser // Response body reader
+	contentLength int64
+}
+
+// NewObjectOK creates a new ObjectReader with a 200 OK status.
+func NewObjectOK(headers http.Header, body io.ReadCloser) *ObjectReader {
+	return &ObjectReader{
+		Status:        http.StatusOK,
+		Headers:       headers,
+		Body:          body,
+		contentLength: -1, // is set in Response()
+	}
+}
+
+// NewObjectPartialContent creates a new ObjectReader with a 206 Partial Content status.
+func NewObjectPartialContent(headers http.Header, body io.ReadCloser) *ObjectReader {
+	return &ObjectReader{
+		Status:        http.StatusPartialContent,
+		Headers:       headers,
+		Body:          body,
+		contentLength: -1, // is set in Response()
+	}
+}
+
+// NewObjectNotFound creates a new ObjectReader with a 404 Not Found status.
+func NewObjectNotFound(message string) *ObjectReader {
+	return NewObjectError(http.StatusNotFound, message)
+}
+
+// NewObjectError creates a new ObjectReader with a custom status code.
+func NewObjectError(statusCode int, message string) *ObjectReader {
+	return &ObjectReader{
+		Status:        statusCode,
+		Body:          io.NopCloser(strings.NewReader(message)),
+		Headers:       http.Header{httpheaders.ContentType: {"text/plain"}},
+		contentLength: int64(len(message)),
+	}
+}
+
+// NewObjectNotModified creates a new ObjectReader with a 304 Not Modified status.
+func NewObjectNotModified(headers http.Header) *ObjectReader {
+	// Copy headers relevant to NotModified response only
+	nmHeaders := make(http.Header)
+	httpheaders.Copy(
+		headers,
+		nmHeaders,
+		[]string{httpheaders.Etag, httpheaders.LastModified},
+	)
+
+	return &ObjectReader{
+		Status:        http.StatusNotModified,
+		Headers:       nmHeaders,
+		contentLength: 0,
+	}
+}
+
+// NewObjectInvalidRange creates a new ObjectReader with a 416 Range Not Satisfiable status.
+func NewObjectInvalidRange() *ObjectReader {
+	return &ObjectReader{
+		Status:        http.StatusRequestedRangeNotSatisfiable,
+		contentLength: 0,
+	}
+}
+
+// ContentLength returns the content length of the response.
+func (r *ObjectReader) ContentLength() int64 {
+	if r.contentLength > 0 {
+		return r.contentLength
+	}
+
+	h := r.Headers.Get(httpheaders.ContentLength)
+	if len(h) > 0 {
+		p, err := strconv.ParseInt(h, 10, 64)
+		if err == nil {
+			return p
+		}
+	}
+
+	return -1
+}
+
+// Response converts Reader to http.Response
+func (r *ObjectReader) Response(req *http.Request) *http.Response {
+	return &http.Response{
+		Status:        http.StatusText(r.Status),
+		StatusCode:    r.Status,
+		Proto:         "HTTP/1.0",
+		ProtoMajor:    1,
+		ProtoMinor:    0,
+		Header:        r.Headers,
+		Body:          r.Body,
+		Close:         true,
+		Request:       req,
+		ContentLength: r.ContentLength(),
+	}
 }
 }
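
For orientation, a minimal sketch of how a transport can hand an ObjectReader straight back to net/http; the readerRoundTripper type and its URL-to-bucket mapping are illustrative, not part of this change:

package transport

import (
	"net/http"
	"strings"

	"github.com/imgproxy/imgproxy/v3/storage"
)

// readerRoundTripper is a hypothetical http.RoundTripper backed by a storage.Reader.
type readerRoundTripper struct {
	reader storage.Reader
}

func (rt readerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	// Illustrative mapping only: host is the bucket, path is the object key.
	bucket := req.URL.Host
	key := strings.TrimPrefix(req.URL.Path, "/")

	obj, err := rt.reader.GetObject(req.Context(), req.Header, bucket, key, req.URL.RawQuery)
	if err != nil {
		return nil, err
	}

	// ObjectReader renders itself as an *http.Response, carrying over the
	// status, headers, body, and content length.
	return obj.Response(req), nil
}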

+ 0 - 112
storage/response/object.go

@@ -1,112 +0,0 @@
-package response
-
-import (
-	"io"
-	"net/http"
-	"strconv"
-	"strings"
-
-	"github.com/imgproxy/imgproxy/v3/httpheaders"
-)
-
-// Object represents a generic response for a storage object.
-// It can be converted to HTTP response or used as-is.
-type Object struct {
-	Status        int           // HTTP status code
-	Headers       http.Header   // Response headers harvested from the engine response
-	Body          io.ReadCloser // Response body reader
-	contentLength int64
-}
-
-// NewOK creates a new ObjectReader with a 200 OK status.
-func NewOK(headers http.Header, body io.ReadCloser) *Object {
-	return &Object{
-		Status:        http.StatusOK,
-		Headers:       headers,
-		Body:          body,
-		contentLength: -1, // is set in Response()
-	}
-}
-
-// NewPartialContent creates a new ObjectReader with a 206 Partial Content status.
-func NewPartialContent(headers http.Header, body io.ReadCloser) *Object {
-	return &Object{
-		Status:        http.StatusPartialContent,
-		Headers:       headers,
-		Body:          body,
-		contentLength: -1, // is set in Response()
-	}
-}
-
-// NewNotFound creates a new ObjectReader with a 404 Not Found status.
-func NewNotFound(message string) *Object {
-	return NewError(http.StatusNotFound, message)
-}
-
-// NewError creates a new ObjectReader with a custom status code
-func NewError(statusCode int, message string) *Object {
-	return &Object{
-		Status:        statusCode,
-		Body:          io.NopCloser(strings.NewReader(message)),
-		Headers:       http.Header{httpheaders.ContentType: {"text/plain"}},
-		contentLength: int64(len(message)),
-	}
-}
-
-// NewNotModified creates a new ObjectReader with a 304 Not Modified status.
-func NewNotModified(headers http.Header) *Object {
-	// Copy headers relevant to NotModified response only
-	nmHeaders := make(http.Header)
-	httpheaders.Copy(
-		headers,
-		nmHeaders,
-		[]string{httpheaders.Etag, httpheaders.LastModified},
-	)
-
-	return &Object{
-		Status:        http.StatusNotModified,
-		Headers:       nmHeaders,
-		contentLength: 0,
-	}
-}
-
-// NewInvalidRang creates a new ObjectReader with a 416 Range Not Satisfiable status.
-func NewInvalidRange() *Object {
-	return &Object{
-		Status:        http.StatusRequestedRangeNotSatisfiable,
-		contentLength: 0,
-	}
-}
-
-// ContentLength returns the content length of the response.
-func (r *Object) ContentLength() int64 {
-	if r.contentLength > 0 {
-		return r.contentLength
-	}
-
-	h := r.Headers.Get(httpheaders.ContentLength)
-	if len(h) > 0 {
-		p, err := strconv.ParseInt(h, 10, 64)
-		if err != nil {
-			return p
-		}
-	}
-
-	return -1
-}
-
-// Response converts ObjectReader to http.Response
-func (r *Object) Response(req *http.Request) *http.Response {
-	return &http.Response{
-		Status:        http.StatusText(r.Status),
-		StatusCode:    r.Status,
-		Proto:         "HTTP/1.0",
-		ProtoMajor:    1,
-		ProtoMinor:    0,
-		Header:        r.Headers,
-		Body:          r.Body,
-		Close:         true,
-		Request:       req,
-		ContentLength: r.ContentLength(),
-	}
-}

+ 122 - 0
storage/s3/reader.go

@@ -0,0 +1,122 @@
+package s3
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+
+	"github.com/imgproxy/imgproxy/v3/httpheaders"
+	"github.com/imgproxy/imgproxy/v3/storage"
+	"github.com/imgproxy/imgproxy/v3/storage/common"
+)
+
+// GetObject retrieves an object from S3 storage
+func (s *Storage) GetObject(
+	ctx context.Context,
+	reqHeader http.Header,
+	bucket, key, query string,
+) (*storage.ObjectReader, error) {
+	// If either bucket or object key is empty, return 404
+	if len(bucket) == 0 || len(key) == 0 {
+		return storage.NewObjectNotFound(
+			"invalid S3 Storage URL: bucket name or object key are empty",
+		), nil
+	}
+
+	// Check if access to the container is allowed
+	if !common.IsBucketAllowed(bucket, s.config.AllowedBuckets, s.config.DeniedBuckets) {
+		return nil, fmt.Errorf("access to the S3 bucket %s is denied", bucket)
+	}
+
+	input := &s3.GetObjectInput{
+		Bucket: aws.String(bucket),
+		Key:    aws.String(key),
+	}
+
+	if len(query) > 0 {
+		input.VersionId = aws.String(query)
+	}
+
+	if r := reqHeader.Get(httpheaders.Range); len(r) != 0 {
+		input.Range = aws.String(r)
+	} else {
+		if ifNoneMatch := reqHeader.Get(httpheaders.IfNoneMatch); len(ifNoneMatch) > 0 {
+			input.IfNoneMatch = aws.String(ifNoneMatch)
+		}
+
+		if ifModifiedSince := reqHeader.Get(httpheaders.IfModifiedSince); len(ifModifiedSince) > 0 {
+			parsedIfModifiedSince, err := time.Parse(http.TimeFormat, ifModifiedSince)
+			if err == nil {
+				input.IfModifiedSince = &parsedIfModifiedSince
+			}
+		}
+	}
+
+	output, _, err := callWithClient(s, bucket, func(client s3Client) (*s3.GetObjectOutput, error) {
+		output, err := client.GetObject(ctx, input)
+
+		defer func() {
+			if err != nil && output != nil && output.Body != nil {
+				output.Body.Close()
+			}
+		}()
+
+		return output, err
+	})
+
+	if err != nil {
+		return handleError(err)
+	}
+
+	contentLength := int64(-1)
+	if output.ContentLength != nil {
+		contentLength = *output.ContentLength
+	}
+
+	if s.config.DecryptionClientEnabled {
+		if unencryptedContentLength := output.Metadata["X-Amz-Meta-X-Amz-Unencrypted-Content-Length"]; len(unencryptedContentLength) != 0 {
+			cl, err := strconv.ParseInt(unencryptedContentLength, 10, 64)
+			if err != nil {
+				return handleError(err)
+			}
+			contentLength = cl
+		}
+	}
+
+	header := make(http.Header)
+	if contentLength > 0 {
+		header.Set(httpheaders.ContentLength, strconv.FormatInt(contentLength, 10))
+	}
+	if output.ContentType != nil {
+		header.Set(httpheaders.ContentType, *output.ContentType)
+	}
+	if output.ContentEncoding != nil {
+		header.Set(httpheaders.ContentEncoding, *output.ContentEncoding)
+	}
+	if output.CacheControl != nil {
+		header.Set(httpheaders.CacheControl, *output.CacheControl)
+	}
+	if output.ExpiresString != nil {
+		header.Set(httpheaders.Expires, *output.ExpiresString)
+	}
+	if output.ETag != nil {
+		header.Set(httpheaders.Etag, *output.ETag)
+	}
+	if output.LastModified != nil {
+		header.Set(httpheaders.LastModified, output.LastModified.Format(http.TimeFormat))
+	}
+	if output.AcceptRanges != nil {
+		header.Set(httpheaders.AcceptRanges, *output.AcceptRanges)
+	}
+	if output.ContentRange != nil {
+		header.Set(httpheaders.ContentRange, *output.ContentRange)
+		return storage.NewObjectPartialContent(header, output.Body), nil
+	}
+
+	return storage.NewObjectOK(header, output.Body), nil
+}
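
A hedged caller-side sketch of the conditional branches above; the bucket, key, and ETag below are placeholders, not values from this change:

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/imgproxy/imgproxy/v3/httpheaders"
	"github.com/imgproxy/imgproxy/v3/storage/s3"
)

// checkFreshness is a hypothetical helper: it revalidates a cached object by ETag.
func checkFreshness(ctx context.Context, st *s3.Storage, etag string) error {
	hdr := make(http.Header)
	hdr.Set(httpheaders.IfNoneMatch, etag)

	obj, err := st.GetObject(ctx, hdr, "my-bucket", "img/cat.png", "")
	if err != nil {
		return err
	}
	if obj.Body != nil {
		defer obj.Body.Close()
	}

	switch obj.Status {
	case http.StatusNotModified:
		fmt.Println("cached copy is still fresh")
	case http.StatusOK:
		fmt.Println("object changed; re-read obj.Body")
	default:
		fmt.Println("unexpected status:", obj.Status)
	}
	return nil
}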

+ 51 - 0
storage/s3/reader_test.go

@@ -0,0 +1,51 @@
+package s3
+
+import (
+	"bytes"
+	"net/http"
+	"testing"
+	"time"
+
+	"github.com/imgproxy/imgproxy/v3/storage"
+	"github.com/imgproxy/imgproxy/v3/storage/testsuite"
+	"github.com/imgproxy/imgproxy/v3/testutil"
+	"github.com/stretchr/testify/suite"
+)
+
+type ReaderTestSuite struct {
+	testsuite.ReaderSuite
+
+	s3Storage testutil.LazyObj[*s3StorageWrapper]
+}
+
+func (s *ReaderTestSuite) SetupSuite() {
+	s.ReaderSuite.SetupSuite()
+
+	s.TestContainer = "test-container"
+	s.TestObjectKey = "test-object.txt"
+
+	// Initialize S3 storage
+	s.s3Storage, _ = NewLazySuiteStorage(s.Lazy())
+
+	s.Storage, _ = testutil.NewLazySuiteObj(s,
+		func() (storage.Reader, error) {
+			return s.s3Storage().Storage, nil
+		},
+	)
+}
+
+func (s *ReaderTestSuite) SetupTest() {
+	// Recreate S3 blob for each test using backend directly
+	backend := s.s3Storage().Server().Backend()
+	metadata := map[string]string{
+		"Content-Type":  "application/octet-stream",
+		"Last-Modified": time.Now().Format(http.TimeFormat),
+	}
+	_, err := backend.PutObject(s.TestContainer, s.TestObjectKey, metadata,
+		bytes.NewReader(s.TestData), int64(len(s.TestData)), nil)
+	s.Require().NoError(err)
+}
+
+func TestReader(t *testing.T) {
+	suite.Run(t, new(ReaderTestSuite))
+}

+ 12 - 0
storage/s3/s3_client.go

@@ -0,0 +1,12 @@
+package s3
+
+import (
+	"context"
+
+	"github.com/aws/aws-sdk-go-v2/service/s3"
+)
+
+// s3Client is the common interface of the plain S3 client and the S3 encryption client
+type s3Client interface {
+	GetObject(ctx context.Context, input *s3.GetObjectInput, opts ...func(*s3.Options)) (*s3.GetObjectOutput, error)
+}
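
Because both the plain and the encryption clients satisfy this interface, a unit-test double is only a few lines; the stubClient below is a hypothetical sketch, not part of the change:

package s3

import (
	"context"
	"io"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	awss3 "github.com/aws/aws-sdk-go-v2/service/s3"
)

// stubClient is a hypothetical in-memory s3Client that always returns the same body.
type stubClient struct {
	body string
}

func (c stubClient) GetObject(ctx context.Context, input *awss3.GetObjectInput, opts ...func(*awss3.Options)) (*awss3.GetObjectOutput, error) {
	return &awss3.GetObjectOutput{
		Body:          io.NopCloser(strings.NewReader(c.body)),
		ContentLength: aws.Int64(int64(len(c.body))),
	}, nil
}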

+ 0 - 166
storage/s3/s3_test.go

@@ -1,166 +0,0 @@
-package s3
-
-import (
-	"bytes"
-	"context"
-	"net/http"
-	"net/http/httptest"
-	"os"
-	"testing"
-	"time"
-
-	"github.com/aws/aws-sdk-go-v2/aws"
-	"github.com/aws/aws-sdk-go-v2/service/s3"
-	"github.com/johannesboyne/gofakes3"
-	"github.com/johannesboyne/gofakes3/backend/s3mem"
-	"github.com/stretchr/testify/suite"
-
-	"github.com/imgproxy/imgproxy/v3/fetcher/transport/generichttp"
-	"github.com/imgproxy/imgproxy/v3/httpheaders"
-	"github.com/imgproxy/imgproxy/v3/storage"
-)
-
-type S3TestSuite struct {
-	suite.Suite
-
-	server       *httptest.Server
-	storage      storage.Reader
-	etag         string
-	lastModified time.Time
-}
-
-func (s *S3TestSuite) SetupSuite() {
-	backend := s3mem.New()
-	faker := gofakes3.New(backend)
-	s.server = httptest.NewServer(faker.Server())
-
-	config := NewDefaultConfig()
-	config.Endpoint = s.server.URL
-
-	os.Setenv("AWS_REGION", "eu-central-1")
-	os.Setenv("AWS_ACCESS_KEY_ID", "Foo")
-	os.Setenv("AWS_SECRET_ACCESS_KEY", "Bar")
-
-	c := generichttp.NewDefaultConfig()
-	c.IgnoreSslVerification = true
-
-	trans, err := generichttp.New(false, &c)
-	s.Require().NoError(err)
-
-	s.storage, err = New(&config, trans)
-	s.Require().NoError(err)
-
-	err = backend.CreateBucket("test")
-	s.Require().NoError(err)
-
-	svc := s.storage.(*Storage).defaultClient
-	s.Require().NotNil(svc)
-	s.Require().IsType(&s3.Client{}, svc)
-
-	client := svc.(*s3.Client)
-
-	_, err = client.PutObject(context.Background(), &s3.PutObjectInput{
-		Body:   bytes.NewReader(make([]byte, 32)),
-		Bucket: aws.String("test"),
-		Key:    aws.String("foo/test.png"),
-	})
-	s.Require().NoError(err)
-
-	obj, err := client.GetObject(context.Background(), &s3.GetObjectInput{
-		Bucket: aws.String("test"),
-		Key:    aws.String("foo/test.png"),
-	})
-	s.Require().NoError(err)
-	defer obj.Body.Close()
-
-	s.etag = *obj.ETag
-	s.lastModified = *obj.LastModified
-}
-
-func (s *S3TestSuite) TearDownSuite() {
-	s.server.Close()
-}
-
-func (s *S3TestSuite) TestRoundTripWithETagEnabled() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(200, response.Status)
-	s.Require().Equal(s.etag, response.Headers.Get(httpheaders.Etag))
-	s.Require().NotNil(response.Body)
-
-	response.Body.Close()
-}
-
-func (s *S3TestSuite) TestRoundTripWithIfNoneMatchReturns304() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-	reqHeader.Set(httpheaders.IfNoneMatch, s.etag)
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusNotModified, response.Status)
-
-	if response.Body != nil {
-		response.Body.Close()
-	}
-}
-
-func (s *S3TestSuite) TestRoundTripWithUpdatedETagReturns200() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-	reqHeader.Set(httpheaders.IfNoneMatch, s.etag+"_wrong")
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusOK, response.Status)
-	s.Require().NotNil(response.Body)
-
-	response.Body.Close()
-}
-
-func (s *S3TestSuite) TestRoundTripWithLastModifiedEnabled() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(200, response.Status)
-	s.Require().Equal(s.lastModified.Format(http.TimeFormat), response.Headers.Get(httpheaders.LastModified))
-	s.Require().NotNil(response.Body)
-
-	response.Body.Close()
-}
-
-func (s *S3TestSuite) TestRoundTripWithIfModifiedSinceReturns304() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-	reqHeader.Set(httpheaders.IfModifiedSince, s.lastModified.Format(http.TimeFormat))
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusNotModified, response.Status)
-
-	if response.Body != nil {
-		response.Body.Close()
-	}
-}
-
-func (s *S3TestSuite) TestRoundTripWithUpdatedLastModifiedReturns200() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-	reqHeader.Set(httpheaders.IfModifiedSince, s.lastModified.Add(-24*time.Hour).Format(http.TimeFormat))
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusOK, response.Status)
-	s.Require().NotNil(response.Body)
-
-	response.Body.Close()
-}
-
-func TestS3Transport(t *testing.T) {
-	suite.Run(t, new(S3TestSuite))
-}

+ 30 - 129
storage/s3/s3.go → storage/s3/storage.go

@@ -3,12 +3,9 @@ package s3
 import (
 import (
 	"context"
 	"context"
 	"errors"
 	"errors"
-	"fmt"
 	"net/http"
 	"net/http"
-	"strconv"
 	"strings"
 	"strings"
 	"sync"
 	"sync"
-	"time"
 
 
 	s3Crypto "github.com/aws/amazon-s3-encryption-client-go/v3/client"
 	s3Crypto "github.com/aws/amazon-s3-encryption-client-go/v3/client"
 	s3CryptoMaterials "github.com/aws/amazon-s3-encryption-client-go/v3/materials"
 	s3CryptoMaterials "github.com/aws/amazon-s3-encryption-client-go/v3/materials"
@@ -20,16 +17,10 @@ import (
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	"github.com/aws/aws-sdk-go-v2/service/sts"
 	"github.com/aws/aws-sdk-go-v2/service/sts"
 
 
-	"github.com/imgproxy/imgproxy/v3/httpheaders"
 	"github.com/imgproxy/imgproxy/v3/ierrors"
 	"github.com/imgproxy/imgproxy/v3/ierrors"
-	"github.com/imgproxy/imgproxy/v3/storage/common"
-	"github.com/imgproxy/imgproxy/v3/storage/response"
+	"github.com/imgproxy/imgproxy/v3/storage"
 )
 )
 
 
-type s3Client interface {
-	GetObject(ctx context.Context, input *s3.GetObjectInput, opts ...func(*s3.Options)) (*s3.GetObjectOutput, error)
-}
-
 // Storage implements S3 Storage
 // Storage implements S3 Storage
 type Storage struct {
 type Storage struct {
 	clientOptions []func(*s3.Options)
 	clientOptions []func(*s3.Options)
@@ -107,123 +98,6 @@ func New(config *Config, trans *http.Transport) (*Storage, error) {
 	}, nil
 	}, nil
 }
 }
 
 
-// GetObject retrieves an object from Azure cloud
-func (s *Storage) GetObject(
-	ctx context.Context,
-	reqHeader http.Header,
-	bucket, key, query string,
-) (*response.Object, error) {
-	// If either bucket or object key is empty, return 404
-	if len(bucket) == 0 || len(key) == 0 {
-		return response.NewNotFound(
-			"invalid S3 Storage URL: bucket name or object key are empty",
-		), nil
-	}
-
-	// Check if access to the container is allowed
-	if !common.IsBucketAllowed(bucket, s.config.AllowedBuckets, s.config.DeniedBuckets) {
-		return nil, fmt.Errorf("access to the S3 bucket %s is denied", bucket)
-	}
-
-	input := &s3.GetObjectInput{
-		Bucket: aws.String(bucket),
-		Key:    aws.String(key),
-	}
-
-	if len(query) > 0 {
-		input.VersionId = aws.String(query)
-	}
-
-	if r := reqHeader.Get(httpheaders.Range); len(r) != 0 {
-		input.Range = aws.String(r)
-	} else {
-		if ifNoneMatch := reqHeader.Get(httpheaders.IfNoneMatch); len(ifNoneMatch) > 0 {
-			input.IfNoneMatch = aws.String(ifNoneMatch)
-		}
-
-		if ifModifiedSince := reqHeader.Get(httpheaders.IfModifiedSince); len(ifModifiedSince) > 0 {
-			parsedIfModifiedSince, err := time.Parse(http.TimeFormat, ifModifiedSince)
-			if err == nil {
-				input.IfModifiedSince = &parsedIfModifiedSince
-			}
-		}
-	}
-
-	client := s.getBucketClient(bucket)
-
-	output, err := client.GetObject(ctx, input)
-
-	defer func() {
-		if err != nil && output != nil && output.Body != nil {
-			output.Body.Close()
-		}
-	}()
-
-	if err != nil {
-		// Check if the error is the region mismatch error.
-		// If so, create a new client with the correct region and retry the request.
-		if region := regionFromError(err); len(region) != 0 {
-			client, err = s.createBucketClient(bucket, region)
-			if err != nil {
-				return handleError(err)
-			}
-
-			output, err = client.GetObject(ctx, input)
-		}
-	}
-
-	if err != nil {
-		return handleError(err)
-	}
-
-	contentLength := int64(-1)
-	if output.ContentLength != nil {
-		contentLength = *output.ContentLength
-	}
-
-	if s.config.DecryptionClientEnabled {
-		if unencryptedContentLength := output.Metadata["X-Amz-Meta-X-Amz-Unencrypted-Content-Length"]; len(unencryptedContentLength) != 0 {
-			cl, err := strconv.ParseInt(unencryptedContentLength, 10, 64)
-			if err != nil {
-				return handleError(err)
-			}
-			contentLength = cl
-		}
-	}
-
-	header := make(http.Header)
-	if contentLength > 0 {
-		header.Set(httpheaders.ContentLength, strconv.FormatInt(contentLength, 10))
-	}
-	if output.ContentType != nil {
-		header.Set(httpheaders.ContentType, *output.ContentType)
-	}
-	if output.ContentEncoding != nil {
-		header.Set(httpheaders.ContentEncoding, *output.ContentEncoding)
-	}
-	if output.CacheControl != nil {
-		header.Set(httpheaders.CacheControl, *output.CacheControl)
-	}
-	if output.ExpiresString != nil {
-		header.Set(httpheaders.Expires, *output.ExpiresString)
-	}
-	if output.ETag != nil {
-		header.Set(httpheaders.Etag, *output.ETag)
-	}
-	if output.LastModified != nil {
-		header.Set(httpheaders.LastModified, output.LastModified.Format(http.TimeFormat))
-	}
-	if output.AcceptRanges != nil {
-		header.Set(httpheaders.AcceptRanges, *output.AcceptRanges)
-	}
-	if output.ContentRange != nil {
-		header.Set(httpheaders.ContentRange, *output.ContentRange)
-		return response.NewPartialContent(header, output.Body), nil
-	}
-
-	return response.NewOK(header, output.Body), nil
-}
-
 func (t *Storage) getBucketClient(bucket string) s3Client {
 func (t *Storage) getBucketClient(bucket string) s3Client {
 	var client s3Client
 	var client s3Client
 
 
@@ -299,7 +173,7 @@ func regionFromError(err error) string {
 	return rerr.Response.Header.Get("X-Amz-Bucket-Region")
 	return rerr.Response.Header.Get("X-Amz-Bucket-Region")
 }
 }
 
 
-func handleError(err error) (*response.Object, error) {
+func handleError(err error) (*storage.ObjectReader, error) {
 	var rerr *awsHttp.ResponseError
 	var rerr *awsHttp.ResponseError
 	if !errors.As(err, &rerr) {
 	if !errors.As(err, &rerr) {
 		return nil, ierrors.Wrap(err, 0)
 		return nil, ierrors.Wrap(err, 0)
@@ -309,5 +183,32 @@ func handleError(err error) (*response.Object, error) {
 		return nil, ierrors.Wrap(err, 0)
 		return nil, ierrors.Wrap(err, 0)
 	}
 	}
 
 
-	return response.NewError(rerr.Response.StatusCode, err.Error()), nil
+	return storage.NewObjectError(rerr.Response.StatusCode, err.Error()), nil
+}
+
+// callWithClient calls an S3 client method for the given bucket and automatically
+// retries with a region-corrected client on a region mismatch error
+func callWithClient[T any](s *Storage, bucket string, fn func(client s3Client) (*T, error)) (*T, s3Client, error) {
+	client := s.getBucketClient(bucket)
+
+	r, err := fn(client)
+
+	if err != nil {
+		// Check if the error is the region mismatch error.
+		// If so, create a new client with the correct region and retry the request.
+		if region := regionFromError(err); len(region) != 0 {
+			client, err = s.createBucketClient(bucket, region)
+			if err != nil {
+				return nil, nil, err
+			}
+
+			r, err = fn(client)
+		}
+	}
+
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return r, client, nil
 }
 }

+ 108 - 0
storage/s3/test_storage.go

@@ -0,0 +1,108 @@
+package s3
+
+import (
+	"context"
+	"net/http"
+	"net/http/httptest"
+	"os"
+
+	"github.com/imgproxy/imgproxy/v3/testutil"
+	"github.com/johannesboyne/gofakes3"
+	"github.com/johannesboyne/gofakes3/backend/s3mem"
+)
+
+// TestServer is a mock S3 server for testing
+type TestServer struct {
+	server  *httptest.Server
+	backend *s3mem.Backend
+}
+
+// Backend returns the underlying s3mem.Backend for direct API access
+func (s *TestServer) Backend() *s3mem.Backend {
+	return s.backend
+}
+
+// s3StorageWrapper wraps the storage and optionally holds a server for cleanup
+type s3StorageWrapper struct {
+	*Storage
+	server      *TestServer
+	shouldClose bool
+}
+
+// Server returns the underlying S3Server
+func (w *s3StorageWrapper) Server() *TestServer {
+	return w.server
+}
+
+// Sugar alias
+type LazySuiteStorage = testutil.LazyObj[*s3StorageWrapper]
+
+// NewLazySuiteStorage creates a lazy S3 Storage object for use in test suites
+// A new server will be created internally and cleaned up automatically
+func NewLazySuiteStorage(
+	l testutil.LazySuiteFrom,
+) (testutil.LazyObj[*s3StorageWrapper], context.CancelFunc) {
+	return testutil.NewLazySuiteObj(
+		l,
+		func() (*s3StorageWrapper, error) {
+			wrapper := &s3StorageWrapper{}
+
+			// Create server internally
+			s3Server := NewS3Server()
+			wrapper.server = s3Server
+			wrapper.shouldClose = true
+
+			// Create bucket first using backend directly
+			err := s3Server.backend.CreateBucket("test-container")
+			if err != nil {
+				return nil, err
+			}
+
+			os.Setenv("AWS_ACCESS_KEY_ID", "TEST")
+			os.Setenv("AWS_SECRET_ACCESS_KEY", "TEST")
+			os.Setenv("AWS_REGION", "us-east-1")
+
+			config := NewDefaultConfig()
+			config.Endpoint = s3Server.URL()
+			config.Region = "us-east-1"
+			config.EndpointUsePathStyle = true
+
+			storage, err := New(&config, http.DefaultTransport.(*http.Transport))
+			if err != nil {
+				return nil, err
+			}
+
+			wrapper.Storage = storage
+			return wrapper, nil
+		},
+		func(w *s3StorageWrapper) error {
+			// Clean up internal server if we created it
+			if w.shouldClose {
+				w.server.Close()
+			}
+			return nil
+		},
+	)
+}
+
+// NewS3Server creates and starts a new mock S3 server
+func NewS3Server() *TestServer {
+	backend := s3mem.New()
+	faker := gofakes3.New(backend)
+	server := httptest.NewServer(faker.Server())
+
+	return &TestServer{
+		server:  server,
+		backend: backend,
+	}
+}
+
+// Close stops the server
+func (s *TestServer) Close() {
+	s.server.Close()
+}
+
+// URL returns the server URL
+func (s *TestServer) URL() string {
+	return s.server.URL
+}

+ 9 - 48
storage/swift/swift.go → storage/swift/reader.go

@@ -9,56 +9,17 @@ import (
 	"github.com/ncw/swift/v2"
 	"github.com/ncw/swift/v2"
 
 
 	"github.com/imgproxy/imgproxy/v3/httpheaders"
 	"github.com/imgproxy/imgproxy/v3/httpheaders"
+	"github.com/imgproxy/imgproxy/v3/storage"
 	"github.com/imgproxy/imgproxy/v3/storage/common"
 	"github.com/imgproxy/imgproxy/v3/storage/common"
-	"github.com/imgproxy/imgproxy/v3/storage/response"
 )
 )
 
 
-// Storage implements Openstack Swift storage.
-type Storage struct {
-	config     *Config
-	connection *swift.Connection
-}
-
-// New creates a new Swift storage with the provided configuration.
-func New(
-	ctx context.Context,
-	config *Config,
-	trans *http.Transport,
-) (*Storage, error) {
-	if err := config.Validate(); err != nil {
-		return nil, err
-	}
-
-	c := &swift.Connection{
-		UserName:       config.Username,
-		ApiKey:         config.APIKey,
-		AuthUrl:        config.AuthURL,
-		AuthVersion:    config.AuthVersion,
-		Domain:         config.Domain, // v3 auth only
-		Tenant:         config.Tenant, // v2 auth only
-		Timeout:        config.Timeout,
-		ConnectTimeout: config.ConnectTimeout,
-		Transport:      trans,
-	}
-
-	err := c.Authenticate(ctx)
-	if err != nil {
-		return nil, fmt.Errorf("swift authentication failed: %v", err)
-	}
-
-	return &Storage{
-		config:     config,
-		connection: c,
-	}, nil
-}
-
 // GetObject retrieves an object from Swift storage.
 // GetObject retrieves an object from Swift storage.
 func (s *Storage) GetObject(
 func (s *Storage) GetObject(
 	ctx context.Context, reqHeader http.Header, bucket, name, _ string,
 	ctx context.Context, reqHeader http.Header, bucket, name, _ string,
-) (*response.Object, error) {
+) (*storage.ObjectReader, error) {
 	// If either bucket or object key is empty, return 404
 	// If either bucket or object key is empty, return 404
 	if len(bucket) == 0 || len(name) == 0 {
 	if len(bucket) == 0 || len(name) == 0 {
-		return response.NewNotFound(
+		return storage.NewObjectNotFound(
 			"invalid Swift URL: bucket name or object name are empty",
 			"invalid Swift URL: bucket name or object name are empty",
 		), nil
 		), nil
 	}
 	}
@@ -84,7 +45,7 @@ func (s *Storage) GetObject(
 	}
 	}
 
 
 	// Fetch the object from Swift
 	// Fetch the object from Swift
-	object, objectHeaders, err := s.connection.ObjectOpen(ctx, bucket, name, false, h)
+	obj, objectHeaders, err := s.connection.ObjectOpen(ctx, bucket, name, false, h)
 
 
 	// Convert Swift response headers to normal headers (if any)
 	// Convert Swift response headers to normal headers (if any)
 	header := make(http.Header)
 	header := make(http.Header)
@@ -95,12 +56,12 @@ func (s *Storage) GetObject(
 	if err != nil {
 	if err != nil {
 		// Handle not found errors gracefully
 		// Handle not found errors gracefully
 		if errors.Is(err, swift.ObjectNotFound) || errors.Is(err, swift.ContainerNotFound) {
 		if errors.Is(err, swift.ObjectNotFound) || errors.Is(err, swift.ContainerNotFound) {
-			return response.NewNotFound(err.Error()), nil
+			return storage.NewObjectNotFound(err.Error()), nil
 		}
 		}
 
 
 		// Same for NotModified
 		// Same for NotModified
 		if errors.Is(err, swift.NotModified) {
 		if errors.Is(err, swift.NotModified) {
-			return response.NewNotModified(header), nil
+			return storage.NewObjectNotModified(header), nil
 		}
 		}
 
 
 		return nil, fmt.Errorf("error opening swift object: %v", err)
 		return nil, fmt.Errorf("error opening swift object: %v", err)
@@ -112,9 +73,9 @@ func (s *Storage) GetObject(
 	// By default, Swift storage handles this.
 	// By default, Swift storage handles this.
 	// Just in case, let's double check.
 	// Just in case, let's double check.
 	if !partial && common.IsNotModified(reqHeader, header) {
 	if !partial && common.IsNotModified(reqHeader, header) {
-		object.Close()
-		return response.NewNotModified(header), nil
+		obj.Close()
+		return storage.NewObjectNotModified(header), nil
 	}
 	}
 
 
-	return response.NewOK(header, object), nil
+	return storage.NewObjectOK(header, obj), nil
 }
 }

+ 51 - 0
storage/swift/reader_test.go

@@ -0,0 +1,51 @@
+package swift
+
+import (
+	"testing"
+
+	"github.com/imgproxy/imgproxy/v3/storage"
+	"github.com/imgproxy/imgproxy/v3/storage/testsuite"
+	"github.com/imgproxy/imgproxy/v3/testutil"
+	"github.com/stretchr/testify/suite"
+)
+
+type ReaderTestSuite struct {
+	testsuite.ReaderSuite
+
+	swiftStorage testutil.LazyObj[*swiftStorageWrapper]
+}
+
+func (s *ReaderTestSuite) SetupSuite() {
+	s.ReaderSuite.SetupSuite()
+
+	s.TestContainer = "test-container"
+	s.TestObjectKey = "test-object.txt"
+
+	// Initialize Swift storage
+	s.swiftStorage, _ = NewLazySuiteStorage(s.Lazy())
+
+	// Swift test storage returns 200 for range requests
+	// We have to skip partial content checks
+	s.SkipPartialContentChecks = true
+
+	s.Storage, _ = testutil.NewLazySuiteObj(s,
+		func() (storage.Reader, error) {
+			return s.swiftStorage().Storage, nil
+		},
+	)
+}
+
+func (s *ReaderTestSuite) SetupTest() {
+	// Recreate Swift blob for each test
+	conn := s.swiftStorage().Connection()
+	f, err := conn.ObjectCreate(s.T().Context(), s.TestContainer, s.TestObjectKey, true, "", "application/octet-stream", nil)
+	s.Require().NoError(err)
+	n, err := f.Write(s.TestData)
+	s.Require().Len(s.TestData, n)
+	s.Require().NoError(err)
+	f.Close()
+}
+
+func TestReader(t *testing.T) {
+	suite.Run(t, new(ReaderTestSuite))
+}

+ 48 - 0
storage/swift/storage.go

@@ -0,0 +1,48 @@
+package swift
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/ncw/swift/v2"
+)
+
+// Storage implements Openstack Swift storage.
+type Storage struct {
+	config     *Config
+	connection *swift.Connection
+}
+
+// New creates a new Swift storage with the provided configuration.
+func New(
+	ctx context.Context,
+	config *Config,
+	trans *http.Transport,
+) (*Storage, error) {
+	if err := config.Validate(); err != nil {
+		return nil, err
+	}
+
+	c := &swift.Connection{
+		UserName:       config.Username,
+		ApiKey:         config.APIKey,
+		AuthUrl:        config.AuthURL,
+		AuthVersion:    config.AuthVersion,
+		Domain:         config.Domain, // v3 auth only
+		Tenant:         config.Tenant, // v2 auth only
+		Timeout:        config.Timeout,
+		ConnectTimeout: config.ConnectTimeout,
+		Transport:      trans,
+	}
+
+	err := c.Authenticate(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("swift authentication failed: %v", err)
+	}
+
+	return &Storage{
+		config:     config,
+		connection: c,
+	}, nil
+}
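
A hypothetical wiring example for the constructor above; the auth URL and credentials are placeholders, and the value-typed config mirrors how the tests use NewDefaultConfig:

package main

import (
	"context"
	"net/http"

	"github.com/imgproxy/imgproxy/v3/storage/swift"
)

// newSwiftStorage shows one way to build the Swift storage; all values are made up.
func newSwiftStorage(ctx context.Context) (*swift.Storage, error) {
	cfg := swift.NewDefaultConfig()
	cfg.AuthURL = "https://swift.example.com/auth/v1.0"
	cfg.Username = "demo"
	cfg.APIKey = "secret"
	cfg.AuthVersion = 1

	return swift.New(ctx, &cfg, http.DefaultTransport.(*http.Transport))
}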

+ 0 - 193
storage/swift/swift_test.go

@@ -1,193 +0,0 @@
-package swift
-
-import (
-	"context"
-	"net/http"
-	"testing"
-	"time"
-
-	"github.com/ncw/swift/v2"
-	"github.com/ncw/swift/v2/swifttest"
-	"github.com/stretchr/testify/suite"
-
-	"github.com/imgproxy/imgproxy/v3/fetcher/transport/generichttp"
-	"github.com/imgproxy/imgproxy/v3/httpheaders"
-	"github.com/imgproxy/imgproxy/v3/storage"
-)
-
-const (
-	testContainer = "test"
-	testObject    = "foo/test.png"
-)
-
-type SwiftTestSuite struct {
-	suite.Suite
-	server       *swifttest.SwiftServer
-	storage      storage.Reader
-	etag         string
-	lastModified time.Time
-}
-
-func (s *SwiftTestSuite) SetupSuite() {
-	s.server, _ = swifttest.NewSwiftServer("localhost")
-
-	config := NewDefaultConfig()
-
-	config.AuthURL = s.server.AuthURL
-	config.Username = swifttest.TEST_ACCOUNT
-	config.APIKey = swifttest.TEST_ACCOUNT
-	config.AuthVersion = 1
-
-	s.setupTestFile(&config)
-
-	c := generichttp.NewDefaultConfig()
-	c.IgnoreSslVerification = true
-
-	trans, err := generichttp.New(false, &c)
-	s.Require().NoError(err)
-
-	s.storage, err = New(s.T().Context(), &config, trans)
-	s.Require().NoError(err, "failed to initialize swift transport")
-}
-
-func (s *SwiftTestSuite) setupTestFile(config *Config) {
-	c := &swift.Connection{
-		UserName:    config.Username,
-		ApiKey:      config.APIKey,
-		AuthUrl:     config.AuthURL,
-		AuthVersion: config.AuthVersion,
-	}
-
-	ctx := context.Background()
-
-	err := c.Authenticate(ctx)
-	s.Require().NoError(err, "failed to authenticate with test server")
-
-	err = c.ContainerCreate(ctx, testContainer, nil)
-	s.Require().NoError(err, "failed to create container")
-
-	f, err := c.ObjectCreate(ctx, testContainer, testObject, true, "", "image/png", nil)
-	s.Require().NoError(err, "failed to create object")
-
-	defer f.Close()
-
-	data := make([]byte, 32)
-
-	n, err := f.Write(data)
-	s.Require().Len(data, n)
-	s.Require().NoError(err)
-
-	f.Close()
-	// The Etag is written on file close; but Last-Modified is only available when we get the object again.
-	_, h, err := c.Object(ctx, testContainer, testObject)
-	s.Require().NoError(err)
-	s.etag = h["Etag"]
-	s.lastModified, err = time.Parse(http.TimeFormat, h["Date"])
-	s.Require().NoError(err)
-}
-
-func (s *SwiftTestSuite) TearDownSuite() {
-	s.server.Close()
-}
-
-func (s *SwiftTestSuite) TestRoundTripReturns404WhenObjectNotFound() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/not-here.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(404, response.Status)
-}
-
-func (s *SwiftTestSuite) TestRoundTripReturns404WhenContainerNotFound() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "invalid", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(404, response.Status)
-}
-
-func (s *SwiftTestSuite) TestRoundTripWithETagEnabled() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(200, response.Status)
-	s.Require().Equal(s.etag, response.Headers.Get(httpheaders.Etag))
-	s.Require().NotNil(response.Body)
-
-	response.Body.Close()
-}
-
-func (s *SwiftTestSuite) TestRoundTripWithIfNoneMatchReturns304() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-	reqHeader.Set(httpheaders.IfNoneMatch, s.etag)
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusNotModified, response.Status)
-
-	if response.Body != nil {
-		response.Body.Close()
-	}
-}
-
-func (s *SwiftTestSuite) TestRoundTripWithUpdatedETagReturns200() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-	reqHeader.Set(httpheaders.IfNoneMatch, s.etag+"_wrong")
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusOK, response.Status)
-	s.Require().NotNil(response.Body)
-
-	response.Body.Close()
-}
-
-func (s *SwiftTestSuite) TestRoundTripWithLastModifiedEnabled() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(200, response.Status)
-	s.Require().Equal(s.lastModified.Format(http.TimeFormat), response.Headers.Get(httpheaders.LastModified))
-	s.Require().NotNil(response.Body)
-
-	response.Body.Close()
-}
-
-func (s *SwiftTestSuite) TestRoundTripWithIfModifiedSinceReturns304() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-	reqHeader.Set(httpheaders.IfModifiedSince, s.lastModified.Format(http.TimeFormat))
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusNotModified, response.Status)
-
-	if response.Body != nil {
-		response.Body.Close()
-	}
-}
-
-func (s *SwiftTestSuite) TestRoundTripWithUpdatedLastModifiedReturns200() {
-	ctx := s.T().Context()
-	reqHeader := make(http.Header)
-	reqHeader.Set(httpheaders.IfModifiedSince, s.lastModified.Add(-24*time.Hour).Format(http.TimeFormat))
-
-	response, err := s.storage.GetObject(ctx, reqHeader, "test", "foo/test.png", "")
-	s.Require().NoError(err)
-	s.Require().Equal(http.StatusOK, response.Status)
-	s.Require().NotNil(response.Body)
-
-	response.Body.Close()
-}
-
-func TestSwiftTransport(t *testing.T) {
-	suite.Run(t, new(SwiftTestSuite))
-}

+ 129 - 0
storage/swift/test_storage.go

@@ -0,0 +1,129 @@
+package swift
+
+import (
+	"context"
+	"net/http"
+
+	"github.com/imgproxy/imgproxy/v3/testutil"
+	"github.com/ncw/swift/v2"
+	"github.com/ncw/swift/v2/swifttest"
+)
+
+// TestServer is a mock Swift server for testing
+type TestServer struct {
+	server     *swifttest.SwiftServer
+	connection *swift.Connection
+}
+
+// swiftStorageWrapper wraps the storage and optionally holds a server for cleanup
+type swiftStorageWrapper struct {
+	*Storage
+	server      *TestServer
+	connection  *swift.Connection
+	shouldClose bool
+}
+
+// Server returns the underlying SwiftServer
+func (w *swiftStorageWrapper) Server() *TestServer {
+	return w.server
+}
+
+// Connection returns the Swift connection for direct API access
+func (w *swiftStorageWrapper) Connection() *swift.Connection {
+	return w.connection
+}
+
+// Sugar alias
+type LazySuiteStorage = testutil.LazyObj[*swiftStorageWrapper]
+
+// NewLazySuiteStorage creates a lazy Swift Storage object for use in test suites
+// A new server will be created internally and cleaned up automatically
+func NewLazySuiteStorage(
+	l testutil.LazySuiteFrom,
+) (testutil.LazyObj[*swiftStorageWrapper], context.CancelFunc) {
+	return testutil.NewLazySuiteObj(
+		l,
+		func() (*swiftStorageWrapper, error) {
+			wrapper := &swiftStorageWrapper{}
+
+			// Create server internally
+			swiftServer, err := NewSwiftServer()
+			if err != nil {
+				return nil, err
+			}
+			wrapper.server = swiftServer
+			wrapper.shouldClose = true
+
+			// Create container first using connection directly
+			err = swiftServer.connection.ContainerCreate(context.Background(), "test-container", nil)
+			if err != nil {
+				return nil, err
+			}
+
+			config := NewDefaultConfig()
+			config.AuthURL = swiftServer.server.AuthURL
+			config.Username = swifttest.TEST_ACCOUNT
+			config.APIKey = swifttest.TEST_ACCOUNT
+			config.AuthVersion = 1
+
+			storage, err := New(l.Lazy().T().Context(), &config, http.DefaultTransport.(*http.Transport))
+			if err != nil {
+				return nil, err
+			}
+
+			wrapper.Storage = storage
+			wrapper.connection = swiftServer.connection
+			return wrapper, nil
+		},
+		func(w *swiftStorageWrapper) error {
+			// Clean up internal server if we created it
+			if w.shouldClose {
+				w.server.Close()
+			}
+			return nil
+		},
+	)
+}
+
+// NewSwiftServer creates and starts a new mock Swift server
+func NewSwiftServer() (*TestServer, error) {
+	server, err := swifttest.NewSwiftServer("localhost")
+	if err != nil {
+		return nil, err
+	}
+
+	// Create connection
+	conn := &swift.Connection{
+		UserName:    swifttest.TEST_ACCOUNT,
+		ApiKey:      swifttest.TEST_ACCOUNT,
+		AuthUrl:     server.AuthURL,
+		AuthVersion: 1,
+	}
+
+	// Authenticate
+	err = conn.Authenticate(context.Background())
+	if err != nil {
+		server.Close()
+		return nil, err
+	}
+
+	return &TestServer{
+		server:     server,
+		connection: conn,
+	}, nil
+}
+
+// Connection returns the Swift connection
+func (s *TestServer) Connection() *swift.Connection {
+	return s.connection
+}
+
+// Close stops the server
+func (s *TestServer) Close() {
+	s.server.Close()
+}
+
+// URL returns the server auth URL
+func (s *TestServer) URL() string {
+	return s.server.AuthURL
+}

+ 218 - 0
storage/testsuite/reader.go

@@ -0,0 +1,218 @@
+package testsuite
+
+import (
+	"crypto/rand"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/imgproxy/imgproxy/v3/httpheaders"
+	"github.com/imgproxy/imgproxy/v3/storage"
+	"github.com/imgproxy/imgproxy/v3/testutil"
+)
+
+const (
+	testDataSize = 128
+)
+
+type ReaderSuite struct {
+	testutil.LazySuite
+
+	Storage       testutil.LazyObj[storage.Reader]
+	TestContainer string
+	TestObjectKey string
+	TestData      []byte
+
+	SkipPartialContentChecks bool
+}
+
+func (s *ReaderSuite) SetupSuite() {
+	// Generate random test data for content verification
+	s.TestData = make([]byte, testDataSize)
+	rand.Read(s.TestData)
+}
+
+// TestETagEnabled verifies that ETag header is returned in responses
+func (s *ReaderSuite) TestETagEnabled() {
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+
+	response, err := s.Storage().GetObject(ctx, reqHeader, s.TestContainer, s.TestObjectKey, "")
+	s.Require().NoError(err)
+	s.Require().Equal(200, response.Status)
+	s.Require().NotEmpty(response.Headers.Get(httpheaders.Etag))
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
+}
+
+// TestIfNoneMatchReturns304 verifies that If-None-Match header causes 304 response when ETag matches
+func (s *ReaderSuite) TestIfNoneMatchReturns304() {
+	ctx := s.T().Context()
+
+	// First, get the ETag
+	reqHeader := make(http.Header)
+	response, err := s.Storage().GetObject(ctx, reqHeader, s.TestContainer, s.TestObjectKey, "")
+	s.Require().NoError(err)
+	s.Require().Equal(200, response.Status)
+	etag := response.Headers.Get(httpheaders.Etag)
+	s.Require().NotEmpty(etag)
+	response.Body.Close()
+
+	// Now request with If-None-Match
+	reqHeader = make(http.Header)
+	reqHeader.Set(httpheaders.IfNoneMatch, etag)
+
+	response, err = s.Storage().GetObject(ctx, reqHeader, s.TestContainer, s.TestObjectKey, "")
+	s.Require().NoError(err)
+	s.Require().Equal(http.StatusNotModified, response.Status)
+
+	if response.Body != nil {
+		response.Body.Close()
+	}
+}
+
+// TestUpdatedETagReturns200 verifies that a wrong If-None-Match header returns 200
+func (s *ReaderSuite) TestUpdatedETagReturns200() {
+	ctx := s.T().Context()
+
+	// First, get the ETag
+	reqHeader := make(http.Header)
+	response, err := s.Storage().GetObject(ctx, reqHeader, s.TestContainer, s.TestObjectKey, "")
+	s.Require().NoError(err)
+	s.Require().Equal(200, response.Status)
+	etag := response.Headers.Get(httpheaders.Etag)
+	s.Require().NotEmpty(etag)
+	response.Body.Close()
+
+	// Now request with wrong ETag
+	reqHeader = make(http.Header)
+	reqHeader.Set(httpheaders.IfNoneMatch, etag+"_wrong")
+
+	response, err = s.Storage().GetObject(ctx, reqHeader, s.TestContainer, s.TestObjectKey, "")
+	s.Require().NoError(err)
+	s.Require().Equal(http.StatusOK, response.Status)
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
+}
+
+// TestLastModifiedEnabled verifies that Last-Modified header is returned in responses
+func (s *ReaderSuite) TestLastModifiedEnabled() {
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+
+	response, err := s.Storage().GetObject(ctx, reqHeader, s.TestContainer, s.TestObjectKey, "")
+	s.Require().NoError(err)
+	s.Require().Equal(200, response.Status)
+	s.Require().NotEmpty(response.Headers.Get(httpheaders.LastModified))
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
+}
+
+// TestIfModifiedSinceReturns304 verifies that If-Modified-Since header causes 304 response when date matches
+func (s *ReaderSuite) TestIfModifiedSinceReturns304() {
+	ctx := s.T().Context()
+
+	// First, get the Last-Modified time
+	reqHeader := make(http.Header)
+	response, err := s.Storage().GetObject(ctx, reqHeader, s.TestContainer, s.TestObjectKey, "")
+	s.Require().NoError(err)
+	s.Require().Equal(200, response.Status)
+	lastModified := response.Headers.Get(httpheaders.LastModified)
+	s.Require().NotEmpty(lastModified)
+	response.Body.Close()
+
+	// Now request with If-Modified-Since
+	reqHeader = make(http.Header)
+	reqHeader.Set(httpheaders.IfModifiedSince, lastModified)
+
+	response, err = s.Storage().GetObject(ctx, reqHeader, s.TestContainer, s.TestObjectKey, "")
+	s.Require().NoError(err)
+	s.Require().Equal(http.StatusNotModified, response.Status)
+
+	if response.Body != nil {
+		response.Body.Close()
+	}
+}
+
+// TestUpdatedLastModifiedReturns200 verifies that an older If-Modified-Since header returns 200
+func (s *ReaderSuite) TestUpdatedLastModifiedReturns200() {
+	ctx := s.T().Context()
+
+	// First, get the Last-Modified time
+	reqHeader := make(http.Header)
+	response, err := s.Storage().GetObject(ctx, reqHeader, s.TestContainer, s.TestObjectKey, "")
+	s.Require().NoError(err)
+	s.Require().Equal(200, response.Status)
+	lastModifiedStr := response.Headers.Get(httpheaders.LastModified)
+	s.Require().NotEmpty(lastModifiedStr)
+	response.Body.Close()
+
+	lastModified, err := time.Parse(http.TimeFormat, lastModifiedStr)
+	s.Require().NoError(err)
+
+	// Now request with older If-Modified-Since
+	reqHeader = make(http.Header)
+	reqHeader.Set(httpheaders.IfModifiedSince, lastModified.Add(-time.Minute).Format(http.TimeFormat))
+
+	response, err = s.Storage().GetObject(ctx, reqHeader, s.TestContainer, s.TestObjectKey, "")
+	s.Require().NoError(err)
+	s.Require().Equal(http.StatusOK, response.Status)
+	s.Require().NotNil(response.Body)
+
+	response.Body.Close()
+}
+
+// TestRangeRequest verifies that Range header returns partial content
+func (s *ReaderSuite) TestRangeRequest() {
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+	reqHeader.Set(httpheaders.Range, "bytes=10-19")
+
+	response, err := s.Storage().GetObject(ctx, reqHeader, s.TestContainer, s.TestObjectKey, "")
+	s.Require().NoError(err)
+
+	if !s.SkipPartialContentChecks {
+		s.Require().Equal(http.StatusPartialContent, response.Status)
+
+		expectedRange := fmt.Sprintf("bytes 10-19/%d", len(s.TestData))
+		s.Require().Equal(expectedRange, response.Headers.Get(httpheaders.ContentRange))
+	}
+
+	s.Require().Equal("10", response.Headers.Get(httpheaders.ContentLength))
+	s.Require().NotNil(response.Body)
+
+	// Read and verify the actual content (bytes 10-19 from testData)
+	buf := make([]byte, 10)
+	n, err := io.ReadFull(response.Body, buf)
+	s.Require().NoError(err)
+	s.Require().Equal(10, n)
+	s.Require().Equal(s.TestData[10:20], buf)
+
+	response.Body.Close()
+}
+
+// TestObjectNotFound verifies that requesting a non-existent object returns 404
+func (s *ReaderSuite) TestObjectNotFound() {
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+
+	response, err := s.Storage().GetObject(ctx, reqHeader, s.TestContainer, "nonexistent/object.png", "")
+	s.Require().NoError(err)
+	s.Require().Equal(http.StatusNotFound, response.Status)
+}
+
+// TestContainerNotFound verifies that requesting from a non-existent container returns 404
+func (s *ReaderSuite) TestContainerNotFound() {
+	if s.TestContainer == "" {
+		s.T().Skip("Test container is blank: skipping test")
+	}
+
+	ctx := s.T().Context()
+	reqHeader := make(http.Header)
+
+	response, err := s.Storage().GetObject(ctx, reqHeader, "nonexistent-container", s.TestObjectKey, "")
+	s.Require().NoError(err)
+	s.Require().Equal(http.StatusNotFound, response.Status)
+}
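
To show the intended reuse, a hypothetical wiring for some other backend (the mybackend package and newTestReader constructor are placeholders; the pattern mirrors the S3 and Swift suites above):

package mybackend

import (
	"testing"

	"github.com/imgproxy/imgproxy/v3/storage"
	"github.com/imgproxy/imgproxy/v3/storage/testsuite"
	"github.com/imgproxy/imgproxy/v3/testutil"
	"github.com/stretchr/testify/suite"
)

type ReaderTestSuite struct {
	testsuite.ReaderSuite
}

func (s *ReaderTestSuite) SetupSuite() {
	s.ReaderSuite.SetupSuite()

	s.TestContainer = "test-container"
	s.TestObjectKey = "test-object.txt"

	s.Storage, _ = testutil.NewLazySuiteObj(s,
		func() (storage.Reader, error) {
			// newTestReader is a placeholder: it should build the backend and
			// seed it with s.TestData under s.TestContainer/s.TestObjectKey.
			return newTestReader(s.TestContainer, s.TestObjectKey, s.TestData)
		},
	)
}

func TestReader(t *testing.T) {
	suite.Run(t, new(ReaderTestSuite))
}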