diff --git a/config/config.go b/config/config.go index 1397546d..de072997 100644 --- a/config/config.go +++ b/config/config.go @@ -103,6 +103,7 @@ var ( S3Region string S3Endpoint string S3AssumeRoleArn string + S3MultiRegion bool GCSEnabled bool GCSKey string @@ -298,6 +299,7 @@ func Reset() { S3Region = "" S3Endpoint = "" S3AssumeRoleArn = "" + S3MultiRegion = false GCSEnabled = false GCSKey = "" ABSEnabled = false @@ -497,6 +499,7 @@ func Configure() error { configurators.String(&S3Region, "IMGPROXY_S3_REGION") configurators.String(&S3Endpoint, "IMGPROXY_S3_ENDPOINT") configurators.String(&S3AssumeRoleArn, "IMGPROXY_S3_ASSUME_ROLE_ARN") + configurators.Bool(&S3MultiRegion, "IMGPROXY_S3_MULTI_REGION") configurators.Bool(&GCSEnabled, "IMGPROXY_USE_GCS") configurators.String(&GCSKey, "IMGPROXY_GCS_KEY") diff --git a/docs/configuration.md b/docs/configuration.md index 1d8e11a7..c8b41f87 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -366,7 +366,9 @@ Check out the [Serving local files](serving_local_files.md) guide to learn more. imgproxy can process files from Amazon S3 buckets, but this feature is disabled by default. To enable it, set `IMGPROXY_USE_S3` to `true`: * `IMGPROXY_USE_S3`: when `true`, enables image fetching from Amazon S3 buckets. Default: `false` +* `IMGPROXY_S3_REGION`: an S3 buckets region * `IMGPROXY_S3_ENDPOINT`: a custom S3 endpoint to being used by imgproxy +* `IMGPROXY_S3_MULTI_REGION`: when `true`, allows using S3 buckets from different regions Check out the [Serving files from S3](serving_files_from_s3.md) guide to learn more. diff --git a/docs/serving_files_from_s3.md b/docs/serving_files_from_s3.md index 34874335..a45c630c 100644 --- a/docs/serving_files_from_s3.md +++ b/docs/serving_files_from_s3.md @@ -6,8 +6,9 @@ imgproxy can process images from S3 buckets. To use this feature, do the followi 2. [Set up the necessary credentials](#set-up-credentials) to grant access to your bucket. 3. 
_(optional)_ Specify the AWS region with `IMGPROXY_S3_REGION` or `AWS_REGION`. Default: `us-west-1` 4. _(optional)_ Specify the S3 endpoint with `IMGPROXY_S3_ENDPOINT`. -5. _(optional)_ Specify the AWS IAM Role to Assume with `IMGPROXY_S3_ASSUME_ROLE_ARN` -6. Use `s3://%bucket_name/%file_key` as the source image URL. +5. _(optional)_ Set the `IMGPROXY_S3_MULTI_REGION` environment variable to `true`. +6. _(optional)_ Specify the AWS IAM Role to Assume with `IMGPROXY_S3_ASSUME_ROLE_ARN` +7. Use `s3://%bucket_name/%file_key` as the source image URL. If you need to specify the version of the source object, you can use the query string of the source URL: @@ -54,11 +55,17 @@ aws_secret_access_key = %secret_access_key S3 access credentials may be acquired by assuming a role using STS. To do so specify the IAM Role arn with the `IMGPROXY_S3_ASSUME_ROLE_ARN` environment variable. This approach still requires you to provide initial AWS credentials by using one of the ways described above. The provided credentials role should allow assuming the role with provided ARN. -## Minio +## Multi-Region mode -[Minio](https://github.com/minio/minio) is an object storage server released under Apache License v2.0. It is compatible with Amazon S3, so it can be used with imgproxy. +By default, imgproxy allows using S3 buckets located in a single region specified with `IMGPROXY_S3_REGION` or `AWS_REGION`. If your buckets are located in different regions, set the `IMGPROXY_S3_MULTI_REGION` environment variable to `true` to enable multi-region mode. In this mode, imgproxy will make an additional request to determine the bucket's region when the bucket is accessed for the first time. -To use Minio as source images provider, do the following: +In this mode, imgproxy uses a region specified with `IMGPROXY_S3_REGION` or `AWS_REGION` to determine the endpoint to which it should send the bucket's region determination request. 
Thus, it's a good idea to use one of these variables to specify a region closest to the imgproxy instance. + +## MinIO + +[MinIO](https://github.com/minio/minio) is an object storage server released under Apache License v2.0. It is compatible with Amazon S3, so it can be used with imgproxy. + +To use MinIO as source images provider, do the following: * Set up Amazon S3 support as usual using environment variables or a shared config file. * Specify an endpoint with `IMGPROXY_S3_ENDPOINT`. Use the `http://...` endpoint to disable SSL. diff --git a/transport/s3/s3.go b/transport/s3/s3.go index 3033d07a..60036985 100644 --- a/transport/s3/s3.go +++ b/transport/s3/s3.go @@ -1,10 +1,12 @@ package s3 import ( + "context" "fmt" "io" http "net/http" "strings" + "sync" "time" "github.com/aws/aws-sdk-go/aws" @@ -13,6 +15,7 @@ import ( "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" "github.com/imgproxy/imgproxy/v3/config" defaultTransport "github.com/imgproxy/imgproxy/v3/transport" @@ -20,26 +23,28 @@ import ( // transport implements RoundTripper for the 's3' protocol. 
type transport struct { - svc *s3.S3 + session *session.Session + defaultClient *s3.S3 + + clientsByRegion map[string]*s3.S3 + clientsByBucket map[string]*s3.S3 + + mu sync.RWMutex } func New() (http.RoundTripper, error) { - s3Conf := aws.NewConfig() + conf := aws.NewConfig() trans, err := defaultTransport.New(false) if err != nil { return nil, err } - s3Conf.HTTPClient = &http.Client{Transport: trans} - - if len(config.S3Region) != 0 { - s3Conf.Region = aws.String(config.S3Region) - } + conf.HTTPClient = &http.Client{Transport: trans} if len(config.S3Endpoint) != 0 { - s3Conf.Endpoint = aws.String(config.S3Endpoint) - s3Conf.S3ForcePathStyle = aws.Bool(true) + conf.Endpoint = aws.String(config.S3Endpoint) + conf.S3ForcePathStyle = aws.Bool(true) } sess, err := session.NewSession() @@ -47,18 +52,35 @@ func New() (http.RoundTripper, error) { return nil, fmt.Errorf("Can't create S3 session: %s", err) } - if len(config.S3AssumeRoleArn) != 0 { - s3Conf.Credentials = stscreds.NewCredentials(sess, config.S3AssumeRoleArn) + if len(config.S3Region) != 0 { + sess.Config.Region = aws.String(config.S3Region) } if sess.Config.Region == nil || len(*sess.Config.Region) == 0 { sess.Config.Region = aws.String("us-west-1") } - return transport{s3.New(sess, s3Conf)}, nil + if len(config.S3AssumeRoleArn) != 0 { + conf.Credentials = stscreds.NewCredentials(sess, config.S3AssumeRoleArn) + } + + client := s3.New(sess, conf) + + clientRegion := "us-west-1" + if client.Config.Region != nil { + clientRegion = *client.Config.Region + } + + return &transport{ + session: sess, + defaultClient: client, + + clientsByRegion: map[string]*s3.S3{clientRegion: client}, + clientsByBucket: make(map[string]*s3.S3), + }, nil } -func (t transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { +func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { input := &s3.GetObjectInput{ Bucket: aws.String(req.URL.Host), Key: aws.String(req.URL.Path), @@ -86,7 +108,12 @@ func (t 
transport) RoundTrip(req *http.Request) (resp *http.Response, err error) } } - s3req, _ := t.svc.GetObjectRequest(input) + client, err := t.getClient(req.Context(), *input.Bucket) + if err != nil { + return handleError(req, err) + } + + s3req, _ := client.GetObjectRequest(input) s3req.SetContext(req.Context()) if err := s3req.Send(); err != nil { @@ -94,29 +121,81 @@ func (t transport) RoundTrip(req *http.Request) (resp *http.Response, err error) s3req.HTTPResponse.Body.Close() } - if s3err, ok := err.(awserr.Error); ok && s3err.Code() == request.CanceledErrorCode { - if e := s3err.OrigErr(); e != nil { - return nil, e - } - } - - if s3err, ok := err.(awserr.RequestFailure); !ok || s3err.StatusCode() < 100 || s3err.StatusCode() == 301 { - return nil, err - } else { - body := strings.NewReader(s3err.Message()) - return &http.Response{ - StatusCode: s3err.StatusCode(), - Proto: "HTTP/1.0", - ProtoMajor: 1, - ProtoMinor: 0, - Header: http.Header{}, - ContentLength: int64(body.Len()), - Body: io.NopCloser(body), - Close: false, - Request: s3req.HTTPRequest, - }, nil - } + return handleError(req, err) } return s3req.HTTPResponse, nil } + +func (t *transport) getClient(ctx context.Context, bucket string) (*s3.S3, error) { + if !config.S3MultiRegion { + return t.defaultClient, nil + } + + var client *s3.S3 + + func() { + t.mu.RLock() + defer t.mu.RUnlock() + client = t.clientsByBucket[bucket] + }() + + if client != nil { + return client, nil + } + + t.mu.Lock() + defer t.mu.Unlock() + + // Check again if someone did this before us + if client = t.clientsByBucket[bucket]; client != nil { + return client, nil + } + + region, err := s3manager.GetBucketRegionWithClient(ctx, t.defaultClient, bucket) + if err != nil { + return nil, err + } + + if client = t.clientsByRegion[region]; client != nil { + t.clientsByBucket[bucket] = client + return client, nil + } + + conf := t.defaultClient.Config.Copy() + conf.Region = aws.String(region) + + client = s3.New(t.session, conf) + + 
t.clientsByRegion[region] = client + t.clientsByBucket[bucket] = client + + return client, nil +} + +func handleError(req *http.Request, err error) (*http.Response, error) { + if s3err, ok := err.(awserr.Error); ok && s3err.Code() == request.CanceledErrorCode { + if e := s3err.OrigErr(); e != nil { + return nil, e + } + } + + s3err, ok := err.(awserr.RequestFailure) + if !ok || s3err.StatusCode() < 100 || s3err.StatusCode() == 301 { + return nil, err + } + + body := strings.NewReader(s3err.Message()) + + return &http.Response{ + StatusCode: s3err.StatusCode(), + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Header: http.Header{}, + ContentLength: int64(body.Len()), + Body: io.NopCloser(body), + Close: false, + Request: req, + }, nil +} diff --git a/transport/s3/s3_test.go b/transport/s3/s3_test.go index 80b1b8d2..03259e59 100644 --- a/transport/s3/s3_test.go +++ b/transport/s3/s3_test.go @@ -2,6 +2,7 @@ package s3 import ( "bytes" + "context" "net/http" "net/http/httptest" "os" @@ -43,13 +44,13 @@ func (s *S3TestSuite) SetupSuite() { s.transport, err = New() require.Nil(s.T(), err) - svc := s.transport.(transport).svc - - _, err = svc.CreateBucket(&s3.CreateBucketInput{ - Bucket: aws.String("test"), - }) + err = backend.CreateBucket("test") require.Nil(s.T(), err) + svc, err := s.transport.(*transport).getClient(context.Background(), "test") + require.Nil(s.T(), err) + require.NotNil(s.T(), svc) + _, err = svc.PutObject(&s3.PutObjectInput{ Body: bytes.NewReader(make([]byte, 32)), Bucket: aws.String("test"), @@ -70,6 +71,7 @@ func (s *S3TestSuite) SetupSuite() { func (s *S3TestSuite) TearDownSuite() { s.server.Close() + config.Reset() } func (s *S3TestSuite) TestRoundTripWithETagDisabledReturns200() { @@ -155,6 +157,15 @@ func (s *S3TestSuite) TestRoundTripWithUpdatedLastModifiedReturns200() { require.Equal(s.T(), http.StatusOK, response.StatusCode) } +func (s *S3TestSuite) TestRoundTripWithMultiregionEnabledReturns200() { + config.S3MultiRegion = true + 
request, _ := http.NewRequest("GET", "s3://test/foo/test.png", nil) + + response, err := s.transport.RoundTrip(request) + require.Nil(s.T(), err) + require.Equal(s.T(), 200, response.StatusCode) +} + func TestS3Transport(t *testing.T) { suite.Run(t, new(S3TestSuite)) }