
Rename concurrency to workers

DarthSim
2023-08-15 19:54:42 +03:00
parent e172b14377
commit 07e34a45f2
9 changed files with 26 additions and 17 deletions

View File

@@ -3,6 +3,7 @@
## [Unreleased]
### Add
- Add [multi-region mode](https://docs.imgproxy.net/latest/serving_files_from_s3?id=multi-region-mode) to S3 integration.
- Add `IMGPROXY_WORKERS` alias for the `IMGPROXY_CONCURRENCY` config.
### Change
- Don't report `The image request is cancelled` errors.
@@ -10,8 +11,6 @@
### Fix
- Fix reporting image loading errors.
### Fix
- Fix the `Cache-Control` and `Expires` headers behavior when both `IMGPROXY_CACHE_CONTROL_PASSTHROUGH` and `IMGPROXY_FALLBACK_IMAGE_TTL` configs are set.
- (pro) Fix the `IMGPROXY_FALLBACK_IMAGE_TTL` config behavior when the `fallback_image_url` processing option is used.

View File

@@ -26,7 +26,7 @@ var (
KeepAliveTimeout int
ClientKeepAliveTimeout int
DownloadTimeout int
Concurrency int
Workers int
RequestsQueueSize int
MaxClients int
@@ -219,7 +219,7 @@ func Reset() {
KeepAliveTimeout = 10
ClientKeepAliveTimeout = 90
DownloadTimeout = 5
Concurrency = runtime.GOMAXPROCS(0) * 2
Workers = runtime.GOMAXPROCS(0) * 2
RequestsQueueSize = 0
MaxClients = 2048
@@ -400,7 +400,8 @@ func Configure() error {
configurators.Int(&KeepAliveTimeout, "IMGPROXY_KEEP_ALIVE_TIMEOUT")
configurators.Int(&ClientKeepAliveTimeout, "IMGPROXY_CLIENT_KEEP_ALIVE_TIMEOUT")
configurators.Int(&DownloadTimeout, "IMGPROXY_DOWNLOAD_TIMEOUT")
configurators.Int(&Concurrency, "IMGPROXY_CONCURRENCY")
configurators.Int(&Workers, "IMGPROXY_CONCURRENCY")
configurators.Int(&Workers, "IMGPROXY_WORKERS")
configurators.Int(&RequestsQueueSize, "IMGPROXY_REQUESTS_QUEUE_SIZE")
configurators.Int(&MaxClients, "IMGPROXY_MAX_CLIENTS")
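Because both reads above write into the same `Workers` field and the `IMGPROXY_WORKERS` read runs last, the new name takes precedence whenever both variables are set. Below is a minimal sketch of that alias pattern; the `intFromEnv` helper is hypothetical and only assumes (as `configurators.Int` appears to) that the target is overwritten solely when the variable is actually present.

```go
// Sketch of the env alias pattern: the variable read last wins when both are set.
package main

import (
	"fmt"
	"os"
	"strconv"
)

// intFromEnv is a hypothetical stand-in for configurators.Int: it leaves
// *target untouched unless the variable exists and parses as an integer.
func intFromEnv(target *int, name string) {
	if raw, ok := os.LookupEnv(name); ok {
		if v, err := strconv.Atoi(raw); err == nil {
			*target = v
		}
	}
}

func main() {
	workers := 8 // pretend default, e.g. GOMAXPROCS * 2

	os.Setenv("IMGPROXY_CONCURRENCY", "4") // legacy name
	os.Setenv("IMGPROXY_WORKERS", "16")    // new name, read last

	intFromEnv(&workers, "IMGPROXY_CONCURRENCY")
	intFromEnv(&workers, "IMGPROXY_WORKERS")

	fmt.Println(workers) // 16: the alias read last takes precedence
}
```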
@@ -625,8 +626,8 @@ func Configure() error {
return fmt.Errorf("Download timeout should be greater than 0, now - %d\n", DownloadTimeout)
}
if Concurrency <= 0 {
return fmt.Errorf("Concurrency should be greater than 0, now - %d\n", Concurrency)
if Workers <= 0 {
return fmt.Errorf("Workers number should be greater than 0, now - %d\n", Workers)
}
if RequestsQueueSize < 0 {
@@ -634,7 +635,7 @@ func Configure() error {
}
if MaxClients < 0 {
return fmt.Errorf("Concurrency should be greater than or equal 0, now - %d\n", MaxClients)
return fmt.Errorf("Max clients number should be greater than or equal 0, now - %d\n", MaxClients)
}
if TTL <= 0 {

View File

@@ -11,7 +11,7 @@ imgproxy sends the following metrics to CloudWatch:
* `RequestsInProgress`: the number of requests currently in progress
* `ImagesInProgress`: the number of images currently in progress
* `ConcurrencyUtilization`: the percentage of imgproxy's concurrency utilization. Calculated as `RequestsInProgress / IMGPROXY_CONCURRENCY * 100`
* `WorkersUtilization`, `ConcurrencyUtilization`: the percentage of imgproxy's workers utilization. Calculated as `RequestsInProgress / IMGPROXY_WORKERS * 100`
* `BufferSize`: a summary of the download buffers sizes (in bytes)
* `BufferDefaultSize`: calibrated default buffer size (in bytes)
* `BufferMaxSize`: calibrated maximum buffer size (in bytes)
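As a quick worked example of the utilization formula above (the numbers are illustrative, not from the source): an instance with `IMGPROXY_WORKERS=8` and 6 requests in progress reports 75%. Both `WorkersUtilization` and the backward-compatible `ConcurrencyUtilization` carry this same value.

```go
// Illustrative calculation of WorkersUtilization with made-up numbers.
package main

import "fmt"

func main() {
	requestsInProgress := 6.0
	workers := 8.0 // IMGPROXY_WORKERS

	utilization := requestsInProgress / workers * 100.0
	fmt.Printf("WorkersUtilization: %.1f%%\n", utilization) // 75.0%
}
```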

View File

@@ -33,7 +33,7 @@ echo $(xxd -g 2 -l 64 -p /dev/random | tr -d '\n')
* `IMGPROXY_KEEP_ALIVE_TIMEOUT`: the maximum duration (in seconds) to wait for the next request before closing the connection. When set to `0`, keep-alive is disabled. Default: `10`
* `IMGPROXY_CLIENT_KEEP_ALIVE_TIMEOUT`: the maximum duration (in seconds) to wait for the next request before closing the HTTP client connection. The HTTP client is used to download source images. When set to `0`, keep-alive is disabled. Default: `90`
* `IMGPROXY_DOWNLOAD_TIMEOUT`: the maximum duration (in seconds) for downloading the source image. Default: `5`
* `IMGPROXY_CONCURRENCY`: the maximum number of image requests to be processed simultaneously. Requests that exceed this limit are put in the queue. Default: the number of CPU cores multiplied by two
* `IMGPROXY_WORKERS`: _(alias: `IMGPROXY_CONCURRENCY`)_ the maximum number of images an imgproxy instance can process simultaneously without creating a queue. Default: the number of CPU cores multiplied by two
* `IMGPROXY_REQUESTS_QUEUE_SIZE`: the maximum number of image requests that can be put in the queue. Requests that exceed this limit are rejected with `429` HTTP status. When set to `0`, the requests queue is unlimited. Default: `0`
* `IMGPROXY_MAX_CLIENTS`: the maximum number of simultaneous active connections. When set to `0`, connection limit is disabled. Default: `2048`
* `IMGPROXY_TTL`: a duration (in seconds) sent via the `Expires` and `Cache-Control: max-age` HTTP headers. Default: `31536000` (1 year)
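To make the interaction between `IMGPROXY_WORKERS` and `IMGPROXY_REQUESTS_QUEUE_SIZE` concrete, here is a small illustrative calculation with made-up values (8 workers, a queue of 16, 30 simultaneous image requests): 8 requests are processed at once, 16 wait in the queue, and the remaining 6 are rejected with `429`.

```go
// Illustrative capacity math; the values are assumptions, not defaults.
package main

import "fmt"

func main() {
	workers := 8    // IMGPROXY_WORKERS
	queueSize := 16 // IMGPROXY_REQUESTS_QUEUE_SIZE
	inFlight := 30  // concurrent image requests hitting the instance

	processing := min(inFlight, workers)
	queued := min(inFlight-processing, queueSize)
	rejected := inFlight - processing - queued

	fmt.Println(processing, queued, rejected) // 8 16 6
}

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}
```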

View File

@@ -746,7 +746,7 @@ When set to `1`, `t` or `true`, imgproxy will respond with a raw unprocessed, an
* While the `skip_processing` option has some conditions to skip the processing, the `raw` option allows to skip processing no matter what
* With the `raw` option set, imgproxy doesn't check the source image's type, resolution, and file size. Basically, the `raw` option allows streaming of any file type
* With the `raw` option set, imgproxy won't download the whole image to the memory. Instead, it will stream the source image directly to the response lowering memory usage
* The requests with the `raw` option set are not limited by the `IMGPROXY_CONCURRENCY` config
* The requests with the `raw` option set are not limited by the `IMGPROXY_WORKERS` config
Default: `false`
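The reason raw requests can skip the `IMGPROXY_WORKERS` limit is that nothing is decoded or processed: the source is streamed straight to the client. The sketch below is a simplified illustration of that idea, not imgproxy's actual code; `streamRaw` is a hypothetical helper.

```go
// Simplified sketch of raw streaming: the file is copied chunk by chunk to
// the response, so it is never held in memory and needs no worker slot.
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
)

func streamRaw(rw http.ResponseWriter, src io.Reader, contentType string) error {
	rw.Header().Set("Content-Type", contentType)
	_, err := io.Copy(rw, src)
	return err
}

func main() {
	rec := httptest.NewRecorder()
	src := strings.NewReader("...raw image bytes...")
	if err := streamRaw(rec, src, "image/jpeg"); err == nil {
		fmt.Println(rec.Header().Get("Content-Type")) // image/jpeg
	}
}
```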

View File

@@ -18,7 +18,7 @@ var ErrSourceImageTypeNotSupported = ierrors.New(422, "Source image type not sup
var downloadBufPool *bufpool.Pool
func initRead() {
downloadBufPool = bufpool.New("download", config.Concurrency, config.DownloadBufferSize)
downloadBufPool = bufpool.New("download", config.Workers, config.DownloadBufferSize)
}
func readAndCheckImage(r io.Reader, contentLength int, secopts security.Options) (*ImageData, error) {
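The download buffer pool is sized to the worker count because at most that many downloads can hold a buffer at the same time. imgproxy's `bufpool` package has its own calibration logic not shown here; the channel-based pool below is only a simplified illustration of the sizing idea, with hypothetical names.

```go
// Channel-bounded buffer pool: capacity equals the worker count.
package main

import (
	"bytes"
	"fmt"
)

type bufPool struct{ ch chan *bytes.Buffer }

func newBufPool(workers, defaultSize int) *bufPool {
	p := &bufPool{ch: make(chan *bytes.Buffer, workers)}
	for i := 0; i < workers; i++ {
		p.ch <- bytes.NewBuffer(make([]byte, 0, defaultSize))
	}
	return p
}

func (p *bufPool) Get() *bytes.Buffer  { return <-p.ch }
func (p *bufPool) Put(b *bytes.Buffer) { b.Reset(); p.ch <- b }

func main() {
	pool := newBufPool(8, 64*1024) // e.g. Workers=8, 64 KiB default size
	buf := pool.Get()
	buf.WriteString("downloaded bytes")
	fmt.Println(buf.Len()) // 16
	pool.Put(buf)
}
```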

View File

@@ -232,7 +232,16 @@ func runMetricsCollector() {
MetricName: aws.String("ConcurrencyUtilization"),
Unit: aws.String("Percent"),
Value: aws.Float64(
stats.RequestsInProgress() / float64(config.Concurrency) * 100.0,
stats.RequestsInProgress() / float64(config.Workers) * 100.0,
),
})
metrics = append(metrics, &cloudwatch.MetricDatum{
Dimensions: []*cloudwatch.Dimension{dimension},
MetricName: aws.String("WorkersUtilization"),
Unit: aws.String("Percent"),
Value: aws.Float64(
stats.RequestsInProgress() / float64(config.Workers) * 100.0,
),
})

View File

@@ -37,10 +37,10 @@ var (
func initProcessingHandler() {
if config.RequestsQueueSize > 0 {
queueSem = semaphore.New(config.RequestsQueueSize + config.Concurrency)
queueSem = semaphore.New(config.RequestsQueueSize + config.Workers)
}
processingSem = semaphore.New(config.Concurrency)
processingSem = semaphore.New(config.Workers)
vary := make([]string, 0)
@@ -282,7 +282,7 @@ func handleProcessing(reqID string, rw http.ResponseWriter, r *http.Request) {
}
}
// The heavy part start here, so we need to restrict concurrency
// The heavy part start here, so we need to restrict workers number
var processingSemToken *semaphore.Token
func() {
defer metrics.StartQueueSegment(ctx)()
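The queue semaphore above is sized `RequestsQueueSize + Workers` so that requests currently being processed do not eat into the queue budget, while the processing semaphore caps the heavy image work at the worker count. The channel-based sketch below illustrates that two-gate admission pattern; imgproxy uses its own `semaphore` package, the gate names are hypothetical, and a non-zero queue size is assumed for brevity.

```go
// Two-gate admission sketch: reject when the queue is full, then wait for a
// free worker slot before doing the heavy work.
package main

import "net/http"

var (
	queueGate      chan struct{} // capacity: queueSize + workers
	processingGate chan struct{} // capacity: workers
)

func initGates(workers, queueSize int) {
	queueGate = make(chan struct{}, queueSize+workers)
	processingGate = make(chan struct{}, workers)
}

func handle(rw http.ResponseWriter, r *http.Request) {
	select {
	case queueGate <- struct{}{}: // admitted: either queued or processed
		defer func() { <-queueGate }()
	default: // queue budget exhausted
		http.Error(rw, "Too Many Requests", http.StatusTooManyRequests)
		return
	}

	processingGate <- struct{}{} // wait for a free worker slot
	defer func() { <-processingGate }()

	// ... the heavy processing would happen here ...
}

func main() {
	initGates(8, 16) // illustrative: 8 workers, queue of 16
	http.HandleFunc("/", handle)
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}
```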

View File

@@ -30,7 +30,7 @@ func New(verifyNetworks bool) (*http.Transport, error) {
Proxy: http.ProxyFromEnvironment,
DialContext: dialer.DialContext,
MaxIdleConns: 100,
MaxIdleConnsPerHost: config.Concurrency + 1,
MaxIdleConnsPerHost: config.Workers + 1,
IdleConnTimeout: time.Duration(config.ClientKeepAliveTimeout) * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,