mirror of https://github.com/immich-app/immich.git synced 2024-11-27 09:21:05 +02:00
immich/machine-learning/locustfile.py
Mert 2b1b43a7e4
feat(ml): composable ml (#9973)
* modularize model classes

* various fixes

* expose port

* change response

* round coordinates

* simplify preload

* update server

* simplify interface

simplify

* update tests

* composable endpoint

* cleanup

fixes

remove unnecessary interface

support text input, cleanup

* ew camelcase

* update server

server fixes

fix typing

* ml fixes

update locustfile

fixes

* cleaner response

* better repo response

* update tests

formatting and typing

rename

* undo compose change

* linting

fix type

actually fix typing

* stricter typing

fix detection-only response

no need for defaultdict

* update spec file

update api

linting

* update e2e

* unnecessary dimension

* remove commented code

* remove duplicate code

* remove unused imports

* add batch dim
2024-06-07 03:09:47 +00:00

88 lines
2.9 KiB
Python

import json
from argparse import ArgumentParser
from io import BytesIO
from typing import Any

from locust import HttpUser, events, task
from locust.env import Environment
from PIL import Image

byte_image = BytesIO()
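
# One way to run this file (a sketch, not from the original source): the
# --clip-model, --face-model, --*-min-score, and --image-size flags are the
# custom options registered below; the rest are standard locust flags.
#   locust -f locustfile.py --headless -u 4 -r 1 CLIPVisionFormDataLoadTest --clip-model ViT-B-32::openai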


@events.init_command_line_parser.add_listener
def _(parser: ArgumentParser) -> None:
    parser.add_argument("--clip-model", type=str, default="ViT-B-32::openai")
    parser.add_argument("--face-model", type=str, default="buffalo_l")
    parser.add_argument(
        "--tag-min-score",
        type=float,
        default=0.0,
        help="Returns all tags at or above this score. The default returns all tags.",
    )
    parser.add_argument(
        "--face-min-score",
        type=float,
        default=0.034,
        help=(
            "Returns all faces at or above this score. The default returns 1 face per request; "
            "setting this to 0 blows up the number of faces to the thousands."
        ),
    )
    parser.add_argument("--image-size", type=int, default=1000)


@events.test_start.add_listener
def on_test_start(environment: Environment, **kwargs: Any) -> None:
    global byte_image
    assert environment.parsed_options is not None
    image = Image.new("RGB", (environment.parsed_options.image_size, environment.parsed_options.image_size))
    image.save(byte_image, format="jpeg")


class InferenceLoadTest(HttpUser):
    abstract: bool = True
    host = "http://127.0.0.1:3003"
    data: bytes

    # re-use the image across all instances in a process
    def on_start(self) -> None:
        self.data = byte_image.getvalue()
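

# Each load test below posts multipart form data to the /predict endpoint: an
# "entries" field holding the JSON task configuration, plus the text or image
# input to run inference on.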
class CLIPTextFormDataLoadTest(InferenceLoadTest):
    @task
    def encode_text(self) -> None:
        request = {"clip": {"textual": {"modelName": self.environment.parsed_options.clip_model}}}
        data = [("entries", json.dumps(request)), ("text", "test search query")]
        self.client.post("/predict", data=data)


class CLIPVisionFormDataLoadTest(InferenceLoadTest):
    @task
    def encode_image(self) -> None:
        request = {"clip": {"visual": {"modelName": self.environment.parsed_options.clip_model, "options": {}}}}
        data = [("entries", json.dumps(request))]
        files = {"image": self.data}
        self.client.post("/predict", data=data, files=files)


class RecognitionFormDataLoadTest(InferenceLoadTest):
    @task
    def recognize(self) -> None:
        request = {
            "facial-recognition": {
                "recognition": {
                    "modelName": self.environment.parsed_options.face_model,
                    "options": {"minScore": self.environment.parsed_options.face_min_score},
                },
                "detection": {
                    "modelName": self.environment.parsed_options.face_model,
                },
            }
        }
        data = [("entries", json.dumps(request))]
        files = {"image": self.data}
        self.client.post("/predict", data=data, files=files)
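

# A sketch of a combined request (an assumption, not part of the original file):
# the composable endpoint described in the commit above suggests multiple
# entries can be bundled into one /predict call, so this merges the CLIP visual
# and facial-recognition payloads used by the classes above.
class ComposedFormDataLoadTest(InferenceLoadTest):
    @task
    def encode_and_recognize(self) -> None:
        request = {
            "clip": {"visual": {"modelName": self.environment.parsed_options.clip_model, "options": {}}},
            "facial-recognition": {
                "detection": {"modelName": self.environment.parsed_options.face_model},
                "recognition": {
                    "modelName": self.environment.parsed_options.face_model,
                    "options": {"minScore": self.environment.parsed_options.face_min_score},
                },
            },
        }
        data = [("entries", json.dumps(request))]
        files = {"image": self.data}
        self.client.post("/predict", data=data, files=files)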