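"""FastAPI entrypoint for the Immich machine-learning service.

Exposes a /predict endpoint that runs one or more inference models (resolved through an
in-memory model cache) against an uploaded image or a text query, plus / and /ping health
routes, an optional model preload step, and an idle-shutdown watchdog.
"""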
import asyncio
import gc
import os
import signal
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from contextlib import asynccontextmanager
from functools import partial
from typing import Any, AsyncGenerator, Callable, Iterator
from zipfile import BadZipFile

import orjson
from fastapi import Depends, FastAPI, File, Form, HTTPException
from fastapi.responses import ORJSONResponse, PlainTextResponse
from onnxruntime.capi.onnxruntime_pybind11_state import InvalidProtobuf, NoSuchFile
from PIL.Image import Image
from pydantic import ValidationError
from starlette.formparsers import MultiPartParser

from app.models import get_model_deps
from app.models.base import InferenceModel
from app.models.transforms import decode_pil

from .config import PreloadModelData, log, settings
from .models.cache import ModelCache
from .schemas import (
    InferenceEntries,
    InferenceEntry,
    InferenceResponse,
    ModelFormat,
    ModelIdentity,
    ModelTask,
    ModelType,
    PipelineRequest,
    T,
)

MultiPartParser.max_file_size = 2**26  # spools to disk if payload is 64 MiB or larger

model_cache = ModelCache(revalidate=settings.model_ttl > 0)
thread_pool: ThreadPoolExecutor | None = None
lock = threading.Lock()
active_requests = 0
last_called: float | None = None


@asynccontextmanager
async def lifespan(_: FastAPI) -> AsyncGenerator[None, None]:
    global thread_pool
    log.info(
        (
            "Created in-memory cache with unloading "
            f"{f'after {settings.model_ttl}s of inactivity' if settings.model_ttl > 0 else 'disabled'}."
        )
    )

    try:
        if settings.request_threads > 0:
            # asyncio is a huge bottleneck for performance, so we use a thread pool to run blocking code
            thread_pool = ThreadPoolExecutor(settings.request_threads)
            log.info(f"Initialized request thread pool with {settings.request_threads} threads.")
        if settings.model_ttl > 0 and settings.model_ttl_poll_s > 0:
            asyncio.ensure_future(idle_shutdown_task())
        if settings.preload is not None:
            await preload_models(settings.preload)
        yield
    finally:
        log.handlers.clear()
        for model in model_cache.cache._cache.values():
            del model
        if thread_pool is not None:
            thread_pool.shutdown()
        gc.collect()


async def preload_models(preload: PreloadModelData) -> None:
    log.info(f"Preloading models: {preload}")
    if preload.clip is not None:
        model = await model_cache.get(preload.clip, ModelType.TEXTUAL, ModelTask.SEARCH)
        await load(model)

        model = await model_cache.get(preload.clip, ModelType.VISUAL, ModelTask.SEARCH)
        await load(model)

    if preload.facial_recognition is not None:
        model = await model_cache.get(preload.facial_recognition, ModelType.DETECTION, ModelTask.FACIAL_RECOGNITION)
        await load(model)

        model = await model_cache.get(preload.facial_recognition, ModelType.RECOGNITION, ModelTask.FACIAL_RECOGNITION)
        await load(model)


def update_state() -> Iterator[None]:
    """Track the number of in-flight requests and the last call time for the idle shutdown check."""
    global active_requests, last_called
    active_requests += 1
    last_called = time.time()
    try:
        yield
    finally:
        active_requests -= 1


def get_entries(entries: str = Form()) -> InferenceEntries:
    try:
        request: PipelineRequest = orjson.loads(entries)
        without_deps: list[InferenceEntry] = []
        with_deps: list[InferenceEntry] = []
        for task, types in request.items():
            for type, entry in types.items():
                parsed: InferenceEntry = {
                    "name": entry["modelName"],
                    "task": task,
                    "type": type,
                    "options": entry.get("options", {}),
                }
                dep = get_model_deps(parsed["name"], type, task)
                (with_deps if dep else without_deps).append(parsed)
        return without_deps, with_deps
    except (orjson.JSONDecodeError, ValidationError, KeyError, AttributeError) as e:
        log.error(f"Invalid request format: {e}")
        raise HTTPException(422, "Invalid request format.")
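

# Illustrative shape of the "entries" form field consumed above (keys follow the parsing logic;
# the specific task, type, and model names are placeholders, not a fixed contract):
#   {
#       "<task>": {
#           "<type>": {"modelName": "<model-name>", "options": {...}}
#       }
#   }
# Each entry is flattened into {"name", "task", "type", "options"} and routed into the
# with_deps/without_deps buckets depending on get_model_deps().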


app = FastAPI(lifespan=lifespan)


@app.get("/")
async def root() -> ORJSONResponse:
    return ORJSONResponse({"message": "Immich ML"})


@app.get("/ping")
def ping() -> PlainTextResponse:
    return PlainTextResponse("pong")


@app.post("/predict", dependencies=[Depends(update_state)])
async def predict(
    entries: InferenceEntries = Depends(get_entries),
    image: bytes | None = File(default=None),
    text: str | None = Form(default=None),
) -> Any:
    if image is not None:
        inputs: Image | str = await run(lambda: decode_pil(image))
    elif text is not None:
        inputs = text
    else:
        raise HTTPException(400, "Either image or text must be provided")
    response = await run_inference(inputs, entries)
    return ORJSONResponse(response)
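

# Sketch of a call to this endpoint (host, port, and image path are placeholders; the multipart
# fields mirror the parameters above: an "entries" JSON form field plus either "image" or "text"):
#   curl -X POST http://<host>:<port>/predict \
#       -F 'entries=<JSON as sketched above get_entries>' \
#       -F 'image=@photo.jpg'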


async def run_inference(payload: Image | str, entries: InferenceEntries) -> InferenceResponse:
    outputs: dict[ModelIdentity, Any] = {}
    response: InferenceResponse = {}

    async def _run_inference(entry: InferenceEntry) -> None:
        model = await model_cache.get(entry["name"], entry["type"], entry["task"], ttl=settings.model_ttl)
        inputs = [payload]
        for dep in model.depends:
            try:
                inputs.append(outputs[dep])
            except KeyError:
                message = f"Task {entry['task']} of type {entry['type']} depends on output of {dep}"
                raise HTTPException(400, message)
        model = await load(model)
        output = await run(model.predict, *inputs, **entry["options"])
        outputs[model.identity] = output
        response[entry["task"]] = output

    # Entries without dependencies run first so their outputs are available to dependent entries.
    without_deps, with_deps = entries
    await asyncio.gather(*[_run_inference(entry) for entry in without_deps])
    if with_deps:
        await asyncio.gather(*[_run_inference(entry) for entry in with_deps])
    if isinstance(payload, Image):
        response["imageHeight"], response["imageWidth"] = payload.height, payload.width

    return response


async def run(func: Callable[..., T], *args: Any, **kwargs: Any) -> T:
    if thread_pool is None:
        return func(*args, **kwargs)
    partial_func = partial(func, *args, **kwargs)
    return await asyncio.get_running_loop().run_in_executor(thread_pool, partial_func)


async def load(model: InferenceModel) -> InferenceModel:
    if model.loaded:
        return model

    def _load(model: InferenceModel) -> InferenceModel:
        if model.load_attempts > 1:
            raise HTTPException(500, f"Failed to load model '{model.model_name}'")
        with lock:
            try:
                model.load()
            except FileNotFoundError as e:
                if model.model_format == ModelFormat.ONNX:
                    raise e
                # A non-ONNX model file is missing; fall back to the ONNX format and retry.
                log.exception(e)
                log.warning(
                    f"{model.model_format.upper()} is available, but model '{model.model_name}' does not support it."
                )
                model.model_format = ModelFormat.ONNX
                model.load()
        return model

    try:
        return await run(_load, model)
    except (OSError, InvalidProtobuf, BadZipFile, NoSuchFile):
        # A corrupt or unreadable model file gets one cache clear and retry before failing.
        log.warning(f"Failed to load {model.model_type.replace('_', ' ')} model '{model.model_name}'. Clearing cache.")
        model.clear_cache()
        return await run(_load, model)


async def idle_shutdown_task() -> None:
    while True:
        log.debug("Checking for inactivity...")
        if (
            last_called is not None
            and not active_requests
            and not lock.locked()
            and time.time() - last_called > settings.model_ttl
        ):
            log.info("Shutting down due to inactivity.")
            os.kill(os.getpid(), signal.SIGINT)
            break
        await asyncio.sleep(settings.model_ttl_poll_s)