from typing import Any

from aiocache.backends.memory import SimpleMemoryCache
from aiocache.lock import OptimisticLock
from aiocache.plugins import TimingPlugin

from app.models import from_model_type
from app.models.base import InferenceModel

from ..schemas import ModelTask, ModelType, has_profiling


class ModelCache:
    """Fetches a model from an in-memory cache, instantiating it if it's missing."""

    def __init__(
        self,
        revalidate: bool = False,
        timeout: int | None = None,
        profiling: bool = False,
    ) -> None:
        """
        Args:
            revalidate: Resets TTL on cache hit. Useful to keep models in memory while active. Defaults to False.
            timeout: Maximum allowed time for model to load. Disabled if None. Defaults to None.
            profiling: Collects metrics for cache operations, adding slight overhead. Defaults to False.
        """
        plugins = []
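
        # TimingPlugin records per-operation latency metrics on the cache
        # client; get_profiling() exposes them below.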
        if profiling:
            plugins.append(TimingPlugin())

        self.should_revalidate = revalidate
        self.cache = SimpleMemoryCache(timeout=timeout, plugins=plugins, namespace=None)

    async def get(
        self, model_name: str, model_type: ModelType, model_task: ModelTask, **model_kwargs: Any
    ) -> InferenceModel:
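        # The cache key concatenates name, type, and task, so the same weights
        # loaded for different tasks are cached as separate entries.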
        key = f"{model_name}{model_type}{model_task}"
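
        # cas() below only succeeds if the cached value is unchanged since the
        # lock was entered, so concurrent loaders cannot overwrite each other's
        # freshly stored model.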
        async with OptimisticLock(self.cache, key) as lock:
            model: InferenceModel | None = await self.cache.get(key)
            if model is None:
                model = from_model_type(model_name, model_type, model_task, **model_kwargs)
                await lock.cas(model, ttl=model_kwargs.get("ttl", None))
            elif self.should_revalidate:
                await self.revalidate(key, model_kwargs.get("ttl", None))
        return model

    async def get_profiling(self) -> dict[str, float] | None:
        if not has_profiling(self.cache):
            return None

        return self.cache.profiling

    async def revalidate(self, key: str, ttl: int | None) -> None:
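        # _handlers is SimpleMemoryCache's internal map of scheduled expiry
        # callbacks; a key appears there only if it was stored with a TTL, so
        # entries without one are left untouched.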
        if ttl is not None and key in self.cache._handlers:
            await self.cache.expire(key, ttl)
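

# Minimal usage sketch (illustrative, not part of the module): the model name
# and the ModelType/ModelTask members below are assumed examples, and get()
# must be awaited from a running event loop.
#
#     cache = ModelCache(revalidate=True, profiling=True)
#     model = await cache.get("ViT-B-32__openai", ModelType.VISUAL, ModelTask.SEARCH, ttl=300)
#     profiling = await cache.get_profiling()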