import json
import pickle
from io import BytesIO
from pathlib import Path
from typing import Any, Callable
from unittest import mock

import cv2
import numpy as np
import onnxruntime as ort
import pytest
from fastapi.testclient import TestClient
from PIL import Image
from pytest_mock import MockerFixture

from .config import log, settings
from .models.base import InferenceModel, PicklableSessionOptions
from .models.cache import ModelCache
from .models.clip import OpenCLIPEncoder
from .models.facial_recognition import FaceRecognizer
from .schemas import ModelRuntime, ModelType


class TestBase:
    CPU_EP = ["CPUExecutionProvider"]
    CUDA_EP = ["CUDAExecutionProvider", "CPUExecutionProvider"]
    OV_EP = ["OpenVINOExecutionProvider", "CPUExecutionProvider"]
    CUDA_EP_OUT_OF_ORDER = ["CPUExecutionProvider", "CUDAExecutionProvider"]
    TRT_EP = ["TensorrtExecutionProvider", "CUDAExecutionProvider", "CPUExecutionProvider"]

    @pytest.mark.providers(CPU_EP)
    def test_sets_cpu_provider(self, providers: list[str]) -> None:
        encoder = OpenCLIPEncoder("ViT-B-32__openai")

        assert encoder.providers == self.CPU_EP

    @pytest.mark.providers(CUDA_EP)
    def test_sets_cuda_provider_if_available(self, providers: list[str]) -> None:
        encoder = OpenCLIPEncoder("ViT-B-32__openai")

        assert encoder.providers == self.CUDA_EP

    @pytest.mark.providers(OV_EP)
    def test_sets_openvino_provider_if_available(self, providers: list[str]) -> None:
        encoder = OpenCLIPEncoder("ViT-B-32__openai")

        assert encoder.providers == self.OV_EP

    @pytest.mark.providers(CUDA_EP_OUT_OF_ORDER)
    def test_sets_providers_in_correct_order(self, providers: list[str]) -> None:
        encoder = OpenCLIPEncoder("ViT-B-32__openai")

        assert encoder.providers == self.CUDA_EP

    @pytest.mark.providers(TRT_EP)
    def test_ignores_unsupported_providers(self, providers: list[str]) -> None:
        encoder = OpenCLIPEncoder("ViT-B-32__openai")

        assert encoder.providers == self.CUDA_EP

    def test_sets_provider_kwarg(self) -> None:
        providers = ["CUDAExecutionProvider"]
        encoder = OpenCLIPEncoder("ViT-B-32__openai", providers=providers)

        assert encoder.providers == providers

    def test_sets_default_provider_options(self) -> None:
        encoder = OpenCLIPEncoder("ViT-B-32__openai", providers=["OpenVINOExecutionProvider", "CPUExecutionProvider"])

        assert encoder.provider_options == [
            {},
            {"arena_extend_strategy": "kSameAsRequested"},
        ]

    def test_sets_provider_options_kwarg(self) -> None:
        encoder = OpenCLIPEncoder(
            "ViT-B-32__openai",
            providers=["OpenVINOExecutionProvider", "CPUExecutionProvider"],
            provider_options=[],
        )

        assert encoder.provider_options == []

    def test_sets_default_sess_options(self) -> None:
        encoder = OpenCLIPEncoder("ViT-B-32__openai")

        assert encoder.sess_options.execution_mode == ort.ExecutionMode.ORT_SEQUENTIAL
        assert encoder.sess_options.inter_op_num_threads == 1
        assert encoder.sess_options.intra_op_num_threads == 2
        assert encoder.sess_options.enable_cpu_mem_arena is False

    def test_sets_default_sess_options_does_not_set_threads_if_non_cpu_and_default_threads(self) -> None:
        encoder = OpenCLIPEncoder("ViT-B-32__openai", providers=["CUDAExecutionProvider", "CPUExecutionProvider"])

        assert encoder.sess_options.inter_op_num_threads == 0
        assert encoder.sess_options.intra_op_num_threads == 0

    def test_sets_default_sess_options_sets_threads_if_non_cpu_and_set_threads(self, mocker: MockerFixture) -> None:
        mock_settings = mocker.patch("app.models.base.settings", autospec=True)
        mock_settings.model_inter_op_threads = 2
        mock_settings.model_intra_op_threads = 4

        encoder = OpenCLIPEncoder("ViT-B-32__openai", providers=["CUDAExecutionProvider", "CPUExecutionProvider"])

        assert encoder.sess_options.inter_op_num_threads == 2
        assert encoder.sess_options.intra_op_num_threads == 4

    def test_sets_sess_options_kwarg(self) -> None:
        sess_options = ort.SessionOptions()
        encoder = OpenCLIPEncoder(
            "ViT-B-32__openai",
            providers=["OpenVINOExecutionProvider", "CPUExecutionProvider"],
            provider_options=[],
            sess_options=sess_options,
        )

        assert sess_options is encoder.sess_options

    def test_sets_default_cache_dir(self) -> None:
        encoder = OpenCLIPEncoder("ViT-B-32__openai")

        assert encoder.cache_dir == Path("/cache/clip/ViT-B-32__openai")

    def test_sets_cache_dir_kwarg(self) -> None:
        cache_dir = Path("/test_cache")
        encoder = OpenCLIPEncoder("ViT-B-32__openai", cache_dir=cache_dir)

        assert encoder.cache_dir == cache_dir

    def test_sets_default_preferred_runtime(self, mocker: MockerFixture) -> None:
        mocker.patch.object(settings, "ann", True)
        mocker.patch("ann.ann.is_available", False)

        encoder = OpenCLIPEncoder("ViT-B-32__openai")

        assert encoder.preferred_runtime == ModelRuntime.ONNX

    def test_sets_default_preferred_runtime_to_armnn_if_available(self, mocker: MockerFixture) -> None:
        mocker.patch.object(settings, "ann", True)
        mocker.patch("ann.ann.is_available", True)

        encoder = OpenCLIPEncoder("ViT-B-32__openai")

        assert encoder.preferred_runtime == ModelRuntime.ARMNN

    def test_sets_preferred_runtime_kwarg(self, mocker: MockerFixture) -> None:
        mocker.patch.object(settings, "ann", False)
        mocker.patch("ann.ann.is_available", False)

        encoder = OpenCLIPEncoder("ViT-B-32__openai", preferred_runtime=ModelRuntime.ARMNN)

        assert encoder.preferred_runtime == ModelRuntime.ARMNN

    def test_casts_cache_dir_string_to_path(self) -> None:
        cache_dir = "/test_cache"
        encoder = OpenCLIPEncoder("ViT-B-32__openai", cache_dir=cache_dir)

        assert encoder.cache_dir == Path(cache_dir)

    def test_clear_cache(self, mocker: MockerFixture) -> None:
        mock_rmtree = mocker.patch("app.models.base.rmtree", autospec=True)
        mock_rmtree.avoids_symlink_attacks = True
        mock_cache_dir = mocker.Mock()
        mock_cache_dir.exists.return_value = True
        mock_cache_dir.is_dir.return_value = True
        mocker.patch("app.models.base.Path", return_value=mock_cache_dir)
        info = mocker.spy(log, "info")

        encoder = OpenCLIPEncoder("ViT-B-32__openai", cache_dir=mock_cache_dir)
        encoder.clear_cache()

        mock_rmtree.assert_called_once_with(encoder.cache_dir)
        info.assert_called_once()

    def test_clear_cache_warns_if_path_does_not_exist(self, mocker: MockerFixture) -> None:
        mock_rmtree = mocker.patch("app.models.base.rmtree", autospec=True)
        mock_rmtree.avoids_symlink_attacks = True
        mock_cache_dir = mocker.Mock()
        mock_cache_dir.exists.return_value = False
        mock_cache_dir.is_dir.return_value = True
        mocker.patch("app.models.base.Path", return_value=mock_cache_dir)
        warning = mocker.spy(log, "warning")

        encoder = OpenCLIPEncoder("ViT-B-32__openai", cache_dir=mock_cache_dir)
        encoder.clear_cache()

        mock_rmtree.assert_not_called()
        warning.assert_called_once()

    def test_clear_cache_raises_exception_if_vulnerable_to_symlink_attack(self, mocker: MockerFixture) -> None:
        mock_rmtree = mocker.patch("app.models.base.rmtree", autospec=True)
        mock_rmtree.avoids_symlink_attacks = False
        mock_cache_dir = mocker.Mock()
        mock_cache_dir.exists.return_value = True
        mock_cache_dir.is_dir.return_value = True
        mocker.patch("app.models.base.Path", return_value=mock_cache_dir)

        encoder = OpenCLIPEncoder("ViT-B-32__openai", cache_dir=mock_cache_dir)
        with pytest.raises(RuntimeError):
            encoder.clear_cache()

        mock_rmtree.assert_not_called()

    def test_clear_cache_replaces_file_with_dir_if_path_is_file(self, mocker: MockerFixture) -> None:
        mock_rmtree = mocker.patch("app.models.base.rmtree", autospec=True)
        mock_rmtree.avoids_symlink_attacks = True
        mock_cache_dir = mocker.Mock()
        mock_cache_dir.exists.return_value = True
        mock_cache_dir.is_dir.return_value = False
        mocker.patch("app.models.base.Path", return_value=mock_cache_dir)
        warning = mocker.spy(log, "warning")

        encoder = OpenCLIPEncoder("ViT-B-32__openai", cache_dir=mock_cache_dir)
        encoder.clear_cache()

        mock_rmtree.assert_not_called()
        mock_cache_dir.unlink.assert_called_once()
        mock_cache_dir.mkdir.assert_called_once()
        warning.assert_called_once()

    def test_make_session_return_ann_if_available(self, mocker: MockerFixture) -> None:
        mock_model_path = mocker.Mock()
        mock_model_path.is_file.return_value = True
        mock_model_path.suffix = ".armnn"
        mock_model_path.with_suffix.return_value = mock_model_path
        mock_session = mocker.patch("app.models.base.AnnSession")

        encoder = OpenCLIPEncoder("ViT-B-32__openai")
        encoder._make_session(mock_model_path)

        mock_session.assert_called_once()

    def test_make_session_return_ort_if_available_and_ann_is_not(self, mocker: MockerFixture) -> None:
        mock_armnn_path = mocker.Mock()
        mock_armnn_path.is_file.return_value = False
        mock_armnn_path.suffix = ".armnn"

        mock_onnx_path = mocker.Mock()
        mock_onnx_path.is_file.return_value = True
        mock_onnx_path.suffix = ".onnx"
        mock_armnn_path.with_suffix.return_value = mock_onnx_path

        mock_ann = mocker.patch("app.models.base.AnnSession")
        mock_ort = mocker.patch("app.models.base.ort.InferenceSession")

        encoder = OpenCLIPEncoder("ViT-B-32__openai")
        encoder._make_session(mock_armnn_path)

        mock_ort.assert_called_once()
        mock_ann.assert_not_called()

    def test_make_session_raises_exception_if_path_does_not_exist(self, mocker: MockerFixture) -> None:
        mock_model_path = mocker.Mock()
        mock_model_path.is_file.return_value = False
        mock_model_path.suffix = ".onnx"
        mock_model_path.with_suffix.return_value = mock_model_path
        mock_ann = mocker.patch("app.models.base.AnnSession")
        mock_ort = mocker.patch("app.models.base.ort.InferenceSession")

        encoder = OpenCLIPEncoder("ViT-B-32__openai")
        with pytest.raises(ValueError):
            encoder._make_session(mock_model_path)

        mock_ann.assert_not_called()
        mock_ort.assert_not_called()

    def test_download(self, mocker: MockerFixture) -> None:
        mock_snapshot_download = mocker.patch("app.models.base.snapshot_download")

        encoder = OpenCLIPEncoder("ViT-B-32__openai")
        encoder.download()

        mock_snapshot_download.assert_called_once_with(
            "immich-app/ViT-B-32__openai",
            cache_dir=encoder.cache_dir,
            local_dir=encoder.cache_dir,
            local_dir_use_symlinks=False,
            ignore_patterns=["*.armnn"],
        )

    def test_download_downloads_armnn_if_preferred_runtime(self, mocker: MockerFixture) -> None:
        mock_snapshot_download = mocker.patch("app.models.base.snapshot_download")

        encoder = OpenCLIPEncoder("ViT-B-32__openai", preferred_runtime=ModelRuntime.ARMNN)
        encoder.download()

        mock_snapshot_download.assert_called_once_with(
            "immich-app/ViT-B-32__openai",
            cache_dir=encoder.cache_dir,
            local_dir=encoder.cache_dir,
            local_dir_use_symlinks=False,
            ignore_patterns=[],
        )


class TestCLIP:
    embedding = np.random.rand(512).astype(np.float32)
    cache_dir = Path("test_cache")

    def test_basic_image(
        self,
        pil_image: Image.Image,
        mocker: MockerFixture,
        clip_model_cfg: dict[str, Any],
        clip_preprocess_cfg: Callable[[Path], dict[str, Any]],
        clip_tokenizer_cfg: Callable[[Path], dict[str, Any]],
    ) -> None:
        mocker.patch.object(OpenCLIPEncoder, "download")
        mocker.patch.object(OpenCLIPEncoder, "model_cfg", clip_model_cfg)
        mocker.patch.object(OpenCLIPEncoder, "preprocess_cfg", clip_preprocess_cfg)
        mocker.patch.object(OpenCLIPEncoder, "tokenizer_cfg", clip_tokenizer_cfg)

        mocked = mocker.patch.object(InferenceModel, "_make_session", autospec=True).return_value
        mocked.run.return_value = [[self.embedding]]
        mocker.patch("app.models.clip.Tokenizer.from_file", autospec=True)

        clip_encoder = OpenCLIPEncoder("ViT-B-32__openai", cache_dir="test_cache", mode="vision")
        embedding = clip_encoder.predict(pil_image)

        assert clip_encoder.mode == "vision"
        assert isinstance(embedding, np.ndarray)
        assert embedding.shape[0] == clip_model_cfg["embed_dim"]
        assert embedding.dtype == np.float32
        mocked.run.assert_called_once()

    def test_basic_text(
        self,
        mocker: MockerFixture,
        clip_model_cfg: dict[str, Any],
        clip_preprocess_cfg: Callable[[Path], dict[str, Any]],
        clip_tokenizer_cfg: Callable[[Path], dict[str, Any]],
    ) -> None:
        mocker.patch.object(OpenCLIPEncoder, "download")
        mocker.patch.object(OpenCLIPEncoder, "model_cfg", clip_model_cfg)
        mocker.patch.object(OpenCLIPEncoder, "preprocess_cfg", clip_preprocess_cfg)
        mocker.patch.object(OpenCLIPEncoder, "tokenizer_cfg", clip_tokenizer_cfg)

        mocked = mocker.patch.object(InferenceModel, "_make_session", autospec=True).return_value
        mocked.run.return_value = [[self.embedding]]
        mocker.patch("app.models.clip.Tokenizer.from_file", autospec=True)

        clip_encoder = OpenCLIPEncoder("ViT-B-32__openai", cache_dir="test_cache", mode="text")
        embedding = clip_encoder.predict("test search query")

        assert clip_encoder.mode == "text"
        assert isinstance(embedding, np.ndarray)
        assert embedding.shape[0] == clip_model_cfg["embed_dim"]
        assert embedding.dtype == np.float32
        mocked.run.assert_called_once()


class TestFaceRecognition:
    def test_set_min_score(self, mocker: MockerFixture) -> None:
        mocker.patch.object(FaceRecognizer, "load")
        face_recognizer = FaceRecognizer("buffalo_s", cache_dir="test_cache", min_score=0.5)

        assert face_recognizer.min_score == 0.5

    def test_basic(self, cv_image: cv2.Mat, mocker: MockerFixture) -> None:
        mocker.patch.object(FaceRecognizer, "load")
        face_recognizer = FaceRecognizer("buffalo_s", min_score=0.0, cache_dir="test_cache")

        det_model = mock.Mock()
        num_faces = 2
        bbox = np.random.rand(num_faces, 4).astype(np.float32)
        score = np.array([[0.67]] * num_faces).astype(np.float32)
        kpss = np.random.rand(num_faces, 5, 2).astype(np.float32)
        det_model.detect.return_value = (np.concatenate([bbox, score], axis=-1), kpss)
        face_recognizer.det_model = det_model

        rec_model = mock.Mock()
        embedding = np.random.rand(num_faces, 512).astype(np.float32)
        rec_model.get_feat.return_value = embedding
        face_recognizer.rec_model = rec_model

        faces = face_recognizer.predict(cv_image)

        assert len(faces) == num_faces
        for face in faces:
            assert face["imageHeight"] == 800
            assert face["imageWidth"] == 600
            assert isinstance(face["embedding"], np.ndarray)
            assert face["embedding"].shape[0] == 512
            assert face["embedding"].dtype == np.float32

        det_model.detect.assert_called_once()
        assert rec_model.get_feat.call_count == num_faces


@pytest.mark.asyncio
class TestCache:
    async def test_caches(self, mock_get_model: mock.Mock) -> None:
        model_cache = ModelCache()
        await model_cache.get("test_model_name", ModelType.FACIAL_RECOGNITION)
        await model_cache.get("test_model_name", ModelType.FACIAL_RECOGNITION)
        assert len(model_cache.cache._cache) == 1
        mock_get_model.assert_called_once()

    async def test_kwargs_used(self, mock_get_model: mock.Mock) -> None:
        model_cache = ModelCache()
        await model_cache.get("test_model_name", ModelType.FACIAL_RECOGNITION, cache_dir="test_cache")
        mock_get_model.assert_called_once_with(ModelType.FACIAL_RECOGNITION, "test_model_name", cache_dir="test_cache")

    async def test_different_clip(self, mock_get_model: mock.Mock) -> None:
        model_cache = ModelCache()
        await model_cache.get("test_image_model_name", ModelType.CLIP)
        await model_cache.get("test_text_model_name", ModelType.CLIP)
        mock_get_model.assert_has_calls(
            [
                mock.call(ModelType.CLIP, "test_image_model_name"),
                mock.call(ModelType.CLIP, "test_text_model_name"),
            ]
        )
        assert len(model_cache.cache._cache) == 2

    @mock.patch("app.models.cache.OptimisticLock", autospec=True)
    async def test_model_ttl(self, mock_lock_cls: mock.Mock, mock_get_model: mock.Mock) -> None:
        model_cache = ModelCache(ttl=100)
        await model_cache.get("test_model_name", ModelType.FACIAL_RECOGNITION)
        mock_lock_cls.return_value.__aenter__.return_value.cas.assert_called_with(mock.ANY, ttl=100)

    @mock.patch("app.models.cache.SimpleMemoryCache.expire")
    async def test_revalidate(self, mock_cache_expire: mock.Mock, mock_get_model: mock.Mock) -> None:
        model_cache = ModelCache(ttl=100, revalidate=True)
        await model_cache.get("test_model_name", ModelType.FACIAL_RECOGNITION)
        await model_cache.get("test_model_name", ModelType.FACIAL_RECOGNITION)
        mock_cache_expire.assert_called_once_with(mock.ANY, 100)


@pytest.mark.skipif(
    not settings.test_full,
    reason="More time-consuming since it deploys the app and loads models.",
)
class TestEndpoints:
    def test_clip_image_endpoint(
        self, pil_image: Image.Image, responses: dict[str, Any], deployed_app: TestClient
    ) -> None:
        byte_image = BytesIO()
        pil_image.save(byte_image, format="jpeg")
        response = deployed_app.post(
            "http://localhost:3003/predict",
            data={"modelName": "ViT-B-32__openai", "modelType": "clip", "options": json.dumps({"mode": "vision"})},
            files={"image": byte_image.getvalue()},
        )
        assert response.status_code == 200
        assert response.json() == responses["clip"]["image"]

    def test_clip_text_endpoint(self, responses: dict[str, Any], deployed_app: TestClient) -> None:
        response = deployed_app.post(
            "http://localhost:3003/predict",
            data={
                "modelName": "ViT-B-32__openai",
                "modelType": "clip",
                "text": "test search query",
                "options": json.dumps({"mode": "text"}),
            },
        )
        assert response.status_code == 200
        assert response.json() == responses["clip"]["text"]

    def test_face_endpoint(self, pil_image: Image.Image, responses: dict[str, Any], deployed_app: TestClient) -> None:
        byte_image = BytesIO()
        pil_image.save(byte_image, format="jpeg")

        response = deployed_app.post(
            "http://localhost:3003/predict",
            data={
                "modelName": "buffalo_l",
                "modelType": "facial-recognition",
                "options": json.dumps({"minScore": 0.034}),
            },
            files={"image": byte_image.getvalue()},
        )
        assert response.status_code == 200
        assert response.json() == responses["facial-recognition"]


def test_sess_options() -> None:
    sess_options = PicklableSessionOptions()
    sess_options.intra_op_num_threads = 1
    sess_options.inter_op_num_threads = 1
    pickled = pickle.dumps(sess_options)
    unpickled = pickle.loads(pickled)
    assert unpickled.intra_op_num_threads == 1
    assert unpickled.inter_op_num_threads == 1