Mirror of https://github.com/immich-app/immich.git, synced 2024-12-23 02:06:15 +02:00
95cfe22866
* cuda and openvino ep, refactor, update dockerfile
* updated workflow
* typing fixes
* added tests
* updated ml test gh action
* updated README
* updated docker-compose
* added compute to hwaccel.yml
* updated gh matrix
* updated gh matrix
* updated gh matrix
* updated gh matrix
* updated gh matrix
* give up
* remove cuda/arm64 build
* add hwaccel image tags to docker-compose
* remove unnecessary quotes
* add suffix to git tag
* fixed kwargs in base model
* armnn ld_library_path
* update pyproject.toml
* add armnn workflow
* formatting
* consolidate hwaccel files, update docker compose
* update hw transcoding docs
* add ml hwaccel docs
* update dev and prod docker-compose
* added armnn prerequisite docs
* support 3.10
* updated docker-compose comments
* formatting
* test coverage
* don't set arena extend strategy for openvino
* working openvino
* formatting
* fix dockerfile
* added type annotation
* add wsl configuration for openvino
* updated lock file
* copy python3
* comment out extends section
* fix platforms
* simplify workflow suffix tagging
* simplify aio transcoding doc
* update docs and workflow for `hwaccel.yml` change
* revert docs
71 lines
1.6 KiB
Python
from app.config import clean_name

_OPENCLIP_MODELS = {
"RN50__openai",
|
|
"RN50__yfcc15m",
|
|
"RN50__cc12m",
|
|
"RN101__openai",
|
|
"RN101__yfcc15m",
|
|
"RN50x4__openai",
|
|
"RN50x16__openai",
|
|
"RN50x64__openai",
|
|
"ViT-B-32__openai",
|
|
"ViT-B-32__laion2b_e16",
|
|
"ViT-B-32__laion400m_e31",
|
|
"ViT-B-32__laion400m_e32",
|
|
"ViT-B-32__laion2b-s34b-b79k",
|
|
"ViT-B-16__openai",
|
|
"ViT-B-16__laion400m_e31",
|
|
"ViT-B-16__laion400m_e32",
|
|
"ViT-B-16-plus-240__laion400m_e31",
|
|
"ViT-B-16-plus-240__laion400m_e32",
|
|
"ViT-L-14__openai",
|
|
"ViT-L-14__laion400m_e31",
|
|
"ViT-L-14__laion400m_e32",
|
|
"ViT-L-14__laion2b-s32b-b82k",
|
|
"ViT-L-14-336__openai",
|
|
"ViT-H-14__laion2b-s32b-b79k",
|
|
"ViT-g-14__laion2b-s12b-b42k",
|
|
"ViT-L-14-quickgelu__dfn2b",
|
|
"ViT-H-14-quickgelu__dfn5b",
|
|
"ViT-H-14-378-quickgelu__dfn5b",
|
|
}
|
|
|
|
|
|
_MCLIP_MODELS = {
|
|
"LABSE-Vit-L-14",
|
|
"XLM-Roberta-Large-Vit-B-32",
|
|
"XLM-Roberta-Large-Vit-B-16Plus",
|
|
"XLM-Roberta-Large-Vit-L-14",
|
|
"XLM-Roberta-Large-ViT-H-14__frozen_laion5b_s13b_b90k",
|
|
"nllb-clip-base-siglip__v1",
|
|
"nllb-clip-large-siglip__v1",
|
|
}
|
|
|
|
|
|
_INSIGHTFACE_MODELS = {
|
|
"antelopev2",
|
|
"buffalo_l",
|
|
"buffalo_m",
|
|
"buffalo_s",
|
|
}
|
|
|
|
|
|
SUPPORTED_PROVIDERS = [
|
|
"CUDAExecutionProvider",
|
|
"OpenVINOExecutionProvider",
|
|
"CPUExecutionProvider",
|
|
]


def is_openclip(model_name: str) -> bool:
    return clean_name(model_name) in _OPENCLIP_MODELS


def is_mclip(model_name: str) -> bool:
    return clean_name(model_name) in _MCLIP_MODELS


def is_insightface(model_name: str) -> bool:
    return clean_name(model_name) in _INSIGHTFACE_MODELS
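
# Usage sketch (assumes clean_name maps each canonical name above to itself, so the
# membership checks match; the names passed in here are examples only):
#
#   >>> is_openclip("ViT-B-32__openai")
#   True
#   >>> is_insightface("buffalo_l")
#   True
#   >>> is_mclip("ViT-B-32__openai")
#   False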