from typing import NewType, Any

import os
import io

from PIL import Image

# Hide all GPUs before importing torch so inference always runs on the CPU.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

import torch
from torchvision import transforms  # type: ignore

from server.modules.recognizer.repository import ARecognizerRepository


TorchModel = NewType("TorchModel", torch.nn.Module)


def load_model(model_path: str, device: str = "cpu") -> TorchModel:
    """Load a serialized PyTorch model and switch it to evaluation mode."""
    model = torch.load(model_path, map_location=device, weights_only=False)
    model.eval()
    return TorchModel(model)


# Both models are loaded once at import time and reused for every request.
DOG_MODEL = load_model("server/models/dogs_model.pth")
CAT_MODEL = load_model("server/models/cats_model.pth")


class RecognizerService:
    """Breed recognition for cat and dog images backed by a repository."""

    __slots__ = ("_repository",)

    def __init__(self, repository: ARecognizerRepository):
        self._repository = repository

    async def images_cats(self) -> dict:
        return await self._repository.images_cats()

    async def images_dogs(self) -> dict:
        return await self._repository.images_dogs()

    async def predict_dog_image(self, image: bytes) -> dict:
        predicted_data = self._predict(image, DOG_MODEL)
        results = {}
        images = []
        description: dict[str, list] = {}
        images_dogs = await self._repository.images_dogs()
        for predicted_idx, probability in predicted_data:
            predicted_label = self._repository.labels_dogs()[str(predicted_idx)]
            name = predicted_label.replace("_", " ")
            images.append(
                {
                    "name": name,
                    "url": [
                        f"/static/assets/dog/{predicted_label}/{i}"
                        for i in images_dogs[predicted_label]
                    ],
                }
            )
            description.setdefault(name, []).append(
                f"/dogs-characteristics/{name.replace(' ', '_')}"
            )
            # Each top-k probability is mapped to its predicted breed name.
            results[probability] = name
        return {
            "results": results,
            "images": images,
            "description": description,
        }

    async def predict_cat_image(self, image: bytes) -> dict:
        predicted_data = self._predict(image, CAT_MODEL)
        results = {}
        images = []
        images_cats = await self._repository.images_cats()
        for predicted_idx, probability in predicted_data:
            predicted_label = self._repository.labels_cats()[str(predicted_idx)]
            name = predicted_label.replace("_", " ")
            images.append(
                {
                    "name": name,
                    "url": [
                        f"/static/assets/cat/{predicted_label}/{i}"
                        for i in images_cats[predicted_label]
                    ],
                }
            )
            results[probability] = name
        return {
            "results": results,
            "images": images,
        }

    def _predict(self, image: bytes, model, device: str = "cpu") -> list[Any]:
        img_size = (224, 224)
        preprocess = transforms.Compose(
            [
                transforms.Resize(img_size),
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
            ]
        )
        # Convert to RGB so Normalize always receives a three-channel tensor.
        input_tensor = preprocess(Image.open(io.BytesIO(image)).convert("RGB"))
        input_batch = input_tensor.unsqueeze(0).to(device)  # add batch dimension

        with torch.no_grad():
            output = model(input_batch)

        # Turn raw logits into probabilities and keep the five most likely classes.
        probabilities = torch.nn.functional.softmax(output[0], dim=0)
        k = 5
        topk_probs, predicted_idx = torch.topk(probabilities, k)

        predicted_data = []
        for i in range(k):
            predicted_data.append(
                (predicted_idx[i].item(), float(topk_probs[i].item()))
            )
        return predicted_data
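
# Illustrative usage sketch (not part of the service). It assumes some concrete
# ARecognizerRepository implementation is available; the repository instance and
# the image path below are placeholders.
#
#     import asyncio
#
#     async def main() -> None:
#         repository = ...  # any ARecognizerRepository implementation
#         service = RecognizerService(repository)
#         with open("dog.jpg", "rb") as f:
#             prediction = await service.predict_dog_image(f.read())
#         print(prediction["results"])  # {probability: breed name, ...}
#
#     asyncio.run(main())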