Breeds in the DB (породы в БД)

parent 6eb8cdff6e
commit ce5c715611

Makefile | 4

@@ -1,5 +1,5 @@
 api:
-	uv run granian --interface asgi server.main:app --host 0.0.0.0
+	alembic upgrade head && uv run granian --interface asgi server.main:app --host 0.0.0.0
 
 dog-train:
 	uv run ml/dogs.py

@@ -18,7 +18,7 @@ pipinstall:
 	uv pip sync requirements.txt
 
 migrate-up:
-	AI_DIALOG_WB_TOKEN="" AI_BAIDU_SECRET_KEY="" AI_BAIDU_API_KEY="" DB_PASS_SALT="d" SENTRY_DNS="" APP_TOKEN_SECRET="d" alembic upgrade head
+	alembic upgrade head
 
 migration-generate:
 	git rev-parse --short HEAD | xargs -I {} alembic revision --autogenerate -m "{}"

@@ -1,11 +1,10 @@
 import json
 
-from PIL import ImageFile
 import torch.nn as nn
-from torchvision.datasets import ImageFolder  # type: ignore
+from PIL import ImageFile
 from torch.utils.data import DataLoader
-
-from train import get_labels, load_model, get_loaders, train, show, DEVICE  # type: ignore
+from torchvision.datasets import ImageFolder  # type: ignore
+from train import DEVICE, get_labels, get_loaders, load_model, show, train  # type: ignore
 
 ImageFile.LOAD_TRUNCATED_IMAGES = True

@@ -1,11 +1,10 @@
 import json
 
-from PIL import ImageFile
 import torch.nn as nn
-from torchvision.datasets import ImageFolder  # type: ignore
+from PIL import ImageFile
 from torch.utils.data import DataLoader
-
-from train import get_labels, load_model, get_loaders, train, show, DEVICE  # type: ignore
+from torchvision.datasets import ImageFolder  # type: ignore
+from train import DEVICE, get_labels, get_loaders, load_model, show, train  # type: ignore
 
 ImageFile.LOAD_TRUNCATED_IMAGES = True

@@ -1,12 +1,12 @@
-import torch
-from torchvision import transforms  # type: ignore
-import torch.nn.functional as F
-from PIL import Image
 import json
 
+import torch
+import torch.nn.functional as F
+from PIL import Image
+from torchvision import transforms  # type: ignore
+
 # Build labels_dict mapping classes to indices
-with open("labels.json", "r") as f:
+with open("labels.json") as f:
     data_labels = f.read()
 labels_dict = json.loads(data_labels)

ml/train.py | 16

@@ -1,13 +1,13 @@
 import os
 
 import matplotlib.pyplot as plt  # type: ignore
 import torch
 import torch.nn as nn
-from torchvision.datasets import ImageFolder  # type: ignore
-from torch.utils.data import Dataset, DataLoader, random_split
-from torchvision import transforms  # type: ignore
 import torchvision
+from torch.utils.data import DataLoader, Dataset, random_split
+from torchvision import transforms  # type: ignore
+from torchvision.datasets import ImageFolder  # type: ignore
 from torchvision.models import ResNet50_Weights  # type: ignore
-from typing import Tuple
 
 DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

@@ -29,7 +29,7 @@ def get_labels(input_dir, img_size):
     return labels_dict, dataset
 
 
-def get_loaders(dataset: Dataset) -> Tuple[DataLoader, DataLoader]:
+def get_loaders(dataset: Dataset) -> tuple[DataLoader, DataLoader]:
     # Split the data into training and validation sets
     train_size = int(0.8 * float(len(dataset)))  # type: ignore[arg-type]
     val_size = len(dataset) - train_size  # type: ignore[arg-type]

@@ -61,7 +61,7 @@ def train(
     model: nn.Module,
     train_loader: DataLoader,
     val_loader: DataLoader,
-) -> Tuple[list[float], list[float], list[float], list[float]]:
+) -> tuple[list[float], list[float], list[float], list[float]]:
     criterion = torch.nn.CrossEntropyLoss()
     optimizer = torch.optim.Adam(model.fc.parameters(), lr=1e-4)  # type: ignore[union-attr]
     # Metric history

@@ -96,9 +96,7 @@ def train(
         train_acc = 100.0 * correct / total
         train_loss_history.append(train_loss)
         train_acc_history.append(train_acc)
-        print(
-            f"Epoch {epoch + 1}/{num_epochs}, Train Loss: {train_loss:.4f}, Train Accuracy: {train_acc:.2f}%"
-        )
+        print(f"Epoch {epoch + 1}/{num_epochs}, Train Loss: {train_loss:.4f}, Train Accuracy: {train_acc:.2f}%")
 
         # Evaluate on validation data
         model.eval()

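`get_loaders` keeps the 80/20 `random_split` pattern; only the return annotation moves to the builtin `tuple` (PEP 585). A minimal runnable sketch of that split, using toy tensors rather than the repo's `ImageFolder` dataset:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset, random_split

# Toy dataset standing in for the ImageFolder used by ml/train.py.
dataset = TensorDataset(torch.randn(10, 3), torch.zeros(10))
train_size = int(0.8 * len(dataset))
train_set, val_set = random_split(dataset, [train_size, len(dataset) - train_size])

train_loader = DataLoader(train_set, batch_size=4, shuffle=True)
val_loader = DataLoader(val_set, batch_size=4)
```
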
pyproject.toml | 114

@@ -48,3 +48,117 @@ default = [
     "matplotlib>=3.10.1",
 ]
+
+# MYPY
+
+[tool.mypy]
+exclude = [
+    ".venv",
+    "venv",
+    "tmp",
+    "scripts",
+    "tests",
+]
+plugins = ["sqlalchemy.ext.mypy.plugin"]
+mypy_path = "./stubs"
+ignore_missing_imports = true
+
+# RUFF
+
+[tool.ruff]
+target-version = "py312"
+show-fixes = true
+src = ["app"]
+# Same as Black.
+line-length = 120
+indent-width = 4
+
+# Exclude a variety of commonly ignored directories.
+exclude = [
+    ".bzr",
+    ".direnv",
+    ".eggs",
+    ".git",
+    ".git-rewrite",
+    ".hg",
+    ".ipynb_checkpoints",
+    ".mypy_cache",
+    ".nox",
+    ".pants.d",
+    ".pyenv",
+    ".pytest_cache",
+    ".pytype",
+    ".ruff_cache",
+    ".svn",
+    ".tox",
+    ".venv",
+    ".vscode",
+    "__pypackages__",
+    "_build",
+    "buck-out",
+    "build",
+    "dist",
+    "node_modules",
+    "site-packages",
+    "venv",
+    "stubs",
+    "scripts",
+]
+
+[tool.ruff.lint.isort]
+known-first-party = ["app"]
+
+[tool.ruff.format]
+# Like Black, use double quotes for strings.
+quote-style = "double"
+# Like Black, indent with spaces, rather than tabs.
+indent-style = "space"
+# Like Black, respect magic trailing commas.
+skip-magic-trailing-comma = false
+# Like Black, automatically detect the appropriate line ending.
+line-ending = "auto"
+
+[tool.ruff.lint.per-file-ignores]
+"stubs/*" = ["F403"]
+"server/migration/*" = ["E501", "F403"]
+"server/config/__init__.py" = ["E501"]
+"scripts/*" = ["T201", "E501"]
+"server/admin/*" = ["E501", "E711"]
+"vk_api/*" = ["T201", "C416", "A001", "E501"]
+"ml/*" = ["T201", "C416", "A001", "E501", "N812"]
+"tests/**/*.py" = [
+    "E501", "ASYNC230",
+    # at least these three should be fine in tests:
+    "S101",  # asserts allowed in tests...
+    "S106",  # Possible hardcoded password assigned to argument: "password"
+    "S110",  # consider logging the exception
+    "ARG",  # Unused function args -> fixtures nevertheless are functionally relevant...
+    "FBT",  # Don't care about booleans as positional arguments in tests, e.g. via @pytest.mark.parametrize()
+    # The below are debatable
+    "PLR2004",  # Magic value used in comparison, ...
+    "S311",  # Standard pseudo-random generators are not suitable for cryptographic purposes
+    "INP001",  # File `...` is part of an implicit namespace package. Add an `__init__.py`.
+    "SLF001",  # Private member accessed: `_...`
+]
+"tests/__init__.py" = ["I001"]
+
+[tool.ruff.lint]
+# https://docs.astral.sh/ruff/rules/
+select = ["DTZ", "F", "C4", "B", "A", "E", "T", "I", "N", "UP", "ASYNC", "Q"]
+ignore = ["E712", "B904", "B019", "C417"]
+
+# Allow fix for all enabled rules (when `--fix` is provided).
+fixable = ["ALL"]
+unfixable = []
+# Allow unused variables when underscore-prefixed.
+dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
+
+[tool.ruff.lint.mccabe]
+# https://docs.astral.sh/ruff/settings/#mccabe
+# Flag errors (`C901`) whenever the complexity level exceeds the limit.
+max-complexity = 12
+
+[tool.ruff.lint.flake8-pytest-style]
+# https://docs.astral.sh/ruff/settings/#flake8-pytest-style
+fixture-parentheses = false
+mark-parentheses = false

@@ -22,9 +22,7 @@ class AppConfig:
     sentry_dns: str = field("SENTRY_DNS", default="")
     log_level: str = field("LOG_LEVEL", "INFO")
 
-    db_uri: str = field(
-        "DB_URI", "postgresql+asyncpg://svcuser:svcpass@localhost:5432/svc"
-    )
+    db_uri: str = field("DB_URI", "postgresql+asyncpg://svcuser:svcpass@localhost:5432/svc")
     db_pass_salt: str = field("DB_PASS_SALT", "")
     db_search_path: str = field("DB_SEARCH_PATH", "beerds")

@@ -1,14 +1,13 @@
 from abc import ABCMeta, abstractmethod
-from typing import Optional
 
 
 class CacheRepository(metaclass=ABCMeta):
     @abstractmethod
-    async def get(self, key: str) -> Optional[str]:
+    async def get(self, key: str) -> str | None:
         pass
 
     @abstractmethod
-    async def set(self, key: str, data: str, _exp_min: Optional[int] = None):
+    async def set(self, key: str, data: str, _exp_min: int | None = None):
         pass
 
     @abstractmethod

@@ -23,10 +22,10 @@ class LocalCacheRepository(CacheRepository):
     def __init__(self) -> None:
         self._data = {}
 
-    async def get(self, key: str) -> Optional[str]:
+    async def get(self, key: str) -> str | None:
         return self._data.get(key)
 
-    async def set(self, key: str, data: str, _exp_min: Optional[int] = None):
+    async def set(self, key: str, data: str, _exp_min: int | None = None):
         self._data[key] = data
 
     async def delete(self, key: str):

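The `Optional[X]` → `X | None` rewrites that repeat throughout this commit use PEP 604 union syntax (Python 3.10+), matching the `UP` ruleset selected for ruff in pyproject.toml. The two spellings mean the same type; a minimal sketch:

```python
from typing import Optional

def get_old(key: str) -> Optional[str]:  # pre-PEP 604 spelling
    ...

def get_new(key: str) -> str | None:  # Python 3.10+ spelling, same type
    ...
```
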
@@ -1,10 +1,12 @@
 """Abstract realization for DB"""
 
-from typing import Any, AsyncContextManager, Awaitable, Callable, TypeAlias
+from collections.abc import Awaitable, Callable
+from contextlib import AbstractAsyncContextManager as AsyncContextManager
+from typing import Any
 
 from server.config import AppConfig
 
-ExecuteFun: TypeAlias = Callable[[Any], Awaitable[None]]
+type ExecuteFun = Callable[[Any], Awaitable[None]]
 
 
 class ConnectError(Exception):

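The replacement of `TypeAlias` with the `type` statement is Python 3.12 syntax (PEP 695), consistent with `target-version = "py312"` in pyproject.toml above. A minimal standalone sketch, not repo code:

```python
from collections.abc import Awaitable, Callable
from typing import Any

# Python 3.12+ (PEP 695): a lazily evaluated, explicitly declared type alias.
type ExecuteFun = Callable[[Any], Awaitable[None]]

async def log_event(payload: Any) -> None:
    print("executed:", payload)

fn: ExecuteFun = log_event  # type-checks: matches Callable[[Any], Awaitable[None]]
```
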
@@ -1,11 +1,9 @@
-from typing import Type, TypeVar
-
 from sqlalchemy.orm import registry
 
 mapper_registry = registry()
 
-DC = TypeVar("DC")
-
 
-def dict_to_dataclass(data: dict, class_type: Type[DC]) -> DC:
+def dict_to_dataclass[T](data: dict, class_type: type[T]) -> T:
     return class_type(**data)

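`dict_to_dataclass[T]` uses PEP 695 inline type parameters in place of a module-level `TypeVar`. A hedged usage sketch; the `Breed` dataclass here is an illustrative stand-in, not imported from the repo:

```python
from dataclasses import dataclass

def dict_to_dataclass[T](data: dict, class_type: type[T]) -> T:
    return class_type(**data)

@dataclass
class Breed:  # illustrative stand-in for the repo's domain class
    id: str
    name: str

breed = dict_to_dataclass({"id": "akita", "name": "Akita"}, Breed)
assert breed.name == "Akita"  # the checker also knows breed is a Breed
```
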
@@ -1,4 +1,5 @@
-from typing import Any, AsyncContextManager
+from contextlib import AbstractAsyncContextManager as AsyncContextManager
+from typing import Any
 
 from server.config import AppConfig
 from server.infra.db.abc import AbstractDB, AbstractSession

@@ -1,7 +1,7 @@
 from copy import copy
 from dataclasses import Field, asdict, dataclass
 from enum import Enum
-from typing import Any, ClassVar, Optional, Protocol, assert_never
+from typing import Any, ClassVar, Protocol, assert_never
 
 from sqlalchemy import Select, and_, or_

@@ -28,7 +28,7 @@ class FilterLeftField(Protocol):
 class Filter:
     right: Any
     sign: FilterSign
-    left: Optional[FilterLeftField] = None
+    left: FilterLeftField | None = None
 
     @staticmethod
     def not_eq(f1: Any, f2: Any):

@@ -79,33 +79,27 @@ class RestrictionField:
 
 @dataclass
 class QueryRestriction:
-    filters: Optional[list[Filter]] = None
-    limit: Optional[int] = None
-    offset: Optional[int] = None
-    sort: Optional[list[RestrictionField]] = None
+    filters: list[Filter] | None = None
+    limit: int | None = None
+    offset: int | None = None
+    sort: list[RestrictionField] | None = None
 
 
 @dataclass(frozen=False)
 class FilterQuery:
     filters: list[Filter]
-    limit: Optional[int] = None
-    offset: Optional[int] = None
-    sort: Optional[list[RestrictionField]] = None
+    limit: int | None = None
+    offset: int | None = None
+    sort: list[RestrictionField] | None = None
 
     @staticmethod
     def mass_and(fields: list[object], values: list[Any]) -> "FilterQuery":
-        return FilterQuery(
-            filters=[Filter.eq(field, val) for field, val in zip(fields, values)]
-        )
+        return FilterQuery(filters=[Filter.eq(field, val) for field, val in zip(fields, values, strict=True)])
 
     @staticmethod
     def mass_or(fields: list[object], values: list[Any]) -> "FilterQuery":
         return FilterQuery(
-            filters=[
-                Filter.or_(
-                    [Filter.eq(field, val) for field, val in zip(fields, values)]
-                )
-            ]
+            filters=[Filter.or_([Filter.eq(field, val) for field, val in zip(fields, values, strict=True)])]
         )
 
     @staticmethod

@@ -119,7 +113,7 @@ class FilterQuery:
     def add_and(self, field: object, value: Any):
         self.filters.append(Filter.eq(field, value))
 
-    def add_query_restistions(self, q_restriction: Optional[QueryRestriction] = None):
+    def add_query_restistions(self, q_restriction: QueryRestriction | None = None):
         if not q_restriction:
             return None
         if q_restriction.limit:

@@ -137,9 +131,7 @@ class DataclassInstance(Protocol):
     __dataclass_fields__: ClassVar[dict[str, Field[Any]]]
 
 
-async def indexes_by_id(
-    input_data: list, values: list[str], id_name="id"
-) -> Optional[list[int]]:
+async def indexes_by_id(input_data: list, values: list[str], id_name="id") -> list[int] | None:
     r_data: list[int] = []
     for i, _ in enumerate(input_data):
         if getattr(input_data[i], id_name) in values:

@@ -149,9 +141,7 @@ async def indexes_by_id(
     return r_data
 
 
-def data_by_filter[T: DataclassInstance](
-    input_data: list[T], q: FilterQuery
-) -> list[T]:
+def data_by_filter[T: DataclassInstance](input_data: list[T], q: FilterQuery) -> list[T]:
     # can't do query AND(OR() + AND())
     data: list[T] = []
     data_or: list[T] = []

@@ -245,14 +235,10 @@ def sqlalchemy_conditions(q: FilterQuery):
     conditions = []
     for f in q.filters:
         if f.sign == FilterSign.OR:
-            conditions.append(
-                or_(*sqlalchemy_conditions(q=FilterQuery(filters=f.right)))
-            )
+            conditions.append(or_(*sqlalchemy_conditions(q=FilterQuery(filters=f.right))))
             continue
         if f.sign == FilterSign.AND:
-            conditions.append(
-                and_(*sqlalchemy_conditions(q=FilterQuery(filters=f.right)))
-            )
+            conditions.append(and_(*sqlalchemy_conditions(q=FilterQuery(filters=f.right))))
             continue
         if f.left is None:
             continue

@@ -282,9 +268,7 @@ def sqlalchemy_conditions(q: FilterQuery):
     return conditions
 
 
-def sqlalchemy_restrictions(
-    f: FilterQuery, q: Select, dict_to_sort: Optional[dict] = None
-) -> Select:
+def sqlalchemy_restrictions(f: FilterQuery, q: Select, dict_to_sort: dict | None = None) -> Select:
     if f.limit:
         q = q.limit(f.limit)
     if f.offset:

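The `strict=True` added to `zip` in `mass_and`/`mass_or` (available since Python 3.10) turns a silently truncated fields/values mismatch into an error. A quick illustration, independent of the repo's classes:

```python
fields = ["name", "alias"]
values = ["Akita"]  # one value missing

pairs = list(zip(fields, values))  # silently drops "alias"
assert pairs == [("name", "Akita")]

try:
    list(zip(fields, values, strict=True))  # raises instead of truncating
except ValueError as e:
    print(e)  # zip() argument 2 is shorter than argument 1
```
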
@@ -1,5 +1,5 @@
 from server.infra.web.description import DescriptionController
-from server.infra.web.seo import SeoController
 from server.infra.web.recognizer import BreedsController
+from server.infra.web.seo import SeoController
 
 __all__ = ("DescriptionController", "SeoController", "BreedsController")

@@ -34,9 +34,7 @@ class DescriptionController(Controller):
     async def dogs_characteristics(self) -> Template:
         characters_service: CharactersService = inject.instance(CharactersService)
         breeds = await characters_service.get_characters()
-        return Template(
-            template_name="dogs-characteristics.html", context={"breeds": breeds}
-        )
+        return Template(template_name="dogs-characteristics.html", context={"breeds": breeds})
 
     @get("/dogs-characteristics/{name:str}")
     async def beer_description(self, name: str) -> Template:

@@ -1,10 +1,12 @@
+from typing import Annotated
+
 import inject
 from litestar import (
     Controller,
     post,
 )
-from litestar.enums import RequestEncodingType
 from litestar.datastructures import UploadFile
+from litestar.enums import RequestEncodingType
 from litestar.params import Body
 
 from server.modules.recognizer import RecognizerService

@@ -14,17 +16,13 @@ class BreedsController(Controller):
     path = "/beerds"
 
     @post("/dogs")
-    async def beerds_dogs(
-        self, data: UploadFile = Body(media_type=RequestEncodingType.MULTI_PART)
-    ) -> dict:
+    async def beerds_dogs(self, data: Annotated[UploadFile, Body(media_type=RequestEncodingType.MULTI_PART)]) -> dict:
         recognizer_service: RecognizerService = inject.instance(RecognizerService)
         body = await data.read()
         return await recognizer_service.predict_dog_image(body)
 
     @post("/cats")
-    async def beerds_cats(
-        self, data: UploadFile = Body(media_type=RequestEncodingType.MULTI_PART)
-    ) -> dict:
+    async def beerds_cats(self, data: Annotated[UploadFile, Body(media_type=RequestEncodingType.MULTI_PART)]) -> dict:
         recognizer_service: RecognizerService = inject.instance(RecognizerService)
         body = await data.read()
         return await recognizer_service.predict_cat_image(body)

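The handlers move the multipart `Body` marker from a default value into `Annotated` metadata, which is Litestar's documented pattern for file uploads. A minimal standalone sketch; the `/upload` route is illustrative, not from the repo:

```python
from typing import Annotated

from litestar import Litestar, post
from litestar.datastructures import UploadFile
from litestar.enums import RequestEncodingType
from litestar.params import Body

@post("/upload")
async def upload(data: Annotated[UploadFile, Body(media_type=RequestEncodingType.MULTI_PART)]) -> dict:
    content = await data.read()  # raw bytes of the uploaded file
    return {"filename": data.filename, "size": len(content)}

app = Litestar(route_handlers=[upload])
```
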
@@ -1,11 +1,11 @@
 import inject
 from litestar import (
     Controller,
-    get,
     MediaType,
+    get,
 )
 
-from server.modules.descriptions import CharactersService, Breed
+from server.modules.descriptions import Breed, CharactersService
 
 
 class SeoController(Controller):

@@ -1,20 +1,20 @@
 import asyncio
-from pathlib import Path
 import os
+from pathlib import Path
 
 import inject
 from litestar import (
     Litestar,
 )
 from litestar.contrib.jinja import JinjaTemplateEngine
-from litestar.template.config import TemplateConfig
 from litestar.static_files import create_static_files_router
+from litestar.template.config import TemplateConfig
 
 from server.config import get_app_config
-from server.infra.web import BreedsController, DescriptionController, SeoController
 from server.infra.db import AsyncDB
-from server.modules.descriptions import CharactersService, CharactersRepository
-from server.modules.recognizer import RecognizerService, RecognizerRepository
+from server.infra.web import BreedsController, DescriptionController, SeoController
+from server.modules.descriptions import CharactersService, PGCharactersRepository
+from server.modules.recognizer import RecognizerRepository, RecognizerService
 
 os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

@@ -29,7 +29,7 @@ def inject_config(binder: inject.Binder):
     db = AsyncDB(cnf)
     loop.run_until_complete(db.connect())
     binder.bind(RecognizerService, RecognizerService(RecognizerRepository()))
-    binder.bind(CharactersService, CharactersService(CharactersRepository()))
+    binder.bind(CharactersService, CharactersService(PGCharactersRepository(db)))
 
 
 inject.configure(inject_config)

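`inject.configure` registers singletons once at startup; handlers later resolve them with `inject.instance`, as the controllers above do. A reduced sketch of this wiring pattern, assuming only the `inject` package; `Repo` is a hypothetical stand-in:

```python
import inject

class Repo:  # hypothetical stand-in for a repository class
    def get(self) -> str:
        return "data"

def config(binder: inject.Binder) -> None:
    binder.bind(Repo, Repo())  # bind a ready instance as a singleton

inject.configure(config)
repo = inject.instance(Repo)  # resolves the bound singleton anywhere
assert repo.get() == "data"
```
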
@@ -1,14 +1,13 @@
 from logging.config import fileConfig
 
 from alembic import context
+from sqlalchemy import engine_from_config, pool
+from sqlalchemy.ext.declarative import declarative_base
 
 from server.config import get_app_config
 from server.infra.db.db_mapper import mapper_registry
 from server.modules.attachments.repository.models import *
 from server.modules.descriptions.repository.models import *
 from server.modules.rate.repository.models import *
-from sqlalchemy import engine_from_config, pool
-from sqlalchemy.ext.declarative import declarative_base
 
 # this is the Alembic Config object, which provides
 # access to the values within the .ini file in use.

@@ -16,7 +15,7 @@ config = context.config
 Base = declarative_base()
 target_metadata = Base.metadata
 
-for (table_name, table) in mapper_registry.metadata.tables.items():
+for table_name, table in mapper_registry.metadata.tables.items():
     target_metadata._add_table(table_name, table.schema, table)
 
 # Interpret the config file for Python logging.

@@ -38,8 +37,8 @@ def run_migrations_offline() -> None:
 
     """
     url = "{}?options=-c search_path={}".format(
-        str(get_app_config().db_uri).replace(
-            "+asyncpg", ""), get_app_config().db_search_path
+        str(get_app_config().db_uri).replace("+asyncpg", ""),
+        get_app_config().db_search_path,
     )
     context.configure(
         url=url,

@@ -62,8 +61,8 @@ def run_migrations_online() -> None:
     alemb_cnf = config.get_section(config.config_ini_section, {})
     if not alemb_cnf["sqlalchemy.url"] or alemb_cnf["sqlalchemy.url"] == "driver://user:pass@localhost/dbname":
         alemb_cnf["sqlalchemy.url"] = "{}?options=-c search_path={}".format(
-            str(get_app_config().db_uri).replace(
-                "+asyncpg", ""), get_app_config().db_search_path
+            str(get_app_config().db_uri).replace("+asyncpg", ""),
+            get_app_config().db_search_path,
         )
     connectable = engine_from_config(
         alemb_cnf,

@@ -73,7 +72,8 @@ def run_migrations_online() -> None:
 
     with connectable.connect() as connection:
         context.configure(
-            connection=connection, target_metadata=target_metadata,
+            connection=connection,
+            target_metadata=target_metadata,
         )
 
         with context.begin_transaction():

@@ -5,56 +5,67 @@ Revises:
 Create Date: 2026-01-12 18:28:37.783462
 
 """
-from typing import Sequence, Union
 
-from alembic import op
+import pathlib
+from collections.abc import Sequence
+
 import sqlalchemy as sa
 
+from alembic import op
+
 # revision identifiers, used by Alembic.
-revision: str = '474b572b7fe2'
-down_revision: Union[str, None] = None
-branch_labels: Union[str, Sequence[str], None] = None
-depends_on: Union[str, Sequence[str], None] = None
+revision: str = "474b572b7fe2"
+down_revision: str | None = None
+branch_labels: str | Sequence[str] | None = None
+depends_on: str | Sequence[str] | None = None
 
 
 def upgrade() -> None:
     # ### commands auto generated by Alembic - please adjust! ###
-    op.create_table('attachments',
-    sa.Column('id', sa.String(), nullable=False),
-    sa.Column('size', sa.BigInteger(), nullable=False),
-    sa.Column('storage_driver_name', sa.String(), nullable=False),
-    sa.Column('path', sa.String(), nullable=False),
-    sa.Column('media_type', sa.String(), nullable=False),
-    sa.Column('content_type', sa.String(), nullable=False),
-    sa.Column('created_at', sa.DateTime(timezone=True), nullable=False),
-    sa.Column('updated_at', sa.DateTime(timezone=True), nullable=False),
-    sa.Column('is_deleted', sa.Boolean(), nullable=False),
-    sa.PrimaryKeyConstraint('id')
+    op.create_table(
+        "attachments",
+        sa.Column("id", sa.String(), nullable=False),
+        sa.Column("size", sa.BigInteger(), nullable=False),
+        sa.Column("storage_driver_name", sa.String(), nullable=False),
+        sa.Column("path", sa.String(), nullable=False),
+        sa.Column("media_type", sa.String(), nullable=False),
+        sa.Column("content_type", sa.String(), nullable=False),
+        sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
+        sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
+        sa.Column("is_deleted", sa.Boolean(), nullable=False),
+        sa.PrimaryKeyConstraint("id"),
     )
-    op.create_table('beerds',
-    sa.Column('id', sa.String(), nullable=False),
-    sa.Column('name', sa.Text(), nullable=False),
-    sa.Column('descriptions', sa.Text(), nullable=False),
-    sa.Column('signs', sa.JSON(), nullable=False),
-    sa.PrimaryKeyConstraint('id')
+    op.create_table(
+        "beerds",
+        sa.Column("id", sa.String(), nullable=False),
+        sa.Column("name", sa.Text(), nullable=False),
+        sa.Column("alias", sa.Text(), nullable=False),
+        sa.Column("descriptions", sa.Text(), nullable=False),
+        sa.Column("signs", sa.JSON(), nullable=False),
+        sa.PrimaryKeyConstraint("id"),
     )
-    op.create_table('votes',
-    sa.Column('id', sa.String(), nullable=False),
-    sa.Column('attachemnt_id', sa.String(), nullable=False),
-    sa.Column('beerd_id', sa.String(), nullable=False),
-    sa.Column('rate', sa.BigInteger(), nullable=False),
-    sa.Column('created_at', sa.DateTime(timezone=True), nullable=False),
-    sa.ForeignKeyConstraint(['attachemnt_id'], ['attachments.id'], name='votes_attachemnt_id_fk'),
-    sa.ForeignKeyConstraint(['beerd_id'], ['beerds.id'], name='votes_beerd_id_fk'),
-    sa.PrimaryKeyConstraint('id')
+    op.create_table(
+        "votes",
+        sa.Column("id", sa.String(), nullable=False),
+        sa.Column("attachemnt_id", sa.String(), nullable=False),
+        sa.Column("beerd_id", sa.String(), nullable=False),
+        sa.Column("rate", sa.BigInteger(), nullable=False),
+        sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
+        sa.ForeignKeyConstraint(["attachemnt_id"], ["attachments.id"], name="votes_attachemnt_id_fk"),
+        sa.ForeignKeyConstraint(["beerd_id"], ["beerds.id"], name="votes_beerd_id_fk"),
+        sa.PrimaryKeyConstraint("id"),
     )
     # ### end Alembic commands ###
+    with open(
+        pathlib.Path(__file__).resolve().parent / "dumps/beerds_insert.sql",
+        encoding="utf-8",
+    ) as upgrade_file:
+        sql = upgrade_file.read()
+        op.execute(sql)
 
 
 def downgrade() -> None:
     # ### commands auto generated by Alembic - please adjust! ###
-    op.drop_table('votes')
-    op.drop_table('beerds')
-    op.drop_table('attachments')
+    op.drop_table("votes")
+    op.drop_table("beerds")
+    op.drop_table("attachments")
     # ### end Alembic commands ###

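The migration both creates the tables and seeds `beerds` from a bundled SQL dump, so the plain `alembic upgrade head` the Makefile now runs loads the breed data too. The same upgrade can also be driven from Python; a hedged sketch assuming a standard `alembic.ini` at the project root:

```python
from alembic import command
from alembic.config import Config

# Programmatic equivalent of `alembic upgrade head`.
cfg = Config("alembic.ini")  # path to the Alembic config (assumed location)
command.upgrade(cfg, "head")
```
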
@@ -1,6 +1,5 @@
 from abc import ABCMeta, abstractmethod
 from datetime import UTC, datetime
-from typing import Tuple
 
 from sqlalchemy import CursorResult, delete, insert, select, update

@@ -11,9 +10,7 @@ from server.modules.attachments.domains.attachments import Attachment
 
 class AttachmentRepository(metaclass=ABCMeta):
     @abstractmethod
-    async def get_by_id(
-        self, session: AbstractSession, attach_id: list[str]
-    ) -> list[Attachment]:
+    async def get_by_id(self, session: AbstractSession, attach_id: list[str]) -> list[Attachment]:
         """Get Attachment by ID"""
         pass

@@ -89,9 +86,7 @@ class MockAttachmentRepository(AttachmentRepository):
         }
         self._db = MockDB(get_app_config())
 
-    async def get_by_id(
-        self, session: AbstractSession, attach_id: list[str]
-    ) -> list[Attachment]:
+    async def get_by_id(self, session: AbstractSession, attach_id: list[str]) -> list[Attachment]:
         f: list[Attachment] = []
         for f_id in attach_id:
             f_item = self._data.get(f_id)

@@ -119,14 +114,12 @@ class DBAttachmentRepository(AttachmentRepository):
     def __init__(self, db: AsyncDB):
         self._db = db
 
-    async def get_by_id(
-        self, session: AbstractSession, attach_id: list[str]
-    ) -> list[Attachment]:
+    async def get_by_id(self, session: AbstractSession, attach_id: list[str]) -> list[Attachment]:
         q = select(Attachment).where(
             Attachment.id.in_(attach_id)  # type: ignore
         )
         attachment: list[Attachment] = []
-        result: CursorResult[Tuple[Attachment]] = await session.execute(q)  # type: ignore
+        result: CursorResult[tuple[Attachment]] = await session.execute(q)  # type: ignore
         for d in result.all():
             attachment.append(d[0])
         return attachment

@@ -1,14 +1,13 @@
 from dataclasses import dataclass, field
 from datetime import UTC, datetime
 
-from dataclasses_ujson.dataclasses_ujson import UJsonMixin
+from dataclasses_ujson.dataclasses_ujson import UJsonMixin  # type: ignore
 from sqlalchemy import BigInteger, Boolean, Column, DateTime, String
 
 from server.config import get_app_config
 from server.infra.db.db_mapper import mapper_registry
 
 
 @mapper_registry.mapped
 @dataclass
 class Attachment(UJsonMixin):

@@ -23,10 +22,12 @@ class Attachment(UJsonMixin):
     content_type: str = field(metadata={"sa": Column(String(), nullable=False)})
 
     created_at: datetime = field(
-        default=datetime.now(UTC), metadata={"sa": Column(DateTime(timezone=True), nullable=False)}
+        default=datetime.now(UTC),
+        metadata={"sa": Column(DateTime(timezone=True), nullable=False)},
     )
     updated_at: datetime = field(
-        default=datetime.now(UTC), metadata={"sa": Column(DateTime(timezone=True), nullable=False)}
+        default=datetime.now(UTC),
+        metadata={"sa": Column(DateTime(timezone=True), nullable=False)},
     )
     is_deleted: bool = field(default=False, metadata={"sa": Column(Boolean(), nullable=False, default=False)})

@@ -1,12 +1,13 @@
 import hashlib
 import os.path
+import uuid
 from abc import ABCMeta, abstractmethod
+from collections.abc import AsyncIterable, AsyncIterator
 from datetime import UTC, datetime
 from enum import Enum
 from io import BytesIO
 from pathlib import Path
-from typing import Any, AsyncIterable, AsyncIterator, Optional
-import uuid
+from typing import Any
 
 import aioboto3  # type: ignore
 import aiofiles

@@ -46,7 +47,7 @@ class StorageDriver(metaclass=ABCMeta):
         pass
 
     @abstractmethod
-    async def take(self, path: str) -> Optional[bytes]:
+    async def take(self, path: str) -> bytes | None:
         pass
 
     @abstractmethod

@@ -78,7 +79,7 @@ class LocalStorageDriver(StorageDriver):
             await f.write(data)
         return str(path)
 
-    async def take(self, path: str) -> Optional[bytes]:
+    async def take(self, path: str) -> bytes | None:
         if not os.path.isfile(path):
             return None
         async with aiofiles.open(path, "rb") as f:

@@ -112,7 +113,7 @@ class MockStorageDriver(StorageDriver):
         self._store[path] = data
         return path
 
-    async def take(self, path: str) -> Optional[bytes]:
+    async def take(self, path: str) -> bytes | None:
         return self._store.get(path)
 
     async def delete(self, path: str):

@@ -138,9 +139,7 @@ class S3StorageDriver(StorageDriver):
         return self._session.client("s3", endpoint_url=self._cnf.fs_s3_endpoint)
 
     def _normalize_path(self, path: str) -> str:
-        return f"{S3StorageDriver._prefix}{path}".replace(
-            self._cnf.fs_local_mount_dir, ""
-        )
+        return f"{S3StorageDriver._prefix}{path}".replace(self._cnf.fs_local_mount_dir, "")
 
     async def put(self, data: bytes) -> str:
         sign = hashlib.file_digest(BytesIO(data), "sha256").hexdigest()

@@ -176,12 +175,10 @@ class S3StorageDriver(StorageDriver):
             self._logger.error(f"stream client error: {str(e)}, path: {path}")
             raise FileNotFoundError
         except Exception as e:
-            self._logger.error(
-                f"stream error: {type(e).__name__} {str(e)}, path: {path}"
-            )
+            self._logger.error(f"stream error: {type(e).__name__} {str(e)}, path: {path}")
             raise FileNotFoundError
 
-    async def take(self, path: str) -> Optional[bytes]:
+    async def take(self, path: str) -> bytes | None:
         buffer = BytesIO()
         async for chunk in self.stream(path):
             if chunk:

@@ -191,9 +188,7 @@ class S3StorageDriver(StorageDriver):
 
     async def delete(self, path: str) -> None:
         async with await self._client() as s3:
-            await s3.delete_object(
-                Bucket=self._cnf.fs_s3_bucket, Key=self._normalize_path(path)
-            )
+            await s3.delete_object(Bucket=self._cnf.fs_s3_bucket, Key=self._normalize_path(path))
 
 
 RESIZE_MAX_SIZE = 100_000

@@ -243,9 +238,7 @@ class AtachmentService:
         return parts.replace("/", "")
 
     def url(self, attachment_id: str, content_type: str | None = None) -> str:
-        return f"{self._cnf.app_public_url}/api/v0/attachment/{attachment_id}.original.{
-            self.extension(content_type)
-        }"
+        return f"{self._cnf.app_public_url}/api/v0/attachment/{attachment_id}.original.{self.extension(content_type)}"
 
     async def create(self, file: bytes, user_id: str) -> Attachment:
         path = await self._driver.put(file)

@@ -264,9 +257,7 @@ class AtachmentService:
         await self._repository.create(attach)
         return attach
 
-    async def get_info(
-        self, session: AbstractSession | None, attach_id: list[str]
-    ) -> list[Attachment]:
+    async def get_info(self, session: AbstractSession | None, attach_id: list[str]) -> list[Attachment]:
         if not attach_id:
             return []
         if session is not None:

@@ -277,17 +268,13 @@ class AtachmentService:
     def get_name(self, attachment: Attachment) -> str:
         return f"{attachment.id}.{self.extension(attachment.content_type)}"
 
-    async def get_data(
-        self, session: AbstractSession, attach_id: str
-    ) -> Optional[bytes]:
+    async def get_data(self, session: AbstractSession, attach_id: str) -> bytes | None:
         file = await self._repository.get_by_id(session, [attach_id])
         if not file:
             return None
         return await self._driver.take(file[0].path)
 
-    async def get_stream(
-        self, session: AbstractSession | None, attach_id: str
-    ) -> AsyncIterator[bytes]:
+    async def get_stream(self, session: AbstractSession | None, attach_id: str) -> AsyncIterator[bytes]:
         async def _stream_iterator(is_empty: bool):
             if is_empty:
                 return

@@ -343,7 +330,5 @@ class AtachmentService:
                 f"delete:{item.path}",
             )
             path = await self._driver.put(d)
-            await self._repository.update(
-                item.id, path=path, content_type="image/jpeg", size=len(d)
-            )
+            await self._repository.update(item.id, path=path, content_type="image/jpeg", size=len(d))
             await self._driver.delete(item.path)

@@ -1,13 +1,11 @@
-from server.modules.descriptions.repository import (
-    CharactersRepository,
-    ACharactersRepository,
-)
-from server.modules.descriptions.service import CharactersService
 from server.modules.descriptions.domain import Breed
+from server.modules.descriptions.repository import ACharactersRepository, CharactersRepository, PGCharactersRepository
+from server.modules.descriptions.service import CharactersService
 
 __all__ = (
     "CharactersRepository",
     "ACharactersRepository",
     "CharactersService",
+    "PGCharactersRepository",
     "Breed",
 )

@@ -3,6 +3,7 @@ from dataclasses import dataclass
 
 @dataclass(frozen=True)
 class Breed:
     id: str
     name: str
+    alias: str
     description: str

@@ -1,6 +1,7 @@
 from server.modules.descriptions.repository.repository import (
-    CharactersRepository,
     ACharactersRepository,
+    CharactersRepository,
+    PGCharactersRepository,
 )
 
-__all__ = ("CharactersRepository", "ACharactersRepository")
+__all__ = ("CharactersRepository", "ACharactersRepository", "PGCharactersRepository")

@@ -1,6 +1,3 @@
-
-
-```json
 {
     "people_friendly": true,
     "child_friendly": true,

@@ -14,4 +11,3 @@
     "tolerates_loneliness": false,
     "hypoallergenic": false
 }
-```

@@ -1,6 +1,3 @@
-
-
-```json
 {
     "active": true,
     "colors": ["blue", "gray", "black"],

@@ -10,4 +7,3 @@
     "good_health": false,
     "tolerates_loneliness": false
 }
-```

@@ -1,6 +1,3 @@
-
-
-```json
 {
     "people_friendly": true,
     "child_friendly": true,

@@ -13,4 +10,3 @@
     "tolerates_loneliness": false,
     "hypoallergenic": false
 }
-```

@@ -1,6 +1,3 @@
-
-
-```json
 {
     "people_friendly": true,
     "child_friendly": true,

@@ -17,4 +14,3 @@
     "tolerates_loneliness": false,
     "hypoallergenic": false
 }
-```

@@ -1,3 +0,0 @@
-
-
-It seems like the text you've shared is repeated multiple times, possibly due to an error. Could you clarify your question or specify what you need help with? For example, are you looking for tips on dog care, training, nutrition, or something else? Let me know, and I'll provide a clear, concise response! 🐾

@@ -1,6 +1,3 @@
-
-
-```json
 {
     "people_friendly": true,
     "child_friendly": true,

@@ -13,4 +10,3 @@
     "tolerates_loneliness": false,
     "hypoallergenic": false
 }
-```

@@ -1,16 +1,7 @@
-
-
-The Entlebucher Mountain Dog exhibits the following traits based on the provided description:
-
-- **Child-friendly**: Yes. The text explicitly states, "Прекрасно ладят с детьми" (Great with children).
-- **High energy**: Yes. The breed requires daily walks, games, and activities, and lack of stimulation may lead to destructive behavior.
-- **Friendly**: Yes. The dog is described as "обычно дружелюбны" (usually friendly) with people and other animals, and it is noted that they are good with children.
-
-Other attributes:
-- **People-friendly**: Ambiguous. While the breed is friendly with people, the term "people-friendly" is not explicitly mentioned.
-- **Dog-friendly**: Unclear. The text notes "обычно дружелюбны" (usually friendly) with other animals but mentions a hunting instinct toward small animals, which may not directly apply to dogs.
-- **Low maintenance**: No. The breed requires regular grooming (brushing) and active socialization/training.
-- **Hypoallergenic**: No. There is no mention of hypoallergenic traits.
-
-**Final Answer**:
-Child-friendly, High energy, Friendly.
+{
+    "people_friendly": true,
+    "child_friendly": true,
+    "active": true,
+    "need_attentions": true,
+    "good_health": false
+}

@@ -1,14 +1,11 @@
 from dataclasses import dataclass, field
-from datetime import UTC, datetime
 
-from dataclasses_ujson.dataclasses_ujson import UJsonMixin
-from sqlalchemy import BigInteger, Boolean, Column, DateTime, JSON, String, Text
+from dataclasses_ujson.dataclasses_ujson import UJsonMixin  # type: ignore
+from sqlalchemy import JSON, Column, String, Text
 
 from server.config import get_app_config
 from server.infra.db.db_mapper import mapper_registry
 
 
 @mapper_registry.mapped
 @dataclass
 class Beerds(UJsonMixin):

@@ -17,10 +14,6 @@ class Beerds(UJsonMixin):
 
     id: str = field(metadata={"sa": Column(String(), primary_key=True, nullable=False)})
     name: str = field(metadata={"sa": Column(Text(), nullable=False)})
-    descriptions: str = field(
-        default=datetime.now(UTC), metadata={"sa": Column(Text(), nullable=False)}
-    )
-    signs: dict = field(
-        default=datetime.now(UTC), metadata={"sa": Column(JSON(), nullable=False)}
-    )
+    alias: str = field(metadata={"sa": Column(Text(), nullable=False)})
+    descriptions: str = field(metadata={"sa": Column(Text(), nullable=False)})
+    signs: dict | None = field(default=None, metadata={"sa": Column(JSON(), nullable=False)})

@@ -1,10 +1,12 @@
 from abc import ABCMeta, abstractmethod
 from pathlib import Path
 
-from aiocache import cached, Cache  # type: ignore
+from aiocache import Cache, cached  # type: ignore
+from sqlalchemy import select
 
 from server.infra.db import AsyncDB
 from server.modules.descriptions.domain import Breed
+from server.modules.descriptions.repository import models
 
 
 class ACharactersRepository(metaclass=ABCMeta):

@@ -12,6 +14,7 @@ class ACharactersRepository(metaclass=ABCMeta):
     async def get_characters(self) -> list[Breed]:
         pass
 
+    @abstractmethod
     async def get_character(self, alias: str) -> Breed | None:
         pass

@@ -28,11 +31,10 @@ class CharactersRepository(ACharactersRepository):
         # Iterate over each text file with a breed description
         for breed_file in breed_dir.glob("*.txt"):
             breed_name = breed_file.stem  # file name without extension is the breed name
-            description = breed_file.read_text(
-                encoding="utf-8"
-            )  # read the description from the file
+            description = breed_file.read_text(encoding="utf-8")  # read the description from the file
             breeds.append(
                 Breed(
                     id=breed_name,
                     name=breed_name.replace("_", " "),
+                    alias=breed_file.stem,
                     description=description.strip(),

@@ -55,30 +57,71 @@ class PGCharactersRepository(ACharactersRepository):
     def __init__(self, db: AsyncDB):
         self._db = db
 
-    @cached(ttl=60, cache=Cache.MEMORY)
+    # ───────────────────────────────────────────────────────────────────── #
+    # Cached method that returns **all** breeds
+    # ───────────────────────────────────────────────────────────────────── #
+    @cached(ttl=60, cache=Cache.MEMORY)  # 1-minute cache
     async def get_characters(self) -> list[Breed]:
-        breed_dir = Path("server/modules/descriptions/repository/breed_descriptions")
-        breeds: list[Breed] = []
+        """
+        Reads rows from the `beerds.beerds` table and converts each one
+        into a `Breed` instance. The `signs` field is ignored – `Breed`
+        does not have it.
+        """
 
-        # Iterate over each text file with a breed description
-        for breed_file in breed_dir.glob("*.txt"):
-            breed_name = breed_file.stem  # file name without extension is the breed name
-            description = breed_file.read_text(
-                encoding="utf-8"
-            )  # read the description from the file
-            breeds.append(
-                Breed(
-                    name=breed_name.replace("_", " "),
-                    alias=breed_file.stem,
-                    description=description.strip(),
-                )
-            )
-        breeds.sort(key=lambda b: b.name)
+        async with self._db.async_session() as session:
+            # Build the SELECT query (fetch all rows)
+            stmt = select(
+                models.Beerds.id,
+                models.Beerds.name,
+                models.Beerds.alias,
+                models.Beerds.descriptions,
+            )
+            result = await session.execute(stmt)
+            rows = result.fetchall()
+
+        # Convert rows to Breed
+        breeds: list[Breed] = [
+            Breed(
+                id=str(row.id),
+                name=row.name.strip(),
+                alias=row.alias.strip(),
+                description=row.descriptions.strip(),
+            )
+            for row in rows
+        ]
+
+        # Sort by name, as the file-based implementation did
+        breeds.sort(key=lambda b: b.name.lower())
         return breeds
 
+    # ───────────────────────────────────────────────────────────────────── #
+    # Fetch a specific breed by alias
+    # ───────────────────────────────────────────────────────────────────── #
     async def get_character(self, alias: str) -> Breed | None:
-        breeds = await self.get_characters()
-        data = [b for b in breeds if b.alias == alias]
-        if len(data) == 0:
-            return None
-        return data[0]
+        """
+        A fast query that avoids fetching all breeds. If the result
+        is empty, return `None`.
+        """
+
+        async with self._db.async_session() as session:
+            stmt = (
+                select(
+                    models.Beerds.id,
+                    models.Beerds.name,
+                    models.Beerds.alias,
+                    models.Beerds.descriptions,
+                )
+                .where(models.Beerds.alias == alias)
+                .limit(1)
+            )
+            result = await session.execute(stmt)
+            row = result.fetchone()
+
+            if row is None:  # pragma: no cover
+                return None
+
+            return Breed(
+                id=str(row.id),
+                name=row.name.strip(),
+                alias=row.alias.strip(),
+                description=row.descriptions.strip(),
+            )

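`@cached(ttl=60, cache=Cache.MEMORY)` keeps the breeds list in process memory for a minute, so repeated page renders don't re-query Postgres. A self-contained sketch of the decorator's behavior, with a stand-in for the DB query:

```python
import asyncio

from aiocache import Cache, cached  # type: ignore

calls = 0

@cached(ttl=60, cache=Cache.MEMORY)
async def get_characters() -> list[str]:
    global calls
    calls += 1
    return ["akita", "beagle"]  # stand-in for the SELECT against beerds

async def main() -> None:
    await get_characters()
    await get_characters()  # served from cache; the body ran only once
    assert calls == 1

asyncio.run(main())
```
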
@@ -1,12 +1,16 @@
 from dataclasses import dataclass, field
 from datetime import UTC, datetime
 
-from dataclasses_ujson.dataclasses_ujson import UJsonMixin
-from sqlalchemy import BigInteger, Boolean, Column, DateTime, ForeignKeyConstraint, String
-
+from dataclasses_ujson.dataclasses_ujson import UJsonMixin  # type: ignore
+from sqlalchemy import (
+    BigInteger,
+    Column,
+    DateTime,
+    ForeignKeyConstraint,
+    String,
+)
 
 from server.config import get_app_config
 from server.infra.db.db_mapper import mapper_registry
 
 
 @mapper_registry.mapped

@@ -24,7 +28,8 @@ class Vote(UJsonMixin):
     beerd_id: str = field(metadata={"sa": Column(String(), nullable=False)})
     rate: int = field(metadata={"sa": Column(BigInteger(), nullable=False)})
     created_at: datetime = field(
-        default=datetime.now(UTC), metadata={"sa": Column(DateTime(timezone=True), nullable=False)}
+        default=datetime.now(UTC),
+        metadata={"sa": Column(DateTime(timezone=True), nullable=False)},
     )
 
     def __str__(self):

@@ -1,6 +1,6 @@
 from server.modules.recognizer.repository import (
-    RecognizerRepository,
     ARecognizerRepository,
+    RecognizerRepository,
 )
 from server.modules.recognizer.service import RecognizerService

@@ -1,6 +1,6 @@
 from server.modules.recognizer.repository.repository import (
-    RecognizerRepository,
     ARecognizerRepository,
+    RecognizerRepository,
 )
 
 __all__ = ("RecognizerRepository", "ARecognizerRepository")

@@ -1,8 +1,8 @@
 from abc import ABCMeta, abstractmethod
 from functools import lru_cache
 
-from aiocache import cached, Cache  # type: ignore
 import ujson
+from aiocache import Cache, cached  # type: ignore
 
 
 class ARecognizerRepository(metaclass=ABCMeta):

@@ -29,26 +29,22 @@ class RecognizerRepository(ARecognizerRepository):
 
     @cached(ttl=60, cache=Cache.MEMORY)
     async def images_dogs(self) -> dict:
-        with open("server/modules/recognizer/repository/meta/images.json", "r") as f:
+        with open("server/modules/recognizer/repository/meta/images.json") as f:  # noqa: ASYNC230
             return ujson.loads(f.read())["dog"]
 
     @cached(ttl=60, cache=Cache.MEMORY)
     async def images_cats(self) -> dict:
-        with open("server/modules/recognizer/repository/meta/images.json", "r") as f:
+        with open("server/modules/recognizer/repository/meta/images.json") as f:  # noqa: ASYNC230
             return ujson.loads(f.read())["cat"]
 
     @lru_cache
     def labels_cats(self) -> dict:
-        with open(
-            "server/modules/recognizer/repository/meta/labels_cats.json", "r"
-        ) as f:
+        with open("server/modules/recognizer/repository/meta/labels_cats.json") as f:  # noqa: ASYNC230
            data_labels = f.read()
         return ujson.loads(data_labels)
 
     @lru_cache
     def labels_dogs(self) -> dict:
-        with open(
-            "server/modules/recognizer/repository/meta/labels_dogs.json", "r"
-        ) as f:
+        with open("server/modules/recognizer/repository/meta/labels_dogs.json") as f:  # noqa: ASYNC230
             data_labels = f.read()
         return ujson.loads(data_labels)

@@ -1,6 +1,6 @@
-from typing import NewType, Any
-import os
 import io
+import os
+from typing import Any, NewType
 
 from PIL import Image

@@ -10,7 +10,6 @@ from torchvision import transforms  # type: ignore
 
 from server.modules.recognizer.repository import ARecognizerRepository
 
-
 TorchModel = NewType("TorchModel", torch.nn.Module)

@@ -49,15 +48,10 @@ class RecognizerService:
             images.append(
                 {
                     "name": name,
-                    "url": [
-                        f"/static/assets/dog/{predicted_label}/{i}"
-                        for i in images_dogs[predicted_label]
-                    ],
+                    "url": [f"/static/assets/dog/{predicted_label}/{i}" for i in images_dogs[predicted_label]],
                 }
             )
-            description.setdefault(name, []).append(
-                f"/dogs-characteristics/{name.replace(' ', '_')}"
-            )
+            description.setdefault(name, []).append(f"/dogs-characteristics/{name.replace(' ', '_')}")
             results[probabilities] = name
         return {
             "results": results,

@@ -77,10 +71,7 @@ class RecognizerService:
             images.append(
                 {
                     "name": name,
-                    "url": [
-                        f"/static/assets/cat/{predicted_label}/{i}"
-                        for i in images_cats[predicted_label]
-                    ],
+                    "url": [f"/static/assets/cat/{predicted_label}/{i}" for i in images_cats[predicted_label]],
                 }
             )
             results[probabilities] = name

@@ -99,9 +90,7 @@ class RecognizerService:
             ]
         )
         input_tensor = preprocess(Image.open(io.BytesIO(image)))
-        input_batch = input_tensor.unsqueeze(0).to(
-            device
-        )  # add a batch dimension
+        input_batch = input_tensor.unsqueeze(0).to(device)  # add a batch dimension
 
         with torch.no_grad():
             output = model(input_batch)

@@ -112,7 +101,5 @@ class RecognizerService:
 
         predicted_data = []
         for i in range(k):
-            predicted_data.append(
-                (predicted_idx[i].item(), float(topk_probs[i].item()))
-            )
+            predicted_data.append((predicted_idx[i].item(), float(topk_probs[i].item())))
         return predicted_data

@@ -1,5 +1,6 @@
 import os
+import time
 
 import requests  # type: ignore
 
 # Get a token in order to:

@@ -12,9 +13,7 @@ group_id = 220240483
 dir = "../assets/dog"
 list_labels = [fname for fname in os.listdir(dir)]
 
-r = requests.get(
-    f"{VK_URL}photos.getAll{postfix}&access_token={TOKEN}&owner_id=-{group_id}&count=200"
-)
+r = requests.get(f"{VK_URL}photos.getAll{postfix}&access_token={TOKEN}&owner_id=-{group_id}&count=200")
 if "error" in r.json():
     print("error", r.json())
     exit()