Compare commits

...

10 Commits

Author SHA1 Message Date
artem 61669ea702 keras -> pytorch, sanic -> litestar 2025-04-13 10:43:29 +03:00
artem 95fe63ac6b добавил meta-description 2023-05-10 19:45:21 +03:00
artem 2f50a00f78 sitemap + robots 2023-05-09 09:46:03 +03:00
artem 0868e73301 изменил модель 2023-05-03 08:01:12 +03:00
artem a86abe9c9e поправил скрипт обучения 2023-05-03 08:00:10 +03:00
artem ed559a8b94 облагородил 2023-05-01 22:00:47 +03:00
artem c0bd2ddf3d облагородил 2023-05-01 21:49:53 +03:00
artem 18c4259d16 облагородил 2023-05-01 21:19:42 +03:00
artem 86d7a87fe2 фото из ВК 2023-05-01 15:04:45 +03:00
artem 4493bbab07 вк апи 2023-05-01 11:02:30 +03:00
21 changed files with 2653 additions and 327 deletions

177
.gitignore vendored
View File

@ -1,3 +1,178 @@
assets/*
*.jpg
beerds.json
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
#uv.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
# Ruff stuff:
.ruff_cache/
# PyPI configuration file
.pypirc

1
.python-version Normal file
View File

@ -0,0 +1 @@
3.11

12
Makefile Normal file
View File

@ -0,0 +1,12 @@
# Development task runner. All tools are executed through `uv run`
# so they use the project's locked environment.

# Declare targets as phony so they always run even if a file/dir
# with the same name exists.
.PHONY: api runml format lint

# Serve the ASGI app with granian.
api:
	uv run granian --interface asgi server.main:app

# Run the training script.
# NOTE(review): verify ml/beerds.py still exists — the training code in
# this commit lives in ml/dogs.py.
runml:
	uv run ml/beerds.py

# Auto-format the codebase.
format:
	uv run ruff format app

# Type-check, then lint with auto-fixes (each recipe line is its own shell).
lint:
	uv run mypy ./ --explicit-package-bases
	uv run ruff check --fix

5
README.md Normal file
View File

@ -0,0 +1,5 @@
Нужно установить драйвер Nvidia + CUDA
```
sudo apt install nvidia-cuda-toolkit
```

3
RoadMap.md Normal file
View File

@ -0,0 +1,3 @@
Можно скачать отсюда блог и перевести
https://dogtime.com/dog-breeds/afador

Binary file not shown.

Binary file not shown.

107
beerds.py
View File

@ -1,107 +0,0 @@
import os
import random
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.utils import image_dataset_from_directory, split_dataset
img_size = (200, 200)
# обогащение выборки
data_augmentation = keras.Sequential(
[
layers.RandomFlip("horizontal"),
layers.RandomRotation(0.1),
layers.RandomZoom(0.2),
]
)
input_dir = "assets/dog"
labels_dict = {}
for fname in os.listdir(input_dir):
if fname in labels_dict:
continue
labels_dict[fname] = len(labels_dict)
model_name = "beerd_25_04_2023.keras"
train_dataset, val_ds = image_dataset_from_directory(
input_dir,
labels="inferred",
label_mode="categorical",
class_names=None,
color_mode="rgb",
batch_size=32,
seed=12,
image_size=img_size,
shuffle=True,
validation_split=0.1,
subset="both",
interpolation="bilinear",
follow_links=False,
crop_to_aspect_ratio=False
)
validation_dataset, test_dataset = split_dataset(val_ds, left_size=0.8)
inputs = keras.Input(shape=img_size + (3,))
x = data_augmentation(inputs)
x = layers.Rescaling(1./255)(x)
x = layers.Conv2D(filters=32, kernel_size=5, use_bias=False)(x)
for size in [32, 64, 128, 256, 512, 1024]:
residual = x
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.SeparableConv2D(size, 3, padding="same", use_bias=False)(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.SeparableConv2D(size, 3, padding="same", use_bias=False)(x)
x = layers.MaxPooling2D(3, strides=2, padding="same")(x)
residual = layers.Conv2D(
size, 1, strides=2, padding="same", use_bias=False)(residual)
x = layers.add([x, residual])
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(len(labels_dict), activation="softmax")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop",
loss="categorical_crossentropy", metrics=['accuracy'])
callbacks = [
keras.callbacks.ModelCheckpoint(model_name,
save_best_only=True)
]
history = model.fit(train_dataset,
epochs=200,
callbacks=callbacks,
validation_data=validation_dataset,)
epochs = range(1, len(history.history["loss"]) + 1)
loss = history.history["loss"]
val_loss = history.history["val_loss"]
acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
plt.plot(epochs, acc, "bo", label="Точность на этапе обучения")
plt.plot(epochs, val_acc, "b", label="Точность на этапе проверки")
plt.title("Точность на этапах обучения и проверки")
plt.legend()
plt.figure()
plt.plot(epochs, loss, "bo", label="Потери на этапе обучения")
plt.plot(epochs, val_loss, "b", label="Потери на этапе проверки")
plt.title("Потери на этапах обучения и проверки")
plt.legend()
plt.show()
test_model = keras.models.load_model(model_name)
test_loss, test_acc = test_model.evaluate(test_dataset)
print(f"Test accuracy: {test_acc:.3f}")

View File

@ -1,100 +0,0 @@
import os
import random
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.utils import image_dataset_from_directory, split_dataset
img_size = (180, 180)
conv_base = keras.applications.vgg16.VGG16(
weights="imagenet",
include_top=False,
input_shape=(180, 180, 3))
conv_base.trainable = False
# обогащение выборки
data_augmentation = keras.Sequential(
[
layers.RandomFlip("horizontal"),
layers.RandomRotation(0.1),
layers.RandomZoom(0.2),
])
input_dir = "assets/dog"
labels_dict = {}
for fname in os.listdir(input_dir):
if fname in labels_dict:
continue
labels_dict[fname] = len(labels_dict)
model_name = "beerd_imagenet_25_04_2023.keras"
train_dataset, val_ds = image_dataset_from_directory(
input_dir,
labels="inferred",
label_mode="categorical",
class_names=None,
color_mode="rgb",
batch_size=32,
seed=12,
image_size=img_size,
shuffle=True,
validation_split=0.1,
subset="both",
interpolation="bilinear",
follow_links=False,
crop_to_aspect_ratio=False
)
validation_dataset, test_dataset = split_dataset(val_ds, left_size=0.8)
inputs = keras.Input(shape=(180, 180, 3))
x = data_augmentation(inputs)
x = keras.applications.vgg16.preprocess_input(x)
x = conv_base(x)
x = layers.Flatten()(x)
x = layers.Dense(512)(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(len(labels_dict), activation="softmax")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop",
loss="categorical_crossentropy", metrics=['accuracy'])
callbacks = [
keras.callbacks.ModelCheckpoint(model_name,
save_best_only=True)
]
history = model.fit(train_dataset,
epochs=100,
callbacks=callbacks,
validation_data=validation_dataset,)
epochs = range(1, len(history.history["loss"]) + 1)
loss = history.history["loss"]
val_loss = history.history["val_loss"]
acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
plt.plot(epochs, acc, "bo", label="Точность на этапе обучения")
plt.plot(epochs, val_acc, "b", label="Точность на этапе проверки")
plt.title("Точность на этапах обучения и проверки")
plt.legend()
plt.figure()
plt.plot(epochs, loss, "bo", label="Потери на этапе обучения")
plt.plot(epochs, val_loss, "b", label="Потери на этапе проверки")
plt.title("Потери на этапах обучения и проверки")
plt.legend()
plt.show()
test_model = keras.models.load_model(model_name)
test_loss, test_acc = test_model.evaluate(test_dataset)
print(f"Test accuracy: {test_acc:.3f}")

View File

@ -1,44 +0,0 @@
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import json
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.utils import load_img, img_to_array
from tensorflow.keras.utils import image_dataset_from_directory
# model_name = "beerd_25_04_2023.keras"
model_name = "beerd_imagenet_25_04_2023.keras"
img = load_img("photo_2023-04-25_10-02-25.jpg", color_mode="rgb")
img = tf.image.resize(img, (180, 180, ), "bilinear")
img_array = img_to_array(img)
test_model = keras.models.load_model(model_name)
test_loss = test_model.predict(np.expand_dims(img_array, 0))
list_labels = [fname for fname in os.listdir("assets/dog")]
list_labels.sort()
dict_names = {}
for i, label in enumerate(list_labels):
dict_names[i] = label
with open("beerds.json", "w") as f:
f.write(json.dumps(dict_names))
max_val = 0
max_num = 0
for i, val in enumerate(test_loss[0]):
if val < max_val:
continue
max_val = val
max_num = i
print("-----------------------")
print(list_labels)
print(test_loss)
print(max_num, max_val, dict_names[max_num])

165
ml/dogs.py Normal file
View File

@ -0,0 +1,165 @@
import os
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torchvision.datasets import ImageFolder # type: ignore
from torch.utils.data import Dataset, DataLoader, random_split
from torchvision import transforms # type: ignore
import torchvision
from typing import Tuple
# Select the compute device: GPU when available, otherwise CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {DEVICE}")

# Image size (H, W) fed to the network.
IMG_SIZE = (200, 200)
# Root directory with one sub-directory per dog breed (ImageFolder layout).
INPUT_DIR = "assets/dog"
# Number of training epochs.
NUM_EPOCHS = 90
def get_labels(input_dir, img_size):
    """Build the image dataset and an index -> class-name mapping.

    Returns a tuple ``(labels_dict, dataset)`` where ``labels_dict`` maps
    the integer class index used by the model to the class (directory)
    name, and ``dataset`` is the transformed ImageFolder.
    """
    # Preprocessing: resize, convert to tensor, normalise channels to [-1, 1].
    pipeline = transforms.Compose(
        [
            transforms.Resize(img_size),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ]
    )
    folder = ImageFolder(root=input_dir, transform=pipeline)
    # ImageFolder assigns indices in sorted class order; mirror that here.
    index_to_name = dict(enumerate(folder.classes))
    return index_to_name, folder
def get_loaders(dataset: Dataset) -> Tuple[DataLoader, DataLoader]:
    """Split *dataset* 80/20 and wrap both halves in DataLoaders.

    The training loader reshuffles every epoch; the validation loader
    keeps a fixed order. Both use a batch size of 32.
    """
    n_train = int(0.8 * len(dataset))
    train_part, val_part = random_split(dataset, [n_train, len(dataset) - n_train])
    return (
        DataLoader(train_part, batch_size=32, shuffle=True),
        DataLoader(val_part, batch_size=32, shuffle=False),
    )
def load_model(model_path: str, device: str = 'cuda') -> nn.Module:
    """Load a saved model from *model_path*, or build a fresh ResNet-50.

    If no checkpoint exists, a ResNet-50 with ImageNet weights is created
    (this downloads weights on first use) and its final FC layer is
    replaced to match the number of classes.
    """
    if not os.path.isfile(model_path):
        # NOTE(review): depends on the module-level `labels_dict` assigned
        # in the __main__ block — it must be set before calling this.
        print("Start new model")
        model = torchvision.models.resnet50(pretrained=True)
        model.fc = torch.nn.Linear(model.fc.in_features, len(labels_dict))
        return model
    # weights_only=False unpickles arbitrary Python objects — only load
    # checkpoints from a trusted source.
    model = torch.load(model_path, map_location=device, weights_only=False)
    model.eval()  # train() flips this back to training mode per epoch
    return model
def train(num_epochs: int, model: nn.Module, train_loader: DataLoader, val_loader: DataLoader) -> Tuple[list[float], list[float], list[float], list[float]]:
    """Fine-tune *model* for *num_epochs*, checkpointing on best val loss.

    Only the parameters of ``model.fc`` are passed to the optimiser; the
    best model (lowest validation loss) is pickled whole to
    ``full_model.pth``. Returns the four per-epoch histories in the order
    (val_acc, train_acc, val_loss, train_loss).
    """
    criterion = torch.nn.CrossEntropyLoss()
    # NOTE(review): only fc is optimised; earlier layers are not updated by
    # this optimiser but their requires_grad flags are not explicitly frozen.
    optimizer = torch.optim.Adam(model.fc.parameters(), lr=0.001, weight_decay=0.001)
    # Per-epoch metric histories.
    train_loss_history = []
    train_acc_history = []
    val_loss_history = []
    val_acc_history = []
    # Track the best validation loss seen so far for checkpointing.
    best_val_loss = float('inf')
    for epoch in range(num_epochs):
        model.train()
        running_loss = 0.0
        correct = 0
        total = 0
        # ---- Training pass over the epoch ----
        for inputs, labels in train_loader:
            inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            # Accuracy bookkeeping: argmax over class logits.
            _, predicted = outputs.max(1)
            total += labels.size(0)
            correct += predicted.eq(labels).sum().item()
        # Mean loss per batch; accuracy as a percentage.
        train_loss = running_loss / len(train_loader)
        train_acc = 100. * correct / total
        train_loss_history.append(train_loss)
        train_acc_history.append(train_acc)
        print(f"Epoch {epoch+1}/{num_epochs}, Train Loss: {train_loss:.4f}, Train Accuracy: {train_acc:.2f}%")
        # ---- Validation pass (no gradients) ----
        model.eval()
        val_loss = 0.0
        correct = 0
        total = 0
        with torch.no_grad():
            for inputs, labels in val_loader:
                inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                val_loss += loss.item()
                _, predicted = outputs.max(1)
                total += labels.size(0)
                correct += predicted.eq(labels).sum().item()
        val_loss /= len(val_loader)
        val_acc = 100. * correct / total
        val_loss_history.append(val_loss)
        val_acc_history.append(val_acc)
        # Checkpoint the full model whenever validation loss improves.
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            print("save model")
            torch.save(model, "full_model.pth")
        print(f"Validation Loss: {val_loss:.4f}, Validation Accuracy: {val_acc:.2f}%")
    return val_acc_history, train_acc_history, val_loss_history, train_loss_history
def show(num_epochs: int,
         val_acc_history: list[float],
         train_acc_history: list[float],
         val_loss_history: list[float],
         train_loss_history: list[float]):
    """Render accuracy and loss curves for training vs. validation.

    Opens two matplotlib figures (accuracy first, then loss); each call
    to plt.show() blocks until the window is closed.
    """
    xs = range(1, num_epochs + 1)
    # (train series, val series, train label, val label, title, y-label)
    figures = [
        (train_acc_history, val_acc_history,
         "Точность на обучении", "Точность на валидации",
         "Точность на этапах обучения и проверки", "Точность (%)"),
        (train_loss_history, val_loss_history,
         "Потери на обучении", "Потери на валидации",
         "Потери на этапах обучения и проверки", "Потери"),
    ]
    for train_vals, val_vals, train_lbl, val_lbl, title, ylabel in figures:
        plt.figure(figsize=(10, 5))
        plt.plot(xs, train_vals, "bo-", label=train_lbl)
        plt.plot(xs, val_vals, "ro-", label=val_lbl)
        plt.title(title)
        plt.xlabel("Эпохи")
        plt.ylabel(ylabel)
        plt.legend()
        plt.grid()
        plt.show()
if __name__ == "__main__":
    # Build the dataset and the index -> class-name mapping.
    labels_dict: dict[int, str]
    dataset: ImageFolder
    labels_dict, dataset = get_labels(INPUT_DIR, IMG_SIZE)
    # Load an existing checkpoint or start a fresh ResNet-50, then move to device.
    model: nn.Module = load_model("full_model.pth").to(DEVICE)
    # 80/20 train/validation loaders.
    train_loader: DataLoader
    val_loader: DataLoader
    train_loader, val_loader = get_loaders(dataset)
    # Train and collect per-epoch metric histories.
    val_acc_history, train_acc_history, val_loss_history, train_loss_history = train(NUM_EPOCHS, model, train_loader, val_loader)
    # Plot the accuracy and loss curves.
    show(NUM_EPOCHS, val_acc_history, train_acc_history, val_loss_history, train_loss_history)

52
ml/dogs_check.py Normal file
View File

@ -0,0 +1,52 @@
import torch
from torchvision import transforms # type: ignore
import torch.nn.functional as F
from torchvision import transforms
from PIL import Image
import json
# Mapping from *stringified* class index -> class name, written by training.
# json.load reads and parses in one step (was f.read() + json.loads).
with open("labels.json", "r", encoding="utf-8") as f:
    labels_dict = json.load(f)
def load_model(model_path, device='cuda'):
    """Load a fully pickled model from *model_path* and set inference mode.

    weights_only=False means torch.load unpickles arbitrary Python
    objects — only load checkpoint files you trust.
    """
    model = torch.load(model_path, map_location=device, weights_only=False)
    model.eval()
    return model
# Pick the inference device and load the trained checkpoint at import time.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = load_model('full_model.pth', device=device)
def predict_image(image_path, model, device='cuda'):
    """Classify a single image file.

    Returns ``(predicted_index, probabilities)`` where probabilities is a
    numpy vector of softmax scores over all classes.
    """
    # Same preprocessing as training: resize to 200x200, tensor,
    # normalise each channel to [-1, 1].
    pipeline = transforms.Compose([
        transforms.Resize((200, 200)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    rgb = Image.open(image_path).convert('RGB')
    batch = pipeline(rgb).unsqueeze(0).to(device)  # add leading batch dim
    with torch.no_grad():
        logits = model(batch)
        probs = F.softmax(logits[0], dim=0)
        _, best = torch.max(probs, 0)
    return best.item(), probs.cpu().numpy()
# Example usage: classify a single test photo.
image_path = 'assets/test/photo_2023-04-25_10-02-25.jpg'
predicted_idx, probabilities = predict_image(image_path, model, device)
# labels_dict is keyed by *string* indices (it was loaded from JSON).
predicted_label = labels_dict[str(predicted_idx)]
print(f'Predicted class: {predicted_label} (prob: {probabilities[predicted_idx]:.2f})')

18
pyproject.toml Normal file
View File

@ -0,0 +1,18 @@
[project]
name = "ai"
version = "0.1.0"
description = "Add your description here"
readme = "README.md"
requires-python = ">=3.11"
dependencies = [
"granian>=2.2.4",
"jinja2>=3.1.6",
"matplotlib>=3.10.1",
"mypy>=1.15.0",
"numpy==1.23.5",
"pyqt5>=5.15.11",
"ruff>=0.11.5",
"starlite>=1.51.16",
"torch>=2.6.0",
"torchvision>=0.21.0",
]

View File

@ -1,63 +1,154 @@
from pathlib import Path
from PIL import Image
from sanic import Sanic
from sanic.response import json as json_answer
from starlite import Starlite, Controller, StaticFilesConfig, get, post, Body, MediaType, RequestEncodingType, Starlite, UploadFile, Template, TemplateConfig
from starlite.contrib.jinja import JinjaTemplateEngine
import numpy as np
from tensorflow import keras
from tensorflow.keras.utils import img_to_array
import io
import os
import json
import requests
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
app = Sanic("Ai")
model_name = "../beerd_imagenet_25_04_2023.keras"
model_name = "models/beerd_imagenet_02_05_2023.keras"
test_model_imagenet = keras.models.load_model(model_name)
model_name = "../beerd_25_04_2023.keras"
model_name = "./models/beerd_25_04_2023.keras"
test_model = keras.models.load_model(model_name)
dict_names = {}
with open("beerds.json", "r") as f:
dict_names = json.loads(f.read())
app.static("/", "index.html", name="main")
app.static("/static/", "static/", name="static")
for key in dict_names:
dict_names[key] = dict_names[key].replace("_", " ")
VK_URL = "https://api.vk.com/method/"
TOKEN = ""
headers = {"Authorization": f"Bearer {TOKEN}"}
group_id = 220240483
postfix = "?v=5.131"
IMAGES = {}
@app.post("/beeds")
async def beeds(request):
body = request.files.get("f").body
def get_images():
global IMAGES
img = Image.open(io.BytesIO(body))
img = img.convert('RGB')
r = requests.get(
f"{VK_URL}photos.getAll{postfix}&access_token={TOKEN}&owner_id=-{group_id}&count=200")
items = r.json().get("response").get("items")
for item in items:
for s in item.get("sizes"):
if s.get("type") != "x":
continue
IMAGES[item.get("text")] = s.get("url")
break
img_net = img.resize((180, 180, ), Image.BILINEAR)
img_array = img_to_array(img_net)
test_loss_image_net = test_model_imagenet.predict(
np.expand_dims(img_array, 0))
img = img.resize((200, 200, ), Image.BILINEAR)
img_array = img_to_array(img)
test_loss = test_model.predict(np.expand_dims(img_array, 0))
get_images()
result = {}
for i, val in enumerate(test_loss[0]):
if val <= 0.09:
continue
result[val] = dict_names[str(i)]
result_net = {}
for i, val in enumerate(test_loss_image_net[0]):
if val <= 0.09:
continue
result_net[val] = dict_names[str(i)]
class BeerdsController(Controller):
path = "/breeds"
return json_answer({
"results": dict(sorted(result.items(), reverse=True)),
"results_net": dict(sorted(result_net.items(), reverse=True)),
})
@post("/", media_type=MediaType.TEXT)
async def beeds(self, data: UploadFile = Body(media_type=RequestEncodingType.MULTI_PART)) -> dict:
body = await data.read()
if __name__ == "__main__":
app.run(auto_reload=True, port=4003, host="0.0.0.0")
img = Image.open(io.BytesIO(body))
img = img.convert('RGB')
img_net = img.resize((180, 180, ), Image.BILINEAR)
img_array = img_to_array(img_net)
test_loss_image_net = test_model_imagenet.predict(
np.expand_dims(img_array, 0))
img = img.resize((200, 200, ), Image.BILINEAR)
img_array = img_to_array(img)
test_loss = test_model.predict(np.expand_dims(img_array, 0))
result = {}
for i, val in enumerate(test_loss[0]):
if val <= 0.09:
continue
result[val] = dict_names[str(i)]
result_net = {}
for i, val in enumerate(test_loss_image_net[0]):
if val <= 0.09:
continue
result_net[val] = dict_names[str(i)]
items_one = dict(sorted(result.items(), reverse=True))
items_two = dict(sorted(result_net.items(), reverse=True))
images = []
for item in items_one:
name = items_one[item].replace("_", " ")
if name not in IMAGES:
continue
images.append({"name": name, "url": IMAGES[name]})
for item in items_two:
name = items_two[item].replace("_", " ")
if name not in IMAGES:
continue
images.append({"name": name, "url": IMAGES[name]})
return {
"results": items_one,
"results_net": items_two,
"images": images,
}
class BaseController(Controller):
path = "/"
@get("/")
async def main(self) -> Template:
return Template(name="index.html")
@get("/sitemap.xml", media_type=MediaType.XML)
async def sitemaps(self) -> bytes:
return '''<?xml version="1.0" encoding="UTF-8"?>
<urlset
xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9
http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd">
<!-- created with Free Online Sitemap Generator www.xml-sitemaps.com -->
<url>
<loc>https://xn-----6kcp3cadbabfh8a0a.xn--p1ai/</loc>
<lastmod>2023-05-01T19:01:03+00:00</lastmod>
</url>
</urlset>
'''.encode()
@get("/robots.txt", media_type=MediaType.TEXT)
async def robots(self) -> str:
return '''
User-agent: *
Allow: /
Sitemap: https://xn-----6kcp3cadbabfh8a0a.xn--p1ai/sitemap.xml
'''
app = Starlite(
route_handlers=[BeerdsController, BaseController],
static_files_config=[
StaticFilesConfig(directories=["static"], path="/static"),
],
template_config=TemplateConfig(
directory=Path("templates"),
engine=JinjaTemplateEngine,
),
)

BIN
server/static/favicon.ico Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.2 KiB

728
server/static/photos.json Normal file
View File

@ -0,0 +1,728 @@
[
{
"name_en": "beagle",
"name": "бигль",
"photo": "https://sun9-46.userapi.com/impg/B48745e4fBGwvuPzG-Oj0tIADbP_huU4Y49pIQ/nfmchsz5Fug.jpg?size=960x640&quality=95&sign=5f9bb57ea668a7d0a65a3542fa4382b7&type=album"
},
{
"name_en": "australian kelpie",
"name": "австралийский келпи",
"photo": "https://sun9-40.userapi.com/impg/YyM270TWdxR_TtHvOODNnuW4LM9s3-OXLrRiHA/kh-SMCm1zUc.jpg?size=1885x1414&quality=95&sign=fe84f57a4064f99224071c2a7d11b29d&type=album"
},
{
"name_en": "australian terrier",
"name": "австралийский терьер",
"photo": "https://sun1-54.userapi.com/impg/ZvspRqzaR1n_0kJf58B73WY_rpJVR8HHtePG8Q/gUZm0Q-JcB4.jpg?size=1429x1072&quality=95&sign=d8ddf8c7ed725b5e34043b76833b5b1f&type=album"
},
{
"name_en": "australian silky terrier",
"name": "австралийский шелковистый терьер",
"photo": "https://sun9-48.userapi.com/impg/bP1FWRk8uf8ug18lVsZ7K6V7wxybycHKfnfdzA/oQ1YghcoOn0.jpg?size=720x540&quality=95&sign=901c4cd68e6e7582e28b54071a5ba927&type=album"
}
,
{
"name_en": "american eskimo dog",
"name": "американская эскимосская собака",
"photo": "https://sun9-77.userapi.com/impg/OFrCkOXU9qM19rfOqHW7ejVR4tzCgpFpWG7ADg/hJMaSg8qXqE.jpg?size=1200x900&quality=95&sign=48d134ec21e182e31080b4bb999eff05&type=album"
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
,
{
"name_en": "",
"name": "",
"photo": ""
}
]

View File

@ -1,37 +1,59 @@
let urlCreator = window.URL || window.webkitURL;
async function SavePhoto()
{
document.getElementById("result").innerHTML = "";
let photo = document.getElementById("file-input").files[0];
let formData = new FormData();
formData.append("f", photo);
let response = await fetch('/beeds', {method: "POST", body: formData});
if (response.ok) {
let json = await response.json();
let text = ""
let uniqChecker = {}
for (let key in json.results_net) {
text += "<div>" + json.results_net[key] + ": " + key + "</div>";
uniqChecker[json.results_net[key]] = key;
async function SavePhoto() {
document.getElementById("result").innerHTML = "";
let photo = document.getElementById("file-input").files[0];
let formData = new FormData();
// TODO: пройтись по всем результатм - если совпадают дважды - поднять вверх
formData.append("f", photo);
let response = await fetch('/beeds', { method: "POST", body: formData });
if (response.ok) {
let json = await response.json();
let text = "<h3 class='image-results'>Результаты</h3>";
let uniqChecker = {};
for (let key in json.results) {
text += "<div class='image-block'><div class='image-text'>" + json.results[key] + " (вероятность: " + Math.round(parseFloat(key)*10)/10 + ")</div>";
for (let imgKey in json.images) {
if (json.images[imgKey].name != json.results[key]) {
continue;
}
for (let key in json.results) {
if (uniqChecker[json.results[key]] != undefined) {
continue
}
text += "<div>" + json.results[key] + ": " + key + "</div>";
if (json.images[imgKey] == undefined) {
continue;
}
document.getElementById("result").innerHTML = text;
} else {
alert("Ошибка HTTP: " + response.status);
text += "<div class='image-container'><img src='" + json.images[imgKey].url + "'/></div>";
break;
}
text += "</div>";
uniqChecker[json.results[key]] = key;
}
for (let key in json.results_net) {
if (uniqChecker[json.results_net[key]] != undefined) {
continue
}
text += "<div class='image-block'><div class='image-text'>" + json.results_net[key] + "</div>";
for (let imgKey in json.images) {
if (json.images[imgKey].name != json.results_net[key]) {
continue;
}
if (json.images[imgKey] == undefined) {
continue;
}
text += "<div class='image-container'><img src='" + json.images[imgKey].url + "'/></div>";
break;
}
text += "</div>";
}
document.getElementById("result").innerHTML = text;
} else {
alert("Ошибка HTTP: " + response.status);
}
}
document.getElementById("file-input").addEventListener("change", function () {
let photo = document.getElementById("file-input").files[0];
let imageUrl = urlCreator.createObjectURL(photo);
document.getElementById("image").src = imageUrl;
document.getElementById("upload-image-text").innerHTML = "Ваше изображение:";
});

View File

@ -2,8 +2,17 @@ body {
background: #fff;
margin: 0;
border: none;
color: #696969;
font-family: Arial, Helvetica, sans-serif;
line-height: 180%;
}
a {
color: black;
text-decoration: underline;
}
img {
margin: 0;
border: none;
@ -11,9 +20,38 @@ img {
h1 {
text-align: center;
color: #696969;
}
form {
border: 1px solid #696969;
padding: 0 10px 0 10px;
}
#main {
max-width: 1024px;
margin: auto;
padding: 10px;
min-width: 360px;
}
.image-block {
display: flex;
}
.image-block div {
width: 50%;
text-align: center;
}
.image-block img {
max-width: 200px;
}
.upload-image-text {
text-align: center;
}
.image-results {
text-align: center;
}

View File

@ -4,6 +4,8 @@
<meta charset="UTF-8">
<meta name="yandex-verification" content="2d4efced567f0f7f" />
<meta name="google-site-verification" content="gKPSnPZ1ULUF9amD0vw_JQqkS5GLqc937UxayaN_s-I" />
<meta name="description" content="Определение породы собаки по фото. Определение породы происходит при помощи нейронной сети - точность определения составляет 60%." />
<link rel="icon" type="image/x-icon" href="static/favicon.ico">
<title>Определение породы собаки по фото</title>
<link rel="stylesheet" href="static/styles.css">
<!-- Yandex.Metrika counter -->
@ -26,15 +28,20 @@
<body>
<section id="main">
<h1>Определить породу собаки по фото</h1>
<p>Загрузите фото, чтобы опеределить породу собаки или щенка. Если порода смешанная, после загрузки будет показана вероятность породы животного.</p>
<p>Опредление породы происходит при помощи нейронной сети - точность опеределения составляет 60%, сеть обучена на 125 породах. Если на фото будет неизвестная порода или не собака - сеть не сможет правильно опеределить, что это.</p>
<p>Загрузите фото, чтобы определить породу собаки или щенка. Если порода смешанная (или порода определена неточно), после загрузки будет показана вероятность породы животного.</p>
<p>Определение породы происходит при помощи нейронной сети - точность определения составляет 60%, сеть обучена на <a href="https://vk.com/albums-220240483" target="_blank">125 породах</a>. Если на фото будет неизвестная порода или не собака - сеть не сможет правильно определить, что это.</p>
<p>Для распознавания все фото отправляются на сервер, но там не сохраняются</p>
<form enctype="multipart/form-data" method="post" action="/beeds" onsubmit="SavePhoto();return false">
<form enctype="multipart/form-data" method="post" action="/breeds" onsubmit="SavePhoto();return false">
<p><input type="file" name="f" id="file-input">
<input type="submit" value="Отправить"></p>
<input type="submit" value="Определить"></p>
</form>
<div id="result"></div>
<img id="image" style="max-width: 200px;"/>
<div>
<div id="upload-image">
<div id="upload-image-text"></div>
<img id="image" style="max-width: 200px;"/>
</div>
<div id="result"></div>
</div>
</body>
</section>
<script src="static/scripts.js"></script>

1234
uv.lock Normal file

File diff suppressed because it is too large Load Diff

View File

@ -1,17 +1,35 @@
import os
import time
import requests
import requests # type: ignore
#TOKEN = "vk1.a.mf4KFdN9gC14SSGDFHVwFRTpzBKBeNxkdlEe0IFlZqU5a5rHH5PwiPn5ekWnDhc94lEI5d2vtXzfxvjXRPapsQZCCt89YUwCIQB1alo06A0Iup9PCWbd6F5GayBn0TS_26N5BTQ1B7deFzi25BV3LKimP9g5ZkeoY0xhNfQ7XawPnBhhK0a2ipL5zZxygYgf"
TOKEN = "vk1.a.m92hxDp9fUi06dr424RERKOsika5s5WTNZM64XxTZbU_feIuIRQs72UH_WpeZBr0f_av3V68MvADcNyRSwPMl015aWW0EZjc0d50b8my4_w88BELd1BVT2p9o_7W0YyedfnYGGylyQzfj0jGy2Ufuc3CGFQXJjGJrnINmMSjNps84jhMk0LDqTVFG0a6VlgJ"
# Получить токен чтобы:
# https://oauth.vk.com/oauth/authorize?client_id=51534014&display=page&scope=photos,offline&response_type=token&v=5.131&slogin_h=4984535b54c59e09ca.f1e0b6dce0d0cc82e7&__q_hash=618f24fbac4bc34edbf09b8bc503e923
TOKEN = ""
VK_URL = "https://api.vk.com/method/"
headers = {"Authorization": f"Bearer {TOKEN}"}
postfix = "?v=5.131&state=123456"
group_id = 220240483
dir ="../assets/dog"
dir = "../assets/dog"
list_labels = [fname for fname in os.listdir(dir)]
r = requests.get(
f"{VK_URL}photos.getAll{postfix}&access_token={TOKEN}&owner_id=-{group_id}&count=200")
if "error" in r.json():
print("error", r.json())
exit()
items = r.json().get("response").get("items")
names = {}
for item in items:
if item.get("text") in names:
print("Doubles: ", item.get("text"))
names[item.get("text")] = True
for name in list_labels:
if name.replace("_", " ") in names:
print(f"Continue: {name}")
continue
if name in names:
print(f"Continue: {name}")
continue
max_size = 0
max_index = 0
list_data = os.listdir(os.path.join(dir, name))
@ -22,17 +40,25 @@ for name in list_labels:
max_index = i
image_name = list_data[max_index]
file_stats = os.stat(os.path.join(dir, name, image_name))
r = requests.post(f"{VK_URL}photos.createAlbum{postfix}", data={"title": name, "group_id": group_id}, headers=headers)
r = requests.post(f"{VK_URL}photos.createAlbum{postfix}", data={
"title": name.replace("_", " "), "group_id": group_id}, headers=headers)
if "error" in r.json():
print("error", r.json())
break
album_id = r.json().get("response").get("id")
r = requests.get(f"{VK_URL}photos.getUploadServer{postfix}&album_id={album_id}&access_token={TOKEN}&group_id={group_id}")
r = requests.get(
f"{VK_URL}photos.getUploadServer{postfix}&album_id={album_id}&access_token={TOKEN}&group_id={group_id}")
url = r.json().get("response").get("upload_url")
files = {'file1': open(os.path.join(dir, name, image_name),'rb')}
files = {'file1': open(os.path.join(dir, name, image_name), 'rb')}
r = requests.post(url, files=files)
server = r.json().get("server")
photos_list = r.json().get("photos_list")
hash_data = r.json().get("hash")
r = requests.post(f"{VK_URL}photos.save{postfix}", data={"album_id": album_id, "server": server, "photos_list": photos_list, "hash": hash_data}, headers=headers)
time.sleep(1)
aid = r.json().get("aid")
r = requests.post(f"{VK_URL}photos.save{postfix}&hash={hash_data}", data={"album_id": aid, "server": server,
"photos_list": photos_list, "caption": name.replace("_", " "), "group_id": group_id}, headers=headers)
if "error" in r.json():
print("error", r.json())
break
print(f"Created: {name}")
time.sleep(1)