Spaces:
Runtime error
Runtime error
julianna-fil
committed on
Commit
•
62e5590
1
Parent(s):
2699b67
add
Browse files- .gitattributes +1 -0
- .idea/.gitignore +8 -0
- .idea/EmotionApp.iml +8 -0
- .idea/inspectionProfiles/Project_Default.xml +58 -0
- .idea/inspectionProfiles/profiles_settings.xml +6 -0
- .idea/misc.xml +4 -0
- .idea/modules.xml +8 -0
- .idea/vcs.xml +6 -0
- __pycache__/model.cpython-310.pyc +0 -0
- app.py +94 -0
- model.py +105 -0
- requirements.txt +9 -0
- res0.jpg +0 -0
- testJulifil.jpg +0 -0
.gitattributes
CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
32 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
33 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
34 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
32 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
33 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
34 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
35 |
+
*.psd filter=lfs diff=lfs merge=lfs -text
|
.idea/.gitignore
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Default ignored files
|
2 |
+
/shelf/
|
3 |
+
/workspace.xml
|
4 |
+
# Datasource local storage ignored files
|
5 |
+
/dataSources/
|
6 |
+
/dataSources.local.xml
|
7 |
+
# Editor-based HTTP Client requests
|
8 |
+
/httpRequests/
|
.idea/EmotionApp.iml
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
2 |
+
<module type="PYTHON_MODULE" version="4">
|
3 |
+
<component name="NewModuleRootManager">
|
4 |
+
<content url="file://$MODULE_DIR$" />
|
5 |
+
<orderEntry type="inheritedJdk" />
|
6 |
+
<orderEntry type="sourceFolder" forTests="false" />
|
7 |
+
</component>
|
8 |
+
</module>
|
.idea/inspectionProfiles/Project_Default.xml
ADDED
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<component name="InspectionProjectProfileManager">
|
2 |
+
<profile version="1.0">
|
3 |
+
<option name="myName" value="Project Default" />
|
4 |
+
<inspection_tool class="DuplicatedCode" enabled="true" level="WEAK WARNING" enabled_by_default="true">
|
5 |
+
<Languages>
|
6 |
+
<language minSize="773" name="Python" />
|
7 |
+
</Languages>
|
8 |
+
</inspection_tool>
|
9 |
+
<inspection_tool class="HtmlUnknownTag" enabled="true" level="WARNING" enabled_by_default="true">
|
10 |
+
<option name="myValues">
|
11 |
+
<value>
|
12 |
+
<list size="8">
|
13 |
+
<item index="0" class="java.lang.String" itemvalue="nobr" />
|
14 |
+
<item index="1" class="java.lang.String" itemvalue="noembed" />
|
15 |
+
<item index="2" class="java.lang.String" itemvalue="comment" />
|
16 |
+
<item index="3" class="java.lang.String" itemvalue="noscript" />
|
17 |
+
<item index="4" class="java.lang.String" itemvalue="embed" />
|
18 |
+
<item index="5" class="java.lang.String" itemvalue="script" />
|
19 |
+
<item index="6" class="java.lang.String" itemvalue="div" />
|
20 |
+
<item index="7" class="java.lang.String" itemvalue="button" />
|
21 |
+
</list>
|
22 |
+
</value>
|
23 |
+
</option>
|
24 |
+
<option name="myCustomValuesEnabled" value="true" />
|
25 |
+
</inspection_tool>
|
26 |
+
<inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
|
27 |
+
<option name="ignoredPackages">
|
28 |
+
<value>
|
29 |
+
<list size="8">
|
30 |
+
<item index="0" class="java.lang.String" itemvalue="scipy" />
|
31 |
+
<item index="1" class="java.lang.String" itemvalue="matplotlib" />
|
32 |
+
<item index="2" class="java.lang.String" itemvalue="pyTelegramBotAPI" />
|
33 |
+
<item index="3" class="java.lang.String" itemvalue="scikit_learn" />
|
34 |
+
<item index="4" class="java.lang.String" itemvalue="imutils" />
|
35 |
+
<item index="5" class="java.lang.String" itemvalue="skimage" />
|
36 |
+
<item index="6" class="java.lang.String" itemvalue="Pillow" />
|
37 |
+
<item index="7" class="java.lang.String" itemvalue="numpy" />
|
38 |
+
</list>
|
39 |
+
</value>
|
40 |
+
</option>
|
41 |
+
</inspection_tool>
|
42 |
+
<inspection_tool class="PyPep8NamingInspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
|
43 |
+
<option name="ignoredErrors">
|
44 |
+
<list>
|
45 |
+
<option value="N803" />
|
46 |
+
</list>
|
47 |
+
</option>
|
48 |
+
</inspection_tool>
|
49 |
+
<inspection_tool class="PyUnresolvedReferencesInspection" enabled="true" level="WARNING" enabled_by_default="true">
|
50 |
+
<option name="ignoredIdentifiers">
|
51 |
+
<list>
|
52 |
+
<option value="tuple.append" />
|
53 |
+
<option value="for_table.*" />
|
54 |
+
</list>
|
55 |
+
</option>
|
56 |
+
</inspection_tool>
|
57 |
+
</profile>
|
58 |
+
</component>
|
.idea/inspectionProfiles/profiles_settings.xml
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<component name="InspectionProjectProfileManager">
|
2 |
+
<settings>
|
3 |
+
<option name="USE_PROJECT_PROFILE" value="false" />
|
4 |
+
<version value="1.0" />
|
5 |
+
</settings>
|
6 |
+
</component>
|
.idea/misc.xml
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
2 |
+
<project version="4">
|
3 |
+
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.9" project-jdk-type="Python SDK" />
|
4 |
+
</project>
|
.idea/modules.xml
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
2 |
+
<project version="4">
|
3 |
+
<component name="ProjectModuleManager">
|
4 |
+
<modules>
|
5 |
+
<module fileurl="file://$PROJECT_DIR$/.idea/EmotionApp.iml" filepath="$PROJECT_DIR$/.idea/EmotionApp.iml" />
|
6 |
+
</modules>
|
7 |
+
</component>
|
8 |
+
</project>
|
.idea/vcs.xml
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
2 |
+
<project version="4">
|
3 |
+
<component name="VcsDirectoryMappings">
|
4 |
+
<mapping directory="$PROJECT_DIR$" vcs="Git" />
|
5 |
+
</component>
|
6 |
+
</project>
|
__pycache__/model.cpython-310.pyc
ADDED
Binary file (2.6 kB). View file
|
|
app.py
ADDED
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Emotion App — Streamlit front-end.

Reads a Russian text message, detects its sentiment (ru->en translation
followed by an English sentiment classifier), then runs a conditional GAN
generator over a photo (an uploaded one, or the bundled default) so the
face smiles or frowns to match the detected mood.
"""
import io

import cv2
import streamlit as st
import torch
from PIL import Image
from torchvision import transforms
from transformers import pipeline

# The Generator class must be importable for torch.load to unpickle the model.
from model import Generator  # noqa: F401

st.set_page_config(
    page_title="Emotion App!",
    page_icon="😎",
    layout="wide"
)

# Inverse of the ImageNet normalization applied in `transform` below; used to
# turn the generator output back into displayable pixel values.
inv_normalize = transforms.Normalize(
    mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225],
    std=[1/0.229, 1/0.224, 1/0.225]
)


@st.cache_resource
def _load_pipelines():
    """Load the HF translation and sentiment pipelines once per process.

    Previously both pipelines were rebuilt on every Streamlit rerun, which
    re-initialized the models on each user interaction.
    """
    translator = pipeline('translation', model="Helsinki-NLP/opus-mt-ru-en")
    classifier = pipeline('sentiment-analysis',
                          model="distilbert-base-uncased-finetuned-sst-2-english")
    return translator, classifier


@st.cache_resource
def _load_generator():
    """Load the pickled generator once; CPU-only deployment."""
    return torch.load("gen_model.pt", map_location=torch.device('cpu'))


st.markdown("### Правила игры:")
st.markdown("1) Введите сообщение на русском языке. Это может быть что угодно, например, случившееся недавно событие или ваши мысли")
st.markdown("2) Приложение распознает тональность вашего настроения и выведет фото автора приложения с соответствующим настроением")
st.markdown("3) Но нет, это не два разных фото. Это работа генеративной модели! Не верите? Попробуйте загрузить своё фото!)")
st.markdown("Советы по выбору фото: лучше всего брать селфи! Или фотографии, где лицо крупным планом. Чем сильнее фото отличается от этого описания, тем хуже результат.")

text = st.text_area("Введите текст:")

translator, classifier = _load_pipelines()

# Default to the "smile" condition; only run the NLP stack on non-empty input.
# (The original fed the empty string through both pipelines on first load.)
smile = True
if text.strip():
    english = translator(text)[0]["translation_text"]
    smile = classifier(english)[0]['label'] == 'POSITIVE'

# Condition vector for the generator: [[1, 1]] -> smiling, [[0, 0]] -> not.
labels = torch.Tensor([[1, 1]]) if smile else torch.Tensor([[0, 0]])

file = st.file_uploader("Загрузите своё фото:")
if file:
    # .convert("RGB") guards against RGBA/palette/grayscale uploads, which
    # would otherwise break the 3-channel Normalize below.
    image = Image.open(io.BytesIO(file.getvalue())).convert("RGB")
else:
    image = Image.open("testJulifil.jpg")

transform = transforms.Compose([
    transforms.Resize(64 * 4),
    transforms.CenterCrop(64 * 4),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])
img = transform(image).unsqueeze(0)  # add batch dimension

generator = _load_generator()
out = generator(img, labels)

# Tensor (C, H, W) -> displayable uint8 image (H, W, C).
res = inv_normalize(out[0]).permute(1, 2, 0).detach().cpu().numpy()
# NOTE(review): alpha=220 / beta=40 look swapped versus the usual
# (alpha=min, beta=max) convention, but this matches the original behavior.
res = cv2.normalize(res, None, 220, 40, cv2.NORM_MINMAX, cv2.CV_8U)
res = res[:, :, ::-1]  # RGB -> BGR, the channel order cv2.imwrite expects

username = "0"
cv2.imwrite("res" + username + ".jpg", res)
st.image("res" + username + ".jpg")
model.py
ADDED
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
import torch.nn as nn
|
3 |
+
|
4 |
+
|
5 |
+
class Generator(nn.Module):
    """StarGAN-style conditional image-to-image generator.

    The condition vector is broadcast to a per-pixel map, concatenated to the
    input image channel-wise, and the result is passed through a
    down-sample / bottleneck / up-sample convolutional stack ending in Tanh
    (output values in [-1, 1], same spatial size as the input).

    Args:
        c_dim: number of condition (label) channels appended to the RGB input.
        n_bottleneck: number of 3x3 bottleneck conv blocks. Defaults to 6,
            matching the original hand-unrolled implementation.
    """

    def __init__(self, c_dim, n_bottleneck=6):
        super(Generator, self).__init__()

        def conv_block(in_ch, out_ch, kernel, stride, pad):
            # Conv -> InstanceNorm -> ReLU triple used throughout the network.
            return [
                nn.Conv2d(in_ch, out_ch, kernel_size=kernel, stride=stride,
                          padding=pad, bias=False),
                nn.InstanceNorm2d(out_ch, affine=True, track_running_stats=True),
                nn.ReLU(inplace=True),
            ]

        layers = []
        # ------- Down-sampling --------------------
        layers += conv_block(3 + c_dim, 64, 7, 1, 3)
        layers += conv_block(64, 128, 4, 2, 1)
        layers += conv_block(128, 256, 4, 2, 1)
        # -------- Bottleneck ----------------------
        for _ in range(n_bottleneck):
            layers += conv_block(256, 256, 3, 1, 1)
        # ------- Up-sampling ----------------------
        layers += [
            nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1, bias=False),
            nn.InstanceNorm2d(128, affine=True, track_running_stats=True),
            nn.ReLU(inplace=True),

            nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1, bias=False),
            nn.InstanceNorm2d(64, affine=True, track_running_stats=True),
            nn.ReLU(inplace=True),

            nn.Conv2d(64, 3, kernel_size=7, stride=1, padding=3, bias=False),
            nn.Tanh(),
        ]
        self.g = nn.Sequential(*layers)

    def forward(self, x, c):
        """Generate an image conditioned on labels.

        Args:
            x: input images, shape (batch, 3, H, W).
            c: condition vectors, shape (batch, c_dim).

        Returns:
            Tensor of shape (batch, 3, H, W) with values in [-1, 1].
        """
        # Tile each label over the spatial grid and stack it onto the image.
        c = c.view(c.size(0), c.size(1), 1, 1)
        c = c.repeat(1, 1, x.size(2), x.size(3))
        x = torch.cat([x, c], dim=1)
        return self.g(x)
73 |
+
class Discriminator(nn.Module):
    """Convolutional discriminator with two output heads.

    A stack of stride-2 4x4 convolutions (each followed by LeakyReLU) halves
    the spatial size six times while doubling the channel count, then two
    separate conv heads produce a real/fake score map and a label prediction.
    """

    def __init__(self):
        super(Discriminator, self).__init__()
        # Channel progression 3 -> 64 -> ... -> 2048; one (Conv, LeakyReLU)
        # pair per step, identical to writing the six blocks out by hand.
        widths = [3, 64, 128, 256, 512, 1024, 2048]
        stages = []
        for n_in, n_out in zip(widths, widths[1:]):
            stages.append(nn.Conv2d(n_in, n_out, kernel_size=4, stride=2, padding=1))
            stages.append(nn.LeakyReLU(0.01))
        self.d = nn.Sequential(*stages)
        # Head 1: per-patch real/fake score map.
        self.conv1 = nn.Conv2d(2048, 1, kernel_size=3, stride=1, padding=1, bias=False)
        # Head 2: label logits, collapsed to (batch, 2) in forward().
        self.conv2 = nn.Conv2d(2048, 2, kernel_size=4, bias=False)

    def forward(self, x):
        """Return (real/fake score map, label logits of shape (batch, 2))."""
        features = self.d(x)
        out_src = self.conv1(features)
        out_cls = self.conv2(features)
        return out_src, out_cls.view(out_cls.size(0), out_cls.size(1))
requirements.txt
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
huggingface-hub @ file:///C:/b/abs_8d8wo2z8c6/croot/huggingface_hub_1667490298905/work
|
2 |
+
numpy @ file:///C:/b/abs_datssh7cer/croot/numpy_and_numpy_base_1672336199388/work
|
3 |
+
numpydoc @ file:///C:/b/abs_cfdd4zxbga/croot/numpydoc_1668085912100/work
|
4 |
+
opencv-python==4.7.0.72
|
5 |
+
sacremoses==0.0.53
|
6 |
+
streamlit==1.21.0
|
7 |
+
torch==2.0.0
|
8 |
+
torchvision==0.15.1
|
9 |
+
transformers @ file:///C:/b/abs_8byf5_j714/croot/transformers_1667919454001/work
|
res0.jpg
ADDED
testJulifil.jpg
ADDED