mcding committed
Commit • 00d871f
1 Parent(s): 50fcf2a
finish
Browse files
- .gitignore +3 -1
- app.py +58 -46
- kit/{metric.py → __init__.py} +96 -80
- kit/decode.py +0 -80
- kit/metrics/perceptual.py +28 -3
- kit/test.py +0 -20
- requirements.txt +7 -2
- test.ipynb +8 -10
.gitignore
CHANGED
@@ -159,4 +159,6 @@ cython_debug/
 # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
 # and can be added to the global gitignore or merged into this file. For a more nuclear
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
-#.idea/
+#.idea/
+
+.env
app.py
CHANGED
@@ -1,24 +1,25 @@
+import os
 import gradio as gr
+
 import numpy as np
-import matplotlib.pyplot as plt
+import json
+import redis
 from PIL import Image
 import time
+import plotly.graph_objects as go
 from datetime import datetime
-import json
-import redis
+from kit import compute_performance, compute_quality
+import dotenv
+
+dotenv.load_dotenv()
 
-# Redis configuration
-REDIS_HOST = "redis-17482.c73.us-east-1-2.ec2.redns.redis-cloud.com"
-REDIS_PORT = 17482
-REDIS_USERNAME = "default"
-REDIS_PASSWORD = "7gdBhkyX1duixq5J3Mr14kYxUhWugoYk"
 
 # Connect to Redis
 redis_client = redis.Redis(
-    host=REDIS_HOST,
-    port=REDIS_PORT,
-    username=REDIS_USERNAME,
-    password=REDIS_PASSWORD,
+    host=os.getenv("REDIS_HOST"),
+    port=os.getenv("REDIS_PORT"),
+    username=os.getenv("REDIS_USERNAME"),
+    password=os.getenv("REDIS_PASSWORD"),
     decode_responses=True,
 )
@@ -38,54 +39,65 @@ def get_submissions_from_redis():
     return [json.loads(submission) for submission in submissions]
 
 
-def decode(image):
-    # TODO: Replace with actual decode function
-    time.sleep(2)  # Simulate processing time
-    return np.random.random()
-
-
-def metric(image):
-    # TODO: Replace with actual metric function
-    time.sleep(2)  # Simulate processing time
-    return np.random.random()
-
-
 def update_leaderboard(submissions):
     names = [sub["name"] for sub in submissions]
     performances = [float(sub["performance"]) for sub in submissions]
     qualities = [float(sub["quality"]) for sub in submissions]
 
-    ...
+    # Create scatter plot
+    fig = go.Figure()
+
+    fig.add_trace(
+        go.Scatter(
+            x=qualities,
+            y=performances,
+            mode="markers+text",
+            text=names,
+            textposition="top center",
+            name="Submissions",
+        )
+    )
 
-    # ...
+    # Add circles
     circle_radii = np.linspace(0, 1, 5)
     for radius in circle_radii:
-        ...
+        theta = np.linspace(0, 2 * np.pi, 100)
+        x = radius * np.cos(theta)
+        y = radius * np.sin(theta)
+        fig.add_trace(
+            go.Scatter(
+                x=x,
+                y=y,
+                mode="lines",
+                line=dict(color="gray", dash="dash"),
+                showlegend=False,
+            )
+        )
 
-    ...
+    # Update layout
+    fig.update_layout(
+        title="Submissions Leaderboard",
+        xaxis_title="Quality",
+        yaxis_title="Performance",
+        xaxis=dict(range=[0, 1]),
+        yaxis=dict(range=[0, 1]),
+        width=600,
+        height=600,
+    )
 
     return fig
 
 
 def process_submission(name, image):
-    ...
+    original_image = Image.open("./image.png")
     progress = gr.Progress()
     progress(0, desc="Processing")
-    time.sleep(...)
-    ...
-    progress(0.75, desc="Saving results")
+    time.sleep(0.5)
+    progress(0.1, desc="Decoding")
+    performance = compute_performance(image)
+    progress(0.6, desc="Computing metric")
+    quality = compute_quality(image, original_image)
+    progress(0.9, desc="Saving results")
     save_to_redis(name, performance, quality)
 
     submissions = get_submissions_from_redis()
@@ -176,7 +188,7 @@ def create_interface():
 
     submit_btn.click(lambda: gr.Tabs(selected="submit"), None, tabs)
 
-    upload_btn.click(
+    upload_btn.click(lambda: gr.Tabs(selected="leaderboard"), None, tabs).then(
         upload_and_evaluate,
         inputs=[name_input, uploaded_image],
         outputs=[
@@ -186,7 +198,7 @@ def create_interface():
             performance_output,
             quality_output,
         ],
-    )
+    )
 
     demo.load(
         lambda: [
kit/{metric.py → __init__.py}
RENAMED
@@ -1,7 +1,74 @@
 import os
-import ...
-
-from metrics import (
+import io
+import numpy as np
+import onnxruntime as ort
+from PIL import Image
+import dotenv
+
+dotenv.load_dotenv()
+
+GT_MESSAGE = os.environ["GT_MESSAGE"]
+
+
+QUALITY_COEFFICIENTS = {
+    "psnr": -0.0022186489180419534,
+    "ssim": -0.11337077856710862,
+    "nmi": -0.09878221979274945,
+    "lpips": 0.3412626374646173,
+    "aesthetics": 0.044982716146790995,
+    "artifacts": -0.1437622439320745,
+}
+
+QUALITY_OFFSETS = {
+    "psnr": 43.54757854447622,
+    "ssim": 0.984229018845295,
+    "nmi": 1.7536553655336136,
+    "lpips": 0.014247652621287854,
+    "aesthetics": -0.005184057521820067,
+    "artifacts": -0.0015724570911377733,
+}
+
+
+def compute_performance(image):
+    session_options = ort.SessionOptions()
+    session_options.intra_op_num_threads = 1
+    session_options.inter_op_num_threads = 1
+    session_options.log_severity_level = 3
+    model = ort.InferenceSession(
+        "./kit/models/stable_signature.onnx",
+        sess_options=session_options,
+    )
+    inputs = np.stack(
+        [
+            (
+                (
+                    np.array(
+                        image,
+                        dtype=np.float32,
+                    )
+                    / 255.0
+                    - [0.485, 0.456, 0.406]
+                )
+                / [0.229, 0.224, 0.225]
+            )
+            .transpose((2, 0, 1))
+            .astype(np.float32)
+        ],
+        axis=0,
+    )
+
+    outputs = model.run(
+        None,
+        {
+            "image": inputs,
+        },
+    )
+    decoded = (outputs[0] > 0).astype(int)[0]
+    gt_message = np.array([int(bit) for bit in GT_MESSAGE])
+    return 1 - np.mean(gt_message != decoded)
+
+
+from .metrics import (
     compute_image_distance_repeated,
     load_perceptual_models,
     compute_perceptual_metric_repeated,
@@ -9,100 +76,44 @@ from metrics import (
     compute_aesthetics_and_artifacts_scores,
 )
 
-QUALITY_METRICS = {
-    "legacy_fid": "Legacy FID",
-    "clip_fid": "CLIP FID",
-    "psnr": "PSNR",
-    "ssim": "SSIM",
-    "nmi": "Normed Mutual-Info",
-    "lpips": "LPIPS",
-    "aesthetics": "Delta Aesthetics",
-    "artifacts": "Delta Artifacts",
-}
-
 
-QUALITY_COEFFICIENTS = {
-    "legacy_fid": 1.52e-3,
-    "clip_fid": 5.07e-3,
-    "psnr": -2.22e-3,
-    "ssim": -1.13e-1,
-    "nmi": -9.88e-2,
-    "lpips": 3.41e-1,
-    "aesthetics": 4.50e-2,
-    "artifacts": -1.44e-1,
-}
 
-def ...(quality_dict):
-    result = 0
-    for key, value in quality_dict.items():
-        result += value[0] * QUALITY_COEFFICIENTS[key]
-    return result
 
-
-def metric(attacked_image, clean_image, quiet=True):
-    modes = QUALITY_METRICS.keys()
-    modes = (
-        [
-            mode
-            for mode in modes
-            if mode not in ["aesthetics", "artifacts", "clip_score"]
-        ]
-        + ["aesthetics_and_artifacts"]
-        + (["clip_score"] if "clip_score" in modes else [])
-    )
+def compute_quality(attacked_image, clean_image, quiet=True):
+    # Compress the image
+    buffer = io.BytesIO()
+    attacked_image.save(buffer, format="JPEG", quality=90)
+    buffer.seek(0)
+
+    # Update attacked_image with the compressed version
+    attacked_image = Image.open(buffer)
+
+    modes = ["psnr", "ssim", "nmi", "lpips", "aesthetics_and_artifacts"]
 
     results = {}
     for mode in modes:
-        if mode in ["legacy_fid", "clip_fid"]:
-            results[mode] = [0.0]
-            # num_gpus = torch.cuda.device_count()
-            # if num_gpus == 0:
-            #     raise RuntimeError("No GPUs available for processing")
-            # if not quiet:
-            #     print(f"Using {num_gpus} GPUs for processing")
-            # metric = float(
-            #     compute_fid(
-            #         clean_path,
-            #         attacked_path,
-            #         mode=mode.split("_")[0],
-            #         device=torch.device("cuda"),
-            #         batch_size=64,
-            #         num_workers=8,
-            #         verbose=not quiet,
-            #     )
-            # )
-            # return {
-            #     idx: metric
-            #     for idx in existence_to_indices(
-            #         check_file_existence(attacked_path, name_pattern="{}.png", limit=limit),
-            #         limit=limit,
-            #     )
-            # }
-
-        elif mode in ["psnr", "ssim", "nmi"]:
+        if mode in ["psnr", "ssim", "nmi"]:
             metrics = compute_image_distance_repeated(
                 [clean_image],
                 [attacked_image],
                 metric_name=mode,
-                num_workers=...,
+                num_workers=1,
                 verbose=not quiet,
             )
             results[mode] = metrics
 
-        elif mode ...:
+        elif mode == "lpips":
             model = load_perceptual_models(
                 mode,
-                mode="alex"
-                device="cpu"
+                mode="alex",
+                device="cpu",
             )
             metrics = compute_perceptual_metric_repeated(
                 [clean_image],
                 [attacked_image],
                 metric_name=mode,
-                mode="alex"
+                mode="alex",
                 model=model,
-                device="cpu"
+                device="cpu",
             )
             results[mode] = metrics
 
@@ -111,14 +122,19 @@ def metric(attacked_image, clean_image, quiet=True):
         aesthetics, artifacts = compute_aesthetics_and_artifacts_scores(
             [attacked_image],
             model,
-            device="cpu"
+            device="cpu",
         )
         clean_aesthetics, clean_artifacts = compute_aesthetics_and_artifacts_scores(
             [attacked_image],
             model,
-            device="cpu"
+            device="cpu",
         )
-        results["aesthetics"] = [clean_aesthetics[0]-aesthetics[0]]
-        results["artifacts"] = [clean_artifacts[0]-artifacts[0]]
-
-    return ...
+        results["aesthetics"] = [clean_aesthetics[0] - aesthetics[0]]
+        results["artifacts"] = [clean_artifacts[0] - artifacts[0]]
+
+    normalized_quality = 0
+    for key, value in results.items():
+        normalized_quality += (value[0] - QUALITY_OFFSETS[key]) * QUALITY_COEFFICIENTS[
+            key
+        ]
+    return normalized_quality
kit/decode.py
DELETED
@@ -1,80 +0,0 @@
-import os
-import numpy as np
-import onnxruntime as ort
-from PIL import ImageOps
-
-
-def init_model(mode, gpu):
-    if mode == "stable_sig":
-        session_options = ort.SessionOptions()
-        session_options.intra_op_num_threads = 1
-        session_options.inter_op_num_threads = 1
-        session_options.log_severity_level = 3
-        return ort.InferenceSession(
-            os.path.join("models", "stable_signature.onnx"),  # CHENGHAO: may need to modifiy the path
-            # providers=["CUDAExecutionProvider"],
-            # provider_options=[{"device_id": str(gpu)}],
-            sess_options=session_options,
-        )
-    elif mode == "stegastamp":
-        session_options = ort.SessionOptions()
-        session_options.intra_op_num_threads = 1
-        session_options.inter_op_num_threads = 1
-        session_options.log_severity_level = 3
-        return ort.InferenceSession(
-            os.path.join("models", "stega_stamp.onnx"),  # CHENGHAO: may need to modifiy the path
-            # providers=["CUDAExecutionProvider"],
-            # provider_options=[{"device_id": str(gpu)}],
-            sess_options=session_options,
-        )
-
-
-def decode(image, mode="stable_sig"):
-    model = init_model(mode, 0)
-    inputs = np.stack(
-        [
-            (
-                (
-                    np.array(
-                        image,
-                        dtype=np.float32,
-                    )
-                    / 255.0
-                    - [0.485, 0.456, 0.406]
-                )
-                / [0.229, 0.224, 0.225]
-            )
-            .transpose((2, 0, 1))
-            .astype(np.float32)
-        ],
-        axis=0,
-    ) if mode == "stable_sig" else np.stack(
-        [
-            np.array(
-                ImageOps.fit(
-                    image, (400, 400)
-                ),
-                dtype=np.float32,
-            )
-            / 255.0
-        ],
-        axis=0,
-    )  # if mode == "stegastamp"
-
-    if mode == "stable_sig":
-        outputs = model.run(
-            None,
-            {
-                "image": inputs,
-            },
-        )
-        return (outputs[0] > 0).astype(int)[0]
-    elif mode == "stegastamp":
-        outputs = model.run(
-            None,
-            {
-                "image": inputs,
-                "secret": np.zeros((inputs.shape[0], 100), dtype=np.float32),
-            },
-        )
-        return outputs[2].astype(int)[0]
kit/metrics/perceptual.py
CHANGED
@@ -1,12 +1,37 @@
 import torch
-import numpy as np
 from PIL import Image
-from ...
-from utils import to_tensor
+from torchvision import transforms
 from .lpips import LPIPS
 from .watson import LossProvider
 
 
+# Normalize image tensors
+def normalize_tensor(images, norm_type):
+    assert norm_type in ["imagenet", "naive"]
+    # Two possible normalization conventions
+    if norm_type == "imagenet":
+        mean = [0.485, 0.456, 0.406]
+        std = [0.229, 0.224, 0.225]
+        normalize = transforms.Normalize(mean, std)
+    elif norm_type == "naive":
+        mean = [0.5, 0.5, 0.5]
+        std = [0.5, 0.5, 0.5]
+        normalize = transforms.Normalize(mean, std)
+    else:
+        assert False
+    return torch.stack([normalize(image) for image in images])
+
+
+def to_tensor(images, norm_type="naive"):
+    assert isinstance(images, list) and all(
+        [isinstance(image, Image.Image) for image in images]
+    )
+    images = torch.stack([transforms.ToTensor()(image) for image in images])
+    if norm_type is not None:
+        images = normalize_tensor(images, norm_type)
+    return images
+
+
 def load_perceptual_models(metric_name, mode, device=torch.device("cuda")):
     assert metric_name in ["lpips", "watson"]
     if metric_name == "lpips":
kit/test.py
DELETED
@@ -1,20 +0,0 @@
-from decode import decode
-from metric import metric
-from PIL import Image
-
-def main(attacked_image_path):
-    attacked_image = Image.open(attacked_image_path)
-    clean_image = Image.open("../image.png")
-
-    decode_result = decode(
-        attacked_image,
-    )
-    print("Decode: ", decode_result)
-    metric_result = metric(
-        attacked_image,
-        clean_image,
-    )
-    print("Metric: ", metric_result)
-
-if __name__ == "__main__":
-    main("../attacked_image.png")
requirements.txt
CHANGED
@@ -1,9 +1,14 @@
 gradio
+onnxruntime
+torch
+torchvision
+transformers
+open_clip_torch
 numpy
-matplotlib
 Pillow
-scikit-image
 redis
+plotly
+python-dotenv
 
 # dev
 jupyterlab
test.ipynb
CHANGED
@@ -15,22 +15,20 @@
     ],
     "source": [
      "# Print all submissions in Redis\n",
-     "\n",
+     "import os\n",
      "import redis\n",
      "import json\n",
+     "import dotenv\n",
+     "\n",
+     "dotenv.load_dotenv()\n",
      "\n",
-     "# Redis configuration\n",
-     "REDIS_HOST = \"redis-17482.c73.us-east-1-2.ec2.redns.redis-cloud.com\"\n",
-     "REDIS_PORT = 17482\n",
-     "REDIS_USERNAME = \"default\"\n",
-     "REDIS_PASSWORD = \"7gdBhkyX1duixq5J3Mr14kYxUhWugoYk\"\n",
      "\n",
      "# Connect to Redis\n",
      "redis_client = redis.Redis(\n",
-     "    host=REDIS_HOST,\n",
-     "    port=REDIS_PORT,\n",
-     "    username=REDIS_USERNAME,\n",
-     "    password=REDIS_PASSWORD,\n",
+     "    host=os.getenv(\"REDIS_HOST\"),\n",
+     "    port=os.getenv(\"REDIS_PORT\"),\n",
+     "    username=os.getenv(\"REDIS_USERNAME\"),\n",
+     "    password=os.getenv(\"REDIS_PASSWORD\"),\n",
      "    decode_responses=True,\n",
      ")\n",
      "\n",