Spaces:
Runtime error
Runtime error
haoning.wu
commited on
Commit
•
bba21a6
1
Parent(s):
e63f3e2
update format
Browse files- app.py +1 -1
- q_align/.ipynb_checkpoints/utils-checkpoint.py +0 -128
- q_align/evaluate/.ipynb_checkpoints/iaa_eval-checkpoint.py +0 -164
- q_align/evaluate/.ipynb_checkpoints/iqa4vqa_eval-checkpoint.py +0 -150
- q_align/evaluate/.ipynb_checkpoints/iqa_eval-checkpoint.py +0 -156
- q_align/evaluate/.ipynb_checkpoints/scorer-checkpoint.py +0 -155
- q_align/evaluate/.ipynb_checkpoints/vqa_eval-checkpoint.py +0 -167
- q_align/train/.ipynb_checkpoints/train-checkpoint.py +0 -844
app.py
CHANGED
@@ -64,7 +64,7 @@ def image_classifier(input_img, input_vid, scorer_type):
|
|
64 |
|
65 |
title_markdown = ("""
|
66 |
|
67 |
-
<
|
68 |
|
69 |
<h3 align="center"> One Unified Model for Visual scoring. </h3>
|
70 |
|
|
|
64 |
|
65 |
title_markdown = ("""
|
66 |
|
67 |
+
<h3 align="center">Q-Align: Teaching LMMs for Visual Scoring via Discrete Text-Defined Levels</h3>
|
68 |
|
69 |
<h3 align="center"> One Unified Model for Visual scoring. </h3>
|
70 |
|
q_align/.ipynb_checkpoints/utils-checkpoint.py
DELETED
@@ -1,128 +0,0 @@
|
|
1 |
-
import datetime
|
2 |
-
import logging
|
3 |
-
import logging.handlers
|
4 |
-
import os
|
5 |
-
import sys
|
6 |
-
|
7 |
-
import requests
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
from q_align.constants import LOGDIR
|
12 |
-
|
13 |
-
server_error_msg = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
|
14 |
-
moderation_msg = "YOUR INPUT VIOLATES OUR CONTENT MODERATION GUIDELINES. PLEASE TRY AGAIN."
|
15 |
-
|
16 |
-
handler = None
|
17 |
-
|
18 |
-
|
19 |
-
def build_logger(logger_name, logger_filename):
|
20 |
-
global handler
|
21 |
-
|
22 |
-
formatter = logging.Formatter(
|
23 |
-
fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
|
24 |
-
datefmt="%Y-%m-%d %H:%M:%S",
|
25 |
-
)
|
26 |
-
|
27 |
-
# Set the format of root handlers
|
28 |
-
if not logging.getLogger().handlers:
|
29 |
-
logging.basicConfig(level=logging.INFO)
|
30 |
-
logging.getLogger().handlers[0].setFormatter(formatter)
|
31 |
-
|
32 |
-
# Redirect stdout and stderr to loggers
|
33 |
-
stdout_logger = logging.getLogger("stdout")
|
34 |
-
stdout_logger.setLevel(logging.INFO)
|
35 |
-
sl = StreamToLogger(stdout_logger, logging.INFO)
|
36 |
-
sys.stdout = sl
|
37 |
-
|
38 |
-
stderr_logger = logging.getLogger("stderr")
|
39 |
-
stderr_logger.setLevel(logging.ERROR)
|
40 |
-
sl = StreamToLogger(stderr_logger, logging.ERROR)
|
41 |
-
sys.stderr = sl
|
42 |
-
|
43 |
-
# Get logger
|
44 |
-
logger = logging.getLogger(logger_name)
|
45 |
-
logger.setLevel(logging.INFO)
|
46 |
-
|
47 |
-
# Add a file handler for all loggers
|
48 |
-
if handler is None:
|
49 |
-
os.makedirs(LOGDIR, exist_ok=True)
|
50 |
-
filename = os.path.join(LOGDIR, logger_filename)
|
51 |
-
handler = logging.handlers.TimedRotatingFileHandler(
|
52 |
-
filename, when='D', utc=True)
|
53 |
-
handler.setFormatter(formatter)
|
54 |
-
|
55 |
-
for name, item in logging.root.manager.loggerDict.items():
|
56 |
-
if isinstance(item, logging.Logger):
|
57 |
-
item.addHandler(handler)
|
58 |
-
|
59 |
-
return logger
|
60 |
-
|
61 |
-
|
62 |
-
class StreamToLogger(object):
|
63 |
-
"""
|
64 |
-
Fake file-like stream object that redirects writes to a logger instance.
|
65 |
-
"""
|
66 |
-
def __init__(self, logger, log_level=logging.INFO):
|
67 |
-
self.terminal = sys.stdout
|
68 |
-
self.logger = logger
|
69 |
-
self.log_level = log_level
|
70 |
-
self.linebuf = ''
|
71 |
-
|
72 |
-
def __getattr__(self, attr):
|
73 |
-
return getattr(self.terminal, attr)
|
74 |
-
|
75 |
-
def write(self, buf):
|
76 |
-
temp_linebuf = self.linebuf + buf
|
77 |
-
self.linebuf = ''
|
78 |
-
for line in temp_linebuf.splitlines(True):
|
79 |
-
# From the io.TextIOWrapper docs:
|
80 |
-
# On output, if newline is None, any '\n' characters written
|
81 |
-
# are translated to the system default line separator.
|
82 |
-
# By default sys.stdout.write() expects '\n' newlines and then
|
83 |
-
# translates them so this is still cross platform.
|
84 |
-
if line[-1] == '\n':
|
85 |
-
self.logger.log(self.log_level, line.rstrip())
|
86 |
-
else:
|
87 |
-
self.linebuf += line
|
88 |
-
|
89 |
-
def flush(self):
|
90 |
-
if self.linebuf != '':
|
91 |
-
self.logger.log(self.log_level, self.linebuf.rstrip())
|
92 |
-
self.linebuf = ''
|
93 |
-
|
94 |
-
|
95 |
-
def disable_torch_init():
|
96 |
-
"""
|
97 |
-
Disable the redundant torch default initialization to accelerate model creation.
|
98 |
-
"""
|
99 |
-
import torch
|
100 |
-
setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
|
101 |
-
setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
|
102 |
-
|
103 |
-
|
104 |
-
def violates_moderation(text):
|
105 |
-
"""
|
106 |
-
Check whether the text violates OpenAI moderation API.
|
107 |
-
"""
|
108 |
-
url = "https://api.openai.com/v1/moderations"
|
109 |
-
headers = {"Content-Type": "application/json",
|
110 |
-
"Authorization": "Bearer " + os.environ["OPENAI_API_KEY"]}
|
111 |
-
text = text.replace("\n", "")
|
112 |
-
data = "{" + '"input": ' + f'"{text}"' + "}"
|
113 |
-
data = data.encode("utf-8")
|
114 |
-
try:
|
115 |
-
ret = requests.post(url, headers=headers, data=data, timeout=5)
|
116 |
-
flagged = ret.json()["results"][0]["flagged"]
|
117 |
-
except requests.exceptions.RequestException as e:
|
118 |
-
flagged = False
|
119 |
-
except KeyError as e:
|
120 |
-
flagged = False
|
121 |
-
|
122 |
-
return flagged
|
123 |
-
|
124 |
-
|
125 |
-
def pretty_print_semaphore(semaphore):
|
126 |
-
if semaphore is None:
|
127 |
-
return "None"
|
128 |
-
return f"Semaphore(value={semaphore._value}, locked={semaphore.locked()})"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
q_align/evaluate/.ipynb_checkpoints/iaa_eval-checkpoint.py
DELETED
@@ -1,164 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
import torch
|
3 |
-
|
4 |
-
from q_align.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
|
5 |
-
from q_align.conversation import conv_templates, SeparatorStyle
|
6 |
-
from q_align.model.builder import load_pretrained_model
|
7 |
-
from q_align.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
|
8 |
-
|
9 |
-
from PIL import Image
|
10 |
-
from PIL import ImageFile
|
11 |
-
ImageFile.LOAD_TRUNCATED_IMAGES = True
|
12 |
-
|
13 |
-
import requests
|
14 |
-
from PIL import Image
|
15 |
-
from io import BytesIO
|
16 |
-
from transformers import TextStreamer
|
17 |
-
|
18 |
-
from scipy.stats import spearmanr, pearsonr
|
19 |
-
|
20 |
-
|
21 |
-
import json
|
22 |
-
from tqdm import tqdm
|
23 |
-
from collections import defaultdict
|
24 |
-
|
25 |
-
import os
|
26 |
-
|
27 |
-
def wa5(logits):
|
28 |
-
import numpy as np
|
29 |
-
logprobs = np.array([logits["excellent"], logits["good"], logits["fair"], logits["poor"], logits["bad"]])
|
30 |
-
probs = np.exp(logprobs) / np.sum(np.exp(logprobs))
|
31 |
-
return np.inner(probs, np.array([1,0.75,0.5,0.25,0.]))
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
def disable_torch_init():
|
37 |
-
"""
|
38 |
-
Disable the redundant torch default initialization to accelerate model creation.
|
39 |
-
"""
|
40 |
-
import torch
|
41 |
-
setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
|
42 |
-
setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
|
43 |
-
|
44 |
-
|
45 |
-
def load_image(image_file):
|
46 |
-
if image_file.startswith('http://') or image_file.startswith('https://'):
|
47 |
-
response = requests.get(image_file)
|
48 |
-
image = Image.open(BytesIO(response.content)).convert('RGB')
|
49 |
-
else:
|
50 |
-
image = Image.open(image_file).convert('RGB')
|
51 |
-
return image
|
52 |
-
|
53 |
-
|
54 |
-
def main(args):
|
55 |
-
# Model
|
56 |
-
disable_torch_init()
|
57 |
-
|
58 |
-
model_name = get_model_name_from_path(args.model_path)
|
59 |
-
tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit, device=args.device)
|
60 |
-
|
61 |
-
|
62 |
-
import json
|
63 |
-
|
64 |
-
|
65 |
-
image_path = "playground/data/"
|
66 |
-
|
67 |
-
|
68 |
-
json_prefix = "playground/data/test_jsons/"
|
69 |
-
jsons = [
|
70 |
-
json_prefix + "test_ava.json",
|
71 |
-
]
|
72 |
-
|
73 |
-
os.makedirs(f"results/{args.model_path}/", exist_ok=True)
|
74 |
-
|
75 |
-
|
76 |
-
conv_mode = "mplug_owl2"
|
77 |
-
|
78 |
-
inp = "How would you rate the aesthetics of this image?"
|
79 |
-
|
80 |
-
conv = conv_templates[conv_mode].copy()
|
81 |
-
inp = DEFAULT_IMAGE_TOKEN + inp
|
82 |
-
conv.append_message(conv.roles[0], inp)
|
83 |
-
image = None
|
84 |
-
|
85 |
-
conv.append_message(conv.roles[1], None)
|
86 |
-
prompt = conv.get_prompt() + " The aesthetics of the image is"
|
87 |
-
|
88 |
-
toks = ["good", "poor", "high", "fair", "low", "excellent", "bad", "fine", "moderate", "decent", "average", "medium", "acceptable"]
|
89 |
-
print(toks)
|
90 |
-
ids_ = [id_[1] for id_ in tokenizer(toks)["input_ids"]]
|
91 |
-
print(ids_)
|
92 |
-
|
93 |
-
input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(args.device)
|
94 |
-
|
95 |
-
for json_ in jsons:
|
96 |
-
with open(json_) as f:
|
97 |
-
iqadata = json.load(f)
|
98 |
-
|
99 |
-
image_tensors = []
|
100 |
-
batch_data = []
|
101 |
-
prs, gts = [], []
|
102 |
-
for i, llddata in enumerate(tqdm(iqadata, desc="Evaluating [{}]".format(json_.split("/")[-1]))):
|
103 |
-
filename = llddata["image"]
|
104 |
-
llddata["logits"] = defaultdict(float)
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
image = load_image(image_path + filename)
|
109 |
-
def expand2square(pil_img, background_color):
|
110 |
-
width, height = pil_img.size
|
111 |
-
if width == height:
|
112 |
-
return pil_img
|
113 |
-
elif width > height:
|
114 |
-
result = Image.new(pil_img.mode, (width, width), background_color)
|
115 |
-
result.paste(pil_img, (0, (width - height) // 2))
|
116 |
-
return result
|
117 |
-
else:
|
118 |
-
result = Image.new(pil_img.mode, (height, height), background_color)
|
119 |
-
result.paste(pil_img, ((height - width) // 2, 0))
|
120 |
-
return result
|
121 |
-
image = expand2square(image, tuple(int(x*255) for x in image_processor.image_mean))
|
122 |
-
image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'].half().to(args.device)
|
123 |
-
|
124 |
-
image_tensors.append(image_tensor)
|
125 |
-
batch_data.append(llddata)
|
126 |
-
|
127 |
-
if i % 8 == 7 or i == len(iqadata) - 1:
|
128 |
-
with torch.inference_mode():
|
129 |
-
output_logits = model(input_ids.repeat(len(image_tensors), 1),
|
130 |
-
images=torch.cat(image_tensors, 0))["logits"][:,-1]
|
131 |
-
|
132 |
-
for j, xllddata in enumerate(batch_data):
|
133 |
-
for tok, id_ in zip(toks, ids_):
|
134 |
-
xllddata["logits"][tok] += output_logits[j,id_].item()
|
135 |
-
xllddata["score"] = wa5(xllddata["logits"])
|
136 |
-
# print(llddata)
|
137 |
-
prs.append(xllddata["score"])
|
138 |
-
gts.append(xllddata["gt_score"])
|
139 |
-
json_ = json_.replace("combined/", "combined-")
|
140 |
-
with open(f"results/{args.model_path}/{json_.split('/')[-1]}", "a") as wf:
|
141 |
-
json.dump(xllddata, wf)
|
142 |
-
|
143 |
-
image_tensors = []
|
144 |
-
batch_data = []
|
145 |
-
|
146 |
-
#if i > 0 and i % 200 == 0:
|
147 |
-
# print(spearmanr(prs,gts)[0], pearsonr(prs,gts)[0])
|
148 |
-
print("Spearmanr", spearmanr(prs,gts)[0], "Pearson", pearsonr(prs,gts)[0])
|
149 |
-
|
150 |
-
|
151 |
-
if __name__ == "__main__":
|
152 |
-
parser = argparse.ArgumentParser()
|
153 |
-
parser.add_argument("--model-path", type=str, default="q-future/one-align")
|
154 |
-
parser.add_argument("--model-base", type=str, default=None)
|
155 |
-
parser.add_argument("--device", type=str, default="cuda:0")
|
156 |
-
parser.add_argument("--conv-mode", type=str, default=None)
|
157 |
-
parser.add_argument("--temperature", type=float, default=0.2)
|
158 |
-
parser.add_argument("--max-new-tokens", type=int, default=512)
|
159 |
-
parser.add_argument("--load-8bit", action="store_true")
|
160 |
-
parser.add_argument("--load-4bit", action="store_true")
|
161 |
-
parser.add_argument("--debug", action="store_true")
|
162 |
-
parser.add_argument("--image-aspect-ratio", type=str, default='pad')
|
163 |
-
args = parser.parse_args()
|
164 |
-
main(args)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
q_align/evaluate/.ipynb_checkpoints/iqa4vqa_eval-checkpoint.py
DELETED
@@ -1,150 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
import torch
|
3 |
-
|
4 |
-
from q_align.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
|
5 |
-
from q_align.conversation import conv_templates, SeparatorStyle
|
6 |
-
from q_align.model.builder import load_pretrained_model
|
7 |
-
from q_align.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
|
8 |
-
|
9 |
-
from PIL import Image
|
10 |
-
|
11 |
-
import requests
|
12 |
-
from PIL import Image
|
13 |
-
from io import BytesIO
|
14 |
-
from transformers import TextStreamer
|
15 |
-
|
16 |
-
from decord import VideoReader
|
17 |
-
|
18 |
-
|
19 |
-
import json
|
20 |
-
from tqdm import tqdm
|
21 |
-
from collections import defaultdict
|
22 |
-
|
23 |
-
import os
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
def disable_torch_init():
|
29 |
-
"""
|
30 |
-
Disable the redundant torch default initialization to accelerate model creation.
|
31 |
-
"""
|
32 |
-
import torch
|
33 |
-
setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
|
34 |
-
setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
|
35 |
-
|
36 |
-
|
37 |
-
def load_video(video_file):
|
38 |
-
vr = VideoReader(video_file)
|
39 |
-
|
40 |
-
# Get video frame rate
|
41 |
-
fps = vr.get_avg_fps()
|
42 |
-
|
43 |
-
# Calculate frame indices for 1fps
|
44 |
-
frame_indices = [int(fps * i) for i in range(int(len(vr) / fps))]
|
45 |
-
frames = vr.get_batch(frame_indices).asnumpy()
|
46 |
-
return [Image.fromarray(frames[i]) for i in range(int(len(vr) / fps))]
|
47 |
-
|
48 |
-
|
49 |
-
def main(args):
|
50 |
-
# Model
|
51 |
-
disable_torch_init()
|
52 |
-
|
53 |
-
model_name = get_model_name_from_path(args.model_path)
|
54 |
-
tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit, device=args.device)
|
55 |
-
|
56 |
-
|
57 |
-
import json
|
58 |
-
|
59 |
-
|
60 |
-
image_paths = [
|
61 |
-
"playground/data/",
|
62 |
-
"playground/data/",
|
63 |
-
"playground/data/KoNViD_1k_videos/",
|
64 |
-
"playground/data/maxwell/",
|
65 |
-
]
|
66 |
-
|
67 |
-
json_prefix = "playground/data/test_jsons/"
|
68 |
-
jsons = [
|
69 |
-
json_prefix + "test_lsvq.json",
|
70 |
-
json_prefix + "test_lsvq_1080p.json",
|
71 |
-
json_prefix + "konvid.json",
|
72 |
-
json_prefix + "maxwell_test.json",
|
73 |
-
]
|
74 |
-
|
75 |
-
os.makedirs(f"results/{args.model_path}/", exist_ok=True)
|
76 |
-
|
77 |
-
|
78 |
-
conv_mode = "mplug_owl2"
|
79 |
-
|
80 |
-
inp = "How would you rate the quality of this image?"
|
81 |
-
|
82 |
-
conv = conv_templates[conv_mode].copy()
|
83 |
-
inp = inp + "\n" + DEFAULT_IMAGE_TOKEN
|
84 |
-
conv.append_message(conv.roles[0], inp)
|
85 |
-
image = None
|
86 |
-
|
87 |
-
conv.append_message(conv.roles[1], None)
|
88 |
-
prompt = conv.get_prompt() + " The quality of the image is"
|
89 |
-
|
90 |
-
toks = ["good", "poor", "high", "fair", "low", "excellent", "bad", "fine", "moderate", "decent", "average", "medium", "acceptable"]
|
91 |
-
print(toks)
|
92 |
-
ids_ = [id_[1] for id_ in tokenizer(toks)["input_ids"]]
|
93 |
-
print(ids_)
|
94 |
-
|
95 |
-
input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(args.device)
|
96 |
-
|
97 |
-
for image_path, json_ in zip(image_paths, jsons):
|
98 |
-
with open(json_) as f:
|
99 |
-
iqadata = json.load(f)
|
100 |
-
try:
|
101 |
-
for i, llddata in enumerate(tqdm(iqadata, desc="Evaluating [{}]".format(json_.split("/")[-1]))):
|
102 |
-
filename = llddata["img_path"]
|
103 |
-
llddata["logits"] = defaultdict(float)
|
104 |
-
|
105 |
-
image = load_video(image_path + filename)
|
106 |
-
def expand2square(pil_img, background_color):
|
107 |
-
width, height = pil_img.size
|
108 |
-
if width == height:
|
109 |
-
return pil_img
|
110 |
-
elif width > height:
|
111 |
-
result = Image.new(pil_img.mode, (width, width), background_color)
|
112 |
-
result.paste(pil_img, (0, (width - height) // 2))
|
113 |
-
return result
|
114 |
-
else:
|
115 |
-
result = Image.new(pil_img.mode, (height, height), background_color)
|
116 |
-
result.paste(pil_img, ((height - width) // 2, 0))
|
117 |
-
return result
|
118 |
-
image = [expand2square(img, tuple(int(x*255) for x in image_processor.image_mean)) for img in image]
|
119 |
-
image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'].half().to(args.device)
|
120 |
-
|
121 |
-
|
122 |
-
if True:
|
123 |
-
with torch.inference_mode():
|
124 |
-
output_logits = model(input_ids.repeat(image_tensor.shape[0], 1),
|
125 |
-
images=image_tensor)["logits"][:,-1]
|
126 |
-
|
127 |
-
for tok, id_ in zip(toks, ids_):
|
128 |
-
llddata["logits"][tok] += output_logits.mean(0)[id_].item()
|
129 |
-
# print(llddata)
|
130 |
-
json_ = json_.replace("combined/", "combined-")
|
131 |
-
with open(f"results/{args.model_path}/{json_.split('/')[-1]}", "a") as wf:
|
132 |
-
json.dump(llddata, wf)
|
133 |
-
except:
|
134 |
-
continue
|
135 |
-
|
136 |
-
|
137 |
-
if __name__ == "__main__":
|
138 |
-
parser = argparse.ArgumentParser()
|
139 |
-
parser.add_argument("--model-path", type=str, default="q-future/q-align-image")
|
140 |
-
parser.add_argument("--model-base", type=str, default=None)
|
141 |
-
parser.add_argument("--device", type=str, default="cuda:0")
|
142 |
-
parser.add_argument("--conv-mode", type=str, default=None)
|
143 |
-
parser.add_argument("--temperature", type=float, default=0.2)
|
144 |
-
parser.add_argument("--max-new-tokens", type=int, default=512)
|
145 |
-
parser.add_argument("--load-8bit", action="store_true")
|
146 |
-
parser.add_argument("--load-4bit", action="store_true")
|
147 |
-
parser.add_argument("--debug", action="store_true")
|
148 |
-
parser.add_argument("--image-aspect-ratio", type=str, default='pad')
|
149 |
-
args = parser.parse_args()
|
150 |
-
main(args)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
q_align/evaluate/.ipynb_checkpoints/iqa_eval-checkpoint.py
DELETED
@@ -1,156 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
import torch
|
3 |
-
|
4 |
-
from q_align.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
|
5 |
-
from q_align.conversation import conv_templates, SeparatorStyle
|
6 |
-
from q_align.model.builder import load_pretrained_model
|
7 |
-
from q_align.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
|
8 |
-
|
9 |
-
from PIL import Image
|
10 |
-
|
11 |
-
import requests
|
12 |
-
from PIL import Image
|
13 |
-
from io import BytesIO
|
14 |
-
from transformers import TextStreamer
|
15 |
-
|
16 |
-
import json
|
17 |
-
from tqdm import tqdm
|
18 |
-
from collections import defaultdict
|
19 |
-
|
20 |
-
import os
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
def disable_torch_init():
|
26 |
-
"""
|
27 |
-
Disable the redundant torch default initialization to accelerate model creation.
|
28 |
-
"""
|
29 |
-
import torch
|
30 |
-
setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
|
31 |
-
setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
|
32 |
-
|
33 |
-
|
34 |
-
def load_image(image_file):
|
35 |
-
if image_file.startswith('http://') or image_file.startswith('https://'):
|
36 |
-
response = requests.get(image_file)
|
37 |
-
image = Image.open(BytesIO(response.content)).convert('RGB')
|
38 |
-
else:
|
39 |
-
image = Image.open(image_file).convert('RGB')
|
40 |
-
return image
|
41 |
-
|
42 |
-
|
43 |
-
def main(args):
|
44 |
-
# Model
|
45 |
-
disable_torch_init()
|
46 |
-
|
47 |
-
model_name = get_model_name_from_path(args.model_path)
|
48 |
-
tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit, device=args.device)
|
49 |
-
|
50 |
-
|
51 |
-
import json
|
52 |
-
|
53 |
-
|
54 |
-
image_path = "playground/data/"
|
55 |
-
|
56 |
-
|
57 |
-
json_prefix = "playground/data/test_jsons/"
|
58 |
-
jsons = [
|
59 |
-
json_prefix + "test_imagerewarddb.json",
|
60 |
-
json_prefix + "test_koniq.json",
|
61 |
-
json_prefix + "test_spaq.json",
|
62 |
-
json_prefix + "test_kadid.json",
|
63 |
-
json_prefix + "livec.json",
|
64 |
-
json_prefix + "agi.json",
|
65 |
-
json_prefix + "live.json",
|
66 |
-
json_prefix + "csiq.json",
|
67 |
-
]
|
68 |
-
|
69 |
-
os.makedirs(f"results/{args.model_path}/", exist_ok=True)
|
70 |
-
|
71 |
-
|
72 |
-
conv_mode = "mplug_owl2"
|
73 |
-
|
74 |
-
inp = "Evaluate the image quality of the following image."#"How would you rate the quality of this image?"
|
75 |
-
|
76 |
-
conv = conv_templates[conv_mode].copy()
|
77 |
-
inp = inp + "\n" + DEFAULT_IMAGE_TOKEN
|
78 |
-
conv.append_message(conv.roles[0], inp)
|
79 |
-
image = None
|
80 |
-
|
81 |
-
conv.append_message(conv.roles[1], None)
|
82 |
-
prompt = conv.get_prompt() + " The quality of the image is"
|
83 |
-
|
84 |
-
toks = ["good", "poor", "high", "fair", "low", "excellent", "bad", "fine", "moderate", "decent", "average", "medium", "acceptable"]
|
85 |
-
print(toks)
|
86 |
-
ids_ = [id_[1] for id_ in tokenizer(toks)["input_ids"]]
|
87 |
-
print(ids_)
|
88 |
-
|
89 |
-
input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(args.device)
|
90 |
-
|
91 |
-
for json_ in jsons:
|
92 |
-
with open(json_) as f:
|
93 |
-
iqadata = json.load(f)
|
94 |
-
|
95 |
-
image_tensors = []
|
96 |
-
batch_data = []
|
97 |
-
|
98 |
-
for i, llddata in enumerate(tqdm(iqadata, desc="Evaluating [{}]".format(json_.split("/")[-1]))):
|
99 |
-
if True:
|
100 |
-
try:
|
101 |
-
filename = llddata["image"]
|
102 |
-
except:
|
103 |
-
filename = llddata["img_path"]
|
104 |
-
llddata["logits"] = defaultdict(float)
|
105 |
-
|
106 |
-
image = load_image(image_path + filename)
|
107 |
-
def expand2square(pil_img, background_color):
|
108 |
-
width, height = pil_img.size
|
109 |
-
if width == height:
|
110 |
-
return pil_img
|
111 |
-
elif width > height:
|
112 |
-
result = Image.new(pil_img.mode, (width, width), background_color)
|
113 |
-
result.paste(pil_img, (0, (width - height) // 2))
|
114 |
-
return result
|
115 |
-
else:
|
116 |
-
result = Image.new(pil_img.mode, (height, height), background_color)
|
117 |
-
result.paste(pil_img, ((height - width) // 2, 0))
|
118 |
-
return result
|
119 |
-
image = expand2square(image, tuple(int(x*255) for x in image_processor.image_mean))
|
120 |
-
image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'].half().to(args.device)
|
121 |
-
|
122 |
-
image_tensors.append(image_tensor)
|
123 |
-
batch_data.append(llddata)
|
124 |
-
|
125 |
-
if i % 8 == 7 or i == len(iqadata) - 1:
|
126 |
-
with torch.inference_mode():
|
127 |
-
output_logits = model(input_ids.repeat(len(image_tensors), 1),
|
128 |
-
images=torch.cat(image_tensors, 0))["logits"][:,-1]
|
129 |
-
|
130 |
-
for j, xllddata in enumerate(batch_data):
|
131 |
-
for tok, id_ in zip(toks, ids_):
|
132 |
-
xllddata["logits"][tok] += output_logits[j,id_].item()
|
133 |
-
# print(llddata)
|
134 |
-
json_ = json_.replace("combined/", "combined-")
|
135 |
-
with open(f"results/{args.model_path}/2{json_.split('/')[-1]}", "a") as wf:
|
136 |
-
json.dump(xllddata, wf)
|
137 |
-
|
138 |
-
image_tensors = []
|
139 |
-
batch_data = []
|
140 |
-
|
141 |
-
|
142 |
-
|
143 |
-
if __name__ == "__main__":
|
144 |
-
parser = argparse.ArgumentParser()
|
145 |
-
parser.add_argument("--model-path", type=str, default="q-future/one-align")
|
146 |
-
parser.add_argument("--model-base", type=str, default=None)
|
147 |
-
parser.add_argument("--device", type=str, default="cuda:0")
|
148 |
-
parser.add_argument("--conv-mode", type=str, default=None)
|
149 |
-
parser.add_argument("--temperature", type=float, default=0.2)
|
150 |
-
parser.add_argument("--max-new-tokens", type=int, default=512)
|
151 |
-
parser.add_argument("--load-8bit", action="store_true")
|
152 |
-
parser.add_argument("--load-4bit", action="store_true")
|
153 |
-
parser.add_argument("--debug", action="store_true")
|
154 |
-
parser.add_argument("--image-aspect-ratio", type=str, default='pad')
|
155 |
-
args = parser.parse_args()
|
156 |
-
main(args)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
q_align/evaluate/.ipynb_checkpoints/scorer-checkpoint.py
DELETED
@@ -1,155 +0,0 @@
|
|
1 |
-
from PIL import Image
|
2 |
-
|
3 |
-
import torch.nn as nn
|
4 |
-
import torch
|
5 |
-
|
6 |
-
from typing import List
|
7 |
-
|
8 |
-
from q_align.model.builder import load_pretrained_model
|
9 |
-
|
10 |
-
from q_align.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
|
11 |
-
from q_align.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
|
12 |
-
|
13 |
-
def load_video(video_file):
|
14 |
-
from decord import VideoReader
|
15 |
-
vr = VideoReader(video_file)
|
16 |
-
|
17 |
-
# Get video frame rate
|
18 |
-
fps = vr.get_avg_fps()
|
19 |
-
|
20 |
-
# Calculate frame indices for 1fps
|
21 |
-
frame_indices = [int(fps * i) for i in range(int(len(vr) / fps))]
|
22 |
-
frames = vr.get_batch(frame_indices).asnumpy()
|
23 |
-
return [Image.fromarray(frames[i]) for i in range(int(len(vr) / fps))]
|
24 |
-
|
25 |
-
|
26 |
-
class QAlignScorer(nn.Module):
|
27 |
-
def __init__(self, pretrained="q-future/one-align", device="cuda:0", tokenizer=None, model=None, image_processor=None):
|
28 |
-
super().__init__()
|
29 |
-
if model is None:
|
30 |
-
tokenizer, model, image_processor, _ = load_pretrained_model(pretrained, None, "mplug_owl2", device=device)
|
31 |
-
prompt = "USER: How would you rate the quality of this image?\n<|image|>\nASSISTANT: The quality of the image is"
|
32 |
-
|
33 |
-
self.preferential_ids_ = [id_[1] for id_ in tokenizer(["excellent","good","fair","poor","bad"])["input_ids"]]
|
34 |
-
self.weight_tensor = torch.Tensor([1,0.75,0.5,0.25,0.]).half().to(model.device)
|
35 |
-
|
36 |
-
self.tokenizer = tokenizer
|
37 |
-
self.model = model
|
38 |
-
self.image_processor = image_processor
|
39 |
-
self.input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(model.device)
|
40 |
-
|
41 |
-
def expand2square(self, pil_img, background_color):
|
42 |
-
width, height = pil_img.size
|
43 |
-
if width == height:
|
44 |
-
return pil_img
|
45 |
-
elif width > height:
|
46 |
-
result = Image.new(pil_img.mode, (width, width), background_color)
|
47 |
-
result.paste(pil_img, (0, (width - height) // 2))
|
48 |
-
return result
|
49 |
-
else:
|
50 |
-
result = Image.new(pil_img.mode, (height, height), background_color)
|
51 |
-
result.paste(pil_img, ((height - width) // 2, 0))
|
52 |
-
return result
|
53 |
-
|
54 |
-
def forward(self, image: List[Image.Image]):
|
55 |
-
image = [self.expand2square(img, tuple(int(x*255) for x in self.image_processor.image_mean)) for img in image]
|
56 |
-
with torch.inference_mode():
|
57 |
-
image_tensor = self.image_processor.preprocess(image, return_tensors="pt")["pixel_values"].half().to(self.model.device)
|
58 |
-
output_logits = self.model(self.input_ids.repeat(image_tensor.shape[0], 1),
|
59 |
-
images=image_tensor)["logits"][:,-1, self.preferential_ids_]
|
60 |
-
|
61 |
-
return torch.softmax(output_logits, -1) #@ self.weight_tensor
|
62 |
-
|
63 |
-
|
64 |
-
class QAlignAestheticScorer(nn.Module):
|
65 |
-
def __init__(self, pretrained="q-future/one-align", device="cuda:0", tokenizer=None, model=None, image_processor=None):
|
66 |
-
super().__init__()
|
67 |
-
if model is None:
|
68 |
-
tokenizer, model, image_processor, _ = load_pretrained_model(pretrained, None, "mplug_owl2", device=device)
|
69 |
-
prompt = "USER: How would you rate the aesthetics of this image?\n<|image|>\nASSISTANT: The aesthetics of the image is"
|
70 |
-
|
71 |
-
self.preferential_ids_ = [id_[1] for id_ in tokenizer(["excellent","good","fair","poor","bad"])["input_ids"]]
|
72 |
-
self.weight_tensor = torch.Tensor([1,0.75,0.5,0.25,0.]).half().to(model.device)
|
73 |
-
|
74 |
-
self.tokenizer = tokenizer
|
75 |
-
self.model = model
|
76 |
-
self.image_processor = image_processor
|
77 |
-
self.input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(model.device)
|
78 |
-
|
79 |
-
def expand2square(self, pil_img, background_color):
|
80 |
-
width, height = pil_img.size
|
81 |
-
if width == height:
|
82 |
-
return pil_img
|
83 |
-
elif width > height:
|
84 |
-
result = Image.new(pil_img.mode, (width, width), background_color)
|
85 |
-
result.paste(pil_img, (0, (width - height) // 2))
|
86 |
-
return result
|
87 |
-
else:
|
88 |
-
result = Image.new(pil_img.mode, (height, height), background_color)
|
89 |
-
result.paste(pil_img, ((height - width) // 2, 0))
|
90 |
-
return result
|
91 |
-
|
92 |
-
def forward(self, image: List[Image.Image]):
|
93 |
-
image = [self.expand2square(img, tuple(int(x*255) for x in self.image_processor.image_mean)) for img in image]
|
94 |
-
with torch.inference_mode():
|
95 |
-
image_tensor = self.image_processor.preprocess(image, return_tensors="pt")["pixel_values"].half().to(self.model.device)
|
96 |
-
output_logits = self.model(self.input_ids.repeat(image_tensor.shape[0], 1),
|
97 |
-
images=image_tensor)["logits"][:,-1, self.preferential_ids_]
|
98 |
-
|
99 |
-
return torch.softmax(output_logits, -1) #@ self.weight_tensor
|
100 |
-
|
101 |
-
class QAlignVideoScorer(nn.Module):
|
102 |
-
def __init__(self, pretrained="q-future/one-align", device="cuda:0", tokenizer=None, model=None, image_processor=None):
|
103 |
-
super().__init__()
|
104 |
-
if model is None:
|
105 |
-
tokenizer, model, image_processor, _ = load_pretrained_model(pretrained, None, "mplug_owl2", device=device)
|
106 |
-
prompt = "USER: How would you rate the quality of this video?\n<|image|>\nASSISTANT: The quality of the video is"
|
107 |
-
|
108 |
-
self.preferential_ids_ = [id_[1] for id_ in tokenizer(["excellent","good","fair","poor","bad"])["input_ids"]]
|
109 |
-
self.weight_tensor = torch.Tensor([1,0.75,0.5,0.25,0.]).half().to(model.device)
|
110 |
-
|
111 |
-
self.tokenizer = tokenizer
|
112 |
-
self.model = model
|
113 |
-
self.image_processor = image_processor
|
114 |
-
self.input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(model.device)
|
115 |
-
|
116 |
-
def expand2square(self, pil_img, background_color):
|
117 |
-
width, height = pil_img.size
|
118 |
-
if width == height:
|
119 |
-
return pil_img
|
120 |
-
elif width > height:
|
121 |
-
result = Image.new(pil_img.mode, (width, width), background_color)
|
122 |
-
result.paste(pil_img, (0, (width - height) // 2))
|
123 |
-
return result
|
124 |
-
else:
|
125 |
-
result = Image.new(pil_img.mode, (height, height), background_color)
|
126 |
-
result.paste(pil_img, ((height - width) // 2, 0))
|
127 |
-
return result
|
128 |
-
|
129 |
-
def forward(self, video: List[List[Image.Image]]):
|
130 |
-
video = [[self.expand2square(frame, tuple(int(x*255) for x in self.image_processor.image_mean)) for frame in vid] for vid in video]
|
131 |
-
with torch.inference_mode():
|
132 |
-
video_tensors = [self.image_processor.preprocess(vid, return_tensors="pt")["pixel_values"].half().to(self.model.device) for vid in video]
|
133 |
-
output_logits = self.model(self.input_ids.repeat(len(video_tensors), 1),
|
134 |
-
images=video_tensors)["logits"][:,-1, self.preferential_ids_]
|
135 |
-
return torch.softmax(output_logits, -1) #@ self.weight_tensor
|
136 |
-
|
137 |
-
|
138 |
-
if __name__ == "__main__":
|
139 |
-
import argparse
|
140 |
-
|
141 |
-
parser = argparse.ArgumentParser()
|
142 |
-
parser.add_argument("--model-path", type=str, default="q-future/one-align")
|
143 |
-
parser.add_argument("--device", type=str, default="cuda:0")
|
144 |
-
parser.add_argument("--img_path", type=str, default="fig/singapore_flyer.jpg")
|
145 |
-
parser.add_argument("--aesthetic", action="store_true")
|
146 |
-
parser.add_argument("--video", action="store_true")
|
147 |
-
args = parser.parse_args()
|
148 |
-
|
149 |
-
if args.video:
|
150 |
-
scorer = QAlignVideoScorer(pretrained=args.model_path, device=args.device)
|
151 |
-
print(scorer([load_video(args.img_path)]).tolist())
|
152 |
-
else:
|
153 |
-
scorer = QAlignScorer(pretrained=args.model_path, device=args.device) if not args.aesthetic else QAlignAestheticScorer(pretrained=args.model_path, device=args.device)
|
154 |
-
print(scorer([Image.open(args.img_path)]).tolist())
|
155 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
q_align/evaluate/.ipynb_checkpoints/vqa_eval-checkpoint.py
DELETED
@@ -1,167 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
import torch
|
3 |
-
|
4 |
-
from q_align.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
|
5 |
-
from q_align.conversation import conv_templates, SeparatorStyle
|
6 |
-
from q_align.model.builder import load_pretrained_model
|
7 |
-
from q_align.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
|
8 |
-
|
9 |
-
from PIL import Image
|
10 |
-
|
11 |
-
import requests
|
12 |
-
from PIL import Image
|
13 |
-
from io import BytesIO
|
14 |
-
from transformers import TextStreamer
|
15 |
-
|
16 |
-
|
17 |
-
from scipy.stats import spearmanr, pearsonr
|
18 |
-
|
19 |
-
import json
|
20 |
-
from tqdm import tqdm
|
21 |
-
from collections import defaultdict
|
22 |
-
|
23 |
-
import os
|
24 |
-
|
25 |
-
def wa5(logits):
|
26 |
-
import numpy as np
|
27 |
-
logprobs = np.array([logits["excellent"], logits["good"], logits["fair"], logits["poor"], logits["bad"]])
|
28 |
-
probs = np.exp(logprobs) / np.sum(np.exp(logprobs))
|
29 |
-
return np.inner(probs, np.array([1,0.75,0.5,0.25,0.]))
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
def disable_torch_init():
|
34 |
-
"""
|
35 |
-
Disable the redundant torch default initialization to accelerate model creation.
|
36 |
-
"""
|
37 |
-
import torch
|
38 |
-
setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
|
39 |
-
setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
|
40 |
-
|
41 |
-
|
42 |
-
def load_video(video_file):
|
43 |
-
from decord import VideoReader
|
44 |
-
vr = VideoReader(video_file)
|
45 |
-
|
46 |
-
# Get video frame rate
|
47 |
-
fps = vr.get_avg_fps()
|
48 |
-
|
49 |
-
# Calculate frame indices for 1fps
|
50 |
-
frame_indices = [int(fps * i) for i in range(int(len(vr) / fps))]
|
51 |
-
frames = vr.get_batch(frame_indices).asnumpy()
|
52 |
-
return [Image.fromarray(frames[i]) for i in range(int(len(vr) / fps))]
|
53 |
-
|
54 |
-
|
55 |
-
def main(args):
|
56 |
-
# Model
|
57 |
-
disable_torch_init()
|
58 |
-
|
59 |
-
model_name = get_model_name_from_path(args.model_path)
|
60 |
-
tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit, device=args.device)
|
61 |
-
|
62 |
-
|
63 |
-
import json
|
64 |
-
|
65 |
-
|
66 |
-
image_paths = [
|
67 |
-
#"playground/data/",
|
68 |
-
#"playground/data/",
|
69 |
-
"playground/data/KoNViD_1k_videos/",
|
70 |
-
"playground/data/maxwell/",
|
71 |
-
|
72 |
-
]
|
73 |
-
|
74 |
-
json_prefix = "playground/data/test_jsons/"
|
75 |
-
jsons = [
|
76 |
-
#json_prefix + "test_lsvq.json",
|
77 |
-
#json_prefix + "test_lsvq_1080p.json",
|
78 |
-
json_prefix + "konvid.json",
|
79 |
-
json_prefix + "maxwell_test.json",
|
80 |
-
]
|
81 |
-
|
82 |
-
os.makedirs(f"results/{args.model_path}/", exist_ok=True)
|
83 |
-
|
84 |
-
|
85 |
-
conv_mode = "mplug_owl2"
|
86 |
-
|
87 |
-
inp = "How would you rate the quality of this video?"
|
88 |
-
|
89 |
-
conv = conv_templates[conv_mode].copy()
|
90 |
-
inp = inp + "\n" + DEFAULT_IMAGE_TOKEN
|
91 |
-
conv.append_message(conv.roles[0], inp)
|
92 |
-
image = None
|
93 |
-
|
94 |
-
conv.append_message(conv.roles[1], None)
|
95 |
-
prompt = conv.get_prompt() + " The quality of the video is"
|
96 |
-
|
97 |
-
toks = ["good", "poor", "high", "fair", "low", "excellent", "bad", "fine", "moderate", "decent", "average", "medium", "acceptable"]
|
98 |
-
print(toks)
|
99 |
-
ids_ = [id_[1] for id_ in tokenizer(toks)["input_ids"]]
|
100 |
-
print(ids_)
|
101 |
-
|
102 |
-
input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(args.device)
|
103 |
-
|
104 |
-
for image_path, json_ in zip(image_paths, jsons):
|
105 |
-
with open(json_) as f:
|
106 |
-
iqadata = json.load(f)
|
107 |
-
prs, gts = [], []
|
108 |
-
for i, llddata in enumerate(tqdm(iqadata, desc="Evaluating [{}]".format(json_.split("/")[-1]))):
|
109 |
-
try:
|
110 |
-
try:
|
111 |
-
filename = llddata["img_path"]
|
112 |
-
except:
|
113 |
-
filename = llddata["image"]
|
114 |
-
llddata["logits"] = defaultdict(float)
|
115 |
-
|
116 |
-
image = load_video(image_path + filename)
|
117 |
-
def expand2square(pil_img, background_color):
|
118 |
-
width, height = pil_img.size
|
119 |
-
if width == height:
|
120 |
-
return pil_img
|
121 |
-
elif width > height:
|
122 |
-
result = Image.new(pil_img.mode, (width, width), background_color)
|
123 |
-
result.paste(pil_img, (0, (width - height) // 2))
|
124 |
-
return result
|
125 |
-
else:
|
126 |
-
result = Image.new(pil_img.mode, (height, height), background_color)
|
127 |
-
result.paste(pil_img, ((height - width) // 2, 0))
|
128 |
-
return result
|
129 |
-
image = [expand2square(img, tuple(int(x*255) for x in image_processor.image_mean)) for img in image]
|
130 |
-
image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'].half().to(args.device)
|
131 |
-
|
132 |
-
if True:
|
133 |
-
with torch.inference_mode():
|
134 |
-
output_logits = model(input_ids,
|
135 |
-
images=[image_tensor])["logits"][:,-1]
|
136 |
-
for tok, id_ in zip(toks, ids_):
|
137 |
-
llddata["logits"][tok] += output_logits.mean(0)[id_].item()
|
138 |
-
llddata["score"] = wa5(llddata["logits"])
|
139 |
-
# print(llddata)
|
140 |
-
prs.append(llddata["score"])
|
141 |
-
gts.append(llddata["gt_score"])
|
142 |
-
# print(llddata)
|
143 |
-
json_ = json_.replace("combined/", "combined-")
|
144 |
-
with open(f"results/{args.model_path}/2{json_.split('/')[-1]}", "a") as wf:
|
145 |
-
json.dump(llddata, wf)
|
146 |
-
|
147 |
-
if i > 0 and i % 200 == 0:
|
148 |
-
print(spearmanr(prs,gts)[0], pearsonr(prs,gts)[0])
|
149 |
-
except:
|
150 |
-
continue
|
151 |
-
print("Spearmanr", spearmanr(prs,gts)[0], "Pearson", pearsonr(prs,gts)[0])
|
152 |
-
|
153 |
-
|
154 |
-
if __name__ == "__main__":
|
155 |
-
parser = argparse.ArgumentParser()
|
156 |
-
parser.add_argument("--model-path", type=str, default="q-future/one-align")
|
157 |
-
parser.add_argument("--model-base", type=str, default=None)
|
158 |
-
parser.add_argument("--device", type=str, default="cuda:0")
|
159 |
-
parser.add_argument("--conv-mode", type=str, default=None)
|
160 |
-
parser.add_argument("--temperature", type=float, default=0.2)
|
161 |
-
parser.add_argument("--max-new-tokens", type=int, default=512)
|
162 |
-
parser.add_argument("--load-8bit", action="store_true")
|
163 |
-
parser.add_argument("--load-4bit", action="store_true")
|
164 |
-
parser.add_argument("--debug", action="store_true")
|
165 |
-
parser.add_argument("--image-aspect-ratio", type=str, default='pad')
|
166 |
-
args = parser.parse_args()
|
167 |
-
main(args)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
q_align/train/.ipynb_checkpoints/train-checkpoint.py
DELETED
@@ -1,844 +0,0 @@
|
|
1 |
-
# Adopted from https://github.com/lm-sys/FastChat. Below is the original copyright:
|
2 |
-
# Adopted from tatsu-lab@stanford_alpaca. Below is the original copyright:
|
3 |
-
# Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
|
4 |
-
#
|
5 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
-
# you may not use this file except in compliance with the License.
|
7 |
-
# You may obtain a copy of the License at
|
8 |
-
#
|
9 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
-
#
|
11 |
-
# Unless required by applicable law or agreed to in writing, software
|
12 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
-
# See the License for the specific language governing permissions and
|
15 |
-
# limitations under the License.
|
16 |
-
|
17 |
-
import os
|
18 |
-
import copy
|
19 |
-
from dataclasses import dataclass, field
|
20 |
-
import json
|
21 |
-
import logging
|
22 |
-
import pathlib
|
23 |
-
from typing import Dict, Optional, Sequence, List
|
24 |
-
|
25 |
-
from PIL import ImageFile
|
26 |
-
ImageFile.LOAD_TRUNCATED_IMAGES = True
|
27 |
-
|
28 |
-
import torch
|
29 |
-
|
30 |
-
import transformers
|
31 |
-
from transformers.models.clip.image_processing_clip import CLIPImageProcessor
|
32 |
-
|
33 |
-
from torch.utils.data import Dataset
|
34 |
-
from q_align.train.mplug_owl2_trainer import MPLUGOwl2Trainer
|
35 |
-
from q_align.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
|
36 |
-
|
37 |
-
from q_align import conversation as conversation_lib
|
38 |
-
from q_align.model import *
|
39 |
-
from q_align.mm_utils import tokenizer_image_token
|
40 |
-
|
41 |
-
from PIL import Image
|
42 |
-
from icecream import ic
|
43 |
-
|
44 |
-
local_rank = None
|
45 |
-
|
46 |
-
|
47 |
-
def rank0_print(*args):
|
48 |
-
if local_rank == 0:
|
49 |
-
print(*args)
|
50 |
-
|
51 |
-
|
52 |
-
@dataclass
|
53 |
-
class ModelArguments:
|
54 |
-
model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
|
55 |
-
version: Optional[str] = field(default="v0")
|
56 |
-
freeze_backbone: bool = field(default=False)
|
57 |
-
|
58 |
-
@dataclass
|
59 |
-
class DataArguments:
|
60 |
-
data_path: str = field(default=None,
|
61 |
-
metadata={"help": "Path to the training data."})
|
62 |
-
lazy_preprocess: bool = False
|
63 |
-
is_multimodal: bool = False
|
64 |
-
image_folder: Optional[str] = field(default=None)
|
65 |
-
image_aspect_ratio: str = 'square'
|
66 |
-
image_grid_pinpoints: Optional[str] = field(default=None)
|
67 |
-
|
68 |
-
|
69 |
-
@dataclass
|
70 |
-
class TrainingArguments(transformers.TrainingArguments):
|
71 |
-
cache_dir: Optional[str] = field(default=None)
|
72 |
-
optim: str = field(default="adamw_torch")
|
73 |
-
remove_unused_columns: bool = field(default=False)
|
74 |
-
|
75 |
-
tune_visual_abstractor: bool = field(default=True)
|
76 |
-
freeze_vision_model: bool = field(default=True)
|
77 |
-
|
78 |
-
model_max_length: int = field(
|
79 |
-
default=512,
|
80 |
-
metadata={
|
81 |
-
"help":
|
82 |
-
"Maximum sequence length. Sequences will be right padded (and possibly truncated)."
|
83 |
-
},
|
84 |
-
)
|
85 |
-
double_quant: bool = field(
|
86 |
-
default=True,
|
87 |
-
metadata={"help": "Compress the quantization statistics through double quantization."}
|
88 |
-
)
|
89 |
-
quant_type: str = field(
|
90 |
-
default="nf4",
|
91 |
-
metadata={"help": "Quantization data type to use. Should be one of `fp4` or `nf4`."}
|
92 |
-
)
|
93 |
-
bits: int = field(
|
94 |
-
default=16,
|
95 |
-
metadata={"help": "How many bits to use."}
|
96 |
-
)
|
97 |
-
lora_enable: bool = False
|
98 |
-
lora_r: int = 64
|
99 |
-
lora_alpha: int = 16
|
100 |
-
lora_dropout: float = 0.05
|
101 |
-
lora_weight_path: str = ""
|
102 |
-
lora_bias: str = "none"
|
103 |
-
visual_abstractor_lr: Optional[float] = None
|
104 |
-
group_by_modality_length: bool = field(default=False)
|
105 |
-
|
106 |
-
|
107 |
-
def maybe_zero_3(param, ignore_status=False, name=None):
|
108 |
-
from deepspeed import zero
|
109 |
-
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
|
110 |
-
if hasattr(param, "ds_id"):
|
111 |
-
if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
|
112 |
-
if not ignore_status:
|
113 |
-
logging.warning(f"{name}: param.ds_status != ZeroParamStatus.NOT_AVAILABLE: {param.ds_status}")
|
114 |
-
with zero.GatheredParameters([param]):
|
115 |
-
param = param.data.detach().cpu().clone()
|
116 |
-
else:
|
117 |
-
param = param.detach().cpu().clone()
|
118 |
-
return param
|
119 |
-
|
120 |
-
|
121 |
-
# Borrowed from peft.utils.get_peft_model_state_dict
|
122 |
-
def get_peft_state_maybe_zero_3(named_params, bias):
|
123 |
-
if bias == "none":
|
124 |
-
to_return = {k: t for k, t in named_params if "lora_" in k}
|
125 |
-
elif bias == "all":
|
126 |
-
to_return = {k: t for k, t in named_params if "lora_" in k or "bias" in k}
|
127 |
-
elif bias == "lora_only":
|
128 |
-
to_return = {}
|
129 |
-
maybe_lora_bias = {}
|
130 |
-
lora_bias_names = set()
|
131 |
-
for k, t in named_params:
|
132 |
-
if "lora_" in k:
|
133 |
-
to_return[k] = t
|
134 |
-
bias_name = k.split("lora_")[0] + "bias"
|
135 |
-
lora_bias_names.add(bias_name)
|
136 |
-
elif "bias" in k:
|
137 |
-
maybe_lora_bias[k] = t
|
138 |
-
for k, t in maybe_lora_bias:
|
139 |
-
if bias_name in lora_bias_names:
|
140 |
-
to_return[bias_name] = t
|
141 |
-
else:
|
142 |
-
raise NotImplementedError
|
143 |
-
to_return = {k: maybe_zero_3(v, ignore_status=True) for k, v in to_return.items()}
|
144 |
-
return to_return
|
145 |
-
|
146 |
-
|
147 |
-
def get_peft_state_non_lora_maybe_zero_3(named_params, require_grad_only=True):
|
148 |
-
to_return = {k: t for k, t in named_params if "lora_" not in k}
|
149 |
-
if require_grad_only:
|
150 |
-
to_return = {k: t for k, t in to_return.items() if t.requires_grad}
|
151 |
-
to_return = {k: maybe_zero_3(v, ignore_status=True).cpu() for k, v in to_return.items()}
|
152 |
-
return to_return
|
153 |
-
|
154 |
-
|
155 |
-
def get_mm_adapter_state_maybe_zero_3(named_params, keys_to_match):
|
156 |
-
to_return = {k: t for k, t in named_params if any(key_match in k for key_match in keys_to_match)}
|
157 |
-
to_return = {k: maybe_zero_3(v, ignore_status=True).cpu() for k, v in to_return.items()}
|
158 |
-
return to_return
|
159 |
-
|
160 |
-
|
161 |
-
def find_all_linear_names(model):
|
162 |
-
cls = torch.nn.Linear
|
163 |
-
lora_module_names = set()
|
164 |
-
multimodal_keywords = ['vision_model', 'visual_abstractor']
|
165 |
-
for name, module in model.named_modules():
|
166 |
-
if any(mm_keyword in name for mm_keyword in multimodal_keywords):
|
167 |
-
continue
|
168 |
-
if isinstance(module, cls):
|
169 |
-
lora_module_names.add(name)
|
170 |
-
|
171 |
-
if 'lm_head' in lora_module_names: # needed for 16-bit
|
172 |
-
lora_module_names.remove('lm_head')
|
173 |
-
return list(lora_module_names)
|
174 |
-
|
175 |
-
|
176 |
-
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer,
|
177 |
-
output_dir: str):
|
178 |
-
"""Collects the state dict and dump to disk."""
|
179 |
-
|
180 |
-
if trainer.deepspeed:
|
181 |
-
torch.cuda.synchronize()
|
182 |
-
trainer.save_model(output_dir)
|
183 |
-
return
|
184 |
-
|
185 |
-
state_dict = trainer.model.state_dict()
|
186 |
-
if trainer.args.should_save:
|
187 |
-
cpu_state_dict = {
|
188 |
-
key: value.cpu()
|
189 |
-
for key, value in state_dict.items()
|
190 |
-
}
|
191 |
-
del state_dict
|
192 |
-
trainer._save(output_dir, state_dict=cpu_state_dict) # noqa
|
193 |
-
|
194 |
-
|
195 |
-
def smart_tokenizer_and_embedding_resize(
|
196 |
-
special_tokens_dict: Dict,
|
197 |
-
tokenizer: transformers.PreTrainedTokenizer,
|
198 |
-
model: transformers.PreTrainedModel,
|
199 |
-
):
|
200 |
-
"""Resize tokenizer and embedding.
|
201 |
-
|
202 |
-
Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
|
203 |
-
"""
|
204 |
-
num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
|
205 |
-
model.resize_token_embeddings(len(tokenizer))
|
206 |
-
|
207 |
-
if num_new_tokens > 0:
|
208 |
-
input_embeddings = model.get_input_embeddings().weight.data
|
209 |
-
output_embeddings = model.get_output_embeddings().weight.data
|
210 |
-
|
211 |
-
input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
|
212 |
-
dim=0, keepdim=True)
|
213 |
-
output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
|
214 |
-
dim=0, keepdim=True)
|
215 |
-
|
216 |
-
input_embeddings[-num_new_tokens:] = input_embeddings_avg
|
217 |
-
output_embeddings[-num_new_tokens:] = output_embeddings_avg
|
218 |
-
|
219 |
-
|
220 |
-
def _tokenize_fn(strings: Sequence[str],
|
221 |
-
tokenizer: transformers.PreTrainedTokenizer) -> Dict:
|
222 |
-
"""Tokenize a list of strings."""
|
223 |
-
tokenized_list = [
|
224 |
-
tokenizer(
|
225 |
-
text,
|
226 |
-
return_tensors="pt",
|
227 |
-
padding="longest",
|
228 |
-
max_length=tokenizer.model_max_length,
|
229 |
-
truncation=True,
|
230 |
-
) for text in strings
|
231 |
-
]
|
232 |
-
input_ids = labels = [
|
233 |
-
tokenized.input_ids[0] for tokenized in tokenized_list
|
234 |
-
]
|
235 |
-
input_ids_lens = labels_lens = [
|
236 |
-
tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item()
|
237 |
-
for tokenized in tokenized_list
|
238 |
-
]
|
239 |
-
return dict(
|
240 |
-
input_ids=input_ids,
|
241 |
-
labels=labels,
|
242 |
-
input_ids_lens=input_ids_lens,
|
243 |
-
labels_lens=labels_lens,
|
244 |
-
)
|
245 |
-
|
246 |
-
|
247 |
-
def _mask_targets(target, tokenized_lens, speakers):
|
248 |
-
# cur_idx = 0
|
249 |
-
cur_idx = tokenized_lens[0]
|
250 |
-
tokenized_lens = tokenized_lens[1:]
|
251 |
-
target[:cur_idx] = IGNORE_INDEX
|
252 |
-
for tokenized_len, speaker in zip(tokenized_lens, speakers):
|
253 |
-
if speaker == "human":
|
254 |
-
target[cur_idx+2:cur_idx + tokenized_len] = IGNORE_INDEX
|
255 |
-
        cur_idx += tokenized_len


def _add_speaker_and_signal(header, source, get_conversation=True):
    """Add speaker and start/end signal on each round."""
    BEGIN_SIGNAL = "### "
    END_SIGNAL = "\n"
    conversation = header
    for sentence in source:
        from_str = sentence["from"]
        if from_str.lower() == "human":
            from_str = conversation_lib.default_conversation.roles[0]
        elif from_str.lower() == "gpt":
            from_str = conversation_lib.default_conversation.roles[1]
        else:
            from_str = 'unknown'
        sentence["value"] = (BEGIN_SIGNAL + from_str + ": " +
                             sentence["value"] + END_SIGNAL)
        if get_conversation:
            conversation += sentence["value"]
    conversation += BEGIN_SIGNAL
    return conversation


def preprocess_multimodal(
    sources: Sequence[str],
    data_args: DataArguments
) -> Dict:
    is_multimodal = data_args.is_multimodal
    if not is_multimodal:
        return sources

    for source in sources:
        for sentence in source:
            if DEFAULT_IMAGE_TOKEN in sentence['value']:
                sentence['value'] = sentence['value'].replace(DEFAULT_IMAGE_TOKEN, '').strip()
                sentence['value'] = DEFAULT_IMAGE_TOKEN + '\n' + sentence['value']
                sentence['value'] = sentence['value'].strip()

            replace_token = DEFAULT_IMAGE_TOKEN
            sentence["value"] = sentence["value"].replace(DEFAULT_IMAGE_TOKEN, replace_token)

    return sources


def preprocess_v1(
    sources,
    tokenizer: transformers.PreTrainedTokenizer,
    has_image: bool = False
) -> Dict:
    conv = conversation_lib.default_conversation.copy()
    roles = {"human": conv.roles[0], "gpt": conv.roles[1]}

    # Apply prompt templates
    conversations = []
    for i, source in enumerate(sources):
        if roles[source[0]["from"]] != conv.roles[0]:
            # Skip the first one if it is not from human
            source = source[1:]

        conv.messages = []
        for j, sentence in enumerate(source):
            role = roles[sentence["from"]]
            assert role == conv.roles[j % 2], f"{i}"
            conv.append_message(role, sentence["value"])
        conversations.append(conv.get_prompt())

    # Tokenize conversations

    if has_image:
        input_ids = torch.stack([tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations], dim=0)
    else:
        input_ids = tokenizer(
            conversations,
            return_tensors="pt",
            padding="longest",
            max_length=tokenizer.model_max_length,
            truncation=True,
        ).input_ids

    targets = input_ids.clone()

    assert conv.sep_style == conversation_lib.SeparatorStyle.TWO or conv.sep_style == conversation_lib.SeparatorStyle.TWO_NO_SYS

    # Mask targets
    sep = conv.sep + conv.roles[1] + ": "
    for conversation, target in zip(conversations, targets):
        total_len = int(target.ne(tokenizer.pad_token_id).sum())

        rounds = conversation.split(conv.sep2)
        cur_len = 1
        target[:cur_len] = IGNORE_INDEX
        for i, rou in enumerate(rounds):
            if rou == "":
                break

            parts = rou.split(sep)
            if len(parts) != 2:
                break
            parts[0] += sep

            if has_image:
                round_len = len(tokenizer_image_token(rou, tokenizer))
                instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2
            else:
                round_len = len(tokenizer(rou).input_ids)
                instruction_len = len(tokenizer(parts[0]).input_ids) - 2

            target[cur_len : cur_len + instruction_len] = IGNORE_INDEX

            cur_len += round_len
        target[cur_len:] = IGNORE_INDEX

        if cur_len < tokenizer.model_max_length:
            if cur_len != total_len:
                target[:] = IGNORE_INDEX
                print(
                    f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}."
                    f" (ignored)"
                )

    return dict(
        input_ids=input_ids,
        labels=targets,
    )


def preprocess_plain(
    sources: Sequence[str],
    tokenizer: transformers.PreTrainedTokenizer,
) -> Dict:
    # add end signal and concatenate together
    conversations = []
    for source in sources:
        assert len(source) == 2
        assert DEFAULT_IMAGE_TOKEN in source[0]['value']
        source[0]['value'] = DEFAULT_IMAGE_TOKEN
        conversation = source[0]['value'] + source[1]['value'] + conversation_lib.default_conversation.sep
        conversations.append(conversation)
    # tokenize conversations
    input_ids = [tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations]
    targets = copy.deepcopy(input_ids)
    for target, source in zip(targets, sources):
        tokenized_len = len(tokenizer_image_token(source[0]['value'], tokenizer))
        target[:tokenized_len] = IGNORE_INDEX

    return dict(input_ids=input_ids, labels=targets)


def preprocess(
    sources: Sequence[str],
    tokenizer: transformers.PreTrainedTokenizer,
    has_image: bool = False
) -> Dict:
    """
    Given a list of sources, each is a conversation list. This transform:
    1. Add signal '### ' at the beginning each sentence, with end signal '\n';
    2. Concatenate conversations together;
    3. Tokenize the concatenated conversation;
    4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX.
    """
    if conversation_lib.default_conversation.sep_style == conversation_lib.SeparatorStyle.PLAIN:
        return preprocess_plain(sources, tokenizer)
    if conversation_lib.default_conversation.version.startswith("v1"):
        return preprocess_v1(sources, tokenizer, has_image=has_image)
    # add end signal and concatenate together
    conversations = []
    for source in sources:
        header = f"{conversation_lib.default_conversation.system}\n\n"
        conversation = _add_speaker_and_signal(header, source)
        conversations.append(conversation)
    # tokenize conversations
    def get_tokenize_len(prompts):
        return [len(tokenizer_image_token(prompt, tokenizer)) for prompt in prompts]
    if has_image:
        input_ids = [tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations]
    else:
        conversations_tokenized = _tokenize_fn(conversations, tokenizer)
        input_ids = conversations_tokenized["input_ids"]

    targets = copy.deepcopy(input_ids)
    for target, source in zip(targets, sources):
        if has_image:
            tokenized_lens = get_tokenize_len([header] + [s["value"] for s in source])
        else:
            tokenized_lens = _tokenize_fn([header] + [s["value"] for s in source], tokenizer)["input_ids_lens"]
        speakers = [sentence["from"] for sentence in source]
        _mask_targets(target, tokenized_lens, speakers)

    return dict(input_ids=input_ids, labels=targets)

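The preprocess_* helpers above share one labelling scheme: the targets start as a copy of the token ids, and every position that is not part of an assistant reply is overwritten with IGNORE_INDEX (-100), so only the answers contribute to the language-modelling loss. A minimal, self-contained sketch of that masking, using made-up token ids and hypothetical answer spans rather than this file's conversation templates:

import torch

IGNORE_INDEX = -100
input_ids = torch.tensor([1, 11, 12, 13, 21, 22, 14, 15, 23, 24, 25])
answer_spans = [(4, 6), (8, 11)]      # assumed (start, end) indices of replies

labels = input_ids.clone()
keep = torch.zeros_like(labels, dtype=torch.bool)
for start, end in answer_spans:
    keep[start:end] = True            # keep answer tokens as-is
labels[~keep] = IGNORE_INDEX          # mask prompt/instruction tokens
# labels -> tensor([-100, -100, -100, -100, 21, 22, -100, -100, 23, 24, 25])
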
def load_video(video_file):
    from decord import VideoReader
    vr = VideoReader(video_file)

    # Get video frame rate
    fps = vr.get_avg_fps()

    # Calculate frame indices for 1fps
    frame_indices = [int(fps * i) for i in range(int(len(vr) / fps))]
    frames = vr.get_batch(frame_indices).asnumpy()
    return [Image.fromarray(frames[i]) for i in range(int(len(vr) / fps))]


def expand2square(pil_img, background_color):
    width, height = pil_img.size
    if width == height:
        return pil_img
    elif width > height:
        result = Image.new(pil_img.mode, (width, width), background_color)
        result.paste(pil_img, (0, (width - height) // 2))
        return result
    else:
        result = Image.new(pil_img.mode, (height, height), background_color)
        result.paste(pil_img, ((height - width) // 2, 0))
        return result

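Taken together, the two helpers above turn a clip into square frames ready for the CLIP preprocessor: load_video samples roughly one frame per second with decord, and expand2square pads each frame with the processor's mean color. A hedged usage sketch (the clip path and the processor checkpoint are assumptions for illustration; load_video and expand2square are the helpers defined above):

from transformers import CLIPImageProcessor

processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-large-patch14")
frames = load_video("example_clip.mp4")                         # ~1 frame per second
mean_color = tuple(int(x * 255) for x in processor.image_mean)
frames = [expand2square(frame, mean_color) for frame in frames]  # pad to square
pixel_values = processor.preprocess(frames, return_tensors="pt")["pixel_values"]
# pixel_values: (num_frames, 3, H, W), one preprocessed tensor per sampled frame
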
class LazySupervisedDataset(Dataset):
    """Dataset for supervised fine-tuning."""

    def __init__(self, data_path: str,
                 tokenizer: transformers.PreTrainedTokenizer,
                 data_args: DataArguments):
        super(LazySupervisedDataset, self).__init__()
        list_data_dict = json.load(open(data_path, "r"))

        rank0_print("Formatting inputs...Skip in lazy mode")
        self.tokenizer = tokenizer
        self.list_data_dict = list_data_dict
        self.data_args = data_args

    def __len__(self):
        return len(self.list_data_dict)

    @property
    def lengths(self):
        length_list = []
        for sample in self.list_data_dict:
            img_tokens = 128 if 'image' in sample else 0
            length_list.append(sum(len(conv['value'].split()) for conv in sample['conversations']) + img_tokens)
        return length_list


    @property
    def modality_lengths(self):
        length_list = []
        for sample in self.list_data_dict:
            cur_len = sum(len(conv['value'].split()) for conv in sample['conversations'])
            cur_len = cur_len if 'image' in sample else -cur_len
            length_list.append(cur_len)
        return length_list

    # def __getitem__(self, i) -> Dict[str, torch.Tensor]:
    #     sources = self.list_data_dict[i]
    #     if isinstance(i, int):
    #         sources = [sources]
    #     assert len(sources) == 1, "Don't know why it is wrapped to a list" # FIXME
    #     if 'image' in sources[0]:
    #         image_file = self.list_data_dict[i]['image']
    #         image_folder = self.data_args.image_folder
    #         processor = self.data_args.image_processor
    #         image = Image.open(os.path.join(image_folder, image_file)).convert('RGB')
    #         if self.data_args.image_aspect_ratio == 'pad':
    #             def expand2square(pil_img, background_color):
    #                 width, height = pil_img.size
    #                 if width == height:
    #                     return pil_img
    #                 elif width > height:
    #                     result = Image.new(pil_img.mode, (width, width), background_color)
    #                     result.paste(pil_img, (0, (width - height) // 2))
    #                     return result
    #                 else:
    #                     result = Image.new(pil_img.mode, (height, height), background_color)
    #                     result.paste(pil_img, ((height - width) // 2, 0))
    #                     return result
    #             image = expand2square(image, tuple(int(x*255) for x in processor.image_mean))
    #             image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
    #         else:
    #             image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
    #         sources = preprocess_multimodal(
    #             copy.deepcopy([e["conversations"] for e in sources]),
    #             self.data_args)
    #     else:
    #         sources = copy.deepcopy([e["conversations"] for e in sources])
    #     data_dict = preprocess(
    #         sources,
    #         self.tokenizer,
    #         has_image=('image' in self.list_data_dict[i]))
    #     if isinstance(i, int):
    #         data_dict = dict(input_ids=data_dict["input_ids"][0],
    #                          labels=data_dict["labels"][0])

    #     # image exist in the data
    #     if 'image' in self.list_data_dict[i]:
    #         data_dict['image'] = image
    #     elif self.data_args.is_multimodal:
    #         # image does not exist in the data, but the model is multimodal
    #         crop_size = self.data_args.image_processor.crop_size
    #         data_dict['image'] = torch.zeros(3, crop_size['height'], crop_size['width'])
    #     return data_dict

    def next_rand(self):
        import random
        return random.randint(0,len(self)-1)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        while True:
            sources = self.list_data_dict[i]
            if isinstance(i, int):
                sources = [sources]
            assert len(sources) == 1, "Don't know why it is wrapped to a list" # FIXME
            if 'image' in sources[0]:
                image_file = self.list_data_dict[i]['image']

                image_folder = self.data_args.image_folder
                processor = self.data_args.image_processor
                from pathlib import Path
                #if not Path(os.path.join(image_folder, image_file)).exists():
                #    i = self.next_rand()
                #    continue
                if isinstance(image_file, list):
                    # Multiple Images as Input
                    try:
                        image = [Image.open(os.path.join(image_folder, imfile)).convert('RGB') for imfile in image_file]
                    except Exception as ex:
                        print(ex)
                        i = self.next_rand()
                        continue
                    if self.data_args.image_aspect_ratio == 'pad':
                        image = [expand2square(img, tuple(int(x*255) for x in processor.image_mean)) for img in image]
                        image = processor.preprocess(image, return_tensors='pt')['pixel_values']
                    else:
                        image = processor.preprocess(image, return_tensors='pt')['pixel_values']
                elif os.path.join(image_folder, image_file).endswith("mp4"):
                    # Video as Input
                    image = load_video(os.path.join(image_folder, image_file))
                    if self.data_args.image_aspect_ratio == 'pad':
                        image = [expand2square(img, tuple(int(x*255) for x in processor.image_mean)) for img in image]
                        image = processor.preprocess(image, return_tensors='pt')['pixel_values']
                    else:
                        image = processor.preprocess(image, return_tensors='pt')['pixel_values']
                else:
                    try:
                        image = Image.open(os.path.join(image_folder, image_file)).convert('RGB')
                    except Exception as ex:
                        print(ex)
                        i = self.next_rand()
                        continue
                    if self.data_args.image_aspect_ratio == 'pad':
                        image = expand2square(image, tuple(int(x*255) for x in processor.image_mean))
                        image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
                    else:
                        image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
                sources = preprocess_multimodal(
                    copy.deepcopy([e["conversations"] for e in sources]),
                    self.data_args)
            else:

                sources = copy.deepcopy([e["conversations"] for e in sources])
            data_dict = preprocess(
                sources,
                self.tokenizer,
                has_image=('image' in self.list_data_dict[i]))
            if isinstance(i, int):
                data_dict = dict(input_ids=data_dict["input_ids"][0],
                                 labels=data_dict["labels"][0])

            # image exist in the data
            if 'image' in self.list_data_dict[i]:
                data_dict['image'] = image
            elif self.data_args.is_multimodal:
                # image does not exist in the data, but the model is multimodal
                crop_size = self.data_args.image_processor.crop_size
                data_dict['image'] = torch.zeros(3, crop_size['height'], crop_size['width'])
            return data_dict

@dataclass
class DataCollatorForSupervisedDataset(object):
    """Collate examples for supervised fine-tuning."""

    tokenizer: transformers.PreTrainedTokenizer

    def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
        input_ids, labels = tuple([instance[key] for instance in instances]
                                  for key in ("input_ids", "labels"))
        input_ids = torch.nn.utils.rnn.pad_sequence(
            input_ids,
            batch_first=True,
            padding_value=self.tokenizer.pad_token_id)
        labels = torch.nn.utils.rnn.pad_sequence(labels,
                                                 batch_first=True,
                                                 padding_value=IGNORE_INDEX)
        input_ids = input_ids[:, :self.tokenizer.model_max_length]
        labels = labels[:, :self.tokenizer.model_max_length]
        batch = dict(
            input_ids=input_ids,
            labels=labels,
            attention_mask=input_ids.ne(self.tokenizer.pad_token_id),
        )

        if 'image' in instances[0]:
            images = [instance['image'] for instance in instances]
            if all(x is not None and x.shape == images[0].shape for x in images):
                batch['images'] = torch.stack(images)
            else:
                batch['images'] = images

        return batch

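The collator above right-pads a batch of variable-length examples: input ids are padded with the tokenizer's pad token, labels with IGNORE_INDEX so padded positions never contribute to the loss, and the attention mask simply marks the non-pad positions. A small self-contained sketch with toy tensors (the pad id of 0 is an assumption for the example):

import torch

IGNORE_INDEX = -100
PAD_TOKEN_ID = 0

input_ids = torch.nn.utils.rnn.pad_sequence(
    [torch.tensor([1, 5, 6, 7]), torch.tensor([1, 8])],
    batch_first=True, padding_value=PAD_TOKEN_ID)
labels = torch.nn.utils.rnn.pad_sequence(
    [torch.tensor([-100, 5, 6, 7]), torch.tensor([-100, 8])],
    batch_first=True, padding_value=IGNORE_INDEX)
attention_mask = input_ids.ne(PAD_TOKEN_ID)
# input_ids      -> [[1, 5, 6, 7], [1, 8, 0, 0]]
# labels         -> [[-100, 5, 6, 7], [-100, 8, -100, -100]]
# attention_mask -> [[True, True, True, True], [True, True, False, False]]
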
def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer,
                                data_args) -> Dict:
    """Make dataset and collator for supervised fine-tuning."""
    train_dataset = LazySupervisedDataset(tokenizer=tokenizer,
                                          data_path=data_args.data_path,
                                          data_args=data_args)
    data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
    return dict(train_dataset=train_dataset,
                eval_dataset=None,
                data_collator=data_collator)

def train():
    global local_rank

    parser = transformers.HfArgumentParser(
        (ModelArguments, DataArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    local_rank = training_args.local_rank
    compute_dtype = (torch.float16 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32))

    bnb_model_from_pretrained_args = {}
    if training_args.bits in [4, 8]:
        from transformers import BitsAndBytesConfig
        bnb_model_from_pretrained_args.update(dict(
            device_map={"": training_args.device},
            load_in_4bit=training_args.bits == 4,
            load_in_8bit=training_args.bits == 8,
            quantization_config=BitsAndBytesConfig(
                load_in_4bit=training_args.bits == 4,
                load_in_8bit=training_args.bits == 8,
                llm_int8_threshold=6.0,
                llm_int8_has_fp16_weight=False,
                bnb_4bit_compute_dtype=compute_dtype,
                bnb_4bit_use_double_quant=training_args.double_quant,
                bnb_4bit_quant_type=training_args.quant_type # {'fp4', 'nf4'}
            )
        ))

    model = MPLUGOwl2LlamaForCausalLM.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=training_args.cache_dir,
        **bnb_model_from_pretrained_args
    )
    model.config.use_cache = False

    if model_args.freeze_backbone:
        model.model.requires_grad_(False)

    if training_args.bits in [4, 8]:
        from peft import prepare_model_for_kbit_training
        model.config.torch_dtype=(torch.float32 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32))
        model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=training_args.gradient_checkpointing)

    if training_args.gradient_checkpointing:
        if hasattr(model, "enable_input_require_grads"):
            model.enable_input_require_grads()
        else:
            def make_inputs_require_grad(module, input, output):
                output.requires_grad_(True)
            model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)

    if training_args.lora_enable:
        from peft import LoraConfig, get_peft_model
        lora_config = LoraConfig(
            r=training_args.lora_r,
            lora_alpha=training_args.lora_alpha,
            target_modules=find_all_linear_names(model),
            lora_dropout=training_args.lora_dropout,
            bias=training_args.lora_bias,
            task_type="CAUSAL_LM",
        )
        if training_args.bits == 16:
            if training_args.bf16:
                model.to(torch.bfloat16)
            if training_args.fp16:
                model.to(torch.float16)
        rank0_print("Adding LoRA adapters...")
        model = get_peft_model(model, lora_config)

    tokenizer = transformers.AutoTokenizer.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=training_args.cache_dir,
        model_max_length=training_args.model_max_length,
        padding_side="right",
        use_fast=False,
    )


    tokenizer.pad_token = tokenizer.unk_token
    if model_args.version in conversation_lib.conv_templates:
        conversation_lib.default_conversation = conversation_lib.conv_templates[model_args.version]
    else:
        conversation_lib.default_conversation = conversation_lib.conv_templates["vicuna_v1"]

    if not training_args.freeze_vision_model and training_args.bits in [4, 8]:
        model.get_model().vision_model.to(dtype=compute_dtype, device=training_args.device)
    else:
        vision_tower = model.get_model().vision_model
        vision_tower.to(dtype=torch.bfloat16 if training_args.bf16 else torch.float16, device=training_args.device)

    if training_args.tune_visual_abstractor and training_args.bits in [4, 8]:
        model.get_model().visual_abstractor.to(dtype=compute_dtype, device=training_args.device)
    else:
        visual_abstractor = model.get_model().visual_abstractor
        visual_abstractor.to(dtype=torch.bfloat16 if training_args.bf16 else torch.float16, device=training_args.device)

    data_args.image_processor = CLIPImageProcessor.from_pretrained(model_args.model_name_or_path)
    data_args.is_multimodal = True

    model.config.image_aspect_ratio = data_args.image_aspect_ratio
    model.config.image_grid_pinpoints = data_args.image_grid_pinpoints
    model.config.tune_visual_abstractor = model_args.tune_visual_abstractor = training_args.tune_visual_abstractor
    ic(training_args.tune_visual_abstractor)
    model.requires_grad_(True)
    if training_args.tune_visual_abstractor:
        # model.requires_grad_(False)
        for p in model.get_model().visual_abstractor.parameters():
            p.requires_grad = True

    model.config.freeze_vision_model = training_args.freeze_vision_model
    ic(training_args.freeze_vision_model)
    if training_args.freeze_vision_model:
        for p in model.get_model().vision_model.parameters():
            p.requires_grad = False

    model.config.visual_abstractor_lr = training_args.visual_abstractor_lr


    if training_args.bits in [4, 8]:
        from peft.tuners.lora import LoraLayer
        for name, module in model.named_modules():
            if isinstance(module, LoraLayer):
                if training_args.bf16:
                    module = module.to(torch.bfloat16)
            if 'norm' in name:
                module = module.to(torch.float32)
            if 'lm_head' in name or 'embed_tokens' in name:
                if hasattr(module, 'weight'):
                    if training_args.bf16 and module.weight.dtype == torch.float32:
                        module = module.to(torch.bfloat16)

    data_module = make_supervised_data_module(tokenizer=tokenizer,
                                              data_args=data_args)
    trainer = MPLUGOwl2Trainer(model=model,
                               tokenizer=tokenizer,
                               args=training_args,
                               **data_module)

    # if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")):
    #     trainer.train(resume_from_checkpoint=True)
    # else:
    #     trainer.train()

    # TODO I dont like auto resume << REMOVE IT AND UNCOMMENT THE ABOVE CODE
    trainer.train()

    trainer.save_state()

    model.config.use_cache = True

    if training_args.lora_enable:
        state_dict = get_peft_state_maybe_zero_3(
            model.named_parameters(), training_args.lora_bias
        )
        non_lora_state_dict = get_peft_state_non_lora_maybe_zero_3(
            model.named_parameters()
        )
        if training_args.local_rank == 0 or training_args.local_rank == -1:
            model.config.save_pretrained(training_args.output_dir)
            model.save_pretrained(training_args.output_dir, state_dict=state_dict)
            torch.save(non_lora_state_dict, os.path.join(training_args.output_dir, 'non_lora_trainables.bin'))
    else:
        safe_save_model_for_hf_trainer(trainer=trainer,
                                       output_dir=training_args.output_dir)


if __name__ == "__main__":
    train()