Francisco Santos committed
Commit • 728c2e4
1 Parent(s): 6fe978f
running on cpu
Files changed:
- app.py +8 -8
- chartqa_example.png +0 -0
- docvqa_example.png +0 -3
- gradio_cached_examples/14/log.csv +2 -0
- gradio_cached_examples/24/log.csv +2 -0
- gradio_cached_examples/34/log.csv +2 -0
- gradio_cached_examples/44/log.csv +2 -0
- infographics_example.jpeg +0 -0
- requirements.txt +0 -3
- screen2words_ui_example.png +0 -0
app.py
CHANGED
@@ -6,39 +6,39 @@ import spaces

 @spaces.GPU
 def infer_infographics(image, question):
-    model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-ai2d-base")
+    model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-ai2d-base")
     processor = Pix2StructProcessor.from_pretrained("google/pix2struct-ai2d-base")

-    inputs = processor(images=image, text=question, return_tensors="pt")
+    inputs = processor(images=image, text=question, return_tensors="pt")

     predictions = model.generate(**inputs)
     return processor.decode(predictions[0], skip_special_tokens=True)

 @spaces.GPU
 def infer_ui(image, question):
-    model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-screen2words-base")
+    model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-screen2words-base")
     processor = Pix2StructProcessor.from_pretrained("google/pix2struct-screen2words-base")

-    inputs = processor(images=image,text=question, return_tensors="pt")
+    inputs = processor(images=image,text=question, return_tensors="pt")

     predictions = model.generate(**inputs)
     return processor.decode(predictions[0], skip_special_tokens=True)

 @spaces.GPU
 def infer_chart(image, question):
-    model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-chartqa-base")
+    model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-chartqa-base")
     processor = Pix2StructProcessor.from_pretrained("google/pix2struct-chartqa-base")

-    inputs = processor(images=image, text=question, return_tensors="pt")
+    inputs = processor(images=image, text=question, return_tensors="pt")

     predictions = model.generate(**inputs)
     return processor.decode(predictions[0], skip_special_tokens=True)

 @spaces.GPU
 def infer_doc(image, question):
-    model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-docvqa-base")
+    model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-docvqa-base")
     processor = Pix2StructProcessor.from_pretrained("google/pix2struct-docvqa-base")
-    inputs = processor(images=image, text=question, return_tensors="pt")
+    inputs = processor(images=image, text=question, return_tensors="pt")
     predictions = model.generate(**inputs)
     return processor.decode(predictions[0], skip_special_tokens=True)

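The commit message says the Space now runs on CPU, but the in-line edit to the changed model and inputs lines is not visible in the hunk above. As a point of comparison only, here is a minimal sketch, not the committed code, of how one of these functions could pick its device at runtime; the device variable and the CPU fallback are assumptions added for illustration.

# Hypothetical sketch: a device-agnostic variant of infer_doc, not the committed change.
import torch
from transformers import Pix2StructForConditionalGeneration, Pix2StructProcessor

# Assumption: fall back to CPU when no GPU is available.
device = "cuda" if torch.cuda.is_available() else "cpu"

def infer_doc(image, question):
    # Load the DocVQA checkpoint and move it to the chosen device.
    model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-docvqa-base").to(device)
    processor = Pix2StructProcessor.from_pretrained("google/pix2struct-docvqa-base")
    # The processor returns a BatchFeature, which can also be moved with .to().
    inputs = processor(images=image, text=question, return_tensors="pt").to(device)
    predictions = model.generate(**inputs)
    return processor.decode(predictions[0], skip_special_tokens=True)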
chartqa_example.png
DELETED
Binary file (27.5 kB)
docvqa_example.png
DELETED
Git LFS Details
gradio_cached_examples/14/log.csv
ADDED
@@ -0,0 +1,2 @@
+Answer,flag,username,timestamp
+5,,,2024-04-02 21:26:18.615723
gradio_cached_examples/24/log.csv
ADDED
@@ -0,0 +1,2 @@
+Answer,flag,username,timestamp
+Vegetarianism,,,2024-04-02 21:27:33.264957
gradio_cached_examples/34/log.csv
ADDED
@@ -0,0 +1,2 @@
+Caption,flag,username,timestamp
+Your statistics,,,2024-04-02 21:27:38.150716
gradio_cached_examples/44/log.csv
ADDED
@@ -0,0 +1,2 @@
+Caption,flag,username,timestamp
+40%,,,2024-04-02 21:27:43.219638
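The four log.csv files above match what Gradio writes when example caching is enabled: one directory per set of cached examples, with a column named after the output component ("Answer" or "Caption") plus flag, username, and timestamp. Below is a minimal sketch, assuming a single gr.Interface per tab, of wiring that would produce such a file; the stub function, component labels, and example row are hypothetical and not taken from this commit.

# Minimal sketch, assuming one gr.Interface per tab; labels and the example row are hypothetical.
import gradio as gr

def infer_doc(image, question):
    # Stand-in for the real infer_doc defined in app.py above.
    return "placeholder answer"

demo = gr.Interface(
    fn=infer_doc,
    inputs=[gr.Image(type="pil"), gr.Textbox(label="Question")],
    outputs=gr.Textbox(label="Answer"),  # the output label becomes the first column of log.csv
    examples=[["example.png", "What is the total?"]],  # hypothetical example row
    cache_examples=True,  # runs each example once and writes gradio_cached_examples/<id>/log.csv
)

if __name__ == "__main__":
    demo.launch()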
infographics_example.jpeg
DELETED
Binary file (10.3 kB)
requirements.txt
DELETED
@@ -1,3 +0,0 @@
-transformers
-torch==2.0.1
-spaces
screen2words_ui_example.png
DELETED
Binary file (82.8 kB)