Spaces:
Running
on
Zero
Running
on
Zero
DongfuJiang
committed on
Commit
•
322dcb3
1
Parent(s):
742d7b5
update
Browse files
app.py
CHANGED
@@ -60,13 +60,14 @@ Your evaluation output:
|
|
60 |
# )
|
61 |
scorer = TIGERScorer(model_name="TIGER-Lab/TIGERScore-13B")
|
62 |
|
|
|
63 |
def generate_text_hf(input_context, generation_instruction, hypo_output, max_new_tokens=1024, temperature=0.7, top_p=1.0):
|
64 |
global scorer
|
65 |
scorer.model = scorer.model.to("cuda")
|
66 |
|
67 |
for output in scorer.generate_stream(generation_instruction, hypo_output, input_context, max_new_tokens=max_new_tokens, temperature=temperature, top_p=top_p):
|
68 |
yield output
|
69 |
-
|
70 |
def generate_text_llamacpp(input_context, generation_instruction, hypo_output, max_new_tokens=1024, temperature=0.7, top_p=1.0):
|
71 |
global llm
|
72 |
prompt_template = Template(TEMPLATE)
|
|
|
60 |
# )
|
61 |
scorer = TIGERScorer(model_name="TIGER-Lab/TIGERScore-13B")
|
62 |
|
63 |
+
@spaces.GPU(duration=60)
|
64 |
def generate_text_hf(input_context, generation_instruction, hypo_output, max_new_tokens=1024, temperature=0.7, top_p=1.0):
|
65 |
global scorer
|
66 |
scorer.model = scorer.model.to("cuda")
|
67 |
|
68 |
for output in scorer.generate_stream(generation_instruction, hypo_output, input_context, max_new_tokens=max_new_tokens, temperature=temperature, top_p=top_p):
|
69 |
yield output
|
70 |
+
|
71 |
def generate_text_llamacpp(input_context, generation_instruction, hypo_output, max_new_tokens=1024, temperature=0.7, top_p=1.0):
|
72 |
global llm
|
73 |
prompt_template = Template(TEMPLATE)
|