Hugging Face Spaces — app_dialogue.py (IDEFICS demo)
Commit: "fix default values" — repairs the broken slider defaults and the
comma-less parameter list that were causing the Space's "Runtime error" status.
Changed file: app_dialogue.py (+15 −15)
Unified diff (leading indentation reconstructed — the page scrape dropped all
leading whitespace; verify against the original file):

@@ -303,7 +303,7 @@ with gr.Blocks(title="IDEFICS", theme=gr.themes.Base()) as demo:
             top_p = gr.Slider(
                 minimum=0.0,
                 maximum=1.0,
-                value=0.
+                value=0.95,
                 step=0.1,
                 interactive=True,
                 label="Top P",
@@ -326,7 +326,7 @@ with gr.Blocks(title="IDEFICS", theme=gr.themes.Base()) as demo:
             )
             repetition_penalty = gr.Slider(
                 minimum=0.0,
-                maximum=
+                maximum=5.0,
                 value=1.0,
                 step=0.1,
                 interactive=True,
@@ -342,7 +342,7 @@ with gr.Blocks(title="IDEFICS", theme=gr.themes.Base()) as demo:
             )
             length_penalty = gr.Slider(
                 minimum=0.0,
-                maximum=
+                maximum=5.0,
                 value=1.0,
                 step=0.1,
                 interactive=True,
@@ -358,9 +358,9 @@ with gr.Blocks(title="IDEFICS", theme=gr.themes.Base()) as demo:
             )
             penalty_alpha = gr.Slider(
                 minimum=0.0,
-                maximum=
+                maximum=5.0,
                 value=0.95,
-                step=
+                step=0.05,
                 interactive=True,
                 label="Penalty alpha",
             )
@@ -435,16 +435,16 @@ with gr.Blocks(title="IDEFICS", theme=gr.themes.Base()) as demo:
     def model_inference(
         user_prompt,
         chat_history,
-        decoding_strategy
-        temperature
-        no_repeat_ngram_size
-        max_new_tokens
-        min_length
-        repetition_penalty
-        length_penalty
-        top_k
-        top_p
-        penalty_alpha
+        decoding_strategy,
+        temperature,
+        no_repeat_ngram_size,
+        max_new_tokens,
+        min_length,
+        repetition_penalty,
+        length_penalty,
+        top_k,
+        top_p,
+        penalty_alpha,
     ):
         global processor, model, tokenizer
         # temperature = 1.0
|
|
Resulting code after the commit (changed regions only; indentation
reconstructed — the page scrape dropped leading whitespace):

Lines 303–309:
            top_p = gr.Slider(
                minimum=0.0,
                maximum=1.0,
                value=0.95,
                step=0.1,
                interactive=True,
                label="Top P",

Lines 326–332:
            )
            repetition_penalty = gr.Slider(
                minimum=0.0,
                maximum=5.0,
                value=1.0,
                step=0.1,
                interactive=True,

Lines 342–348:
            )
            length_penalty = gr.Slider(
                minimum=0.0,
                maximum=5.0,
                value=1.0,
                step=0.1,
                interactive=True,

Lines 358–366:
            )
            penalty_alpha = gr.Slider(
                minimum=0.0,
                maximum=5.0,
                value=0.95,
                step=0.05,
                interactive=True,
                label="Penalty alpha",
            )

Lines 435–450:
    def model_inference(
        user_prompt,
        chat_history,
        decoding_strategy,
        temperature,
        no_repeat_ngram_size,
        max_new_tokens,
        min_length,
        repetition_penalty,
        length_penalty,
        top_k,
        top_p,
        penalty_alpha,
    ):
        global processor, model, tokenizer
        # temperature = 1.0