Tonic committed on
Commit
18d32fd
β€’
1 Parent(s): dec5480

add context

Browse files
Files changed (2) hide show
  1. app.py +22 -14
  2. globe.py +20 -1
app.py CHANGED
@@ -13,7 +13,7 @@ model = AutoModelForCausalLM.from_pretrained(model_path)
13
  pipe = pipeline("text-generation", model=model_path)
14
  pipe.tokenizer = tokenizer # Assign tokenizer manually
15
 
16
- def create_prompt(system_message, user_message, tool_definition=""):
17
  if tool_definition:
18
  return f"""<extra_id_0>System
19
  {system_message}
@@ -22,7 +22,7 @@ def create_prompt(system_message, user_message, tool_definition=""):
22
  {tool_definition}
23
  </tool>
24
  <context>
25
- The current date is 2023-06-01.
26
  </context>
27
 
28
  <extra_id_1>User
@@ -33,8 +33,8 @@ The current date is 2023-06-01.
33
  return f"<extra_id_0>System\n{system_message}\n\n<extra_id_1>User\n{user_message}\n<extra_id_1>Assistant\n"
34
 
35
  @spaces.GPU
36
- def generate_response(message, history, system_message, max_tokens, temperature, top_p, use_pipeline=False, tool_definition=""):
37
- full_prompt = create_prompt(system_message, message, tool_definition)
38
 
39
  if use_pipeline:
40
  messages = [
@@ -73,22 +73,28 @@ def generate_response(message, history, system_message, max_tokens, temperature,
73
  return assistant_response
74
 
75
  with gr.Blocks() as demo:
76
- gr.Markdown("# πŸ€– Nemotron-Mini-4B-Instruct Demo with Custom Function Calling")
77
- gr.Markdown("This demo showcases the Nemotron-Mini-4B-Instruct model from NVIDIA, including optional custom function calling.")
78
-
 
79
  with gr.Row():
80
  with gr.Column(scale=3):
81
  chatbot = gr.Chatbot(height=400)
82
  msg = gr.Textbox(label="User Input", placeholder="Ask a question or request a task...")
83
- clear = gr.Button("Clear")
84
-
85
- with gr.Column(scale=2):
86
  system_message = gr.Textbox(
87
  label="System Message",
88
  value="You are a helpful AI assistant.",
89
  lines=2,
90
  placeholder="Set the AI's behavior and context..."
91
  )
 
 
 
 
 
92
  max_tokens = gr.Slider(minimum=1, maximum=1024, value=256, step=1, label="Max Tokens")
93
  temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature")
94
  top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p")
@@ -100,20 +106,22 @@ with gr.Blocks() as demo:
100
  value=customtool,
101
  lines=15,
102
  language="json"
103
- # placeholder="Enter the JSON definition of your custom tool..."
104
  )
105
 
106
  def user(user_message, history):
107
  return "", history + [[user_message, None]]
108
 
109
- def bot(history, system_message, max_tokens, temperature, top_p, use_pipeline, tool_definition):
110
  user_message = history[-1][0]
111
- bot_message = generate_response(user_message, history, system_message, max_tokens, temperature, top_p, use_pipeline, tool_definition)
112
  history[-1][1] = bot_message
113
  return history
114
 
115
  msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
116
- bot, [chatbot, system_message, max_tokens, temperature, top_p, use_pipeline, tool_definition], chatbot
 
 
 
117
  )
118
  clear.click(lambda: None, None, chatbot, queue=False)
119
 
 
13
  pipe = pipeline("text-generation", model=model_path)
14
  pipe.tokenizer = tokenizer # Assign tokenizer manually
15
 
16
+ def create_prompt(system_message, user_message, tool_definition="", context=""):
17
  if tool_definition:
18
  return f"""<extra_id_0>System
19
  {system_message}
 
22
  {tool_definition}
23
  </tool>
24
  <context>
25
+ {context}
26
  </context>
27
 
28
  <extra_id_1>User
 
33
  return f"<extra_id_0>System\n{system_message}\n\n<extra_id_1>User\n{user_message}\n<extra_id_1>Assistant\n"
34
 
35
  @spaces.GPU
36
+ def generate_response(message, history, system_message, max_tokens, temperature, top_p, use_pipeline=False, tool_definition="", context=""):
37
+ full_prompt = create_prompt(system_message, message, tool_definition, context)
38
 
39
  if use_pipeline:
40
  messages = [
 
73
  return assistant_response
74
 
75
  with gr.Blocks() as demo:
76
+ with gr.Row():
77
+ gr.Markdown(title)
78
+ with gr.Row():
79
+ gr.Markdown(description)
80
  with gr.Row():
81
  with gr.Column(scale=3):
82
  chatbot = gr.Chatbot(height=400)
83
  msg = gr.Textbox(label="User Input", placeholder="Ask a question or request a task...")
84
+ with gr.Row():
85
+ send = gr.Button("Send")
86
+ clear = gr.Button("Clear")
87
  system_message = gr.Textbox(
88
  label="System Message",
89
  value="You are a helpful AI assistant.",
90
  lines=2,
91
  placeholder="Set the AI's behavior and context..."
92
  )
93
+ context = gr.Textbox(
94
+ label="Context",
95
+ lines=2,
96
+ placeholder="Enter additional context information..."
97
+ )
98
  max_tokens = gr.Slider(minimum=1, maximum=1024, value=256, step=1, label="Max Tokens")
99
  temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature")
100
  top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p")
 
106
  value=customtool,
107
  lines=15,
108
  language="json"
 
109
  )
110
 
111
  def user(user_message, history):
112
  return "", history + [[user_message, None]]
113
 
114
+ def bot(history, system_message, max_tokens, temperature, top_p, use_pipeline, tool_definition, context):
115
  user_message = history[-1][0]
116
+ bot_message = generate_response(user_message, history, system_message, max_tokens, temperature, top_p, use_pipeline, tool_definition, context)
117
  history[-1][1] = bot_message
118
  return history
119
 
120
  msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
121
+ bot, [chatbot, system_message, max_tokens, temperature, top_p, use_pipeline, tool_definition, context], chatbot
122
+ )
123
+ send.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
124
+ bot, [chatbot, system_message, max_tokens, temperature, top_p, use_pipeline, tool_definition, context], chatbot
125
  )
126
  clear.click(lambda: None, None, chatbot, queue=False)
127
 
globe.py CHANGED
@@ -39,4 +39,23 @@ customtool = """{
39
  },
40
  "required": ["param1"]
41
  }
42
- }"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
  },
40
  "required": ["param1"]
41
  }
42
+ }"""
43
+
44
+ example = """{{
45
+ "name": "get_current_weather",
46
+ "description": "Get the current weather in a given location",
47
+ "parameters": {{
48
+ "type": "object",
49
+ "properties": {{
50
+ "location": {{
51
+ "type": "string",
52
+ "description": "The city and state, e.g. San Francisco, CA"
53
+ }},
54
+ "unit": {{
55
+ "type": "string",
56
+ "enum": ["celsius", "fahrenheit"]
57
+ }}
58
+ }},
59
+ "required": ["location"]
60
+ }}
61
+ }}"""