srijaydeshpande committed on
Commit
31fecb2
1 Parent(s): 178388d

Update app.py

Files changed (1)
  1. app.py +43 -43
app.py CHANGED
@@ -101,7 +101,7 @@ def deidentify_doc(pdftext, maxtokens, temperature, top_probability):
     # n_ctx=8192,
     # )
 
-    chat_template = MessagesFormatterType.LLAMA_3
+    # chat_template = MessagesFormatterType.LLAMA_3
 
     llm = Llama(
         model_path="models/Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
@@ -111,51 +111,51 @@ def deidentify_doc(pdftext, maxtokens, temperature, top_probability):
         n_ctx=8192,
     )
 
-    provider = LlamaCppPythonProvider(llm)
+    # provider = LlamaCppPythonProvider(llm)
 
-    agent = LlamaCppAgent(
-        provider,
-        system_prompt="You are a helpful assistant.",
-        predefined_messages_formatter_type=chat_template,
-        debug_output=True
-    )
+    # agent = LlamaCppAgent(
+    #     provider,
+    #     system_prompt="You are a helpful assistant.",
+    #     predefined_messages_formatter_type=chat_template,
+    #     debug_output=True
+    # )
 
-    settings = provider.get_provider_default_settings()
-    settings.temperature = 0.7
-    settings.top_k = 40
-    settings.top_p = 0.95
-    settings.max_tokens = 2048
-    settings.repeat_penalty = 1.1
-    settings.stream = True
-
-    messages = BasicChatHistory()
-
-    stream = agent.get_chat_response(
-        prompt + ' : ' + pdftext,
-        llm_sampling_settings=settings,
-        chat_history=messages,
-        returns_streaming_generator=True,
-        print_output=False
-    )
+    # settings = provider.get_provider_default_settings()
+    # settings.temperature = 0.7
+    # settings.top_k = 40
+    # settings.top_p = 0.95
+    # settings.max_tokens = 2048
+    # settings.repeat_penalty = 1.1
+    # settings.stream = True
+
+    # messages = BasicChatHistory()
+
+    # stream = agent.get_chat_response(
+    #     prompt + ' : ' + pdftext,
+    #     llm_sampling_settings=settings,
+    #     chat_history=messages,
+    #     returns_streaming_generator=True,
+    #     print_output=False
+    # )
 
-    outputs = ""
-    for output in stream:
-        outputs += output
+    # outputs = ""
+    # for output in stream:
+    #     outputs += output
 
-    return outputs
+    # return outputs
 
-    # output = model.create_chat_completion(
-    #     messages=[
-    #         {"role": "assistant", "content": prompt},
-    #         {
-    #             "role": "user",
-    #             "content": pdftext
-    #         }
-    #     ],
-    #     max_tokens=maxtokens,
-    #     temperature=temperature
-    # )
-    # output = output['choices'][0]['message']['content']
+    output = model.create_chat_completion(
+        messages=[
+            {"role": "assistant", "content": prompt},
+            {
+                "role": "user",
+                "content": pdftext
+            }
+        ],
+        max_tokens=maxtokens,
+        temperature=temperature
+    )
+    output = output['choices'][0]['message']['content']
 
     # prompt = "Perform the following actions on given text: 1. Replace any person age with term [redacted] 2. DO NOT REPLACE ANY MEDICAL MEASUREMENTS 3. Replace only the CALENDAR DATES of format 'day/month/year' with term [redacted]"
     # output = model.create_chat_completion(
@@ -175,7 +175,7 @@ def deidentify_doc(pdftext, maxtokens, temperature, top_probability):
     # print(output)
     # print('-------------------------------------------------------')
 
-    # return outputs
+    return output
 
 def pdf_to_text(files, maxtokens=2048, temperature=0, top_probability=0.95):
     print('Control 0-----------------------------------')
@@ -204,7 +204,7 @@ output_text = gr.Textbox()
 output_path_component = gr.File(label="Select Output Path")
 iface = gr.Interface(
     fn=pdf_to_text,
-    inputs='file',
+    inputs=['file', max_tokens, temp_slider, prob_slider],
     # inputs=["textbox", input_folder_text, "textbox", max_tokens, temp_slider, prob_slider],
     outputs=output_text,
     title='COBIx Endoscopy Report De-Identification',
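
Note on the first change: deidentify_doc drops the llama-cpp-agent streaming stack (LlamaCppAgent, LlamaCppPythonProvider, BasicChatHistory) and calls llama-cpp-python's chat-completion API directly. The new code calls model.create_chat_completion while the visible constructor binds llm, so this sketch assumes both names refer to the same Llama instance; the instruction prompt is a placeholder, since it sits outside the diff hunks. A minimal, untested sketch of the resulting call path:

# Sketch of the call path this commit switches to, assuming
# llama-cpp-python's Llama.create_chat_completion API.
from llama_cpp import Llama

model = Llama(
    model_path="models/Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
    n_ctx=8192,
)

def deidentify_doc(pdftext, maxtokens=2048, temperature=0, top_probability=0.95):
    prompt = "<de-identification instructions>"  # placeholder, not the commit's prompt
    # The commit sends the instructions as an "assistant" turn and the
    # report text as the "user" turn, then unwraps the first choice.
    # Note: top_probability is accepted but not forwarded in this commit.
    output = model.create_chat_completion(
        messages=[
            {"role": "assistant", "content": prompt},
            {"role": "user", "content": pdftext},
        ],
        max_tokens=maxtokens,
        temperature=temperature,
    )
    return output['choices'][0]['message']['content']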
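
Note on the second change: the Gradio interface widens inputs from a single file picker to a list that also feeds maxtokens, temperature, and top_probability into pdf_to_text, matching the function's four parameters. The slider and number components fall outside the visible hunks, so their definitions in this sketch are assumptions:

# Hedged sketch of the Gradio wiring after this commit; the component
# definitions marked "assumed" are illustrative, not from the diff.
import gradio as gr

def pdf_to_text(files, maxtokens=2048, temperature=0, top_probability=0.95):
    # Stub standing in for the app's real function; the signature
    # matches the four inputs wired below.
    return "redacted text"

max_tokens = gr.Number(value=2048, label="Max tokens")       # assumed
temp_slider = gr.Slider(0, 1, value=0, label="Temperature")  # assumed
prob_slider = gr.Slider(0, 1, value=0.95, label="Top-p")     # assumed
output_text = gr.Textbox()

iface = gr.Interface(
    fn=pdf_to_text,
    inputs=['file', max_tokens, temp_slider, prob_slider],
    outputs=output_text,
    title='COBIx Endoscopy Report De-Identification',
)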