Spaces:
Sleeping
Sleeping
srijaydeshpande
committed on
Commit
•
2fd0cb7
1
Parent(s):
25d7899
Update app.py
Browse files
app.py
CHANGED
@@ -19,18 +19,18 @@ from llama_cpp_agent.chat_history.messages import Roles
|
|
19 |
# subprocess.run('pip install llama-cpp-agent==0.2.10', shell=True)
|
20 |
|
21 |
|
22 |
-
# hf_hub_download(
|
23 |
-
# repo_id="QuantFactory/Meta-Llama-3-8B-Instruct-GGUF",
|
24 |
-
# filename="Meta-Llama-3-8B-Instruct.Q8_0.gguf",
|
25 |
-
# local_dir = "./models"
|
26 |
-
# )
|
27 |
-
|
28 |
hf_hub_download(
|
29 |
-
repo_id="bartowski/Meta-Llama-3-70B-Instruct-GGUF",
|
30 |
-
filename="Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
|
31 |
local_dir = "./models"
|
32 |
)
|
33 |
|
|
|
|
|
|
|
|
|
|
|
|
|
34 |
def process_document(pdf_path, page_ids=None):
|
35 |
extracted_pages = extract_pages(pdf_path, page_numbers=page_ids)
|
36 |
|
@@ -87,7 +87,7 @@ def deidentify_doc(pdftext, maxtokens, temperature, top_probability):
|
|
87 |
prompt = "In the following text replace any person name and any address with term [redacted], replace any Date of Birth and NHS number with term [redacted]. Output the modified text."
|
88 |
|
89 |
llm = Llama(
|
90 |
-
model_path="models/Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
|
91 |
flash_attn=True,
|
92 |
n_gpu_layers=81,
|
93 |
n_batch=1024,
|
|
|
19 |
# subprocess.run('pip install llama-cpp-agent==0.2.10', shell=True)
|
20 |
|
21 |
|
|
|
|
|
|
|
|
|
|
|
|
|
22 |
hf_hub_download(
|
23 |
+
repo_id="QuantFactory/Meta-Llama-3-8B-Instruct-GGUF",
|
24 |
+
filename="Meta-Llama-3-8B-Instruct.Q8_0.gguf",
|
25 |
local_dir = "./models"
|
26 |
)
|
27 |
|
28 |
+
# hf_hub_download(
|
29 |
+
# repo_id="bartowski/Meta-Llama-3-70B-Instruct-GGUF",
|
30 |
+
# filename="Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
|
31 |
+
# local_dir = "./models"
|
32 |
+
# )
|
33 |
+
|
34 |
def process_document(pdf_path, page_ids=None):
|
35 |
extracted_pages = extract_pages(pdf_path, page_numbers=page_ids)
|
36 |
|
|
|
87 |
prompt = "In the following text replace any person name and any address with term [redacted], replace any Date of Birth and NHS number with term [redacted]. Output the modified text."
|
88 |
|
89 |
llm = Llama(
|
90 |
+
model_path="models/Meta-Llama-3-8B-Instruct.Q8_0.gguf",
|
91 |
flash_attn=True,
|
92 |
n_gpu_layers=81,
|
93 |
n_batch=1024,
|