Spaces:
Running
on
Zero
Running
on
Zero
Kohaku-Blueleaf
committed on
Commit
•
20e398f
1
Parent(s):
728f439
correct way to clone model
Browse files
app.py
CHANGED
@@ -3,7 +3,7 @@ from time import time_ns
|
|
3 |
|
4 |
import gradio as gr
|
5 |
import torch
|
6 |
-
|
7 |
from llama_cpp import Llama, LLAMA_SPLIT_MODE_NONE
|
8 |
from transformers import LlamaForCausalLM, LlamaTokenizer
|
9 |
|
@@ -110,12 +110,11 @@ masterpiece, newest, absurdres, {rating}"""
|
|
110 |
|
111 |
|
112 |
if __name__ == "__main__":
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
f.write(data)
|
117 |
text_model = Llama(
|
118 |
-
"./model.gguf",
|
119 |
n_ctx=384,
|
120 |
verbose=False,
|
121 |
)
|
|
|
3 |
|
4 |
import gradio as gr
|
5 |
import torch
|
6 |
+
from huggingface_hub import Repository
|
7 |
from llama_cpp import Llama, LLAMA_SPLIT_MODE_NONE
|
8 |
from transformers import LlamaForCausalLM, LlamaTokenizer
|
9 |
|
|
|
110 |
|
111 |
|
112 |
if __name__ == "__main__":
|
113 |
+
repo = Repository(
|
114 |
+
local_dir="./model", clone_from="https://huggingface.co/KBlueLeaf/DanTagGen"
|
115 |
+
)
|
|
|
116 |
text_model = Llama(
|
117 |
+
"./model/model.gguf",
|
118 |
n_ctx=384,
|
119 |
verbose=False,
|
120 |
)
|