Kohaku-Blueleaf committed on
Commit 20e398f
1 Parent(s): 728f439

correct way to clone model

Files changed (1)
  1. app.py +5 -6
app.py CHANGED
@@ -3,7 +3,7 @@ from time import time_ns
 
 import gradio as gr
 import torch
-import requests as rq
+from huggingface_hub import Repository
 from llama_cpp import Llama, LLAMA_SPLIT_MODE_NONE
 from transformers import LlamaForCausalLM, LlamaTokenizer
 
@@ -110,12 +110,11 @@ masterpiece, newest, absurdres, {rating}"""
 
 
 if __name__ == "__main__":
-    if not os.path.isfile("./model.gguf"):
-        data = rq.get("https://huggingface.co/KBlueLeaf/DanTagGen/resolve/main/ggml-model-Q6_K.gguf").content
-        with open("./model.gguf", "wb") as f:
-            f.write(data)
+    repo = Repository(
+        local_dir="./model", clone_from="https://huggingface.co/KBlueLeaf/DanTagGen"
+    )
     text_model = Llama(
-        "./model.gguf",
+        "./model/model.gguf",
         n_ctx=384,
         verbose=False,
     )
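
For context, the committed change swaps a raw requests download of the GGUF file for huggingface_hub's Repository helper, which git-clones the whole KBlueLeaf/DanTagGen repo into ./model and then points llama.cpp at the GGUF inside that checkout. A lighter-weight alternative is sketched below purely as an illustration, not as part of the commit: hf_hub_download fetches a single file into the local Hugging Face cache and returns its path. It assumes the repo contains a file named model.gguf, matching the path the committed code reads (./model/model.gguf).

# Minimal sketch (not part of the commit): download only the GGUF file
# instead of cloning the full repo.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

gguf_path = hf_hub_download(
    repo_id="KBlueLeaf/DanTagGen",  # same repo the commit clones
    filename="model.gguf",          # assumed filename inside the repo
)
text_model = Llama(
    gguf_path,
    n_ctx=384,      # same settings as app.py
    verbose=False,
)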