qnguyen3 committed on
Commit
deb1a8e
1 Parent(s): f72d17f

Update app.py

Files changed (1)
  1. app.py +2 -3
app.py CHANGED
@@ -15,7 +15,6 @@ MODEL_NAME = MODELS.split("/")[-1]
 TITLE = "<h1><center>raspberry-3b</center></h1>"
 
 DESCRIPTION = f"""
-<h3>MODEL: <a href="https://hf.co/{MODELS}">{MODEL_NAME}</a></h3>
 <center>
 <p>raspberry-3b
 <br>
@@ -37,11 +36,11 @@ h3 {
 """
 
 model = AutoModelForCausalLM.from_pretrained(
-    MODELS,
+    MODEL_ID,
     torch_dtype=torch.float16,
     device_map="auto",
 )
-tokenizer = AutoTokenizer.from_pretrained(MODELS)
+tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
 
 @spaces.GPU
 def stream_chat(message: str, history: list, temperature: float, max_new_tokens: int, top_p: float, top_k: int, penalty: float):
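
For context, a minimal sketch of how the affected model-loading block reads after this commit. It assumes MODEL_ID is defined earlier in app.py as a Hugging Face Hub repo id (the diff only shows its use), and the placeholder value below is hypothetical, not the Space's actual configuration.

# Sketch of the updated model-loading block in app.py after this commit.
# MODEL_ID is assumed to be defined earlier in the file; the value here is a
# hypothetical placeholder for illustration only.
import torch
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "user/raspberry-3b"  # hypothetical repo id

model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float16,  # load weights in half precision
    device_map="auto",          # let accelerate place layers on available devices
)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)

@spaces.GPU  # ZeroGPU decorator: a GPU is allocated only while this function runs
def stream_chat(message: str, history: list, temperature: float, max_new_tokens: int, top_p: float, top_k: int, penalty: float):
    ...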