iandennismiller committed on
Commit d443dcd
1 Parent(s): 22e19ef

minor text changes

Files changed (5)
  1. .gitignore +1 -1
  2. Makefile +2 -0
  3. bin/llama-chat-ui.sh +2 -1
  4. bin/llama-server.sh +3 -2
  5. bin/llama.sh +7 -7
.gitignore CHANGED
@@ -1,3 +1,3 @@
-/**/main*.log
+/**/*.log
 /local/characters/*.json
 /local/prompts/*.txt
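The broadened pattern now ignores every *.log file at any depth, not just main*.log. A quick way to verify, using the standard git check-ignore command (the sample path here is hypothetical):

    git check-ignore -v local/llama-server.log
    # expected: .gitignore:1:/**/*.log	local/llama-server.log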
Makefile ADDED
@@ -0,0 +1,2 @@
+install:
+	./install.sh
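The new target just wraps the repository's existing installer script, so installation follows the usual convention:

    make install    # equivalent to running ./install.sh directly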
bin/llama-chat-ui.sh CHANGED
@@ -21,4 +21,5 @@ fi
 # start server and pipe stdout+stderr to log file
 cd ~/Work/chat-ui || exit
 npm run dev > "$CHAT_UI_LOG" 2>&1 &
-echo "Started chat-ui server"
+
+echo "Started chat-ui server. Logging to $CHAT_UI_LOG. UI available from http://localhost:5173/"
bin/llama-server.sh CHANGED
@@ -16,7 +16,7 @@ source "$HOME/.config/llama/llama-server.conf"
 # if arg1 is "stop" then pkill the llama-server process
 if [[ $# -eq 1 ]] && [[ $1 == "stop" ]]; then
     echo "Stopping llama server"
-    pkill -f "llama-server"
+    pkill -f "llama-server --model"
     echo "ok"
     exit
 fi
@@ -28,4 +28,5 @@ llama-server \
     --n-gpu-layers 1 \
     --port "$LLAMA_PORT" \
     > "$LLAMA_LOG" 2>&1 &
-echo "Started llama.cpp server"
+
+echo "Started llama.cpp server. Logging to $LLAMA_LOG"
bin/llama.sh CHANGED
@@ -5,17 +5,17 @@ function llama_init_environment {
     if [ ! -f "$HOME/.config/llama/llama-main.conf" ]; then
         mkdir -p "$HOME/.config/llama"
         cat <<EOF > "$HOME/.config/llama/llama-main.conf"
-LLAMA_TEMPERATURE=0.1
-LLAMA_CONTEXT_SIZE=4096
-LLAMA_REPETITION_PENALTY=1.15
-LLAMA_TOP_P=1.0
-LLAMA_MIN_P=0.3
-LLAMA_TOP_K=0
-LLAMA_TEMPLATE=chatml
 LLAMA_MODEL_NAME=teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf
+LLAMA_TEMPLATE=chatml
 LLAMA_MODELS_PATH=$HOME/.ai/models/llama
 LLAMA_PROMPT_PATH=$HOME/.local/share/llama/prompts
 LLAMA_CACHE_PATH=$HOME/.cache/llama
+LLAMA_CONTEXT_SIZE=4096
+LLAMA_TEMPERATURE=1.6
+LLAMA_TOP_P=1.0
+LLAMA_MIN_P=0.1
+LLAMA_TOP_K=0
+LLAMA_REPETITION_PENALTY=1.15
 EOF
     fi
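Beyond reordering, the defaults move from a conservative low-temperature profile (temperature 0.1, min-p 0.3) to what looks like a min-p sampling profile: in llama.cpp, top-p 1.0 and top-k 0 disable those filters, leaving min-p 0.1 to discard low-probability tokens so the much higher temperature of 1.6 remains coherent. A minimal sketch of how these variables presumably reach llama.cpp follows; the flags are real llama.cpp options, but the actual invocation lives elsewhere in llama.sh and is assumed here:

    # load the generated defaults, then forward them to llama.cpp's main binary
    source "$HOME/.config/llama/llama-main.conf"
    main \
        --model "$LLAMA_MODELS_PATH/$LLAMA_MODEL_NAME" \
        --ctx-size "$LLAMA_CONTEXT_SIZE" \
        --temp "$LLAMA_TEMPERATURE" \
        --top-p "$LLAMA_TOP_P" \
        --min-p "$LLAMA_MIN_P" \
        --top-k "$LLAMA_TOP_K" \
        --repeat-penalty "$LLAMA_REPETITION_PENALTY"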