Wauplin HF staff nsarrazin HF staff committed on
Commit
3cbea34
1 Parent(s): ea856f7

Standardize HF_ACCESS_TOKEN -> HF_TOKEN (#610)

Browse files

* Standardize HF_ACCESS_TOKEN -> HF_TOKEN

* Replace HF_ACCESS_TOKEN by HF_TOKEN in .env

* Add legacy support for HF_ACCESS_TOKEN

---------

Co-authored-by: Nathan Sarrazin <[email protected]>

.env CHANGED
@@ -6,10 +6,12 @@ MONGODB_DB_NAME=chat-ui
6
  MONGODB_DIRECT_CONNECTION=false
7
 
8
  COOKIE_NAME=hf-chat
9
- HF_ACCESS_TOKEN=#hf_<token> from from https://huggingface.co/settings/token
10
  HF_API_ROOT=https://api-inference.huggingface.co/models
11
  OPENAI_API_KEY=#your openai api key here
12
 
 
 
13
  # used to activate search with web functionality. disabled if none are defined. choose one of the following:
14
  YDC_API_KEY=#your docs.you.com api key here
15
  SERPER_API_KEY=#your serper.dev api key here
 
6
  MONGODB_DIRECT_CONNECTION=false
7
 
8
  COOKIE_NAME=hf-chat
9
+ HF_TOKEN=#hf_<token> from https://huggingface.co/settings/token
10
  HF_API_ROOT=https://api-inference.huggingface.co/models
11
  OPENAI_API_KEY=#your openai api key here
12
 
13
+ HF_ACCESS_TOKEN=#LEGACY! Use HF_TOKEN instead
14
+
15
  # used to activate search with web functionality. disabled if none are defined. choose one of the following:
16
  YDC_API_KEY=#your docs.you.com api key here
17
  SERPER_API_KEY=#your serper.dev api key here
.github/workflows/deploy-release.yml CHANGED
@@ -24,7 +24,7 @@ jobs:
24
  SERPER_API_KEY: ${{ secrets.SERPER_API_KEY }}
25
  OPENID_CONFIG: ${{ secrets.OPENID_CONFIG }}
26
  MONGODB_URL: ${{ secrets.MONGODB_URL }}
27
- HF_ACCESS_TOKEN: ${{ secrets.HF_ACCESS_TOKEN }}
28
  run: npm run updateProdEnv
29
  sync-to-hub:
30
  runs-on: ubuntu-latest
@@ -39,5 +39,5 @@ jobs:
39
  lfs: true
40
  - name: Push to hub
41
  env:
42
- HF_TOKEN: ${{ secrets.HF_TOKEN }}
43
- run: git push https://nsarrazin:$HF_TOKEN@huggingface.co/spaces/huggingchat/chat-ui main
 
24
  SERPER_API_KEY: ${{ secrets.SERPER_API_KEY }}
25
  OPENID_CONFIG: ${{ secrets.OPENID_CONFIG }}
26
  MONGODB_URL: ${{ secrets.MONGODB_URL }}
27
+ HF_DEPLOYMENT_TOKEN: ${{ secrets.HF_DEPLOYMENT_TOKEN }}
28
  run: npm run updateProdEnv
29
  sync-to-hub:
30
  runs-on: ubuntu-latest
 
39
  lfs: true
40
  - name: Push to hub
41
  env:
42
+ HF_DEPLOYMENT_TOKEN: ${{ secrets.HF_DEPLOYMENT_TOKEN }}
43
+ run: git push https://nsarrazin:$HF_DEPLOYMENT_TOKEN@huggingface.co/spaces/huggingchat/chat-ui main
.github/workflows/deploy-staging.yml CHANGED
@@ -20,5 +20,5 @@ jobs:
20
  lfs: true
21
  - name: Push to hub
22
  env:
23
- HF_TOKEN: ${{ secrets.HF_TOKEN }}
24
- run: git push https://nsarrazin:$HF_TOKEN@huggingface.co/spaces/huggingchat/chat-ui-staging main
 
20
  lfs: true
21
  - name: Push to hub
22
  env:
23
+ HF_DEPLOYMENT_TOKEN: ${{ secrets.HF_DEPLOYMENT_TOKEN }}
24
+ run: git push https://nsarrazin:$HF_DEPLOYMENT_TOKEN@huggingface.co/spaces/huggingchat/chat-ui-staging main
README.md CHANGED
@@ -30,7 +30,7 @@ If you don't want to configure, setup, and launch your own Chat UI yourself, you
30
 
31
  You can deploy your own customized Chat UI instance with any supported [LLM](https://huggingface.co/models?pipeline_tag=text-generation&sort=trending) of your choice on [Hugging Face Spaces](https://huggingface.co/spaces). To do so, use the chat-ui template [available here](https://huggingface.co/new-space?template=huggingchat/chat-ui-template).
32
 
33
- Set `HUGGING_FACE_HUB_TOKEN` in [Space secrets](https://huggingface.co/docs/hub/spaces-overview#managing-secrets-and-environment-variables) to deploy a model with gated access or a model in a private repository. It's also compatible with [Inference for PROs](https://huggingface.co/blog/inference-pro) curated list of powerful models with higher rate limits. Make sure to create your personal token first in your [User Access Tokens settings](https://huggingface.co/settings/tokens).
34
 
35
  Read the full tutorial [here](https://huggingface.co/docs/hub/spaces-sdks-docker-chatui#chatui-on-spaces).
36
 
@@ -42,7 +42,7 @@ Start by creating a `.env.local` file in the root of the repository. The bare mi
42
 
43
  ```env
44
  MONGODB_URL=<the URL to your MongoDB instance>
45
- HF_ACCESS_TOKEN=<your access token>
46
  ```
47
 
48
  ### Database
@@ -397,7 +397,7 @@ You can then add the generated information and the `authorization` parameter to
397
  ]
398
  ```
399
 
400
- Please note that if `HF_ACCESS_TOKEN` is also set or not empty, it will take precedence.
401
 
402
  #### Models hosted on multiple custom endpoints
403
 
 
30
 
31
  You can deploy your own customized Chat UI instance with any supported [LLM](https://huggingface.co/models?pipeline_tag=text-generation&sort=trending) of your choice on [Hugging Face Spaces](https://huggingface.co/spaces). To do so, use the chat-ui template [available here](https://huggingface.co/new-space?template=huggingchat/chat-ui-template).
32
 
33
+ Set `HF_TOKEN` in [Space secrets](https://huggingface.co/docs/hub/spaces-overview#managing-secrets-and-environment-variables) to deploy a model with gated access or a model in a private repository. It's also compatible with [Inference for PROs](https://huggingface.co/blog/inference-pro) curated list of powerful models with higher rate limits. Make sure to create your personal token first in your [User Access Tokens settings](https://huggingface.co/settings/tokens).
34
 
35
  Read the full tutorial [here](https://huggingface.co/docs/hub/spaces-sdks-docker-chatui#chatui-on-spaces).
36
 
 
42
 
43
  ```env
44
  MONGODB_URL=<the URL to your MongoDB instance>
45
+ HF_TOKEN=<your access token>
46
  ```
47
 
48
  ### Database
 
397
  ]
398
  ```
399
 
400
+ Please note that if `HF_TOKEN` is also set or not empty, it will take precedence.
401
 
402
  #### Models hosted on multiple custom endpoints
403
 
scripts/updateProdEnv.ts CHANGED
@@ -1,11 +1,11 @@
1
  import fs from "fs";
2
 
3
- const HF_TOKEN = process.env.HF_TOKEN; // token used for pushing to hub
4
 
5
  const SERPER_API_KEY = process.env.SERPER_API_KEY;
6
  const OPENID_CONFIG = process.env.OPENID_CONFIG;
7
  const MONGODB_URL = process.env.MONGODB_URL;
8
- const HF_ACCESS_TOKEN = process.env.HF_ACCESS_TOKEN; // token used for API requests in prod
9
 
10
  // Read the content of the file .env.template
11
  const PUBLIC_CONFIG = fs.readFileSync(".env.template", "utf8");
@@ -15,7 +15,7 @@ const full_config = `${PUBLIC_CONFIG}
15
  MONGODB_URL=${MONGODB_URL}
16
  OPENID_CONFIG=${OPENID_CONFIG}
17
  SERPER_API_KEY=${SERPER_API_KEY}
18
- HF_ACCESS_TOKEN=${HF_ACCESS_TOKEN}
19
  `;
20
 
21
  // Make an HTTP POST request to add the space secrets
@@ -27,7 +27,7 @@ fetch(`https://huggingface.co/api/spaces/huggingchat/chat-ui/secrets`, {
27
  description: `Env variable for HuggingChat. Last updated ${new Date().toISOString()}`,
28
  }),
29
  headers: {
30
- Authorization: `Bearer ${HF_TOKEN}`,
31
  "Content-Type": "application/json",
32
  },
33
  });
 
1
  import fs from "fs";
2
 
3
+ const HF_DEPLOYMENT_TOKEN = process.env.HF_DEPLOYMENT_TOKEN; // token used for pushing to hub
4
 
5
  const SERPER_API_KEY = process.env.SERPER_API_KEY;
6
  const OPENID_CONFIG = process.env.OPENID_CONFIG;
7
  const MONGODB_URL = process.env.MONGODB_URL;
8
+ const HF_TOKEN = process.env.HF_TOKEN ?? process.env.HF_ACCESS_TOKEN; // token used for API requests in prod
9
 
10
  // Read the content of the file .env.template
11
  const PUBLIC_CONFIG = fs.readFileSync(".env.template", "utf8");
 
15
  MONGODB_URL=${MONGODB_URL}
16
  OPENID_CONFIG=${OPENID_CONFIG}
17
  SERPER_API_KEY=${SERPER_API_KEY}
18
+ HF_TOKEN=${HF_TOKEN}
19
  `;
20
 
21
  // Make an HTTP POST request to add the space secrets
 
27
  description: `Env variable for HuggingChat. Last updated ${new Date().toISOString()}`,
28
  }),
29
  headers: {
30
+ Authorization: `Bearer ${HF_DEPLOYMENT_TOKEN}`,
31
  "Content-Type": "application/json",
32
  },
33
  });
src/lib/server/endpoints/llamacpp/endpointLlamacpp.ts CHANGED
@@ -1,4 +1,4 @@
1
- import { HF_ACCESS_TOKEN } from "$env/static/private";
2
  import { buildPrompt } from "$lib/buildPrompt";
3
  import type { TextGenerationStreamOutput } from "@huggingface/inference";
4
  import type { Endpoint } from "../endpoints";
@@ -9,7 +9,10 @@ export const endpointLlamacppParametersSchema = z.object({
9
  model: z.any(),
10
  type: z.literal("llamacpp"),
11
  url: z.string().url().default("http://127.0.0.1:8080"),
12
- accessToken: z.string().min(1).default(HF_ACCESS_TOKEN),
 
 
 
13
  });
14
 
15
  export function endpointLlamacpp(
 
1
+ import { HF_ACCESS_TOKEN, HF_TOKEN } from "$env/static/private";
2
  import { buildPrompt } from "$lib/buildPrompt";
3
  import type { TextGenerationStreamOutput } from "@huggingface/inference";
4
  import type { Endpoint } from "../endpoints";
 
9
  model: z.any(),
10
  type: z.literal("llamacpp"),
11
  url: z.string().url().default("http://127.0.0.1:8080"),
12
+ accessToken: z
13
+ .string()
14
+ .min(1)
15
+ .default(HF_TOKEN ?? HF_ACCESS_TOKEN),
16
  });
17
 
18
  export function endpointLlamacpp(
src/lib/server/endpoints/tgi/endpointTgi.ts CHANGED
@@ -1,4 +1,4 @@
1
- import { HF_ACCESS_TOKEN } from "$env/static/private";
2
  import { buildPrompt } from "$lib/buildPrompt";
3
  import { textGenerationStream } from "@huggingface/inference";
4
  import type { Endpoint } from "../endpoints";
@@ -9,7 +9,7 @@ export const endpointTgiParametersSchema = z.object({
9
  model: z.any(),
10
  type: z.literal("tgi"),
11
  url: z.string().url(),
12
- accessToken: z.string().default(HF_ACCESS_TOKEN),
13
  authorization: z.string().optional(),
14
  });
15
 
@@ -35,7 +35,7 @@ export function endpointTgi(input: z.input<typeof endpointTgiParametersSchema>):
35
  use_cache: false,
36
  fetch: async (endpointUrl, info) => {
37
  if (info && authorization && !accessToken) {
38
- // Set authorization header if it is defined and HF_ACCESS_TOKEN is empty
39
  info.headers = {
40
  ...info.headers,
41
  Authorization: authorization,
 
1
+ import { HF_ACCESS_TOKEN, HF_TOKEN } from "$env/static/private";
2
  import { buildPrompt } from "$lib/buildPrompt";
3
  import { textGenerationStream } from "@huggingface/inference";
4
  import type { Endpoint } from "../endpoints";
 
9
  model: z.any(),
10
  type: z.literal("tgi"),
11
  url: z.string().url(),
12
+ accessToken: z.string().default(HF_TOKEN ?? HF_ACCESS_TOKEN),
13
  authorization: z.string().optional(),
14
  });
15
 
 
35
  use_cache: false,
36
  fetch: async (endpointUrl, info) => {
37
  if (info && authorization && !accessToken) {
38
+ // Set authorization header if it is defined and HF_TOKEN is empty
39
  info.headers = {
40
  ...info.headers,
41
  Authorization: authorization,
src/lib/server/models.ts CHANGED
@@ -1,4 +1,11 @@
1
- import { HF_ACCESS_TOKEN, HF_API_ROOT, MODELS, OLD_MODELS, TASK_MODEL } from "$env/static/private";
 
 
 
 
 
 
 
2
  import type { ChatTemplateInput } from "$lib/types/Template";
3
  import { compileTemplate } from "$lib/utils/template";
4
  import { z } from "zod";
@@ -80,7 +87,7 @@ const addEndpoint = (m: Awaited<ReturnType<typeof processModel>>) => ({
80
  return endpointTgi({
81
  type: "tgi",
82
  url: `${HF_API_ROOT}/${m.name}`,
83
- accessToken: HF_ACCESS_TOKEN,
84
  weight: 1,
85
  model: m,
86
  });
 
1
+ import {
2
+ HF_TOKEN,
3
+ HF_API_ROOT,
4
+ MODELS,
5
+ OLD_MODELS,
6
+ TASK_MODEL,
7
+ HF_ACCESS_TOKEN,
8
+ } from "$env/static/private";
9
  import type { ChatTemplateInput } from "$lib/types/Template";
10
  import { compileTemplate } from "$lib/utils/template";
11
  import { z } from "zod";
 
87
  return endpointTgi({
88
  type: "tgi",
89
  url: `${HF_API_ROOT}/${m.name}`,
90
+ accessToken: HF_TOKEN ?? HF_ACCESS_TOKEN,
91
  weight: 1,
92
  model: m,
93
  });