safaricd committed
Commit 0f8bfd3 • 1 Parent(s): 0c1fc1b

Training in progress, epoch 1

adapter_config.json CHANGED
@@ -19,13 +19,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "q_proj",
     "o_proj",
     "k_proj",
+    "up_proj",
+    "q_proj",
     "gate_proj",
     "v_proj",
-    "down_proj",
-    "up_proj"
+    "down_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_rslora": false
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:89519c5a20dacfa95d5083ca09a5714305db29fd2074f42c2585183d301d4411
-size 1803940752
+oid sha256:9f048bfa57bfd8d47cae6f7092daf12bc4719cad2627d816a511a3147140c1d1
+size 1803973520
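The weight file itself only changes its Git LFS pointer here: a new sha256 object id and a slightly larger payload (1,803,973,520 bytes, up from 1,803,940,752). A small sketch for verifying a downloaded copy against the new pointer (the local path is hypothetical):

import hashlib
import os

path = "adapter_model.safetensors"  # hypothetical local path
expected_oid = "9f048bfa57bfd8d47cae6f7092daf12bc4719cad2627d816a511a3147140c1d1"
expected_size = 1803973520

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

assert os.path.getsize(path) == expected_size
assert sha.hexdigest() == expected_oid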
added_tokens.json CHANGED
@@ -1,4 +1,6 @@
 {
+  "<|im_end|>": 32020,
+  "<|im_start|>": 32021,
   "DIALECT": 32018,
   "[/DIALECT]": 32017,
   "[PAD]": 32019,
special_tokens_map.json CHANGED
@@ -1,35 +1,22 @@
 {
   "additional_special_tokens": [
     {
-      "content": "[SQL]",
+      "content": "<|im_end|>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false
     },
     {
-      "content": "[/DIALECT]",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false
-    },
-    {
-      "content": "DIALECT",
+      "content": "<|im_start|>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false
     }
   ],
-  "bos_token": "<s>",
-  "eos_token": "</s>",
-  "pad_token": {
-    "content": "[PAD]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
+  "bos_token": "<|im_start|>",
+  "eos_token": "<|im_end|>",
+  "pad_token": "<|im_end|>",
   "unk_token": "<unk>"
 }
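A special_tokens_map.json of this shape is what transformers writes out after the ChatML markers are registered as special tokens and bos/eos/pad are re-pointed at them. A minimal sketch of that step, assuming a transformers tokenizer (the base checkpoint name and output path are hypothetical):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("codellama/CodeLlama-7b-hf")  # hypothetical base model
tokenizer.add_special_tokens({
    "additional_special_tokens": ["<|im_end|>", "<|im_start|>"],
    "bos_token": "<|im_start|>",
    "eos_token": "<|im_end|>",
    "pad_token": "<|im_end|>",
})
tokenizer.save_pretrained("./tokenizer-out")  # writes special_tokens_map.json alongside tokenizer_config.json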
tmpxros8ucc/_remote_module_non_scriptable.py ADDED
@@ -0,0 +1,81 @@
+from typing import *
+
+import torch
+import torch.distributed.rpc as rpc
+from torch import Tensor
+from torch._jit_internal import Future
+from torch.distributed.rpc import RRef
+from typing import Tuple  # pyre-ignore: unused import
+
+
+module_interface_cls = None
+
+
+def forward_async(self, *args, **kwargs):
+    args = (self.module_rref, self.device, self.is_device_map_set, *args)
+    kwargs = {**kwargs}
+    return rpc.rpc_async(
+        self.module_rref.owner(),
+        _remote_forward,
+        args,
+        kwargs,
+    )
+
+
+def forward(self, *args, **kwargs):
+    args = (self.module_rref, self.device, self.is_device_map_set, *args)
+    kwargs = {**kwargs}
+    ret_fut = rpc.rpc_async(
+        self.module_rref.owner(),
+        _remote_forward,
+        args,
+        kwargs,
+    )
+    return ret_fut.wait()
+
+
+_generated_methods = [
+    forward_async,
+    forward,
+]
+
+
+
+
+def _remote_forward(
+    module_rref: RRef[module_interface_cls], device: str, is_device_map_set: bool, *args, **kwargs):
+    module = module_rref.local_value()
+    device = torch.device(device)
+
+    if device.type != "cuda":
+        return module.forward(*args, **kwargs)
+
+    # If the module is on a cuda device,
+    # move any CPU tensor in args or kwargs to the same cuda device.
+    # Since torch script does not support generator expression,
+    # have to use concatenation instead of
+    # ``tuple(i.to(device) if isinstance(i, Tensor) else i for i in *args)``.
+    args = (*args,)
+    out_args: Tuple[()] = ()
+    for arg in args:
+        arg = (arg.to(device),) if isinstance(arg, Tensor) else (arg,)
+        out_args = out_args + arg
+
+    kwargs = {**kwargs}
+    for k, v in kwargs.items():
+        if isinstance(v, Tensor):
+            kwargs[k] = kwargs[k].to(device)
+
+    if is_device_map_set:
+        return module.forward(*out_args, **kwargs)
+
+    # If the device map is empty, then only CPU tensors are allowed to send over wire,
+    # so have to move any GPU tensor to CPU in the output.
+    # Since torch script does not support generator expression,
+    # have to use concatenation instead of
+    # ``tuple(i.cpu() if isinstance(i, Tensor) else i for i in module.forward(*out_args, **kwargs))``.
+    ret: Tuple[()] = ()
+    for i in module.forward(*out_args, **kwargs):
+        i = (i.cpu(),) if isinstance(i, Tensor) else (i,)
+        ret = ret + i
+    return ret
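tmpxros8ucc/_remote_module_non_scriptable.py appears to be the template that torch.distributed.nn's RemoteModule machinery instantiates into a temporary directory, so it was most likely swept into this commit rather than written by hand. Its explicit concatenation loops exist only because TorchScript cannot compile generator expressions; in eager Python the argument move reduces to the one-liner the comments allude to, sketched here:

import torch
from torch import Tensor

def move_args_to_device(args: tuple, device: torch.device) -> tuple:
    # Eager-mode equivalent of the tuple-concatenation loop in _remote_forward
    # (not TorchScript-compatible, shown only for clarity).
    return tuple(a.to(device) if isinstance(a, Tensor) else a for a in args)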
tokenizer.json CHANGED
@@ -106,6 +106,24 @@
       "rstrip": false,
       "normalized": false,
       "special": true
+    },
+    {
+      "id": 32020,
+      "content": "<|im_end|>",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 32021,
+      "content": "<|im_start|>",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
     }
   ],
   "normalizer": {
tokenizer_config.json CHANGED
@@ -89,22 +89,38 @@
       "rstrip": false,
       "single_word": false,
       "special": true
+    },
+    "32020": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "32021": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
     }
   },
   "additional_special_tokens": [
-    "[SQL]",
-    "[/DIALECT]",
-    "DIALECT"
+    "<|im_end|>",
+    "<|im_start|>"
   ],
-  "bos_token": "<s>",
+  "bos_token": "<|im_start|>",
+  "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
-  "eos_token": "</s>",
+  "eos_token": "<|im_end|>",
   "eot_token": "▁<EOT>",
   "fill_token": "<FILL_ME>",
   "legacy": null,
   "middle_token": "▁<MID>",
   "model_max_length": 1000000000000000019884624838656,
-  "pad_token": "[PAD]",
+  "pad_token": "<|im_end|>",
   "prefix_token": "▁<PRE>",
   "sp_model_kwargs": {},
   "suffix_token": "▁<SUF>",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dcc62bb1e6ac14e110070d8afe60cdbd0f23c0c49c4036bf2b417084e7372ebd
+oid sha256:2f0547de3fa49e3814734f06ac490ef2652b071cef7904b638f1c173e5fb6e99
 size 4664