Training in progress, step 200
- adapter_config.json +17 -0
- adapter_model.safetensors +3 -0
- special_tokens_map.json +24 -0
- tokenizer.json +0 -0
- tokenizer.model +3 -0
- tokenizer_config.json +0 -0
- trainer_log.jsonl +41 -0
- training_args.bin +3 -0
adapter_config.json
ADDED
@@ -0,0 +1,17 @@
{
  "auto_mapping": null,
  "base_model_name_or_path": "mistralai/Mistral-7B-Instruct-v0.3",
  "inference_mode": true,
  "num_attention_heads": 32,
  "num_layers": 32,
  "num_transformer_submodules": 1,
  "num_virtual_tokens": 100,
  "peft_type": "PROMPT_TUNING",
  "prompt_tuning_init": "RANDOM",
  "prompt_tuning_init_text": null,
  "revision": null,
  "task_type": "CAUSAL_LM",
  "token_dim": 4096,
  "tokenizer_kwargs": null,
  "tokenizer_name_or_path": null
}
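This config describes a PEFT prompt-tuning adapter for mistralai/Mistral-7B-Instruct-v0.3: 100 randomly initialized virtual tokens of dimension 4096, which roughly matches the 1,638,528-byte adapter_model.safetensors below (100 × 4096 fp32 values plus the safetensors header). A minimal sketch, assuming a local copy of this checkpoint (the `adapter_dir` path is a placeholder, not part of this commit), of how such an adapter could be loaded with PEFT:

```python
# Minimal sketch (not part of this commit): load the prompt-tuning adapter with PEFT.
# `adapter_dir` is a hypothetical local path to the files in this checkpoint.
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

adapter_dir = "./checkpoint-200"

peft_config = PeftConfig.from_pretrained(adapter_dir)    # parses adapter_config.json
base_model = AutoModelForCausalLM.from_pretrained(
    peft_config.base_model_name_or_path                  # mistralai/Mistral-7B-Instruct-v0.3
)
tokenizer = AutoTokenizer.from_pretrained(adapter_dir)
model = PeftModel.from_pretrained(base_model, adapter_dir)  # loads adapter_model.safetensors

# At inference, the 100 learned virtual-token embeddings (num_virtual_tokens x token_dim)
# are prepended to the prompt's input embeddings.
```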
adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3dd2241425c283ab7c4beca66f52599d4962107a3f219edf5de01f2515d7b243
size 1638528
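The three lines above are a Git LFS pointer rather than the weights themselves; the ~1.6 MB safetensors blob is fetched on clone or via `git lfs pull`. As a rough sketch (file path is a placeholder), a downloaded object can be checked against the pointer's oid and size:

```python
# Sketch: verify a downloaded LFS object against its pointer's sha256 oid and size.
# The file path is a hypothetical placeholder.
import hashlib
import os

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

ok = verify_lfs_object(
    "adapter_model.safetensors",
    "3dd2241425c283ab7c4beca66f52599d4962107a3f219edf5de01f2515d7b243",
    1638528,
)
```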
special_tokens_map.json
ADDED
@@ -0,0 +1,24 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "</s>",
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
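Note that `pad_token` is mapped to the EOS token `</s>`; the Mistral tokenizer ships no dedicated padding token, so reusing EOS for padding is a common choice in causal-LM fine-tuning. A quick check, assuming a local copy of this checkpoint (path is a placeholder):

```python
# Sketch: load the tokenizer saved in this checkpoint and inspect its special tokens.
# The directory path is a hypothetical placeholder.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./checkpoint-200")
print(tok.bos_token, tok.eos_token, tok.unk_token, tok.pad_token)
# expected: <s> </s> <unk> </s>  (pad token reuses EOS, per special_tokens_map.json)
```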
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:37f00374dea48658ee8f5d0f21895b9bc55cb0103939607c8185bfd1c6ca1f89
size 587404
tokenizer_config.json
ADDED
The diff for this file is too large to render.
trainer_log.jsonl
ADDED
@@ -0,0 +1,41 @@
{"current_steps": 5, "total_steps": 40000, "loss": 4.2449, "lr": 0.29999999259779675, "epoch": 0.00022635188664297517, "percentage": 0.01, "elapsed_time": "0:00:05", "remaining_time": "12:22:50", "throughput": 1642.5, "total_tokens": 9152}
{"current_steps": 10, "total_steps": 40000, "loss": 1.7749, "lr": 0.29999996252634736, "epoch": 0.00045270377328595035, "percentage": 0.03, "elapsed_time": "0:00:09", "remaining_time": "10:04:35", "throughput": 1890.83, "total_tokens": 17152}
{"current_steps": 15, "total_steps": 40000, "loss": 0.7222, "lr": 0.2999999093230187, "epoch": 0.0006790556599289255, "percentage": 0.04, "elapsed_time": "0:00:12", "remaining_time": "9:17:33", "throughput": 2019.48, "total_tokens": 25344}
{"current_steps": 20, "total_steps": 40000, "loss": 0.5325, "lr": 0.299999832987819, "epoch": 0.0009054075465719007, "percentage": 0.05, "elapsed_time": "0:00:16", "remaining_time": "8:57:04", "throughput": 2114.1, "total_tokens": 34080}
{"current_steps": 25, "total_steps": 40000, "loss": 0.4788, "lr": 0.29999973352076004, "epoch": 0.0011317594332148758, "percentage": 0.06, "elapsed_time": "0:00:19", "remaining_time": "8:36:14", "throughput": 2137.59, "total_tokens": 41408}
{"current_steps": 30, "total_steps": 40000, "loss": 0.4194, "lr": 0.2999996109218572, "epoch": 0.001358111319857851, "percentage": 0.07, "elapsed_time": "0:00:22", "remaining_time": "8:29:25", "throughput": 2174.57, "total_tokens": 49888}
{"current_steps": 35, "total_steps": 40000, "loss": 0.4057, "lr": 0.2999994651911293, "epoch": 0.0015844632065008261, "percentage": 0.09, "elapsed_time": "0:00:26", "remaining_time": "8:21:52", "throughput": 2199.93, "total_tokens": 58016}
{"current_steps": 40, "total_steps": 40000, "loss": 0.3855, "lr": 0.2999992963285989, "epoch": 0.0018108150931438014, "percentage": 0.1, "elapsed_time": "0:00:29", "remaining_time": "8:16:24", "throughput": 2211.03, "total_tokens": 65920}
{"current_steps": 45, "total_steps": 40000, "loss": 0.4167, "lr": 0.29999910433429194, "epoch": 0.0020371669797867764, "percentage": 0.11, "elapsed_time": "0:00:33", "remaining_time": "8:12:38", "throughput": 2217.53, "total_tokens": 73824}
{"current_steps": 50, "total_steps": 40000, "loss": 0.4321, "lr": 0.29999888920823814, "epoch": 0.0022635188664297517, "percentage": 0.12, "elapsed_time": "0:00:36", "remaining_time": "8:11:27", "throughput": 2241.39, "total_tokens": 82720}
{"current_steps": 55, "total_steps": 40000, "loss": 0.4474, "lr": 0.29999865095047057, "epoch": 0.002489870753072727, "percentage": 0.14, "elapsed_time": "0:00:40", "remaining_time": "8:07:04", "throughput": 2245.75, "total_tokens": 90368}
{"current_steps": 60, "total_steps": 40000, "loss": 0.4511, "lr": 0.29999838956102604, "epoch": 0.002716222639715702, "percentage": 0.15, "elapsed_time": "0:00:43", "remaining_time": "8:03:36", "throughput": 2247.08, "total_tokens": 97952}
{"current_steps": 65, "total_steps": 40000, "loss": 0.468, "lr": 0.29999810503994484, "epoch": 0.002942574526358677, "percentage": 0.16, "elapsed_time": "0:00:46", "remaining_time": "7:59:56", "throughput": 2247.57, "total_tokens": 105344}
{"current_steps": 70, "total_steps": 40000, "loss": 0.4196, "lr": 0.29999779738727084, "epoch": 0.0031689264130016523, "percentage": 0.18, "elapsed_time": "0:00:50", "remaining_time": "7:58:04", "throughput": 2253.38, "total_tokens": 113312}
{"current_steps": 75, "total_steps": 40000, "loss": 0.3871, "lr": 0.29999746660305154, "epoch": 0.0033952782996446275, "percentage": 0.19, "elapsed_time": "0:00:53", "remaining_time": "7:56:37", "throughput": 2253.41, "total_tokens": 121056}
{"current_steps": 80, "total_steps": 40000, "loss": 0.44, "lr": 0.2999971126873379, "epoch": 0.0036216301862876028, "percentage": 0.2, "elapsed_time": "0:00:57", "remaining_time": "7:55:19", "throughput": 2253.62, "total_tokens": 128800}
{"current_steps": 85, "total_steps": 40000, "loss": 0.4703, "lr": 0.2999967356401845, "epoch": 0.003847982072930578, "percentage": 0.21, "elapsed_time": "0:01:00", "remaining_time": "7:56:21", "throughput": 2266.51, "total_tokens": 137952}
{"current_steps": 90, "total_steps": 40000, "loss": 0.4482, "lr": 0.29999633546164944, "epoch": 0.004074333959573553, "percentage": 0.22, "elapsed_time": "0:01:04", "remaining_time": "7:54:55", "throughput": 2268.33, "total_tokens": 145760}
{"current_steps": 95, "total_steps": 40000, "loss": 0.4271, "lr": 0.29999591215179444, "epoch": 0.0043006858462165285, "percentage": 0.24, "elapsed_time": "0:01:07", "remaining_time": "7:53:55", "throughput": 2270.91, "total_tokens": 153728}
{"current_steps": 100, "total_steps": 40000, "loss": 0.3741, "lr": 0.2999954657106849, "epoch": 0.004527037732859503, "percentage": 0.25, "elapsed_time": "0:01:11", "remaining_time": "7:53:35", "throughput": 2272.26, "total_tokens": 161824}
{"current_steps": 105, "total_steps": 40000, "loss": 0.4156, "lr": 0.2999949961383896, "epoch": 0.004753389619502478, "percentage": 0.26, "elapsed_time": "0:01:14", "remaining_time": "7:52:31", "throughput": 2272.44, "total_tokens": 169568}
{"current_steps": 110, "total_steps": 40000, "loss": 0.4059, "lr": 0.2999945034349809, "epoch": 0.004979741506145454, "percentage": 0.27, "elapsed_time": "0:01:18", "remaining_time": "7:51:41", "throughput": 2275.68, "total_tokens": 177600}
{"current_steps": 115, "total_steps": 40000, "loss": 0.3889, "lr": 0.2999939876005348, "epoch": 0.005206093392788429, "percentage": 0.29, "elapsed_time": "0:01:21", "remaining_time": "7:51:34", "throughput": 2278.92, "total_tokens": 185920}
{"current_steps": 120, "total_steps": 40000, "loss": 0.3901, "lr": 0.29999344863513094, "epoch": 0.005432445279431404, "percentage": 0.3, "elapsed_time": "0:01:25", "remaining_time": "7:51:07", "throughput": 2279.84, "total_tokens": 193920}
{"current_steps": 125, "total_steps": 40000, "loss": 0.405, "lr": 0.2999928865388523, "epoch": 0.005658797166074379, "percentage": 0.31, "elapsed_time": "0:01:28", "remaining_time": "7:49:47", "throughput": 2281.12, "total_tokens": 201568}
{"current_steps": 130, "total_steps": 40000, "loss": 0.3971, "lr": 0.29999230131178567, "epoch": 0.005885149052717354, "percentage": 0.33, "elapsed_time": "0:01:31", "remaining_time": "7:48:59", "throughput": 2282.69, "total_tokens": 209440}
{"current_steps": 135, "total_steps": 40000, "loss": 0.3696, "lr": 0.2999916929540212, "epoch": 0.00611150093936033, "percentage": 0.34, "elapsed_time": "0:01:35", "remaining_time": "7:48:41", "throughput": 2285.67, "total_tokens": 217664}
{"current_steps": 140, "total_steps": 40000, "loss": 0.3877, "lr": 0.29999106146565285, "epoch": 0.0063378528260033045, "percentage": 0.35, "elapsed_time": "0:01:38", "remaining_time": "7:48:24", "throughput": 2285.17, "total_tokens": 225568}
{"current_steps": 145, "total_steps": 40000, "loss": 0.3761, "lr": 0.29999040684677786, "epoch": 0.00656420471264628, "percentage": 0.36, "elapsed_time": "0:01:42", "remaining_time": "7:48:58", "throughput": 2294.04, "total_tokens": 234848}
{"current_steps": 150, "total_steps": 40000, "loss": 0.3868, "lr": 0.2999897290974972, "epoch": 0.006790556599289255, "percentage": 0.38, "elapsed_time": "0:01:45", "remaining_time": "7:48:37", "throughput": 2296.35, "total_tokens": 243040}
{"current_steps": 155, "total_steps": 40000, "loss": 0.3892, "lr": 0.2999890282179155, "epoch": 0.00701690848593223, "percentage": 0.39, "elapsed_time": "0:01:49", "remaining_time": "7:48:17", "throughput": 2296.5, "total_tokens": 251008}
{"current_steps": 160, "total_steps": 40000, "loss": 0.3765, "lr": 0.29998830420814077, "epoch": 0.0072432603725752056, "percentage": 0.4, "elapsed_time": "0:01:52", "remaining_time": "7:48:19", "throughput": 2297.18, "total_tokens": 259232}
{"current_steps": 165, "total_steps": 40000, "loss": 0.3898, "lr": 0.2999875570682846, "epoch": 0.00746961225921818, "percentage": 0.41, "elapsed_time": "0:01:56", "remaining_time": "7:47:40", "throughput": 2298.92, "total_tokens": 267200}
{"current_steps": 170, "total_steps": 40000, "loss": 0.4465, "lr": 0.2999867867984623, "epoch": 0.007695964145861156, "percentage": 0.43, "elapsed_time": "0:01:59", "remaining_time": "7:47:38", "throughput": 2305.19, "total_tokens": 276064}
{"current_steps": 175, "total_steps": 40000, "loss": 0.426, "lr": 0.29998599339879267, "epoch": 0.007922316032504132, "percentage": 0.44, "elapsed_time": "0:02:03", "remaining_time": "7:46:47", "throughput": 2305.81, "total_tokens": 283776}
{"current_steps": 180, "total_steps": 40000, "loss": 0.3969, "lr": 0.29998517686939796, "epoch": 0.008148667919147106, "percentage": 0.45, "elapsed_time": "0:02:06", "remaining_time": "7:46:40", "throughput": 2307.51, "total_tokens": 292064}
{"current_steps": 185, "total_steps": 40000, "loss": 0.3809, "lr": 0.29998433721040413, "epoch": 0.008375019805790081, "percentage": 0.46, "elapsed_time": "0:02:10", "remaining_time": "7:46:21", "throughput": 2308.38, "total_tokens": 300128}
{"current_steps": 190, "total_steps": 40000, "loss": 0.3908, "lr": 0.29998347442194073, "epoch": 0.008601371692433057, "percentage": 0.47, "elapsed_time": "0:02:13", "remaining_time": "7:46:29", "throughput": 2310.92, "total_tokens": 308704}
{"current_steps": 195, "total_steps": 40000, "loss": 0.4256, "lr": 0.2999825885041407, "epoch": 0.008827723579076031, "percentage": 0.49, "elapsed_time": "0:02:16", "remaining_time": "7:46:01", "throughput": 2309.93, "total_tokens": 316416}
{"current_steps": 200, "total_steps": 40000, "loss": 0.3965, "lr": 0.29998167945714077, "epoch": 0.009054075465719007, "percentage": 0.5, "elapsed_time": "0:02:20", "remaining_time": "7:45:38", "throughput": 2310.99, "total_tokens": 324448}
{"current_steps": 200, "total_steps": 40000, "eval_loss": 0.4141446352005005, "epoch": 0.009054075465719007, "percentage": 0.5, "elapsed_time": "0:15:11", "remaining_time": "2 days, 2:21:42", "throughput": 356.12, "total_tokens": 324448}
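The log covers the first 200 of 40,000 scheduled steps: training loss falls from 4.24 to roughly 0.40, with an eval_loss of about 0.414 at step 200. The learning rate near 0.3 is far higher than typical full fine-tuning rates, which is expected here since prompt tuning updates only the virtual-token embeddings. A small sketch (file path is a placeholder) for summarizing the log:

```python
# Sketch: summarize the JSONL trainer log. The file path is a hypothetical placeholder.
import json

train_rows, eval_rows = [], []
with open("trainer_log.jsonl") as f:
    for line in f:
        row = json.loads(line)
        (eval_rows if "eval_loss" in row else train_rows).append(row)

print("last train loss:", train_rows[-1]["loss"])           # 0.3965 at step 200
print("eval loss @ 200:", eval_rows[-1]["eval_loss"])        # ~0.4141
print("train throughput:", train_rows[-1]["throughput"])     # ~2311 tokens/s
```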
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0542c44aac1cca86ae6ae109817b82dfe16975247e7520c12e8a399ce4761fb6
size 5752