izzcw committed
Commit 85adb4f · verified · 1 Parent(s): 3531165

End of training

README.md CHANGED
@@ -4,6 +4,7 @@ license: llama3
 base_model: meta-llama/Meta-Llama-3-70B-Instruct
 tags:
 - llama-factory
+- lora
 - generated_from_trainer
 model-index:
 - name: llama_3_70b_lora_sft_crafting
@@ -15,7 +16,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 # llama_3_70b_lora_sft_crafting
 
-This model is a fine-tuned version of [meta-llama/Meta-Llama-3-70B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct) on the None dataset.
+This model is a fine-tuned version of [meta-llama/Meta-Llama-3-70B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct) on the identity and the filtered_crafting_train_data datasets.
+It achieves the following results on the evaluation set:
+- Loss: 0.3451
 
 ## Model description
 
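As the updated README notes, this commit finalizes a LoRA adapter for Meta-Llama-3-70B-Instruct trained with LLaMA-Factory. For context, below is a minimal sketch of how such an adapter is typically loaded for inference with 🤗 PEFT (LLaMA-Factory saves adapters in a PEFT-compatible format). The repo id `izzcw/llama_3_70b_lora_sft_crafting` is inferred from the commit author and model name, so treat it as an assumption rather than a confirmed path.

```python
# Minimal sketch: attach the LoRA adapter from this repo to the base model.
# Assumes a PEFT-compatible adapter and a hypothetical repo id inferred
# from the commit author and model name.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "meta-llama/Meta-Llama-3-70B-Instruct"
adapter_id = "izzcw/llama_3_70b_lora_sft_crafting"  # hypothetical repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(
    base_id, torch_dtype=torch.bfloat16, device_map="auto"
)
model = PeftModel.from_pretrained(base, adapter_id)  # apply LoRA weights
model.eval()
```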
all_results.json ADDED
@@ -0,0 +1,12 @@
+{
+    "epoch": 1.0,
+    "eval_loss": 0.34508389234542847,
+    "eval_runtime": 15.1966,
+    "eval_samples_per_second": 4.869,
+    "eval_steps_per_second": 0.658,
+    "total_flos": 2776730917928960.0,
+    "train_loss": 0.6443910087857928,
+    "train_runtime": 2329.4992,
+    "train_samples_per_second": 1.538,
+    "train_steps_per_second": 0.012
+}
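The README's "Loss: 0.3451" is this file's `eval_loss` rounded to four decimals. A short sketch of how one might read these metrics and derive a perplexity figure, assuming `eval_loss` is mean token-level cross-entropy in nats (the usual convention for Hugging Face Trainer causal-LM evaluation):

```python
# Sketch: read all_results.json and derive perplexity from the eval loss.
import json
import math

with open("all_results.json") as f:
    results = json.load(f)

eval_loss = results["eval_loss"]                  # 0.34508389234542847
print(f"eval loss:  {eval_loss:.4f}")             # 0.3451, as in the README
print(f"perplexity: {math.exp(eval_loss):.3f}")   # ~1.412, if loss is in nats
```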
eval_results.json ADDED
@@ -0,0 +1,7 @@
+{
+    "epoch": 1.0,
+    "eval_loss": 0.34508389234542847,
+    "eval_runtime": 15.1966,
+    "eval_samples_per_second": 4.869,
+    "eval_steps_per_second": 0.658
+}
train_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 1.0,
+    "total_flos": 2776730917928960.0,
+    "train_loss": 0.6443910087857928,
+    "train_runtime": 2329.4992,
+    "train_samples_per_second": 1.538,
+    "train_steps_per_second": 0.012
+}
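The throughput figures here are internally consistent with `global_step = 28` from trainer_state.json below. A small sanity-check sketch; the sample count on the last line is an inference from the logged rates, not a value recorded anywhere in the commit:

```python
# Sketch: cross-check reported throughput against step count and runtime.
runtime_s = 2329.4992   # train_runtime from train_results.json
global_step = 28        # from trainer_state.json in this commit

print(f"steps/s: {global_step / runtime_s:.3f}")  # ~0.012, matches the log
# Inferred, not logged: approximate samples processed in the single epoch.
print(f"samples: {1.538 * runtime_s:.0f}")        # ~3583
```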
trainer_state.json ADDED
@@ -0,0 +1,56 @@
+{
+    "best_metric": null,
+    "best_model_checkpoint": null,
+    "epoch": 1.0,
+    "eval_steps": 30,
+    "global_step": 28,
+    "is_hyper_param_search": false,
+    "is_local_process_zero": true,
+    "is_world_process_zero": true,
+    "log_history": [
+        {
+            "epoch": 0.35714285714285715,
+            "grad_norm": 0.4569597298761558,
+            "learning_rate": 8.18711994874345e-05,
+            "loss": 0.9004,
+            "step": 10
+        },
+        {
+            "epoch": 0.7142857142857143,
+            "grad_norm": 0.4078513107441401,
+            "learning_rate": 2.3208660251050158e-05,
+            "loss": 0.548,
+            "step": 20
+        },
+        {
+            "epoch": 1.0,
+            "step": 28,
+            "total_flos": 2776730917928960.0,
+            "train_loss": 0.6443910087857928,
+            "train_runtime": 2329.4992,
+            "train_samples_per_second": 1.538,
+            "train_steps_per_second": 0.012
+        }
+    ],
+    "logging_steps": 10,
+    "max_steps": 28,
+    "num_input_tokens_seen": 0,
+    "num_train_epochs": 1,
+    "save_steps": 500,
+    "stateful_callbacks": {
+        "TrainerControl": {
+            "args": {
+                "should_epoch_stop": false,
+                "should_evaluate": false,
+                "should_log": false,
+                "should_save": true,
+                "should_training_stop": true
+            },
+            "attributes": {}
+        }
+    },
+    "total_flos": 2776730917928960.0,
+    "train_batch_size": 1,
+    "trial_name": null,
+    "trial_params": null
+}
training_loss.png ADDED
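The committed training_loss.png presumably visualizes the `loss` entries in trainer_state.json's `log_history`. A hedged sketch of how such a curve can be reproduced from the state file; the styling of the actual committed plot is unknown, this only shows where the data comes from:

```python
# Sketch: plot a training-loss curve like training_loss.png from the
# log_history entries in trainer_state.json.
import json
import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the periodic logging entries that carry a "loss" field
# (the final summary entry reports "train_loss" instead).
points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
steps, losses = zip(*points)  # (10, 20) -> (0.9004, 0.548)

plt.plot(steps, losses, marker="o")
plt.xlabel("step")
plt.ylabel("training loss")
plt.title("llama_3_70b_lora_sft_crafting")
plt.savefig("training_loss_reproduced.png")
```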