iyashnayi committed
Commit e9a2a08 · verified · 1 Parent(s): 4e29401

iyashnayi/SocioLens-llama-3.2-3B

README.md CHANGED
@@ -27,7 +27,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/yashnayi00-university-of-new-haven/huggingface/runs/r1mh79zm)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/yashnayi00-university-of-new-haven/huggingface/runs/2qg27orl)
 
 
 This model was trained with SFT.
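The README's usage snippet and the W&B badge point to a TRL-based SFT run. As a rough illustration only (the training script is not part of this commit; the dataset file and hyperparameters below are placeholder assumptions), such a run might look like:

```python
# Sketch of an SFT run matching the README's claim; the dataset file and
# config values are assumptions, not taken from this repo.
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

train_ds = load_dataset("json", data_files="train.json", split="train")  # hypothetical data

trainer = SFTTrainer(
    model="meta-llama/Llama-3.2-3B",   # base model named in adapter_config.json
    train_dataset=train_ds,
    args=SFTConfig(
        output_dir="SocioLens-llama-3.2-3B",
        report_to="wandb",             # produces the W&B run linked above
    ),
)
trainer.train()
```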
adapter_config.json CHANGED
@@ -1,23 +1,36 @@
 {
+  "alpha_pattern": {},
   "auto_mapping": null,
   "base_model_name_or_path": "meta-llama/Llama-3.2-3B",
   "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
   "fan_in_fan_out": false,
   "inference_mode": true,
   "init_lora_weights": true,
+  "layer_replication": null,
   "layers_pattern": null,
   "layers_to_transform": null,
-  "lora_alpha": 32,
-  "lora_dropout": 0.05,
+  "loftq_config": {},
+  "lora_alpha": 16,
+  "lora_bias": false,
+  "lora_dropout": 0.15,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 16,
+  "r": 8,
+  "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "q_proj",
     "k_proj",
-    "v_proj",
-    "o_proj"
+    "o_proj",
+    "q_proj",
+    "v_proj"
   ],
-  "task_type": "CAUSAL_LM"
+  "task_type": "CAUSAL_LM",
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_rslora": false
 }
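The config change halves the LoRA rank (r: 16 → 8) and alpha (32 → 16), raises dropout from 0.05 to 0.15, and keeps the same four attention projections; the extra keys (use_dora, use_rslora, loftq_config, and so on) are what a newer PEFT release serializes by default. A minimal sketch of recreating the updated adapter with PEFT (the surrounding training code is assumed, not taken from this commit):

```python
# Reconstruction of the updated adapter settings via PEFT's LoraConfig.
# Values mirror the new adapter_config.json; this is a sketch, not the
# author's actual training script.
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

config = LoraConfig(
    r=8,                # rank halved from 16
    lora_alpha=16,      # halved from 32
    lora_dropout=0.15,  # raised from 0.05
    bias="none",
    target_modules=["k_proj", "o_proj", "q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)

base = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-3B")
model = get_peft_model(base, config)
model.print_trainable_parameters()  # trainable count drops roughly in half vs. r=16
```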
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:58628071bf6251f15c43ab108a9dfcc619577e514e8a4009144d6e1305d5537e
-size 36730224
+oid sha256:4c3be50ee8fbeaeb3e67984c730e276a795074a7c46176ffa70c37f34e226b8d
+size 18379784
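The adapter file shrinks from 36,730,224 to 18,379,784 bytes, which is what the config change predicts: LoRA parameter count scales linearly with r, so halving the rank halves the weights. A back-of-the-envelope check, assuming standard Llama-3.2-3B shapes (hidden size 3072, 28 layers, 8 KV heads of head dim 128) and float32 storage:

```python
# Rough check that the new file size matches r=8. Model shapes and the
# float32 assumption are inferred, not stated anywhere in this commit.
hidden, kv_dim, layers, r, bytes_per_param = 3072, 1024, 28, 8, 4

per_layer = (
    r * (hidden + hidden)    # q_proj: A is r x 3072, B is 3072 x r
    + r * (hidden + kv_dim)  # k_proj projects 3072 -> 1024
    + r * (hidden + kv_dim)  # v_proj projects 3072 -> 1024
    + r * (hidden + hidden)  # o_proj: 3072 -> 3072
)
params = layers * per_layer      # 4,587,520 LoRA parameters
print(params * bytes_per_param)  # 18,350,080 bytes
```

That lands within about 30 KB of the actual 18,379,784-byte file, a gap consistent with the safetensors header.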
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:67f5db76b3df48d7826532eb3fd17580a466d67b16ae396b9bdcd8afc3c629b2
+oid sha256:7dd2014c21bd34ab1494eee9ca8bccc38c94b5a65cb51c6169b0081d798ff27f
 size 5624
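training_args.bin keeps its exact 5,624-byte size but gets a new hash, consistent with changed field values inside the pickled transformers.TrainingArguments object. One way to inspect what changed (note that unpickling executes arbitrary code, so only load files you trust):

```python
# training_args.bin is a pickled transformers.TrainingArguments; load it to
# compare hyperparameters between the two revisions. The fields printed
# below are examples, not a claim about what actually differs here.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```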