jjeccles committed
Commit adf2231 · verified · 1 parent: 3bd81c4

Upload config.json with huggingface_hub

Files changed (1)
  1. config.json +62 -0
config.json ADDED
@@ -0,0 +1,62 @@
+{
+  "architectures": [
+    "Qwen2ForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "eos_token_id": 151645,
+  "hidden_act": "silu",
+  "hidden_size": 2048,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 32768,
+  "max_window_layers": 70,
+  "model_type": "qwen2",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 36,
+  "num_key_value_heads": 2,
+  "pad_token_id": 151654,
+  "quantization_config": {
+    "act_bits": 16,
+    "act_data_type": "int",
+    "act_dynamic": true,
+    "act_group_size": 128,
+    "act_sym": false,
+    "amp": true,
+    "autoround_version": "0.4.7",
+    "batch_size": 8,
+    "bits": 4,
+    "damp_percent": 0.01,
+    "data_type": "int",
+    "desc_act": false,
+    "enable_minmax_tuning": true,
+    "enable_norm_bias_tuning": false,
+    "enable_quanted_input": true,
+    "gradient_accumulate_steps": 1,
+    "group_size": 128,
+    "iters": 1000,
+    "low_gpu_mem_usage": true,
+    "lr": 0.001,
+    "minmax_lr": 0.001,
+    "nsamples": 512,
+    "quant_method": "gptq",
+    "scale_dtype": "torch.float16",
+    "seqlen": 2048,
+    "super_bits": null,
+    "super_group_size": null,
+    "sym": false,
+    "to_quant_block_names": null,
+    "true_sequential": false
+  },
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 1000000.0,
+  "sliding_window": 32768,
+  "tie_word_embeddings": true,
+  "torch_dtype": "float16",
+  "transformers_version": "4.51.1",
+  "unsloth_fixed": true,
+  "unsloth_version": "2025.3.9",
+  "use_cache": true,
+  "use_sliding_window": false,
+  "vocab_size": 151936
+}
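
For context, a minimal sketch of inspecting this config with the transformers library. The repository id "jjeccles/<repo-id>" is a placeholder (the actual model id is not shown in this diff), and the attribute names below simply mirror the keys in the JSON above.

# A minimal sketch, assuming a placeholder repo id -- substitute the real one.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("jjeccles/<repo-id>")

print(config.model_type)             # "qwen2"
head_dim = config.hidden_size // config.num_attention_heads
print(head_dim)                      # 2048 // 16 = 128
print(config.num_key_value_heads)    # 2 -> grouped-query attention: 16 query heads share 2 KV heads
print(config.quantization_config["quant_method"])  # "gptq" (4-bit, produced with AutoRound 0.4.7)

Since "quant_method" is "gptq" with "bits": 4 and "group_size": 128, loading the checkpoint with AutoModelForCausalLM.from_pretrained should route through transformers' GPTQ integration, assuming a compatible GPTQ backend is installed in the environment.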