Commit fa37edd (verified), committed by danielhanchen
Parent: 8710edd

Add files using upload-large-folder tool

.gitattributes CHANGED
@@ -33,3 +33,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+GLM-4-32B-0414-UD-IQ1_S.gguf filter=lfs diff=lfs merge=lfs -text
+GLM-4-32B-0414-UD-IQ1_M.gguf filter=lfs diff=lfs merge=lfs -text
+GLM-4-32B-0414-UD-IQ2_M.gguf filter=lfs diff=lfs merge=lfs -text
+GLM-4-32B-0414-UD-Q2_K_XL.gguf filter=lfs diff=lfs merge=lfs -text
+GLM-4-32B-0414-UD-Q4_K_XL.gguf filter=lfs diff=lfs merge=lfs -text
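The five new .gitattributes patterns route the GGUF weights through Git LFS, while small text files such as config.json stay in regular git. A minimal sketch of that routing is below; the file list comes from this commit, but note that fnmatch is only an approximation of gitattributes glob semantics.

```python
# Sketch (not part of the commit): which files in this commit match
# the LFS patterns declared in .gitattributes.
from fnmatch import fnmatch

lfs_patterns = [
    "*.zip", "*.zst", "*tfevents*",
    "GLM-4-32B-0414-UD-IQ1_S.gguf",
    "GLM-4-32B-0414-UD-IQ1_M.gguf",
    "GLM-4-32B-0414-UD-IQ2_M.gguf",
    "GLM-4-32B-0414-UD-Q2_K_XL.gguf",
    "GLM-4-32B-0414-UD-Q4_K_XL.gguf",
]

for name in ["GLM-4-32B-0414-UD-Q4_K_XL.gguf", "config.json"]:
    tracked = any(fnmatch(name, p) for p in lfs_patterns)
    print(f"{name}: {'LFS pointer' if tracked else 'stored in git directly'}")
```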
GLM-4-32B-0414-UD-IQ1_M.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ccc51d350265056d537920d2bb5e11b3a6b66a3dbb287dbd03dd40747c08195
+size 8282558816
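Each GGUF added in this commit is stored in git as a three-line Git LFS pointer like the one above: the spec version, a sha256 object id, and the byte size. A small sketch for parsing such a pointer and checking a downloaded blob against it follows; the helper names are hypothetical, only the pointer format itself comes from the commit.

```python
# Sketch: parse a Git LFS pointer and verify a downloaded blob.
import hashlib

pointer_text = """\
version https://git-lfs.github.com/spec/v1
oid sha256:3ccc51d350265056d537920d2bb5e11b3a6b66a3dbb287dbd03dd40747c08195
size 8282558816
"""

def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "<key> <value>"; oid is "sha256:<hex digest>".
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)
    return {"version": fields["version"], "algo": algo,
            "oid": digest, "size": int(fields["size"])}

def verify_blob(path: str, pointer: dict, chunk: int = 1 << 20) -> bool:
    # Stream the file so multi-GiB blobs don't need to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest() == pointer["oid"]

ptr = parse_lfs_pointer(pointer_text)
print(ptr["size"])  # 8282558816 bytes for the IQ1_M file above
# verify_blob("GLM-4-32B-0414-UD-IQ1_M.gguf", ptr) after downloading
```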
GLM-4-32B-0414-UD-IQ1_S.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cbb9e299162d075c947510b7e0727b458ddbf26c3ba78d7d020d59603015fb3c
+size 7697527136
GLM-4-32B-0414-UD-IQ2_M.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5a3e9d84bbc6e95af8c1031960de910ee0063a42b3f36ad75e7da09592fe44d
+size 11471479136
GLM-4-32B-0414-UD-Q2_K_XL.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51bd92401ff8dd6a8ff5abd2c0469158930886f0aa33f922a2d3d36d5b722561
+size 12813218144
GLM-4-32B-0414-UD-Q4_K_XL.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04354c4b7a90dedb4c996ff94c5f90cec1e9c195f045c5d1652b9173cf3f488f
+size 19918569824
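The pointer sizes above also give a rough effective bits-per-weight figure for each quant. The sketch below assumes a total parameter count of about 32.6B, inferred from the model name and the config.json shapes further down, not stated anywhere in this commit.

```python
# Rough bits-per-weight per quant from the LFS pointer sizes above.
PARAMS = 32.6e9  # assumed total parameter count, not from the commit

sizes = {
    "UD-IQ1_S":    7_697_527_136,
    "UD-IQ1_M":    8_282_558_816,
    "UD-IQ2_M":   11_471_479_136,
    "UD-Q2_K_XL": 12_813_218_144,
    "UD-Q4_K_XL": 19_918_569_824,
}

for quant, nbytes in sizes.items():
    bpw = nbytes * 8 / PARAMS
    print(f"{quant:11s} {nbytes / 2**30:6.1f} GiB  ~{bpw:.2f} bits/weight")
```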
config.json ADDED
@@ -0,0 +1,28 @@
+{
+  "architectures": [
+    "Glm4ForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "eos_token_id": 151336,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 6144,
+  "initializer_range": 0.02,
+  "intermediate_size": 23040,
+  "max_position_embeddings": 32768,
+  "model_type": "glm4",
+  "num_attention_heads": 48,
+  "num_hidden_layers": 61,
+  "num_key_value_heads": 2,
+  "pad_token_id": 151330,
+  "partial_rotary_factor": 0.5,
+  "rms_norm_eps": 1e-05,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.51.3",
+  "unsloth_fixed": true,
+  "use_cache": true,
+  "vocab_size": 151552
+}
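One consequence of this config worth noting: with 48 query heads but only 2 key/value heads at head_dim 128, the GQA KV cache is small. Below is a back-of-envelope sketch under standard GQA assumptions and a bf16 cache; the numbers are an illustration, not figures stated in the commit.

```python
# KV-cache size estimate from the config.json fields above,
# assuming a standard GQA layout with 2-byte (bf16) cache entries.
cfg = {
    "num_hidden_layers": 61,
    "num_key_value_heads": 2,
    "head_dim": 128,
    "max_position_embeddings": 32768,
}
BYTES_PER_VALUE = 2  # bf16

# Per token, each layer caches K and V: num_key_value_heads * head_dim each.
per_token = (2 * cfg["num_hidden_layers"] * cfg["num_key_value_heads"]
             * cfg["head_dim"] * BYTES_PER_VALUE)
full_ctx = per_token * cfg["max_position_embeddings"]
print(f"{per_token} bytes/token, ~{full_ctx / 2**30:.1f} GiB at the full "
      f"{cfg['max_position_embeddings']}-token context")
```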